repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
junkTzu/kernel-MB860 | drivers/video/p9100.c | 8175 | 9214 | /* p9100.c: P9100 frame buffer driver
*
* Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net)
* Copyright 1999 Derrick J Brashear (shadow@dementia.org)
*
* Driver layout based loosely on tgafb.c, see that file for credits.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/mm.h>
#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/fbio.h>
#include "sbuslib.h"
/*
* Local functions.
*/
static int p9100_setcolreg(unsigned, unsigned, unsigned, unsigned,
unsigned, struct fb_info *);
static int p9100_blank(int, struct fb_info *);
static int p9100_mmap(struct fb_info *, struct vm_area_struct *);
static int p9100_ioctl(struct fb_info *, unsigned int, unsigned long);
/*
* Frame buffer operations
*/
static struct fb_ops p9100_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = p9100_setcolreg,
.fb_blank = p9100_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_mmap = p9100_mmap,
.fb_ioctl = p9100_ioctl,
#ifdef CONFIG_COMPAT
.fb_compat_ioctl = sbusfb_compat_ioctl,
#endif
};
/* P9100 control registers */
#define P9100_SYSCTL_OFF 0x0UL
#define P9100_VIDEOCTL_OFF 0x100UL
#define P9100_VRAMCTL_OFF 0x180UL
#define P9100_RAMDAC_OFF 0x200UL
#define P9100_VIDEOCOPROC_OFF 0x400UL
/* P9100 command registers */
#define P9100_CMD_OFF 0x0UL
/* P9100 framebuffer memory */
#define P9100_FB_OFF 0x0UL
/* 3 bits: 2=8bpp 3=16bpp 5=32bpp 7=24bpp */
#define SYS_CONFIG_PIXELSIZE_SHIFT 26
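/* Illustrative only (this driver never programs sys_config itself): an 8bpp
* mode would be encoded as (2 << SYS_CONFIG_PIXELSIZE_SHIFT) in sys_config. */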
#define SCREENPAINT_TIMECTL1_ENABLE_VIDEO 0x20 /* 0 = off, 1 = on */
struct p9100_regs {
/* Registers for the system control */
u32 sys_base;
u32 sys_config;
u32 sys_intr;
u32 sys_int_ena;
u32 sys_alt_rd;
u32 sys_alt_wr;
u32 sys_xxx[58];
/* Registers for the video control */
u32 vid_base;
u32 vid_hcnt;
u32 vid_htotal;
u32 vid_hsync_rise;
u32 vid_hblank_rise;
u32 vid_hblank_fall;
u32 vid_hcnt_preload;
u32 vid_vcnt;
u32 vid_vlen;
u32 vid_vsync_rise;
u32 vid_vblank_rise;
u32 vid_vblank_fall;
u32 vid_vcnt_preload;
u32 vid_screenpaint_addr;
u32 vid_screenpaint_timectl1;
u32 vid_screenpaint_qsfcnt;
u32 vid_screenpaint_timectl2;
u32 vid_xxx[15];
/* Registers for the video RAM (VRAM) control */
u32 vram_base;
u32 vram_memcfg;
u32 vram_refresh_pd;
u32 vram_refresh_cnt;
u32 vram_raslo_max;
u32 vram_raslo_cur;
u32 pwrup_cfg;
u32 vram_xxx[25];
/* Registers for IBM RGB528 Palette */
u32 ramdac_cmap_wridx;
u32 ramdac_palette_data;
u32 ramdac_pixel_mask;
u32 ramdac_palette_rdaddr;
u32 ramdac_idx_lo;
u32 ramdac_idx_hi;
u32 ramdac_idx_data;
u32 ramdac_idx_ctl;
u32 ramdac_xxx[1784];
};
struct p9100_cmd_parameng {
u32 parameng_status;
u32 parameng_bltcmd;
u32 parameng_quadcmd;
};
struct p9100_par {
spinlock_t lock;
struct p9100_regs __iomem *regs;
u32 flags;
#define P9100_FLAG_BLANKED 0x00000001
unsigned long which_io;
};
/**
* p9100_setcolreg - Optional function. Sets a color register.
* @regno: index of the color register to set
* @red: the red value, up to 16 bits wide
* @green: the green value, up to 16 bits wide
* @blue: the blue value, up to 16 bits wide
* @transp: if supported, the alpha value, up to 16 bits wide
* @info: frame buffer info structure
*/
static int p9100_setcolreg(unsigned regno,
unsigned red, unsigned green, unsigned blue,
unsigned transp, struct fb_info *info)
{
struct p9100_par *par = (struct p9100_par *) info->par;
struct p9100_regs __iomem *regs = par->regs;
unsigned long flags;
if (regno >= 256)
return 1;
red >>= 8;
green >>= 8;
blue >>= 8;
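/* The IBM RGB528 RAMDAC expects the palette index followed by the red,
* green and blue components as consecutive writes, each value placed in
* bits 23:16 of the register. */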
spin_lock_irqsave(&par->lock, flags);
sbus_writel((regno << 16), &regs->ramdac_cmap_wridx);
sbus_writel((red << 16), &regs->ramdac_palette_data);
sbus_writel((green << 16), &regs->ramdac_palette_data);
sbus_writel((blue << 16), &regs->ramdac_palette_data);
spin_unlock_irqrestore(&par->lock, flags);
return 0;
}
/**
* p9100_blank - Optional function. Blanks the display.
* @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int
p9100_blank(int blank, struct fb_info *info)
{
struct p9100_par *par = (struct p9100_par *) info->par;
struct p9100_regs __iomem *regs = par->regs;
unsigned long flags;
u32 val;
spin_lock_irqsave(&par->lock, flags);
switch (blank) {
case FB_BLANK_UNBLANK: /* Unblanking */
val = sbus_readl(&regs->vid_screenpaint_timectl1);
val |= SCREENPAINT_TIMECTL1_ENABLE_VIDEO;
sbus_writel(val, &regs->vid_screenpaint_timectl1);
par->flags &= ~P9100_FLAG_BLANKED;
break;
case FB_BLANK_NORMAL: /* Normal blanking */
case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */
case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */
case FB_BLANK_POWERDOWN: /* Poweroff */
val = sbus_readl(&regs->vid_screenpaint_timectl1);
val &= ~SCREENPAINT_TIMECTL1_ENABLE_VIDEO;
sbus_writel(val, &regs->vid_screenpaint_timectl1);
par->flags |= P9100_FLAG_BLANKED;
break;
}
spin_unlock_irqrestore(&par->lock, flags);
return 0;
}
static struct sbus_mmap_map p9100_mmap_map[] = {
{ CG3_MMAP_OFFSET, 0, SBUS_MMAP_FBSIZE(1) },
{ 0, 0, 0 }
};
static int p9100_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct p9100_par *par = (struct p9100_par *)info->par;
return sbusfb_mmap_helper(p9100_mmap_map,
info->fix.smem_start, info->fix.smem_len,
par->which_io, vma);
}
static int p9100_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
/* Make it look like a cg3. */
return sbusfb_ioctl_helper(cmd, arg, info,
FBTYPE_SUN3COLOR, 8, info->fix.smem_len);
}
/*
* Initialisation
*/
static void p9100_init_fix(struct fb_info *info, int linebytes, struct device_node *dp)
{
strlcpy(info->fix.id, dp->name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
info->fix.line_length = linebytes;
info->fix.accel = FB_ACCEL_SUN_CGTHREE;
}
static int __devinit p9100_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
struct p9100_par *par;
int linebytes, err;
info = framebuffer_alloc(sizeof(struct p9100_par), &op->dev);
err = -ENOMEM;
if (!info)
goto out_err;
par = info->par;
spin_lock_init(&par->lock);
/* This is the framebuffer and the only resource apps can mmap. */
info->fix.smem_start = op->resource[2].start;
par->which_io = op->resource[2].flags & IORESOURCE_BITS;
sbusfb_fill_var(&info->var, dp, 8);
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
linebytes = of_getintprop_default(dp, "linebytes", info->var.xres);
info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
par->regs = of_ioremap(&op->resource[0], 0,
sizeof(struct p9100_regs), "p9100 regs");
if (!par->regs)
goto out_release_fb;
info->flags = FBINFO_DEFAULT;
info->fbops = &p9100_ops;
info->screen_base = of_ioremap(&op->resource[2], 0,
info->fix.smem_len, "p9100 ram");
if (!info->screen_base)
goto out_unmap_regs;
p9100_blank(FB_BLANK_UNBLANK, info);
if (fb_alloc_cmap(&info->cmap, 256, 0))
goto out_unmap_screen;
p9100_init_fix(info, linebytes, dp);
err = register_framebuffer(info);
if (err < 0)
goto out_dealloc_cmap;
fb_set_cmap(&info->cmap, info);
dev_set_drvdata(&op->dev, info);
printk(KERN_INFO "%s: p9100 at %lx:%lx\n",
dp->full_name,
par->which_io, info->fix.smem_start);
return 0;
out_dealloc_cmap:
fb_dealloc_cmap(&info->cmap);
out_unmap_screen:
of_iounmap(&op->resource[2], info->screen_base, info->fix.smem_len);
out_unmap_regs:
of_iounmap(&op->resource[0], par->regs, sizeof(struct p9100_regs));
out_release_fb:
framebuffer_release(info);
out_err:
return err;
}
static int __devexit p9100_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct p9100_par *par = info->par;
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
of_iounmap(&op->resource[0], par->regs, sizeof(struct p9100_regs));
of_iounmap(&op->resource[2], info->screen_base, info->fix.smem_len);
framebuffer_release(info);
dev_set_drvdata(&op->dev, NULL);
return 0;
}
static const struct of_device_id p9100_match[] = {
{
.name = "p9100",
},
{},
};
MODULE_DEVICE_TABLE(of, p9100_match);
static struct platform_driver p9100_driver = {
.driver = {
.name = "p9100",
.owner = THIS_MODULE,
.of_match_table = p9100_match,
},
.probe = p9100_probe,
.remove = __devexit_p(p9100_remove),
};
static int __init p9100_init(void)
{
if (fb_get_options("p9100fb", NULL))
return -ENODEV;
return platform_driver_register(&p9100_driver);
}
static void __exit p9100_exit(void)
{
platform_driver_unregister(&p9100_driver);
}
module_init(p9100_init);
module_exit(p9100_exit);
MODULE_DESCRIPTION("framebuffer driver for P9100 chipsets");
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
| gpl-2.0 |
opinsys/opinsys-linux | drivers/scsi/mac53c94.c | 8431 | 15374 | /*
* SCSI low-level driver for the 53c94 SCSI bus adaptor found
* on Power Macintosh computers, controlling the external SCSI chain.
* We assume the 53c94 is connected to a DBDMA (descriptor-based DMA)
* controller.
*
* Paul Mackerras, August 1996.
* Copyright (C) 1996 Paul Mackerras.
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/macio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include "mac53c94.h"
enum fsc_phase {
idle,
selecting,
dataing,
completing,
busfreeing,
};
struct fsc_state {
struct mac53c94_regs __iomem *regs;
int intr;
struct dbdma_regs __iomem *dma;
int dmaintr;
int clk_freq;
struct Scsi_Host *host;
struct scsi_cmnd *request_q;
struct scsi_cmnd *request_qtail;
struct scsi_cmnd *current_req; /* req we're currently working on */
enum fsc_phase phase; /* what we're currently trying to do */
struct dbdma_cmd *dma_cmds; /* space for dbdma commands, aligned */
void *dma_cmd_space;
struct pci_dev *pdev;
dma_addr_t dma_addr;
struct macio_dev *mdev;
};
static void mac53c94_init(struct fsc_state *);
static void mac53c94_start(struct fsc_state *);
static void mac53c94_interrupt(int, void *);
static irqreturn_t do_mac53c94_interrupt(int, void *);
static void cmd_done(struct fsc_state *, int result);
static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *);
static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct fsc_state *state;
#if 0
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
int i;
printk(KERN_DEBUG "mac53c94_queue %p: command is", cmd);
for (i = 0; i < cmd->cmd_len; ++i)
printk(KERN_CONT " %.2x", cmd->cmnd[i]);
printk(KERN_CONT "\n");
printk(KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n",
scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd));
}
#endif
cmd->scsi_done = done;
cmd->host_scribble = NULL;
state = (struct fsc_state *) cmd->device->host->hostdata;
if (state->request_q == NULL)
state->request_q = cmd;
else
state->request_qtail->host_scribble = (void *) cmd;
state->request_qtail = cmd;
if (state->phase == idle)
mac53c94_start(state);
return 0;
}
static DEF_SCSI_QCMD(mac53c94_queue)
static int mac53c94_host_reset(struct scsi_cmnd *cmd)
{
struct fsc_state *state = (struct fsc_state *) cmd->device->host->hostdata;
struct mac53c94_regs __iomem *regs = state->regs;
struct dbdma_regs __iomem *dma = state->dma;
unsigned long flags;
spin_lock_irqsave(cmd->device->host->host_lock, flags);
writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control);
writeb(CMD_SCSI_RESET, &regs->command); /* assert RST */
udelay(100); /* leave it on for a while (>= 25us) */
writeb(CMD_RESET, &regs->command);
udelay(20);
mac53c94_init(state);
writeb(CMD_NOP, &regs->command);
spin_unlock_irqrestore(cmd->device->host->host_lock, flags);
return SUCCESS;
}
static void mac53c94_init(struct fsc_state *state)
{
struct mac53c94_regs __iomem *regs = state->regs;
struct dbdma_regs __iomem *dma = state->dma;
int x;
writeb(state->host->this_id | CF1_PAR_ENABLE, &regs->config1);
writeb(TIMO_VAL(250), &regs->sel_timeout); /* 250ms */
writeb(CLKF_VAL(state->clk_freq), &regs->clk_factor);
writeb(CF2_FEATURE_EN, &regs->config2);
writeb(0, &regs->config3);
writeb(0, &regs->sync_period);
writeb(0, &regs->sync_offset);
x = readb(&regs->interrupt);
writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control);
}
/*
* Start the next command for a 53C94.
* Should be called with interrupts disabled.
*/
static void mac53c94_start(struct fsc_state *state)
{
struct scsi_cmnd *cmd;
struct mac53c94_regs __iomem *regs = state->regs;
int i;
if (state->phase != idle || state->current_req != NULL)
panic("inappropriate mac53c94_start (state=%p)", state);
if (state->request_q == NULL)
return;
state->current_req = cmd = state->request_q;
state->request_q = (struct scsi_cmnd *) cmd->host_scribble;
/* Off we go */
writeb(0, &regs->count_lo);
writeb(0, &regs->count_mid);
writeb(0, &regs->count_hi);
writeb(CMD_NOP + CMD_DMA_MODE, &regs->command);
udelay(1);
writeb(CMD_FLUSH, &regs->command);
udelay(1);
writeb(cmd->device->id, &regs->dest_id);
writeb(0, &regs->sync_period);
writeb(0, &regs->sync_offset);
/* load the command into the FIFO */
for (i = 0; i < cmd->cmd_len; ++i)
writeb(cmd->cmnd[i], &regs->fifo);
/* do select without ATN XXX */
writeb(CMD_SELECT, &regs->command);
state->phase = selecting;
set_dma_cmds(state, cmd);
}
static irqreturn_t do_mac53c94_interrupt(int irq, void *dev_id)
{
unsigned long flags;
struct Scsi_Host *dev = ((struct fsc_state *) dev_id)->current_req->device->host;
spin_lock_irqsave(dev->host_lock, flags);
mac53c94_interrupt(irq, dev_id);
spin_unlock_irqrestore(dev->host_lock, flags);
return IRQ_HANDLED;
}
static void mac53c94_interrupt(int irq, void *dev_id)
{
struct fsc_state *state = (struct fsc_state *) dev_id;
struct mac53c94_regs __iomem *regs = state->regs;
struct dbdma_regs __iomem *dma = state->dma;
struct scsi_cmnd *cmd = state->current_req;
int nb, stat, seq, intr;
static int mac53c94_errors;
/*
* Apparently, reading the interrupt register unlatches
* the status and sequence step registers.
*/
seq = readb(&regs->seqstep);
stat = readb(&regs->status);
intr = readb(&regs->interrupt);
#if 0
printk(KERN_DEBUG "mac53c94_intr, intr=%x stat=%x seq=%x phase=%d\n",
intr, stat, seq, state->phase);
#endif
if (intr & INTR_RESET) {
/* SCSI bus was reset */
printk(KERN_INFO "external SCSI bus reset detected\n");
writeb(CMD_NOP, &regs->command);
writel(RUN << 16, &dma->control); /* stop dma */
cmd_done(state, DID_RESET << 16);
return;
}
if (intr & INTR_ILL_CMD) {
printk(KERN_ERR "53c94: invalid cmd, intr=%x stat=%x seq=%x phase=%d\n",
intr, stat, seq, state->phase);
cmd_done(state, DID_ERROR << 16);
return;
}
if (stat & STAT_ERROR) {
#if 0
/* XXX these seem to be harmless? */
printk("53c94: bad error, intr=%x stat=%x seq=%x phase=%d\n",
intr, stat, seq, state->phase);
#endif
++mac53c94_errors;
writeb(CMD_NOP + CMD_DMA_MODE, &regs->command);
}
if (cmd == 0) {
printk(KERN_DEBUG "53c94: interrupt with no command active?\n");
return;
}
if (stat & STAT_PARITY) {
printk(KERN_ERR "mac53c94: parity error\n");
cmd_done(state, DID_PARITY << 16);
return;
}
switch (state->phase) {
case selecting:
if (intr & INTR_DISCONNECT) {
/* selection timed out */
cmd_done(state, DID_BAD_TARGET << 16);
return;
}
if (intr != INTR_BUS_SERV + INTR_DONE) {
printk(KERN_DEBUG "got intr %x during selection\n", intr);
cmd_done(state, DID_ERROR << 16);
return;
}
if ((seq & SS_MASK) != SS_DONE) {
printk(KERN_DEBUG "seq step %x after command\n", seq);
cmd_done(state, DID_ERROR << 16);
return;
}
writeb(CMD_NOP, &regs->command);
/* set DMA controller going if any data to transfer */
if ((stat & (STAT_MSG|STAT_CD)) == 0
&& (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) {
nb = cmd->SCp.this_residual;
if (nb > 0xfff0)
nb = 0xfff0;
cmd->SCp.this_residual -= nb;
writeb(nb, &regs->count_lo);
writeb(nb >> 8, &regs->count_mid);
writeb(CMD_DMA_MODE + CMD_NOP, &regs->command);
writel(virt_to_phys(state->dma_cmds), &dma->cmdptr);
writel((RUN << 16) | RUN, &dma->control);
writeb(CMD_DMA_MODE + CMD_XFER_DATA, &regs->command);
state->phase = dataing;
break;
} else if ((stat & STAT_PHASE) == STAT_CD + STAT_IO) {
/* up to status phase already */
writeb(CMD_I_COMPLETE, &regs->command);
state->phase = completing;
} else {
printk(KERN_DEBUG "in unexpected phase %x after cmd\n",
stat & STAT_PHASE);
cmd_done(state, DID_ERROR << 16);
return;
}
break;
case dataing:
if (intr != INTR_BUS_SERV) {
printk(KERN_DEBUG "got intr %x before status\n", intr);
cmd_done(state, DID_ERROR << 16);
return;
}
if (cmd->SCp.this_residual != 0
&& (stat & (STAT_MSG|STAT_CD)) == 0) {
/* Set up the count regs to transfer more */
nb = cmd->SCp.this_residual;
if (nb > 0xfff0)
nb = 0xfff0;
cmd->SCp.this_residual -= nb;
writeb(nb, &regs->count_lo);
writeb(nb >> 8, &regs->count_mid);
writeb(CMD_DMA_MODE + CMD_NOP, &regs->command);
writeb(CMD_DMA_MODE + CMD_XFER_DATA, &regs->command);
break;
}
if ((stat & STAT_PHASE) != STAT_CD + STAT_IO) {
printk(KERN_DEBUG "intr %x before data xfer complete\n", intr);
}
writel(RUN << 16, &dma->control); /* stop dma */
scsi_dma_unmap(cmd);
/* should check dma status */
writeb(CMD_I_COMPLETE, &regs->command);
state->phase = completing;
break;
case completing:
if (intr != INTR_DONE) {
printk(KERN_DEBUG "got intr %x on completion\n", intr);
cmd_done(state, DID_ERROR << 16);
return;
}
cmd->SCp.Status = readb(&regs->fifo);
cmd->SCp.Message = readb(&regs->fifo);
cmd->result = CMD_ACCEPT_MSG;
writeb(CMD_ACCEPT_MSG, &regs->command);
state->phase = busfreeing;
break;
case busfreeing:
if (intr != INTR_DISCONNECT) {
printk(KERN_DEBUG "got intr %x when expected disconnect\n", intr);
}
cmd_done(state, (DID_OK << 16) + (cmd->SCp.Message << 8)
+ cmd->SCp.Status);
break;
default:
printk(KERN_DEBUG "don't know about phase %d\n", state->phase);
}
}
static void cmd_done(struct fsc_state *state, int result)
{
struct scsi_cmnd *cmd;
cmd = state->current_req;
if (cmd != 0) {
cmd->result = result;
(*cmd->scsi_done)(cmd);
state->current_req = NULL;
}
state->phase = idle;
mac53c94_start(state);
}
/*
* Set up DMA commands for transferring data.
*/
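/*
* The list built here has one INPUT_MORE/OUTPUT_MORE descriptor per
* scatterlist element; the final descriptor is promoted to
* INPUT_LAST/OUTPUT_LAST and followed by a DBDMA_STOP terminator, and the
* total byte count is recorded in cmd->SCp.this_residual.
*/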
static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
{
int i, dma_cmd, total, nseg;
struct scatterlist *scl;
struct dbdma_cmd *dcmds;
dma_addr_t dma_addr;
u32 dma_len;
nseg = scsi_dma_map(cmd);
BUG_ON(nseg < 0);
if (!nseg)
return;
dma_cmd = cmd->sc_data_direction == DMA_TO_DEVICE ?
OUTPUT_MORE : INPUT_MORE;
dcmds = state->dma_cmds;
total = 0;
scsi_for_each_sg(cmd, scl, nseg, i) {
dma_addr = sg_dma_address(scl);
dma_len = sg_dma_len(scl);
if (dma_len > 0xffff)
panic("mac53c94: scatterlist element >= 64k");
total += dma_len;
st_le16(&dcmds->req_count, dma_len);
st_le16(&dcmds->command, dma_cmd);
st_le32(&dcmds->phy_addr, dma_addr);
dcmds->xfer_status = 0;
++dcmds;
}
dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
st_le16(&dcmds[-1].command, dma_cmd);
st_le16(&dcmds->command, DBDMA_STOP);
cmd->SCp.this_residual = total;
}
static struct scsi_host_template mac53c94_template = {
.proc_name = "53c94",
.name = "53C94",
.queuecommand = mac53c94_queue,
.eh_host_reset_handler = mac53c94_host_reset,
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
};
static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
struct device_node *node = macio_get_of_node(mdev);
struct pci_dev *pdev = macio_get_pci_dev(mdev);
struct fsc_state *state;
struct Scsi_Host *host;
void *dma_cmd_space;
const unsigned char *clkprop;
int proplen, rc = -ENODEV;
if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
printk(KERN_ERR "mac53c94: expected 2 addrs and intrs"
" (got %d/%d)\n",
macio_resource_count(mdev), macio_irq_count(mdev));
return -ENODEV;
}
if (macio_request_resources(mdev, "mac53c94") != 0) {
printk(KERN_ERR "mac53c94: unable to request memory resources");
return -EBUSY;
}
host = scsi_host_alloc(&mac53c94_template, sizeof(struct fsc_state));
if (host == NULL) {
printk(KERN_ERR "mac53c94: couldn't register host");
rc = -ENOMEM;
goto out_release;
}
state = (struct fsc_state *) host->hostdata;
macio_set_drvdata(mdev, state);
state->host = host;
state->pdev = pdev;
state->mdev = mdev;
state->regs = (struct mac53c94_regs __iomem *)
ioremap(macio_resource_start(mdev, 0), 0x1000);
state->intr = macio_irq(mdev, 0);
state->dma = (struct dbdma_regs __iomem *)
ioremap(macio_resource_start(mdev, 1), 0x1000);
state->dmaintr = macio_irq(mdev, 1);
if (state->regs == NULL || state->dma == NULL) {
printk(KERN_ERR "mac53c94: ioremap failed for %s\n",
node->full_name);
goto out_free;
}
clkprop = of_get_property(node, "clock-frequency", &proplen);
if (clkprop == NULL || proplen != sizeof(int)) {
printk(KERN_ERR "%s: can't get clock frequency, "
"assuming 25MHz\n", node->full_name);
state->clk_freq = 25000000;
} else
state->clk_freq = *(int *)clkprop;
/* Space for dma command list: +1 for stop command,
* +1 to allow for aligning.
* XXX FIXME: Use DMA consistent routines
*/
dma_cmd_space = kmalloc((host->sg_tablesize + 2) *
sizeof(struct dbdma_cmd), GFP_KERNEL);
if (dma_cmd_space == 0) {
printk(KERN_ERR "mac53c94: couldn't allocate dma "
"command space for %s\n", node->full_name);
rc = -ENOMEM;
goto out_free;
}
state->dma_cmds = (struct dbdma_cmd *)DBDMA_ALIGN(dma_cmd_space);
memset(state->dma_cmds, 0, (host->sg_tablesize + 1)
* sizeof(struct dbdma_cmd));
state->dma_cmd_space = dma_cmd_space;
mac53c94_init(state);
if (request_irq(state->intr, do_mac53c94_interrupt, 0, "53C94",state)) {
printk(KERN_ERR "mac53C94: can't get irq %d for %s\n",
state->intr, node->full_name);
goto out_free_dma;
}
rc = scsi_add_host(host, &mdev->ofdev.dev);
if (rc != 0)
goto out_release_irq;
scsi_scan_host(host);
return 0;
out_release_irq:
free_irq(state->intr, state);
out_free_dma:
kfree(state->dma_cmd_space);
out_free:
if (state->dma != NULL)
iounmap(state->dma);
if (state->regs != NULL)
iounmap(state->regs);
scsi_host_put(host);
out_release:
macio_release_resources(mdev);
return rc;
}
static int mac53c94_remove(struct macio_dev *mdev)
{
struct fsc_state *fp = (struct fsc_state *)macio_get_drvdata(mdev);
struct Scsi_Host *host = fp->host;
scsi_remove_host(host);
free_irq(fp->intr, fp);
if (fp->regs)
iounmap(fp->regs);
if (fp->dma)
iounmap(fp->dma);
kfree(fp->dma_cmd_space);
scsi_host_put(host);
macio_release_resources(mdev);
return 0;
}
static struct of_device_id mac53c94_match[] =
{
{
.name = "53c94",
},
{},
};
MODULE_DEVICE_TABLE (of, mac53c94_match);
static struct macio_driver mac53c94_driver =
{
.driver = {
.name = "mac53c94",
.owner = THIS_MODULE,
.of_match_table = mac53c94_match,
},
.probe = mac53c94_probe,
.remove = mac53c94_remove,
};
static int __init init_mac53c94(void)
{
return macio_register_driver(&mac53c94_driver);
}
static void __exit exit_mac53c94(void)
{
return macio_unregister_driver(&mac53c94_driver);
}
module_init(init_mac53c94);
module_exit(exit_mac53c94);
MODULE_DESCRIPTION("PowerMac 53c94 SCSI driver");
MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ReVolt-ROM/kernel_lge_mako | drivers/media/video/pwc/pwc-kiara.c | 10479 | 35376 | /* Linux driver for Philips webcam
(C) 2004-2006 Luc Saillard (luc@saillard.org)
NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
driver and thus may have bugs that are not present in the original version.
Please send bug reports and support requests to <luc@saillard.org>.
The decompression routines have been implemented by reverse-engineering the
Nemosoft binary pwcx module. Caveat emptor.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* This table contains entries for the 730/740/750 (Kiara) camera, with
4 different qualities (no compression, low, medium, high).
It lists the bandwidth requirements for said mode by its alternate interface
number. An alternate of 0 means that the mode is unavailable.
There are 6 * 6 * 4 entries:
6 different resolutions subqcif, qsif, qcif, sif, cif, vga
6 framerates: 5, 10, 15, 20, 25, 30
4 compression modes: none, low, medium, high
When an uncompressed mode is not available, the next available compressed mode
will be chosen (unless the decompressor is absent). Sometimes there are only
1 or 2 compressed modes available; in that case entries are duplicated.
*/
#include "pwc-kiara.h"
const unsigned int Kiara_fps_vector[PWC_FPS_MAX_KIARA] = { 5, 10, 15, 20, 25, 30 };
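/* Illustrative lookup (a sketch, not code from this driver; it assumes the
* struct Kiara_table_entry fields declared in pwc-kiara.h are
* { alternate, packetsize, bandlength, mode[12] } and that PSZ_QSIF indexes
* the QSIF row): QSIF at 30 fps with medium compression would be
*
*   const struct Kiara_table_entry *e = &Kiara_table[PSZ_QSIF][5][2];
*
* where 5 is the index of 30 in Kiara_fps_vector and 2 selects medium
* compression; e->alternate == 0 would mean the mode is unavailable.
*/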
const struct Kiara_table_entry Kiara_table[PSZ_MAX][6][4] =
{
/* SQCIF */
{
/* 5 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 10 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 15 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 20 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 25 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 30 fps */
{
{0, },
{0, },
{0, },
{0, },
},
},
/* QSIF */
{
/* 5 fps */
{
{1, 146, 0, {0x1D, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0x00, 0x80}},
{1, 146, 0, {0x1D, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0x00, 0x80}},
{1, 146, 0, {0x1D, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0x00, 0x80}},
{1, 146, 0, {0x1D, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0x00, 0x80}},
},
/* 10 fps */
{
{2, 291, 0, {0x1C, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x23, 0x01, 0x80}},
{1, 192, 630, {0x14, 0xF4, 0x30, 0x13, 0xA9, 0x12, 0xE1, 0x17, 0x08, 0xC0, 0x00, 0x80}},
{1, 192, 630, {0x14, 0xF4, 0x30, 0x13, 0xA9, 0x12, 0xE1, 0x17, 0x08, 0xC0, 0x00, 0x80}},
{1, 192, 630, {0x14, 0xF4, 0x30, 0x13, 0xA9, 0x12, 0xE1, 0x17, 0x08, 0xC0, 0x00, 0x80}},
},
/* 15 fps */
{
{3, 437, 0, {0x1B, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0xB5, 0x01, 0x80}},
{2, 292, 640, {0x13, 0xF4, 0x30, 0x13, 0xF7, 0x13, 0x2F, 0x13, 0x20, 0x24, 0x01, 0x80}},
{2, 292, 640, {0x13, 0xF4, 0x30, 0x13, 0xF7, 0x13, 0x2F, 0x13, 0x20, 0x24, 0x01, 0x80}},
{1, 192, 420, {0x13, 0xF4, 0x30, 0x0D, 0x1B, 0x0C, 0x53, 0x1E, 0x18, 0xC0, 0x00, 0x80}},
},
/* 20 fps */
{
{4, 589, 0, {0x1A, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x4D, 0x02, 0x80}},
{3, 448, 730, {0x12, 0xF4, 0x30, 0x16, 0xC9, 0x16, 0x01, 0x0E, 0x18, 0xC0, 0x01, 0x80}},
{2, 292, 476, {0x12, 0xF4, 0x30, 0x0E, 0xD8, 0x0E, 0x10, 0x19, 0x18, 0x24, 0x01, 0x80}},
{1, 192, 312, {0x12, 0xF4, 0x50, 0x09, 0xB3, 0x08, 0xEB, 0x1E, 0x18, 0xC0, 0x00, 0x80}},
},
/* 25 fps */
{
{5, 703, 0, {0x19, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0xBF, 0x02, 0x80}},
{3, 447, 610, {0x11, 0xF4, 0x30, 0x13, 0x0B, 0x12, 0x43, 0x14, 0x28, 0xBF, 0x01, 0x80}},
{2, 292, 398, {0x11, 0xF4, 0x50, 0x0C, 0x6C, 0x0B, 0xA4, 0x1E, 0x28, 0x24, 0x01, 0x80}},
{1, 193, 262, {0x11, 0xF4, 0x50, 0x08, 0x23, 0x07, 0x5B, 0x1E, 0x28, 0xC1, 0x00, 0x80}},
},
/* 30 fps */
{
{8, 874, 0, {0x18, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x6A, 0x03, 0x80}},
{5, 704, 730, {0x10, 0xF4, 0x30, 0x16, 0xC9, 0x16, 0x01, 0x0E, 0x28, 0xC0, 0x02, 0x80}},
{3, 448, 492, {0x10, 0xF4, 0x30, 0x0F, 0x5D, 0x0E, 0x95, 0x15, 0x28, 0xC0, 0x01, 0x80}},
{2, 292, 320, {0x10, 0xF4, 0x50, 0x09, 0xFB, 0x09, 0x33, 0x1E, 0x28, 0x24, 0x01, 0x80}},
},
},
/* QCIF */
{
/* 5 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 10 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 15 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 20 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 25 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 30 fps */
{
{0, },
{0, },
{0, },
{0, },
},
},
/* SIF */
{
/* 5 fps */
{
{4, 582, 0, {0x0D, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x46, 0x02, 0x80}},
{3, 387, 1276, {0x05, 0xF4, 0x30, 0x27, 0xD8, 0x26, 0x48, 0x03, 0x10, 0x83, 0x01, 0x80}},
{2, 291, 960, {0x05, 0xF4, 0x30, 0x1D, 0xF2, 0x1C, 0x62, 0x04, 0x10, 0x23, 0x01, 0x80}},
{1, 191, 630, {0x05, 0xF4, 0x50, 0x13, 0xA9, 0x12, 0x19, 0x05, 0x18, 0xBF, 0x00, 0x80}},
},
/* 10 fps */
{
{0, },
{6, 775, 1278, {0x04, 0xF4, 0x30, 0x27, 0xE8, 0x26, 0x58, 0x05, 0x30, 0x07, 0x03, 0x80}},
{3, 447, 736, {0x04, 0xF4, 0x30, 0x16, 0xFB, 0x15, 0x6B, 0x05, 0x28, 0xBF, 0x01, 0x80}},
{2, 292, 480, {0x04, 0xF4, 0x70, 0x0E, 0xF9, 0x0D, 0x69, 0x09, 0x28, 0x24, 0x01, 0x80}},
},
/* 15 fps */
{
{0, },
{9, 955, 1050, {0x03, 0xF4, 0x30, 0x20, 0xCF, 0x1F, 0x3F, 0x06, 0x48, 0xBB, 0x03, 0x80}},
{4, 592, 650, {0x03, 0xF4, 0x30, 0x14, 0x44, 0x12, 0xB4, 0x08, 0x30, 0x50, 0x02, 0x80}},
{3, 448, 492, {0x03, 0xF4, 0x50, 0x0F, 0x52, 0x0D, 0xC2, 0x09, 0x38, 0xC0, 0x01, 0x80}},
},
/* 20 fps */
{
{0, },
{9, 958, 782, {0x02, 0xF4, 0x30, 0x18, 0x6A, 0x16, 0xDA, 0x0B, 0x58, 0xBE, 0x03, 0x80}},
{5, 703, 574, {0x02, 0xF4, 0x50, 0x11, 0xE7, 0x10, 0x57, 0x0B, 0x40, 0xBF, 0x02, 0x80}},
{3, 446, 364, {0x02, 0xF4, 0x90, 0x0B, 0x5C, 0x09, 0xCC, 0x0E, 0x38, 0xBE, 0x01, 0x80}},
},
/* 25 fps */
{
{0, },
{9, 958, 654, {0x01, 0xF4, 0x30, 0x14, 0x66, 0x12, 0xD6, 0x0B, 0x50, 0xBE, 0x03, 0x80}},
{6, 776, 530, {0x01, 0xF4, 0x50, 0x10, 0x8C, 0x0E, 0xFC, 0x0C, 0x48, 0x08, 0x03, 0x80}},
{4, 592, 404, {0x01, 0xF4, 0x70, 0x0C, 0x96, 0x0B, 0x06, 0x0B, 0x48, 0x50, 0x02, 0x80}},
},
/* 30 fps */
{
{0, },
{9, 957, 526, {0x00, 0xF4, 0x50, 0x10, 0x68, 0x0E, 0xD8, 0x0D, 0x58, 0xBD, 0x03, 0x80}},
{6, 775, 426, {0x00, 0xF4, 0x70, 0x0D, 0x48, 0x0B, 0xB8, 0x0F, 0x50, 0x07, 0x03, 0x80}},
{4, 590, 324, {0x00, 0x7A, 0x88, 0x0A, 0x1C, 0x08, 0xB4, 0x0E, 0x50, 0x4E, 0x02, 0x80}},
},
},
/* CIF */
{
/* 5 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 10 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 15 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 20 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 25 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 30 fps */
{
{0, },
{0, },
{0, },
{0, },
},
},
/* VGA */
{
/* 5 fps */
{
{0, },
{6, 773, 1272, {0x25, 0xF4, 0x30, 0x27, 0xB6, 0x24, 0x96, 0x02, 0x30, 0x05, 0x03, 0x80}},
{4, 592, 976, {0x25, 0xF4, 0x50, 0x1E, 0x78, 0x1B, 0x58, 0x03, 0x30, 0x50, 0x02, 0x80}},
{3, 448, 738, {0x25, 0xF4, 0x90, 0x17, 0x0C, 0x13, 0xEC, 0x04, 0x30, 0xC0, 0x01, 0x80}},
},
/* 10 fps */
{
{0, },
{9, 956, 788, {0x24, 0xF4, 0x70, 0x18, 0x9C, 0x15, 0x7C, 0x03, 0x48, 0xBC, 0x03, 0x80}},
{6, 776, 640, {0x24, 0xF4, 0xB0, 0x13, 0xFC, 0x11, 0x2C, 0x04, 0x48, 0x08, 0x03, 0x80}},
{4, 592, 488, {0x24, 0x7A, 0xE8, 0x0F, 0x3C, 0x0C, 0x6C, 0x06, 0x48, 0x50, 0x02, 0x80}},
},
/* 15 fps */
{
{0, },
{9, 957, 526, {0x23, 0x7A, 0xE8, 0x10, 0x68, 0x0D, 0x98, 0x06, 0x58, 0xBD, 0x03, 0x80}},
{9, 957, 526, {0x23, 0x7A, 0xE8, 0x10, 0x68, 0x0D, 0x98, 0x06, 0x58, 0xBD, 0x03, 0x80}},
{8, 895, 492, {0x23, 0x7A, 0xE8, 0x0F, 0x5D, 0x0C, 0x8D, 0x06, 0x58, 0x7F, 0x03, 0x80}},
},
/* 20 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 25 fps */
{
{0, },
{0, },
{0, },
{0, },
},
/* 30 fps */
{
{0, },
{0, },
{0, },
{0, },
},
},
};
/*
* ROM tables for Kiara chips
*
* 8 ROM tables (one for each compression version)
* 2 tables per ROM (one for each pass: Y, and U&V)
* 16 x 8 words per pass
*/
const unsigned int KiaraRomTable [8][2][16][8] =
{
{ /* version 0 */
{ /* version 0, passes 0 */
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000001,0x00000001},
{0x00000000,0x00000000,0x00000009,0x00000009,
0x00000009,0x00000009,0x00000009,0x00000009},
{0x00000000,0x00000000,0x00000009,0x00000049,
0x00000049,0x00000049,0x00000049,0x00000049},
{0x00000000,0x00000000,0x00000049,0x00000049,
0x00000049,0x00000249,0x0000024a,0x00000049},
{0x00000000,0x00000000,0x00000049,0x00000049,
0x00000249,0x00000249,0x0000024a,0x0000024a},
{0x00000000,0x00000000,0x00000049,0x00000249,
0x00000249,0x0000124a,0x0000024a,0x0000024a},
{0x00000000,0x00000000,0x00000049,0x00000249,
0x0000124a,0x00009252,0x00001252,0x00001252},
{0x00000000,0x00000000,0x00000249,0x00000249,
0x00009252,0x00009292,0x00009292,0x00009292},
{0x00000000,0x00000000,0x00000249,0x00001249,
0x00009292,0x00009292,0x00009493,0x000124db},
{0x00000000,0x00000000,0x00000249,0x0000924a,
0x00009492,0x0000a49b,0x0000a49b,0x000124db},
{0x00000000,0x00000000,0x00001249,0x00009252,
0x0000a493,0x000124db,0x000124db,0x000126dc},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x000124db,0x000126dc,0x000136e4,0x000126dc},
{0x00000000,0x00000000,0x00009292,0x0000a49b,
0x000124db,0x000136e4,0x000136e4,0x000136e4},
{0x00000000,0x00000000,0x00009292,0x0000a49b,
0x000126dc,0x0001b724,0x0001b92d,0x0001b925},
{0x00000000,0x00000000,0x00009492,0x000124db,
0x000136e4,0x0001b925,0x0001c96e,0x0001c92d},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
},
{ /* version 0, passes 1 */
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000},
{0x00000000,0x00000000,0x00000001,0x00000009,
0x00000009,0x00000009,0x00000009,0x00000001},
{0x00000000,0x00000000,0x00000009,0x00000009,
0x00000049,0x00000049,0x00000049,0x00000049},
{0x00000000,0x00000000,0x00000049,0x00000049,
0x00000049,0x00000049,0x0000024a,0x0000024a},
{0x00000000,0x00000000,0x00000049,0x00000049,
0x00000249,0x00000249,0x0000024a,0x0000024a},
{0x00000000,0x00000000,0x00000049,0x00000249,
0x00000249,0x00000249,0x0000024a,0x00001252},
{0x00000000,0x00000000,0x00000049,0x00001249,
0x0000124a,0x0000124a,0x00001252,0x00009292},
{0x00000000,0x00000000,0x00000249,0x00001249,
0x00009252,0x00009252,0x00009292,0x00009493},
{0x00000000,0x00000000,0x00000249,0x0000924a,
0x00009292,0x00009292,0x00009292,0x00009493},
{0x00000000,0x00000000,0x00000249,0x00009292,
0x00009492,0x00009493,0x0000a49b,0x00009493},
{0x00000000,0x00000000,0x00001249,0x00009292,
0x0000a493,0x000124db,0x000126dc,0x000126dc},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x000126dc,0x000136e4,0x000136e4},
{0x00000000,0x00000000,0x00009252,0x00009493,
0x000126dc,0x000126dc,0x000136e4,0x000136e4},
{0x00000000,0x00000000,0x00009292,0x0000a49b,
0x000136e4,0x000136e4,0x0001b725,0x0001b724},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
}
},
{ /* version 1 */
{ /* version 1, passes 0 */
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000001},
{0x00000000,0x00000000,0x00000009,0x00000009,
0x00000009,0x00000009,0x00000009,0x00000009},
{0x00000000,0x00000000,0x00000049,0x00000049,
0x00000049,0x00000049,0x00000049,0x00000049},
{0x00000000,0x00000000,0x00000049,0x00000049,
0x00000049,0x00000249,0x0000024a,0x0000024a},
{0x00000000,0x00000000,0x00000049,0x00000249,
0x00000249,0x00000249,0x0000024a,0x00001252},
{0x00000000,0x00000000,0x00000249,0x00000249,
0x00000249,0x0000124a,0x00001252,0x00001252},
{0x00000000,0x00000000,0x00000249,0x00000249,
0x0000124a,0x0000124a,0x00009292,0x00009292},
{0x00000000,0x00000000,0x00000249,0x00001249,
0x0000124a,0x00009252,0x00009292,0x00009292},
{0x00000000,0x00000000,0x00000249,0x00001249,
0x00009252,0x00009292,0x00009292,0x00009292},
{0x00000000,0x00000000,0x00000249,0x00001249,
0x00009252,0x00009292,0x00009493,0x00009493},
{0x00000000,0x00000000,0x00000249,0x0000924a,
0x00009252,0x00009493,0x00009493,0x00009493},
{0x00000000,0x00000000,0x00000249,0x0000924a,
0x00009292,0x00009493,0x00009493,0x00009493},
{0x00000000,0x00000000,0x00000249,0x00009252,
0x00009492,0x00009493,0x0000a49b,0x0000a49b},
{0x00000000,0x00000000,0x00001249,0x00009292,
0x00009492,0x000124db,0x000124db,0x000124db},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x000126dc,0x000126dc,0x000126dc},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
},
{ /* version 1, passes 1 */
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000},
{0x00000000,0x00000000,0x00000049,0x00000009,
0x00000049,0x00000009,0x00000001,0x00000000},
{0x00000000,0x00000000,0x00000049,0x00000049,
0x00000049,0x00000049,0x00000049,0x00000000},
{0x00000000,0x00000000,0x00000249,0x00000049,
0x00000249,0x00000049,0x0000024a,0x00000001},
{0x00000000,0x00000000,0x00000249,0x00000249,
0x00000249,0x00000249,0x0000024a,0x00000001},
{0x00000000,0x00000000,0x00000249,0x00000249,
0x00000249,0x00000249,0x0000024a,0x00000001},
{0x00000000,0x00000000,0x00000249,0x00000249,
0x00000249,0x00000249,0x0000024a,0x00000009},
{0x00000000,0x00000000,0x00000249,0x00000249,
0x0000124a,0x0000124a,0x0000024a,0x00000009},
{0x00000000,0x00000000,0x00000249,0x00000249,
0x0000124a,0x0000124a,0x0000024a,0x00000009},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x0000124a,0x00009252,0x00001252,0x00000049},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x0000124a,0x00009292,0x00001252,0x00000049},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x0000124a,0x00009292,0x00001252,0x00000049},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x00009252,0x00009292,0x00001252,0x0000024a},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x00009292,0x00009292,0x00001252,0x0000024a},
{0x00000000,0x00000000,0x0000924a,0x0000924a,
0x00009492,0x00009493,0x00009292,0x00001252},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
}
},
{ /* version 2 */
{ /* version 2, passes 0 */
{0x00000000,0x00000000,0x00000049,0x00000049,
0x00000049,0x00000049,0x0000024a,0x0000024a},
{0x00000000,0x00000000,0x00000249,0x00000249,
0x00000249,0x0000124a,0x00001252,0x00009292},
{0x00000000,0x00000000,0x00000249,0x00000249,
0x0000124a,0x00009252,0x00009292,0x00009292},
{0x00000000,0x00000000,0x00000249,0x00001249,
0x0000124a,0x00009292,0x00009493,0x00009493},
{0x00000000,0x00000000,0x00000249,0x00001249,
0x00009252,0x00009493,0x00009493,0x0000a49b},
{0x00000000,0x00000000,0x00000249,0x0000924a,
0x00009292,0x00009493,0x0000a49b,0x0000a49b},
{0x00000000,0x00000000,0x00001249,0x0000924a,
0x00009292,0x00009493,0x0000a49b,0x000124db},
{0x00000000,0x00000000,0x00001249,0x00009252,
0x00009492,0x0000a49b,0x0000a49b,0x000124db},
{0x00000000,0x00000000,0x00001249,0x00009292,
0x00009492,0x000124db,0x000124db,0x000126dc},
{0x00000000,0x00000000,0x00001249,0x00009292,
0x0000a493,0x000124db,0x000126dc,0x000126dc},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x000124db,0x000126dc,0x000136e4},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x000126dc,0x000136e4,0x000136e4},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0001249b,0x000126dc,0x000136e4,0x000136e4},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x000124db,0x000136e4,0x000136e4,0x0001b724},
{0x00000000,0x00000000,0x00009252,0x000124db,
0x000126dc,0x0001b724,0x0001b725,0x0001b925},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
},
{ /* version 2, passes 1 */
{0x00000000,0x00000000,0x00000049,0x00000049,
0x00000049,0x00000049,0x00000049,0x00000049},
{0x00000000,0x00000000,0x00000249,0x00000249,
0x00000249,0x00000249,0x0000024a,0x00000049},
{0x00000000,0x00000000,0x00001249,0x00000249,
0x0000124a,0x0000124a,0x00001252,0x00000049},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x0000124a,0x0000124a,0x00009292,0x0000024a},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x00009252,0x00009292,0x00009292,0x0000024a},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x00009252,0x00009292,0x0000a49b,0x0000024a},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x00009292,0x00009493,0x0000a49b,0x00001252},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x00009292,0x00009493,0x0000a49b,0x00001252},
{0x00000000,0x00000000,0x00001249,0x0000924a,
0x00009492,0x0000a49b,0x0000a49b,0x00001252},
{0x00000000,0x00000000,0x00001249,0x00009252,
0x00009492,0x0000a49b,0x0000a49b,0x00009292},
{0x00000000,0x00000000,0x00001249,0x00009292,
0x00009492,0x0000a49b,0x0000a49b,0x00009292},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x0000a49b,0x0000a49b,0x00009292},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x0000a49b,0x0000a49b,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x000124db,0x0000a49b,0x00009493},
{0x00000000,0x00000000,0x00009252,0x0000a49b,
0x0001249b,0x000126dc,0x000124db,0x0000a49b},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
}
},
{ /* version 3 */
{ /* version 3, passes 0 */
{0x00000000,0x00000000,0x00000249,0x00000249,
0x0000124a,0x0000124a,0x00009292,0x00009292},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x00009292,0x00009493,0x0000a49b,0x0000a49b},
{0x00000000,0x00000000,0x00001249,0x0000924a,
0x00009492,0x0000a49b,0x0000a49b,0x000124db},
{0x00000000,0x00000000,0x00001249,0x00009292,
0x00009492,0x000124db,0x000126dc,0x000126dc},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x000124db,0x000126dc,0x000126dc},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x000126dc,0x000136e4,0x000136e4},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x000126dc,0x000136e4,0x0001b724},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0001249b,0x000126dc,0x000136e4,0x0001b724},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0001249b,0x000126dc,0x000136e4,0x0001b724},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0001249b,0x000136e4,0x0001b725,0x0001b724},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x000124db,0x000136e4,0x0001b725,0x0001b925},
{0x00000000,0x00000000,0x00009292,0x0000a49b,
0x000126dc,0x000136e4,0x0001b92d,0x0001b925},
{0x00000000,0x00000000,0x00009292,0x0000a49b,
0x000126dc,0x0001b724,0x0001b92d,0x0001c92d},
{0x00000000,0x00000000,0x00009492,0x000124db,
0x000126dc,0x0001b724,0x0001c96e,0x0001c92d},
{0x00000000,0x00000000,0x0000a492,0x000126db,
0x000136e4,0x0001b925,0x00025bb6,0x00024b77},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
},
{ /* version 3, passes 1 */
{0x00000000,0x00000000,0x00001249,0x00000249,
0x0000124a,0x0000124a,0x00001252,0x00001252},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x00009252,0x00009292,0x00009292,0x00001252},
{0x00000000,0x00000000,0x00001249,0x0000924a,
0x00009492,0x00009493,0x0000a49b,0x00001252},
{0x00000000,0x00000000,0x00001249,0x00009252,
0x00009492,0x0000a49b,0x0000a49b,0x00009292},
{0x00000000,0x00000000,0x00001249,0x00009292,
0x00009492,0x0000a49b,0x0000a49b,0x00009292},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x0000a49b,0x000126dc,0x00009292},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x0000a49b,0x000126dc,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x0000a49b,0x000126dc,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x000124db,0x000126dc,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x000124db,0x000126dc,0x0000a49b},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0000a493,0x000124db,0x000126dc,0x0000a49b},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0001249b,0x000126dc,0x000126dc,0x0000a49b},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x000124db,0x000136e4,0x000126dc,0x000124db},
{0x00000000,0x00000000,0x00009492,0x0000a49b,
0x000136e4,0x000136e4,0x000126dc,0x000124db},
{0x00000000,0x00000000,0x0000a492,0x000124db,
0x0001b724,0x0001b724,0x000136e4,0x000126dc},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
}
},
{ /* version 4 */
{ /* version 4, passes 0 */
{0x00000000,0x00000000,0x00000049,0x00000049,
0x00000049,0x00000049,0x00000049,0x00000049},
{0x00000000,0x00000000,0x00000249,0x00000049,
0x00000249,0x00000249,0x0000024a,0x00000049},
{0x00000000,0x00000000,0x00000249,0x00000249,
0x0000124a,0x00009252,0x00001252,0x0000024a},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x00009252,0x00009292,0x00009493,0x00001252},
{0x00000000,0x00000000,0x00001249,0x0000924a,
0x00009292,0x00009493,0x00009493,0x00001252},
{0x00000000,0x00000000,0x00001249,0x00009292,
0x00009492,0x0000a49b,0x0000a49b,0x00009292},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x000124db,0x000124db,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x000124db,0x000126dc,0x0000a49b},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x000124db,0x000126dc,0x0000a49b},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0001249b,0x000126dc,0x000126dc,0x000124db},
{0x00000000,0x00000000,0x00009252,0x00009493,
0x000124db,0x000136e4,0x000136e4,0x000126dc},
{0x00000000,0x00000000,0x00009252,0x0000a49b,
0x000124db,0x000136e4,0x000136e4,0x000126dc},
{0x00000000,0x00000000,0x00009292,0x0000a49b,
0x000126dc,0x000136e4,0x000136e4,0x000136e4},
{0x00000000,0x00000000,0x00009492,0x0000a49b,
0x000126dc,0x0001b724,0x0001b725,0x0001b724},
{0x00000000,0x00000000,0x0000a492,0x000124db,
0x000136e4,0x0001b925,0x0001b92d,0x0001b925},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
},
{ /* version 4, passes 1 */
{0x00000000,0x00000000,0x00000249,0x00000049,
0x00000009,0x00000009,0x00000009,0x00000009},
{0x00000000,0x00000000,0x00000249,0x00000249,
0x00000049,0x00000049,0x00000009,0x00000009},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x0000124a,0x00000249,0x00000049,0x00000049},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x0000124a,0x0000124a,0x00000049,0x00000049},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x00009252,0x0000124a,0x0000024a,0x0000024a},
{0x00000000,0x00000000,0x00001249,0x0000924a,
0x00009252,0x0000124a,0x0000024a,0x0000024a},
{0x00000000,0x00000000,0x00001249,0x00009292,
0x00009492,0x00009252,0x00001252,0x00001252},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x00009292,0x00009292,0x00001252},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x00009292,0x00009292,0x00009292},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x00009493,0x00009493,0x00009292},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0000a493,0x0000a49b,0x00009493,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0000a493,0x0000a49b,0x0000a49b,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0001249b,0x000124db,0x0000a49b,0x0000a49b},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x000136e4,0x000126dc,0x000124db,0x0000a49b},
{0x00000000,0x00000000,0x00009252,0x000124db,
0x0001b724,0x000136e4,0x000126dc,0x000124db},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
}
},
{ /* version 5 */
{ /* version 5, passes 0 */
{0x00000000,0x00000000,0x00000249,0x00000249,
0x00000249,0x00000249,0x00001252,0x00001252},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x00009252,0x00009292,0x00009292,0x00001252},
{0x00000000,0x00000000,0x00001249,0x0000924a,
0x00009492,0x0000a49b,0x0000a49b,0x00009292},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x0000a49b,0x000124db,0x00009493},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x000124db,0x000126dc,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x000126dc,0x000126dc,0x0000a49b},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0001249b,0x000126dc,0x000136e4,0x000124db},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x000126dc,0x000136e4,0x000136e4,0x000126dc},
{0x00000000,0x00000000,0x00009292,0x0000a49b,
0x000126dc,0x000136e4,0x000136e4,0x000126dc},
{0x00000000,0x00000000,0x00009292,0x0000a49b,
0x000126dc,0x0001b724,0x0001b725,0x000136e4},
{0x00000000,0x00000000,0x00009292,0x0000a49b,
0x000136e4,0x0001b724,0x0001b92d,0x0001b724},
{0x00000000,0x00000000,0x00009492,0x0000a49b,
0x000136e4,0x0001b724,0x0001b92d,0x0001b724},
{0x00000000,0x00000000,0x00009492,0x000124db,
0x000136e4,0x0001b925,0x0001c96e,0x0001b925},
{0x00000000,0x00000000,0x00009492,0x000124db,
0x0001b724,0x0001b925,0x0001c96e,0x0001c92d},
{0x00000000,0x00000000,0x0000a492,0x000126db,
0x0001c924,0x0002496d,0x00025bb6,0x00024b77},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
},
{ /* version 5, passes 1 */
{0x00000000,0x00000000,0x00001249,0x00000249,
0x00000249,0x00000249,0x0000024a,0x0000024a},
{0x00000000,0x00000000,0x00001249,0x00001249,
0x0000124a,0x0000124a,0x0000024a,0x0000024a},
{0x00000000,0x00000000,0x00001249,0x0000924a,
0x00009252,0x00009252,0x0000024a,0x0000024a},
{0x00000000,0x00000000,0x00001249,0x00009292,
0x00009492,0x0000a49b,0x00001252,0x00001252},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x0000a49b,0x00001252,0x00001252},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x0000a49b,0x00009292,0x00001252},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0000a493,0x0000a49b,0x00009292,0x00009292},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0000a493,0x0000a49b,0x00009493,0x00009292},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0001249b,0x000124db,0x00009493,0x00009292},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0001249b,0x000124db,0x00009493,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x000124db,0x000124db,0x0000a49b,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x000124db,
0x000126dc,0x000126dc,0x0000a49b,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x000124db,
0x000136e4,0x000126dc,0x000124db,0x0000a49b},
{0x00000000,0x00000000,0x00009292,0x000124db,
0x000136e4,0x000126dc,0x000124db,0x0000a49b},
{0x00000000,0x00000000,0x00009492,0x000126db,
0x0001b724,0x000136e4,0x000126dc,0x000124db},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
}
},
{ /* version 6 */
{ /* version 6, passes 0 */
{0x00000000,0x00000000,0x00001249,0x00001249,
0x00009252,0x00009292,0x00009493,0x00009493},
{0x00000000,0x00000000,0x00001249,0x00009292,
0x0000a493,0x0000a49b,0x0000a49b,0x00009493},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x000124db,0x000124db,0x0000a49b},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x000126dc,0x000126dc,0x0000a49b},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0001249b,0x000126dc,0x000136e4,0x000124db},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x000126dc,0x000136e4,0x000136e4,0x000126dc},
{0x00000000,0x00000000,0x00009292,0x0000a49b,
0x000126dc,0x0001b724,0x0001b725,0x000126dc},
{0x00000000,0x00000000,0x00009292,0x0000a49b,
0x000136e4,0x0001b724,0x0001b92d,0x000136e4},
{0x00000000,0x00000000,0x00009492,0x0000a49b,
0x000136e4,0x0001b724,0x0001b92d,0x0001b724},
{0x00000000,0x00000000,0x00009492,0x000124db,
0x000136e4,0x0001b724,0x0001b92d,0x0001b724},
{0x00000000,0x00000000,0x00009492,0x000124db,
0x000136e4,0x0001b925,0x0001b92d,0x0001b925},
{0x00000000,0x00000000,0x00009492,0x000124db,
0x0001b724,0x0001b925,0x0001c96e,0x0001c92d},
{0x00000000,0x00000000,0x0000a492,0x000124db,
0x0001b724,0x0001c92d,0x0001c96e,0x0001c92d},
{0x00000000,0x00000000,0x0000a492,0x000124db,
0x0001b724,0x0001c92d,0x00024b76,0x0002496e},
{0x00000000,0x00000000,0x00012492,0x000126db,
0x0001c924,0x00024b6d,0x0002ddb6,0x00025bbf},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
},
{ /* version 6, passes 1 */
{0x00000000,0x00000000,0x00001249,0x00001249,
0x0000124a,0x0000124a,0x00001252,0x00001252},
{0x00000000,0x00000000,0x00001249,0x00009292,
0x00009492,0x00009252,0x00001252,0x00001252},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x0000a493,0x00009292,0x00001252,0x00001252},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0000a493,0x0000a49b,0x00009292,0x00009292},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0000a493,0x0000a49b,0x00009292,0x00009292},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0001249b,0x0000a49b,0x00009493,0x00009292},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x000124db,0x000124db,0x00009493,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x000124db,0x000124db,0x0000a49b,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x000124db,
0x000126dc,0x000124db,0x0000a49b,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x000124db,
0x000126dc,0x000126dc,0x0000a49b,0x0000a49b},
{0x00000000,0x00000000,0x0000924a,0x000124db,
0x000136e4,0x000126dc,0x000124db,0x0000a49b},
{0x00000000,0x00000000,0x00009492,0x000126db,
0x000136e4,0x000126dc,0x000124db,0x0000a49b},
{0x00000000,0x00000000,0x00009492,0x000126db,
0x0001b724,0x000136e4,0x000126dc,0x000124db},
{0x00000000,0x00000000,0x00009492,0x000126db,
0x0001b724,0x000136e4,0x000126dc,0x000124db},
{0x00000000,0x00000000,0x0000a492,0x000136db,
0x0001c924,0x0001b724,0x000136e4,0x000126dc},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
}
},
{ /* version 7 */
{ /* version 7, passes 0 */
{0x00000000,0x00000000,0x00001249,0x00001249,
0x00009252,0x00009292,0x00009493,0x00009493},
{0x00000000,0x00000000,0x00001249,0x00009493,
0x0000a493,0x000124db,0x000126dc,0x00009493},
{0x00000000,0x00000000,0x00001249,0x0000a49b,
0x0001249b,0x000126dc,0x000126dc,0x0000a49b},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0001249b,0x000126dc,0x000136e4,0x0000a49b},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x000126dc,0x000136e4,0x0001b725,0x000124db},
{0x00000000,0x00000000,0x00009292,0x0000a49b,
0x000136e4,0x0001b724,0x0001b725,0x000126dc},
{0x00000000,0x00000000,0x00009292,0x000124db,
0x000136e4,0x0001b724,0x0001b725,0x000126dc},
{0x00000000,0x00000000,0x00009492,0x000124db,
0x000136e4,0x0001b724,0x0001c96e,0x000136e4},
{0x00000000,0x00000000,0x00009492,0x000124db,
0x000136e4,0x0001c92d,0x0001c96e,0x0001b724},
{0x00000000,0x00000000,0x0000a492,0x000124db,
0x000136e4,0x0001c92d,0x0001c96e,0x0001b724},
{0x00000000,0x00000000,0x0000a492,0x000124db,
0x0001b724,0x0001c92d,0x0001c96e,0x0001b925},
{0x00000000,0x00000000,0x0000a492,0x000126db,
0x0001b724,0x0001c92d,0x00024b76,0x0001c92d},
{0x00000000,0x00000000,0x0000a492,0x000126db,
0x0001b924,0x0001c92d,0x00024b76,0x0001c92d},
{0x00000000,0x00000000,0x0000a492,0x000126db,
0x0001b924,0x0001c92d,0x00024b76,0x0002496e},
{0x00000000,0x00000000,0x00012492,0x000136db,
0x00024924,0x00024b6d,0x0002ddb6,0x00025bbf},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
},
{ /* version 7, passes 1 */
{0x00000000,0x00000000,0x00001249,0x00001249,
0x0000124a,0x0000124a,0x00001252,0x00001252},
{0x00000000,0x00000000,0x0000924a,0x00009493,
0x00009492,0x00009292,0x00001252,0x00001252},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0000a493,0x0000a49b,0x00001252,0x00001252},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0000a493,0x0000a49b,0x00009292,0x00009292},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x0000a493,0x0000a49b,0x00009292,0x00009292},
{0x00000000,0x00000000,0x0000924a,0x0000a49b,
0x000126dc,0x0000a49b,0x00009493,0x00009292},
{0x00000000,0x00000000,0x0000924a,0x000124db,
0x000126dc,0x000124db,0x00009493,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x000124db,
0x000136e4,0x000124db,0x0000a49b,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x000136db,
0x0001b724,0x000124db,0x0000a49b,0x00009493},
{0x00000000,0x00000000,0x0000924a,0x000136db,
0x0001b724,0x000126dc,0x0000a49b,0x0000a49b},
{0x00000000,0x00000000,0x00009292,0x000136db,
0x0001b724,0x000126dc,0x000124db,0x0000a49b},
{0x00000000,0x00000000,0x00009492,0x000136db,
0x0001b724,0x000126dc,0x000124db,0x0000a49b},
{0x00000000,0x00000000,0x0000a492,0x000136db,
0x0001b724,0x000136e4,0x000126dc,0x000124db},
{0x00000000,0x00000000,0x0000a492,0x000136db,
0x0001b724,0x000136e4,0x000126dc,0x000124db},
{0x00000000,0x00000000,0x00012492,0x0001b6db,
0x0001c924,0x0001b724,0x000136e4,0x000126dc},
{0x00000000,0x00000000,0x00000000,0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000}
}
}
};
| gpl-2.0 |
ziqifan16/selective-cache | drivers/misc/cb710/debug.c | 14063 | 3344 | /*
* cb710/debug.c
*
* Copyright by Michał Mirosław, 2008-2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/cb710.h>
#include <linux/kernel.h>
#include <linux/module.h>
#define CB710_REG_COUNT 0x80
static const u16 allow[CB710_REG_COUNT/16] = {
0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF,
0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF,
};
static const char *const prefix[ARRAY_SIZE(allow)] = {
"MMC", "MMC", "MMC", "MMC",
"MS?", "MS?", "SM?", "SM?"
};
static inline int allow_reg_read(unsigned block, unsigned offset, unsigned bits)
{
unsigned mask = (1 << bits/8) - 1;
offset *= bits/8;
return ((allow[block] >> offset) & mask) == mask;
}
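/* Worked example of the check above: a 16-bit read of register index 1 in
* block 0 covers byte offsets 2-3, and bits 2-3 of allow[0] (0xFFF0) are
* clear, so the read is refused; register index 2 (byte offsets 4-5) is
* permitted. */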
#define CB710_READ_REGS_TEMPLATE(t) \
static void cb710_read_regs_##t(void __iomem *iobase, \
u##t *reg, unsigned select) \
{ \
unsigned i, j; \
\
for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \
if (!(select & (1 << i))) \
continue; \
\
for (j = 0; j < 0x10/(t/8); ++j) { \
if (!allow_reg_read(i, j, t)) \
continue; \
reg[j] = ioread##t(iobase \
+ (i << 4) + (j * (t/8))); \
} \
} \
}
static const char cb710_regf_8[] = "%02X";
static const char cb710_regf_16[] = "%04X";
static const char cb710_regf_32[] = "%08X";
static const char cb710_xes[] = "xxxxxxxx";
#define CB710_DUMP_REGS_TEMPLATE(t) \
static void cb710_dump_regs_##t(struct device *dev, \
const u##t *reg, unsigned select) \
{ \
const char *const xp = &cb710_xes[8 - t/4]; \
const char *const format = cb710_regf_##t; \
\
char msg[100], *p; \
unsigned i, j; \
\
for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \
if (!(select & (1 << i))) \
continue; \
p = msg; \
for (j = 0; j < 0x10/(t/8); ++j) { \
*p++ = ' '; \
if (j == 8/(t/8)) \
*p++ = ' '; \
if (allow_reg_read(i, j, t)) \
p += sprintf(p, format, reg[j]); \
else \
p += sprintf(p, "%s", xp); \
} \
dev_dbg(dev, "%s 0x%02X %s\n", prefix[i], i << 4, msg); \
} \
}
#define CB710_READ_AND_DUMP_REGS_TEMPLATE(t) \
static void cb710_read_and_dump_regs_##t(struct cb710_chip *chip, \
unsigned select) \
{ \
u##t regs[CB710_REG_COUNT/sizeof(u##t)]; \
\
memset(&regs, 0, sizeof(regs)); \
cb710_read_regs_##t(chip->iobase, regs, select); \
cb710_dump_regs_##t(cb710_chip_dev(chip), regs, select); \
}
#define CB710_REG_ACCESS_TEMPLATES(t) \
CB710_READ_REGS_TEMPLATE(t) \
CB710_DUMP_REGS_TEMPLATE(t) \
CB710_READ_AND_DUMP_REGS_TEMPLATE(t)
CB710_REG_ACCESS_TEMPLATES(8)
CB710_REG_ACCESS_TEMPLATES(16)
CB710_REG_ACCESS_TEMPLATES(32)
void cb710_dump_regs(struct cb710_chip *chip, unsigned select)
{
if (!(select & CB710_DUMP_REGS_MASK))
select = CB710_DUMP_REGS_ALL;
if (!(select & CB710_DUMP_ACCESS_MASK))
select |= CB710_DUMP_ACCESS_8;
if (select & CB710_DUMP_ACCESS_32)
cb710_read_and_dump_regs_32(chip, select);
if (select & CB710_DUMP_ACCESS_16)
cb710_read_and_dump_regs_16(chip, select);
if (select & CB710_DUMP_ACCESS_8)
cb710_read_and_dump_regs_8(chip, select);
}
EXPORT_SYMBOL_GPL(cb710_dump_regs);
| gpl-2.0 |
bestmjh47/android_kernel_kttech_e100_kk | drivers/media/video/msm/mt9e013.c | 240 | 29405 | /* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/i2c.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <media/msm_camera.h>
#include <mach/gpio.h>
#include <mach/camera.h>
#include "mt9e013.h"
/*=============================================================
SENSOR REGISTER DEFINES
==============================================================*/
#define REG_GROUPED_PARAMETER_HOLD 0x0104
#define GROUPED_PARAMETER_HOLD_OFF 0x00
#define GROUPED_PARAMETER_HOLD 0x01
/* Integration Time */
#define REG_COARSE_INTEGRATION_TIME 0x3012
/* Gain */
#define REG_GLOBAL_GAIN 0x305E
/* PLL registers */
#define REG_FRAME_LENGTH_LINES 0x0340
/* Test Pattern */
#define REG_TEST_PATTERN_MODE 0x0601
#define REG_VCM_NEW_CODE 0x30F2
/*============================================================================
TYPE DECLARATIONS
============================================================================*/
/* 16bit address - 8 bit context register structure */
#define Q8 0x00000100
#define Q10 0x00000400
#define MT9E013_MASTER_CLK_RATE 24000000
/* AF Total steps parameters */
#define MT9E013_TOTAL_STEPS_NEAR_TO_FAR 32
uint16_t mt9e013_step_position_table[MT9E013_TOTAL_STEPS_NEAR_TO_FAR+1];
uint16_t mt9e013_nl_region_boundary1;
uint16_t mt9e013_nl_region_code_per_step1;
uint16_t mt9e013_l_region_code_per_step = 4;
uint16_t mt9e013_damping_threshold = 10;
uint16_t mt9e013_sw_damping_time_wait = 1;
struct mt9e013_work_t {
struct work_struct work;
};
static struct mt9e013_work_t *mt9e013_sensorw;
static struct i2c_client *mt9e013_client;
struct mt9e013_ctrl_t {
const struct msm_camera_sensor_info *sensordata;
uint32_t sensormode;
uint32_t fps_divider;/* init to 1 * 0x00000400 */
uint32_t pict_fps_divider;/* init to 1 * 0x00000400 */
uint16_t fps;
uint16_t curr_lens_pos;
uint16_t curr_step_pos;
uint16_t my_reg_gain;
uint32_t my_reg_line_count;
uint16_t total_lines_per_frame;
enum mt9e013_resolution_t prev_res;
enum mt9e013_resolution_t pict_res;
enum mt9e013_resolution_t curr_res;
enum mt9e013_test_mode_t set_test;
};
static bool CSI_CONFIG;
static struct mt9e013_ctrl_t *mt9e013_ctrl;
static DECLARE_WAIT_QUEUE_HEAD(mt9e013_wait_queue);
DEFINE_MUTEX(mt9e013_mut);
static int cam_debug_init(void);
static struct dentry *debugfs_base;
/*=============================================================*/
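/* Low-level I2C helpers: a register read is a two-message transfer (write
 * the 16-bit register address, then read the data back from the sensor). */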
static int mt9e013_i2c_rxdata(unsigned short saddr,
unsigned char *rxdata, int length)
{
struct i2c_msg msgs[] = {
{
.addr = saddr,
.flags = 0,
.len = 2,
.buf = rxdata,
},
{
.addr = saddr,
.flags = I2C_M_RD,
.len = 2,
.buf = rxdata,
},
};
if (i2c_transfer(mt9e013_client->adapter, msgs, 2) < 0) {
CDBG("mt9e013_i2c_rxdata faild 0x%x\n", saddr);
return -EIO;
}
return 0;
}
static int32_t mt9e013_i2c_txdata(unsigned short saddr,
unsigned char *txdata, int length)
{
struct i2c_msg msg[] = {
{
.addr = saddr,
.flags = 0,
.len = length,
.buf = txdata,
},
};
if (i2c_transfer(mt9e013_client->adapter, msg, 1) < 0) {
CDBG("mt9e013_i2c_txdata faild 0x%x\n", saddr);
return -EIO;
}
return 0;
}
static int32_t mt9e013_i2c_read(unsigned short raddr,
unsigned short *rdata, int rlen)
{
int32_t rc = 0;
unsigned char buf[2];
if (!rdata)
return -EIO;
memset(buf, 0, sizeof(buf));
buf[0] = (raddr & 0xFF00) >> 8;
buf[1] = (raddr & 0x00FF);
rc = mt9e013_i2c_rxdata(mt9e013_client->addr<<1, buf, rlen);
if (rc < 0) {
CDBG("mt9e013_i2c_read 0x%x failed!\n", raddr);
return rc;
}
*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
CDBG("mt9e013_i2c_read 0x%x val = 0x%x!\n", raddr, *rdata);
return rc;
}
static int32_t mt9e013_i2c_write_w_sensor(unsigned short waddr, uint16_t wdata)
{
int32_t rc = -EFAULT;
unsigned char buf[4];
memset(buf, 0, sizeof(buf));
buf[0] = (waddr & 0xFF00) >> 8;
buf[1] = (waddr & 0x00FF);
buf[2] = (wdata & 0xFF00) >> 8;
buf[3] = (wdata & 0x00FF);
CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, wdata);
rc = mt9e013_i2c_txdata(mt9e013_client->addr<<1, buf, 4);
if (rc < 0) {
CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
waddr, wdata);
}
return rc;
}
static int32_t mt9e013_i2c_write_b_sensor(unsigned short waddr, uint8_t bdata)
{
int32_t rc = -EFAULT;
unsigned char buf[3];
memset(buf, 0, sizeof(buf));
buf[0] = (waddr & 0xFF00) >> 8;
buf[1] = (waddr & 0x00FF);
buf[2] = bdata;
CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata);
rc = mt9e013_i2c_txdata(mt9e013_client->addr<<1, buf, 3);
if (rc < 0) {
CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
waddr, bdata);
}
return rc;
}
static int32_t mt9e013_i2c_write_w_table(struct mt9e013_i2c_reg_conf const
*reg_conf_tbl, int num)
{
int i;
int32_t rc = -EIO;
for (i = 0; i < num; i++) {
rc = mt9e013_i2c_write_w_sensor(reg_conf_tbl->waddr,
reg_conf_tbl->wdata);
if (rc < 0)
break;
reg_conf_tbl++;
}
return rc;
}
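/* Grouped parameter hold brackets gain/integration/frame-length updates so
 * the sensor applies them together on a frame boundary. */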
static void mt9e013_group_hold_on(void)
{
mt9e013_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
GROUPED_PARAMETER_HOLD);
}
static void mt9e013_group_hold_off(void)
{
mt9e013_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
GROUPED_PARAMETER_HOLD_OFF);
}
static void mt9e013_start_stream(void)
{
mt9e013_i2c_write_w_sensor(0x301A, 0x8250);
mt9e013_i2c_write_w_sensor(0x301A, 0x8650);
mt9e013_i2c_write_w_sensor(0x301A, 0x8658);
mt9e013_i2c_write_b_sensor(0x0104, 0x00);
mt9e013_i2c_write_w_sensor(0x301A, 0x065C);
}
static void mt9e013_stop_stream(void)
{
mt9e013_i2c_write_w_sensor(0x301A, 0x0058);
mt9e013_i2c_write_w_sensor(0x301A, 0x0050);
mt9e013_i2c_write_b_sensor(0x0104, 0x01);
}
static void mt9e013_get_pict_fps(uint16_t fps, uint16_t *pfps)
{
/* input fps is preview fps in Q8 format */
uint32_t divider, d1, d2;
d1 = mt9e013_regs.reg_prev[E013_FRAME_LENGTH_LINES].wdata
* 0x00000400/
mt9e013_regs.reg_snap[E013_FRAME_LENGTH_LINES].wdata;
d2 = mt9e013_regs.reg_prev[E013_LINE_LENGTH_PCK].wdata
* 0x00000400/
mt9e013_regs.reg_snap[E013_LINE_LENGTH_PCK].wdata;
divider = d1 * d2 / 0x400;
/*Verify PCLK settings and frame sizes.*/
*pfps = (uint16_t) (fps * divider / 0x400);
/* 2 is the ratio of no.of snapshot channels
to number of preview channels */
}
static uint16_t mt9e013_get_prev_lines_pf(void)
{
if (mt9e013_ctrl->prev_res == QTR_SIZE)
return mt9e013_regs.reg_prev[E013_FRAME_LENGTH_LINES].wdata;
else if (mt9e013_ctrl->prev_res == FULL_SIZE)
return mt9e013_regs.reg_snap[E013_FRAME_LENGTH_LINES].wdata;
else if (mt9e013_ctrl->prev_res == HFR_60FPS)
return mt9e013_regs.reg_60fps[E013_FRAME_LENGTH_LINES].wdata;
else if (mt9e013_ctrl->prev_res == HFR_90FPS)
return mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata;
else
return mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata;
}
static uint16_t mt9e013_get_prev_pixels_pl(void)
{
if (mt9e013_ctrl->prev_res == QTR_SIZE)
return mt9e013_regs.reg_prev[E013_LINE_LENGTH_PCK].wdata;
else if (mt9e013_ctrl->prev_res == FULL_SIZE)
return mt9e013_regs.reg_snap[E013_LINE_LENGTH_PCK].wdata;
else if (mt9e013_ctrl->prev_res == HFR_60FPS)
return mt9e013_regs.reg_60fps[E013_LINE_LENGTH_PCK].wdata;
else if (mt9e013_ctrl->prev_res == HFR_90FPS)
return mt9e013_regs.reg_120fps[E013_LINE_LENGTH_PCK].wdata;
else
return mt9e013_regs.reg_120fps[E013_LINE_LENGTH_PCK].wdata;
}
static uint16_t mt9e013_get_pict_lines_pf(void)
{
if (mt9e013_ctrl->pict_res == QTR_SIZE)
return mt9e013_regs.reg_prev[E013_FRAME_LENGTH_LINES].wdata;
else if (mt9e013_ctrl->pict_res == FULL_SIZE)
return mt9e013_regs.reg_snap[E013_FRAME_LENGTH_LINES].wdata;
else if (mt9e013_ctrl->pict_res == HFR_60FPS)
return mt9e013_regs.reg_60fps[E013_FRAME_LENGTH_LINES].wdata;
else if (mt9e013_ctrl->pict_res == HFR_90FPS)
return mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata;
else
return mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata;
}
static uint16_t mt9e013_get_pict_pixels_pl(void)
{
if (mt9e013_ctrl->pict_res == QTR_SIZE)
return mt9e013_regs.reg_prev[E013_LINE_LENGTH_PCK].wdata;
else if (mt9e013_ctrl->pict_res == FULL_SIZE)
return mt9e013_regs.reg_snap[E013_LINE_LENGTH_PCK].wdata;
else if (mt9e013_ctrl->pict_res == HFR_60FPS)
return mt9e013_regs.reg_60fps[E013_LINE_LENGTH_PCK].wdata;
else if (mt9e013_ctrl->pict_res == HFR_90FPS)
return mt9e013_regs.reg_120fps[E013_LINE_LENGTH_PCK].wdata;
else
return mt9e013_regs.reg_120fps[E013_LINE_LENGTH_PCK].wdata;
}
static uint32_t mt9e013_get_pict_max_exp_lc(void)
{
if (mt9e013_ctrl->pict_res == QTR_SIZE)
return mt9e013_regs.reg_prev[E013_FRAME_LENGTH_LINES].wdata
* 24;
else if (mt9e013_ctrl->pict_res == FULL_SIZE)
return mt9e013_regs.reg_snap[E013_FRAME_LENGTH_LINES].wdata
* 24;
else if (mt9e013_ctrl->pict_res == HFR_60FPS)
return mt9e013_regs.reg_60fps[E013_FRAME_LENGTH_LINES].wdata
* 24;
else if (mt9e013_ctrl->pict_res == HFR_90FPS)
return mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata
* 24;
else
return mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata
* 24;
}
static int32_t mt9e013_set_fps(struct fps_cfg *fps)
{
uint16_t total_lines_per_frame;
int32_t rc = 0;
if (mt9e013_ctrl->curr_res == QTR_SIZE)
total_lines_per_frame =
mt9e013_regs.reg_prev[E013_FRAME_LENGTH_LINES].wdata;
else if (mt9e013_ctrl->curr_res == FULL_SIZE)
total_lines_per_frame =
mt9e013_regs.reg_snap[E013_FRAME_LENGTH_LINES].wdata;
else if (mt9e013_ctrl->curr_res == HFR_60FPS)
total_lines_per_frame =
mt9e013_regs.reg_60fps[E013_FRAME_LENGTH_LINES].wdata;
else if (mt9e013_ctrl->curr_res == HFR_90FPS)
total_lines_per_frame =
mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata;
else
total_lines_per_frame =
mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata;
mt9e013_ctrl->fps_divider = fps->fps_div;
mt9e013_ctrl->pict_fps_divider = fps->pict_fps_div;
if (mt9e013_ctrl->curr_res == FULL_SIZE) {
total_lines_per_frame = (uint16_t)
(total_lines_per_frame * mt9e013_ctrl->pict_fps_divider/0x400);
} else {
total_lines_per_frame = (uint16_t)
(total_lines_per_frame * mt9e013_ctrl->fps_divider/0x400);
}
mt9e013_group_hold_on();
rc = mt9e013_i2c_write_w_sensor(REG_FRAME_LENGTH_LINES,
total_lines_per_frame);
mt9e013_group_hold_off();
return rc;
}
static int32_t mt9e013_write_exp_gain(uint16_t gain, uint32_t line)
{
uint16_t max_legal_gain = 0xE7F;
int32_t rc = 0;
if (gain > max_legal_gain) {
CDBG("Max legal gain Line:%d\n", __LINE__);
gain = max_legal_gain;
}
if (mt9e013_ctrl->curr_res != FULL_SIZE) {
mt9e013_ctrl->my_reg_gain = gain;
mt9e013_ctrl->my_reg_line_count = (uint16_t) line;
line = (uint32_t) (line * mt9e013_ctrl->fps_divider /
0x00000400);
} else {
line = (uint32_t) (line * mt9e013_ctrl->pict_fps_divider /
0x00000400);
}
gain |= 0x1000;
mt9e013_group_hold_on();
rc = mt9e013_i2c_write_w_sensor(REG_GLOBAL_GAIN, gain);
rc = mt9e013_i2c_write_w_sensor(REG_COARSE_INTEGRATION_TIME, line);
mt9e013_group_hold_off();
return rc;
}
static int32_t mt9e013_set_pict_exp_gain(uint16_t gain, uint32_t line)
{
int32_t rc = 0;
rc = mt9e013_write_exp_gain(gain, line);
mt9e013_i2c_write_w_sensor(0x301A, 0x065C|0x2);
return rc;
}
#define DIV_CEIL(x, y) (((x) / (y)) + (((x) % (y)) ? 1 : 0))
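/* Move the VCM lens toward the requested AF step in small damped sub-steps;
 * large moves toward infinity use more sub-steps and a longer settle delay. */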
static int32_t mt9e013_move_focus(int direction,
int32_t num_steps)
{
int16_t step_direction, dest_lens_position, dest_step_position;
int16_t target_dist, small_step, next_lens_position;
if (direction == MOVE_NEAR)
step_direction = 1;
else
step_direction = -1;
dest_step_position = mt9e013_ctrl->curr_step_pos
+ (step_direction * num_steps);
if (dest_step_position < 0)
dest_step_position = 0;
else if (dest_step_position > MT9E013_TOTAL_STEPS_NEAR_TO_FAR)
dest_step_position = MT9E013_TOTAL_STEPS_NEAR_TO_FAR;
if (dest_step_position == mt9e013_ctrl->curr_step_pos)
return 0;
dest_lens_position = mt9e013_step_position_table[dest_step_position];
target_dist = step_direction *
(dest_lens_position - mt9e013_ctrl->curr_lens_pos);
if (step_direction < 0 && (target_dist >=
mt9e013_step_position_table[mt9e013_damping_threshold])) {
small_step = DIV_CEIL(target_dist, 10);
mt9e013_sw_damping_time_wait = 10;
} else {
small_step = DIV_CEIL(target_dist, 4);
mt9e013_sw_damping_time_wait = 4;
}
for (next_lens_position = mt9e013_ctrl->curr_lens_pos
+ (step_direction * small_step);
(step_direction * next_lens_position) <=
(step_direction * dest_lens_position);
next_lens_position += (step_direction * small_step)) {
mt9e013_i2c_write_w_sensor(REG_VCM_NEW_CODE,
next_lens_position);
mt9e013_ctrl->curr_lens_pos = next_lens_position;
usleep(mt9e013_sw_damping_time_wait*50);
}
if (mt9e013_ctrl->curr_lens_pos != dest_lens_position) {
mt9e013_i2c_write_w_sensor(REG_VCM_NEW_CODE,
dest_lens_position);
usleep(mt9e013_sw_damping_time_wait*50);
}
mt9e013_ctrl->curr_lens_pos = dest_lens_position;
mt9e013_ctrl->curr_step_pos = dest_step_position;
return 0;
}
static int32_t mt9e013_set_default_focus(uint8_t af_step)
{
int32_t rc = 0;
if (mt9e013_ctrl->curr_step_pos != 0) {
rc = mt9e013_move_focus(MOVE_FAR,
mt9e013_ctrl->curr_step_pos);
} else {
mt9e013_i2c_write_w_sensor(REG_VCM_NEW_CODE, 0x00);
}
mt9e013_ctrl->curr_lens_pos = 0;
mt9e013_ctrl->curr_step_pos = 0;
return rc;
}
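/* Build the AF step -> VCM code table: an optional non-linear region near
 * step 0 followed by a linear region, clamped to the 8-bit VCM code range. */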
static void mt9e013_init_focus(void)
{
uint8_t i;
mt9e013_step_position_table[0] = 0;
for (i = 1; i <= MT9E013_TOTAL_STEPS_NEAR_TO_FAR; i++) {
if (i <= mt9e013_nl_region_boundary1) {
mt9e013_step_position_table[i] =
mt9e013_step_position_table[i-1]
+ mt9e013_nl_region_code_per_step1;
} else {
mt9e013_step_position_table[i] =
mt9e013_step_position_table[i-1]
+ mt9e013_l_region_code_per_step;
}
if (mt9e013_step_position_table[i] > 255)
mt9e013_step_position_table[i] = 255;
}
}
static int32_t mt9e013_test(enum mt9e013_test_mode_t mo)
{
int32_t rc = 0;
if (mo == TEST_OFF)
return rc;
else {
/* REG_0x30D8[4] is TESBYPEN: 0: Normal Operation,
1: Bypass Signal Processing
REG_0x30D8[5] is EBDMASK: 0:
Output Embedded data, 1: No output embedded data */
if (mt9e013_i2c_write_b_sensor(REG_TEST_PATTERN_MODE,
(uint8_t) mo) < 0) {
return rc;
}
}
return rc;
}
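/* REG_INIT loads the one-time MIPI and recommended settings; UPDATE_PERIODIC
 * loads the per-resolution PLL/timing tables, configures the CSI receiver
 * once and restarts streaming. */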
static int32_t mt9e013_sensor_setting(int update_type, int rt)
{
int32_t rc = 0;
struct msm_camera_csi_params mt9e013_csi_params;
uint8_t stored_af_step = 0;
CDBG("sensor_settings\n");
stored_af_step = mt9e013_ctrl->curr_step_pos;
mt9e013_set_default_focus(0);
mt9e013_stop_stream();
msleep(15);
if (update_type == REG_INIT) {
mt9e013_i2c_write_w_table(mt9e013_regs.reg_mipi,
mt9e013_regs.reg_mipi_size);
mt9e013_i2c_write_w_table(mt9e013_regs.rec_settings,
mt9e013_regs.rec_size);
cam_debug_init();
CSI_CONFIG = 0;
} else if (update_type == UPDATE_PERIODIC) {
if (rt == QTR_SIZE) {
mt9e013_i2c_write_w_table(mt9e013_regs.reg_pll,
mt9e013_regs.reg_pll_size);
mt9e013_i2c_write_w_table(mt9e013_regs.reg_prev,
mt9e013_regs.reg_prev_size);
} else if (rt == FULL_SIZE) {
mt9e013_i2c_write_w_table(mt9e013_regs.reg_pll,
mt9e013_regs.reg_pll_size);
mt9e013_i2c_write_w_table(mt9e013_regs.reg_snap,
mt9e013_regs.reg_snap_size);
} else if (rt == HFR_60FPS) {
mt9e013_i2c_write_w_table(mt9e013_regs.reg_pll_120fps,
mt9e013_regs.reg_pll_120fps_size);
mt9e013_i2c_write_w_sensor(0x0306, 0x0029);
mt9e013_i2c_write_w_table(mt9e013_regs.reg_120fps,
mt9e013_regs.reg_120fps_size);
} else if (rt == HFR_90FPS) {
mt9e013_i2c_write_w_table(mt9e013_regs.reg_pll_120fps,
mt9e013_regs.reg_pll_120fps_size);
mt9e013_i2c_write_w_sensor(0x0306, 0x003D);
mt9e013_i2c_write_w_table(mt9e013_regs.reg_120fps,
mt9e013_regs.reg_120fps_size);
} else if (rt == HFR_120FPS) {
msm_camio_vfe_clk_rate_set(266667000);
mt9e013_i2c_write_w_table(mt9e013_regs.reg_pll_120fps,
mt9e013_regs.reg_pll_120fps_size);
mt9e013_i2c_write_w_table(mt9e013_regs.reg_120fps,
mt9e013_regs.reg_120fps_size);
}
if (!CSI_CONFIG) {
msm_camio_vfe_clk_rate_set(192000000);
mt9e013_csi_params.data_format = CSI_10BIT;
mt9e013_csi_params.lane_cnt = 2;
mt9e013_csi_params.lane_assign = 0xe4;
mt9e013_csi_params.dpcm_scheme = 0;
mt9e013_csi_params.settle_cnt = 0x18;
rc = msm_camio_csi_config(&mt9e013_csi_params);
msleep(10);
CSI_CONFIG = 1;
}
mt9e013_move_focus(MOVE_NEAR, stored_af_step);
mt9e013_start_stream();
}
return rc;
}
static int32_t mt9e013_video_config(int mode)
{
int32_t rc = 0;
CDBG("video config\n");
/* change sensor resolution if needed */
if (mt9e013_sensor_setting(UPDATE_PERIODIC,
mt9e013_ctrl->prev_res) < 0)
return rc;
if (mt9e013_ctrl->set_test) {
if (mt9e013_test(mt9e013_ctrl->set_test) < 0)
return rc;
}
mt9e013_ctrl->curr_res = mt9e013_ctrl->prev_res;
mt9e013_ctrl->sensormode = mode;
return rc;
}
static int32_t mt9e013_snapshot_config(int mode)
{
int32_t rc = 0;
/*change sensor resolution if needed */
if (mt9e013_ctrl->curr_res != mt9e013_ctrl->pict_res) {
if (mt9e013_sensor_setting(UPDATE_PERIODIC,
mt9e013_ctrl->pict_res) < 0)
return rc;
}
mt9e013_ctrl->curr_res = mt9e013_ctrl->pict_res;
mt9e013_ctrl->sensormode = mode;
return rc;
} /*end of mt9e013_snapshot_config*/
static int32_t mt9e013_raw_snapshot_config(int mode)
{
int32_t rc = 0;
/* change sensor resolution if needed */
if (mt9e013_ctrl->curr_res != mt9e013_ctrl->pict_res) {
if (mt9e013_sensor_setting(UPDATE_PERIODIC,
mt9e013_ctrl->pict_res) < 0)
return rc;
}
mt9e013_ctrl->curr_res = mt9e013_ctrl->pict_res;
mt9e013_ctrl->sensormode = mode;
return rc;
} /*end of mt9e013_raw_snapshot_config*/
static int32_t mt9e013_set_sensor_mode(int mode,
int res)
{
int32_t rc = 0;
switch (mode) {
case SENSOR_PREVIEW_MODE:
case SENSOR_HFR_60FPS_MODE:
case SENSOR_HFR_90FPS_MODE:
case SENSOR_HFR_120FPS_MODE:
mt9e013_ctrl->prev_res = res;
rc = mt9e013_video_config(mode);
break;
case SENSOR_SNAPSHOT_MODE:
mt9e013_ctrl->pict_res = res;
rc = mt9e013_snapshot_config(mode);
break;
case SENSOR_RAW_SNAPSHOT_MODE:
mt9e013_ctrl->pict_res = res;
rc = mt9e013_raw_snapshot_config(mode);
break;
default:
rc = -EINVAL;
break;
}
return rc;
}
static int32_t mt9e013_power_down(void)
{
return 0;
}
static int mt9e013_probe_init_done(const struct msm_camera_sensor_info *data)
{
CDBG("probe done\n");
gpio_free(data->sensor_reset);
return 0;
}
static int mt9e013_probe_init_sensor(const struct msm_camera_sensor_info *data)
{
int32_t rc = 0;
uint16_t chipid = 0;
CDBG("%s: %d\n", __func__, __LINE__);
rc = gpio_request(data->sensor_reset, "mt9e013");
CDBG(" mt9e013_probe_init_sensor\n");
if (!rc) {
CDBG("sensor_reset = %d\n", rc);
gpio_direction_output(data->sensor_reset, 0);
msleep(10);
gpio_set_value_cansleep(data->sensor_reset, 1);
msleep(10);
} else {
goto init_probe_done;
}
CDBG(" mt9e013_probe_init_sensor is called\n");
rc = mt9e013_i2c_read(0x0000, &chipid, 2);
CDBG("ID: %d\n", chipid);
/* 4. Compare sensor ID to MT9E013 ID: */
if (chipid != 0x4B00) {
rc = -ENODEV;
CDBG("mt9e013_probe_init_sensor fail chip id doesnot match\n");
goto init_probe_fail;
}
mt9e013_ctrl = kzalloc(sizeof(struct mt9e013_ctrl_t), GFP_KERNEL);
if (!mt9e013_ctrl) {
CDBG("mt9e013_init failed!\n");
rc = -ENOMEM;
goto init_probe_fail;
}
mt9e013_ctrl->fps_divider = 1 * 0x00000400;
mt9e013_ctrl->pict_fps_divider = 1 * 0x00000400;
mt9e013_ctrl->set_test = TEST_OFF;
mt9e013_ctrl->prev_res = QTR_SIZE;
mt9e013_ctrl->pict_res = FULL_SIZE;
if (data)
mt9e013_ctrl->sensordata = data;
goto init_probe_done;
init_probe_fail:
CDBG(" mt9e013_probe_init_sensor fails\n");
gpio_set_value_cansleep(data->sensor_reset, 0);
mt9e013_probe_init_done(data);
init_probe_done:
CDBG(" mt9e013_probe_init_sensor finishes\n");
return rc;
}
/* camsensor_mt9e013_reset */
int mt9e013_sensor_open_init(const struct msm_camera_sensor_info *data)
{
int32_t rc = 0;
CDBG("%s: %d\n", __func__, __LINE__);
CDBG("Calling mt9e013_sensor_open_init\n");
mt9e013_ctrl = kzalloc(sizeof(struct mt9e013_ctrl_t), GFP_KERNEL);
if (!mt9e013_ctrl) {
CDBG("mt9e013_init failed!\n");
rc = -ENOMEM;
goto init_done;
}
mt9e013_ctrl->fps_divider = 1 * 0x00000400;
mt9e013_ctrl->pict_fps_divider = 1 * 0x00000400;
mt9e013_ctrl->set_test = TEST_OFF;
mt9e013_ctrl->prev_res = QTR_SIZE;
mt9e013_ctrl->pict_res = FULL_SIZE;
if (data)
mt9e013_ctrl->sensordata = data;
if (rc < 0) {
CDBG("Calling mt9e013_sensor_open_init fail1\n");
return rc;
}
CDBG("%s: %d\n", __func__, __LINE__);
/* enable mclk first */
msm_camio_clk_rate_set(MT9E013_MASTER_CLK_RATE);
rc = mt9e013_probe_init_sensor(data);
if (rc < 0)
goto init_fail;
CDBG("init settings\n");
rc = mt9e013_sensor_setting(REG_INIT, mt9e013_ctrl->prev_res);
mt9e013_ctrl->fps = 30*Q8;
mt9e013_init_focus();
if (rc < 0) {
gpio_set_value_cansleep(data->sensor_reset, 0);
goto init_fail;
} else
goto init_done;
init_fail:
CDBG("init_fail\n");
mt9e013_probe_init_done(data);
init_done:
CDBG("init_done\n");
return rc;
} /* end of mt9e013_sensor_open_init */
static int mt9e013_init_client(struct i2c_client *client)
{
/* Initialize the MSM_CAMI2C Chip */
init_waitqueue_head(&mt9e013_wait_queue);
return 0;
}
static const struct i2c_device_id mt9e013_i2c_id[] = {
{"mt9e013", 0},
{ }
};
static int mt9e013_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int rc = 0;
CDBG("mt9e013_probe called!\n");
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
CDBG("i2c_check_functionality failed\n");
goto probe_failure;
}
mt9e013_sensorw = kzalloc(sizeof(struct mt9e013_work_t), GFP_KERNEL);
if (!mt9e013_sensorw) {
CDBG("kzalloc failed.\n");
rc = -ENOMEM;
goto probe_failure;
}
i2c_set_clientdata(client, mt9e013_sensorw);
mt9e013_init_client(client);
mt9e013_client = client;
CDBG("mt9e013_probe successed! rc = %d\n", rc);
return 0;
probe_failure:
CDBG("mt9e013_probe failed! rc = %d\n", rc);
return rc;
}
static int mt9e013_send_wb_info(struct wb_info_cfg *wb)
{
return 0;
} /* end of mt9e013_send_wb_info */
static int __exit mt9e013_remove(struct i2c_client *client)
{
struct mt9e013_work_t *sensorw = i2c_get_clientdata(client);
free_irq(client->irq, sensorw);
mt9e013_client = NULL;
kfree(sensorw);
return 0;
}
static struct i2c_driver mt9e013_i2c_driver = {
.id_table = mt9e013_i2c_id,
.probe = mt9e013_i2c_probe,
.remove = __exit_p(mt9e013_remove),
.driver = {
.name = "mt9e013",
},
};
int mt9e013_sensor_config(void __user *argp)
{
struct sensor_cfg_data cdata;
long rc = 0;
if (copy_from_user(&cdata,
(void *)argp,
sizeof(struct sensor_cfg_data)))
return -EFAULT;
mutex_lock(&mt9e013_mut);
CDBG("mt9e013_sensor_config: cfgtype = %d\n",
cdata.cfgtype);
switch (cdata.cfgtype) {
case CFG_GET_PICT_FPS:
mt9e013_get_pict_fps(
cdata.cfg.gfps.prevfps,
&(cdata.cfg.gfps.pictfps));
if (copy_to_user((void *)argp,
&cdata,
sizeof(struct sensor_cfg_data)))
rc = -EFAULT;
break;
case CFG_GET_PREV_L_PF:
cdata.cfg.prevl_pf =
mt9e013_get_prev_lines_pf();
if (copy_to_user((void *)argp,
&cdata,
sizeof(struct sensor_cfg_data)))
rc = -EFAULT;
break;
case CFG_GET_PREV_P_PL:
cdata.cfg.prevp_pl =
mt9e013_get_prev_pixels_pl();
if (copy_to_user((void *)argp,
&cdata,
sizeof(struct sensor_cfg_data)))
rc = -EFAULT;
break;
case CFG_GET_PICT_L_PF:
cdata.cfg.pictl_pf =
mt9e013_get_pict_lines_pf();
if (copy_to_user((void *)argp,
&cdata,
sizeof(struct sensor_cfg_data)))
rc = -EFAULT;
break;
case CFG_GET_PICT_P_PL:
cdata.cfg.pictp_pl =
mt9e013_get_pict_pixels_pl();
if (copy_to_user((void *)argp,
&cdata,
sizeof(struct sensor_cfg_data)))
rc = -EFAULT;
break;
case CFG_GET_PICT_MAX_EXP_LC:
cdata.cfg.pict_max_exp_lc =
mt9e013_get_pict_max_exp_lc();
if (copy_to_user((void *)argp,
&cdata,
sizeof(struct sensor_cfg_data)))
rc = -EFAULT;
break;
case CFG_SET_FPS:
case CFG_SET_PICT_FPS:
rc = mt9e013_set_fps(&(cdata.cfg.fps));
break;
case CFG_SET_EXP_GAIN:
rc =
mt9e013_write_exp_gain(
cdata.cfg.exp_gain.gain,
cdata.cfg.exp_gain.line);
break;
case CFG_SET_PICT_EXP_GAIN:
rc =
mt9e013_set_pict_exp_gain(
cdata.cfg.exp_gain.gain,
cdata.cfg.exp_gain.line);
break;
case CFG_SET_MODE:
rc = mt9e013_set_sensor_mode(cdata.mode,
cdata.rs);
break;
case CFG_PWR_DOWN:
rc = mt9e013_power_down();
break;
case CFG_MOVE_FOCUS:
rc =
mt9e013_move_focus(
cdata.cfg.focus.dir,
cdata.cfg.focus.steps);
break;
case CFG_SET_DEFAULT_FOCUS:
rc =
mt9e013_set_default_focus(
cdata.cfg.focus.steps);
break;
case CFG_GET_AF_MAX_STEPS:
cdata.max_steps = MT9E013_TOTAL_STEPS_NEAR_TO_FAR;
if (copy_to_user((void *)argp,
&cdata,
sizeof(struct sensor_cfg_data)))
rc = -EFAULT;
break;
case CFG_SET_EFFECT:
rc = mt9e013_set_default_focus(
cdata.cfg.effect);
break;
case CFG_SEND_WB_INFO:
rc = mt9e013_send_wb_info(
&(cdata.cfg.wb_info));
break;
default:
rc = -EFAULT;
break;
}
mutex_unlock(&mt9e013_mut);
return rc;
}
static int mt9e013_sensor_release(void)
{
int rc = -EBADF;
mutex_lock(&mt9e013_mut);
mt9e013_power_down();
gpio_set_value_cansleep(mt9e013_ctrl->sensordata->sensor_reset, 0);
msleep(5);
gpio_free(mt9e013_ctrl->sensordata->sensor_reset);
kfree(mt9e013_ctrl);
mt9e013_ctrl = NULL;
CDBG("mt9e013_release completed\n");
mutex_unlock(&mt9e013_mut);
return rc;
}
static int mt9e013_sensor_probe(const struct msm_camera_sensor_info *info,
struct msm_sensor_ctrl *s)
{
int rc = 0;
rc = i2c_add_driver(&mt9e013_i2c_driver);
if (rc < 0 || mt9e013_client == NULL) {
rc = -ENOTSUPP;
CDBG("I2C add driver failed");
goto probe_fail;
}
msm_camio_clk_rate_set(MT9E013_MASTER_CLK_RATE);
rc = mt9e013_probe_init_sensor(info);
if (rc < 0)
goto probe_fail;
s->s_init = mt9e013_sensor_open_init;
s->s_release = mt9e013_sensor_release;
s->s_config = mt9e013_sensor_config;
s->s_mount_angle = info->sensor_platform_info->mount_angle;
gpio_set_value_cansleep(info->sensor_reset, 0);
mt9e013_probe_init_done(info);
return rc;
probe_fail:
CDBG("mt9e013_sensor_probe: SENSOR PROBE FAILS!\n");
return rc;
}
static int __mt9e013_probe(struct platform_device *pdev)
{
return msm_camera_drv_start(pdev, mt9e013_sensor_probe);
}
static struct platform_driver msm_camera_driver = {
.probe = __mt9e013_probe,
.driver = {
.name = "msm_camera_mt9e013",
.owner = THIS_MODULE,
},
};
static int __init mt9e013_init(void)
{
return platform_driver_register(&msm_camera_driver);
}
module_init(mt9e013_init);
void mt9e013_exit(void)
{
i2c_del_driver(&mt9e013_i2c_driver);
}
MODULE_DESCRIPTION("Aptina 8 MP Bayer sensor driver");
MODULE_LICENSE("GPL v2");
static bool streaming = 1;
static int mt9e013_focus_test(void *data, u64 *val)
{
int i = 0;
mt9e013_set_default_focus(0);
for (i = 90; i < 256; i++) {
mt9e013_i2c_write_w_sensor(REG_VCM_NEW_CODE, i);
msleep(5000);
}
msleep(5000);
for (i = 255; i > 90; i--) {
mt9e013_i2c_write_w_sensor(REG_VCM_NEW_CODE, i);
msleep(5000);
}
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cam_focus, mt9e013_focus_test,
NULL, "%lld\n");
static int mt9e013_step_test(void *data, u64 *val)
{
int i = 0;
mt9e013_set_default_focus(0);
for (i = 0; i < MT9E013_TOTAL_STEPS_NEAR_TO_FAR; i++) {
mt9e013_move_focus(MOVE_NEAR, 1);
msleep(5000);
}
mt9e013_move_focus(MOVE_FAR, MT9E013_TOTAL_STEPS_NEAR_TO_FAR);
msleep(5000);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cam_step, mt9e013_step_test,
NULL, "%lld\n");
static int cam_debug_stream_set(void *data, u64 val)
{
int rc = 0;
if (val) {
mt9e013_start_stream();
streaming = 1;
} else {
mt9e013_stop_stream();
streaming = 0;
}
return rc;
}
static int cam_debug_stream_get(void *data, u64 *val)
{
*val = streaming;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cam_stream, cam_debug_stream_get,
cam_debug_stream_set, "%llu\n");
static int cam_debug_init(void)
{
struct dentry *cam_dir;
debugfs_base = debugfs_create_dir("sensor", NULL);
if (!debugfs_base)
return -ENOMEM;
cam_dir = debugfs_create_dir("mt9e013", debugfs_base);
if (!cam_dir)
return -ENOMEM;
if (!debugfs_create_file("focus", S_IRUGO | S_IWUSR, cam_dir,
NULL, &cam_focus))
return -ENOMEM;
if (!debugfs_create_file("step", S_IRUGO | S_IWUSR, cam_dir,
NULL, &cam_step))
return -ENOMEM;
if (!debugfs_create_file("stream", S_IRUGO | S_IWUSR, cam_dir,
NULL, &cam_stream))
return -ENOMEM;
return 0;
}
| gpl-2.0 |
rebel1/kernel-2.6.36.3-shuttle-P10AN01 | arch/powerpc/kernel/pci_of_scan.c | 240 | 10724 | /*
* Helper routines to scan the device tree for PCI devices and busses
*
* Migrated out of PowerPC architecture pci_64.c file by Grant Likely
* <grant.likely@secretlab.ca> so that these routines are available for
* 32 bit also.
*
* Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
* Rework, based on alpha PCI code.
* Copyright (c) 2009 Secret Lab Technologies Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
/**
* get_int_prop - Decode a u32 from a device tree property
*/
static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
const u32 *prop;
int len;
prop = of_get_property(np, name, &len);
if (prop && len >= 4)
return *prop;
return def;
}
/**
* pci_parse_of_flags - Parse the flags cell of a device tree PCI address
* @addr0: value of 1st cell of a device tree PCI address.
* @bridge: Set this flag if the address is from a bridge 'ranges' property
*/
unsigned int pci_parse_of_flags(u32 addr0, int bridge)
{
unsigned int flags = 0;
if (addr0 & 0x02000000) {
flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
if (addr0 & 0x40000000)
flags |= IORESOURCE_PREFETCH
| PCI_BASE_ADDRESS_MEM_PREFETCH;
/* Note: We don't know whether the ROM has been left enabled
* by the firmware or not. We mark it as disabled (ie, we do
* not set the IORESOURCE_ROM_ENABLE flag) for now rather than
* do a config space read, it will be force-enabled if needed
*/
if (!bridge && (addr0 & 0xff) == 0x30)
flags |= IORESOURCE_READONLY;
} else if (addr0 & 0x01000000)
flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
if (flags)
flags |= IORESOURCE_SIZEALIGN;
return flags;
}
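/* For example, addr0 = 0x42000010 decodes to a prefetchable 32-bit memory
 * resource for config register 0x10 (BAR0): bit 30 selects prefetch and
 * bits 25:24 = 10b select memory space. */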
/**
* of_pci_parse_addrs - Parse PCI addresses assigned in the device tree node
* @node: device tree node for the PCI device
* @dev: pci_dev structure for the device
*
* This function parses the 'assigned-addresses' property of a PCI devices'
* device tree node and writes them into the associated pci_dev structure.
*/
static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
{
u64 base, size;
unsigned int flags;
struct resource *res;
const u32 *addrs;
u32 i;
int proplen;
addrs = of_get_property(node, "assigned-addresses", &proplen);
if (!addrs)
return;
pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
for (; proplen >= 20; proplen -= 20, addrs += 5) {
flags = pci_parse_of_flags(addrs[0], 0);
if (!flags)
continue;
base = of_read_number(&addrs[1], 2);
size = of_read_number(&addrs[3], 2);
if (!size)
continue;
i = addrs[0] & 0xff;
pr_debug(" base: %llx, size: %llx, i: %x\n",
(unsigned long long)base,
(unsigned long long)size, i);
if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
} else if (i == dev->rom_base_reg) {
res = &dev->resource[PCI_ROM_RESOURCE];
flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
} else {
printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
continue;
}
res->start = base;
res->end = base + size - 1;
res->flags = flags;
res->name = pci_name(dev);
}
}
/**
* of_create_pci_dev - Given a device tree node on a pci bus, create a pci_dev
* @node: device tree node pointer
* @bus: bus the device is sitting on
* @devfn: PCI function number, extracted from device tree by caller.
*/
struct pci_dev *of_create_pci_dev(struct device_node *node,
struct pci_bus *bus, int devfn)
{
struct pci_dev *dev;
const char *type;
struct pci_slot *slot;
dev = alloc_pci_dev();
if (!dev)
return NULL;
type = of_get_property(node, "device_type", NULL);
if (type == NULL)
type = "";
pr_debug(" create device, devfn: %x, type: %s\n", devfn, type);
dev->bus = bus;
dev->sysdata = node;
dev->dev.parent = bus->bridge;
dev->dev.bus = &pci_bus_type;
dev->devfn = devfn;
dev->multifunction = 0; /* maybe a lie? */
dev->needs_freset = 0; /* pcie fundamental reset required */
set_pcie_port_type(dev);
list_for_each_entry(slot, &dev->bus->slots, list)
if (PCI_SLOT(dev->devfn) == slot->number)
dev->slot = slot;
dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
dev->device = get_int_prop(node, "device-id", 0xffff);
dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);
dev->cfg_size = pci_cfg_space_size(dev);
dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
dev->class = get_int_prop(node, "class-code", 0);
dev->revision = get_int_prop(node, "revision-id", 0);
pr_debug(" class: 0x%x\n", dev->class);
pr_debug(" revision: 0x%x\n", dev->revision);
dev->current_state = 4; /* unknown power state */
dev->error_state = pci_channel_io_normal;
dev->dma_mask = 0xffffffff;
/* Early fixups, before probing the BARs */
pci_fixup_device(pci_fixup_early, dev);
if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
/* a PCI-PCI bridge */
dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
dev->rom_base_reg = PCI_ROM_ADDRESS1;
set_pcie_hotplug_bridge(dev);
} else if (!strcmp(type, "cardbus")) {
dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
} else {
dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
dev->rom_base_reg = PCI_ROM_ADDRESS;
/* Maybe do a default OF mapping here */
dev->irq = NO_IRQ;
}
of_pci_parse_addrs(node, dev);
pr_debug(" adding to system ...\n");
pci_device_add(dev, bus);
return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);
/**
* of_scan_pci_bridge - Set up a PCI bridge and scan for child nodes
* @node: device tree node of bridge
* @dev: pci_dev structure for the bridge
*
* of_scan_bus() calls this routine for each PCI bridge that it finds, and
* this routine in turn calls of_scan_bus() recursively to scan for more child
* devices.
*/
void __devinit of_scan_pci_bridge(struct device_node *node,
struct pci_dev *dev)
{
struct pci_bus *bus;
const u32 *busrange, *ranges;
int len, i, mode;
struct resource *res;
unsigned int flags;
u64 size;
pr_debug("of_scan_pci_bridge(%s)\n", node->full_name);
/* parse bus-range property */
busrange = of_get_property(node, "bus-range", &len);
if (busrange == NULL || len != 8) {
printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
node->full_name);
return;
}
ranges = of_get_property(node, "ranges", &len);
if (ranges == NULL) {
printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
node->full_name);
return;
}
bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
if (!bus) {
printk(KERN_ERR "Failed to create pci bus for %s\n",
node->full_name);
return;
}
bus->primary = dev->bus->number;
bus->subordinate = busrange[1];
bus->bridge_ctl = 0;
bus->sysdata = node;
/* parse ranges property */
/* PCI #address-cells == 3 and #size-cells == 2 always */
res = &dev->resource[PCI_BRIDGE_RESOURCES];
for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
res->flags = 0;
bus->resource[i] = res;
++res;
}
i = 1;
for (; len >= 32; len -= 32, ranges += 8) {
flags = pci_parse_of_flags(ranges[0], 1);
size = of_read_number(&ranges[6], 2);
if (flags == 0 || size == 0)
continue;
if (flags & IORESOURCE_IO) {
res = bus->resource[0];
if (res->flags) {
printk(KERN_ERR "PCI: ignoring extra I/O range"
" for bridge %s\n", node->full_name);
continue;
}
} else {
if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
printk(KERN_ERR "PCI: too many memory ranges"
" for bridge %s\n", node->full_name);
continue;
}
res = bus->resource[i];
++i;
}
res->start = of_read_number(&ranges[1], 2);
res->end = res->start + size - 1;
res->flags = flags;
}
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
pr_debug(" bus name: %s\n", bus->name);
mode = PCI_PROBE_NORMAL;
if (ppc_md.pci_probe_mode)
mode = ppc_md.pci_probe_mode(bus);
pr_debug(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE)
of_scan_bus(node, bus);
else if (mode == PCI_PROBE_NORMAL)
pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);
/**
* __of_scan_bus - given a PCI bus node, setup bus and scan for child devices
* @node: device tree node for the PCI bus
* @bus: pci_bus structure for the PCI bus
* @rescan_existing: Flag indicating bus has already been set up
*/
static void __devinit __of_scan_bus(struct device_node *node,
struct pci_bus *bus, int rescan_existing)
{
struct device_node *child;
const u32 *reg;
int reglen, devfn;
struct pci_dev *dev;
pr_debug("of_scan_bus(%s) bus no %d...\n",
node->full_name, bus->number);
/* Scan direct children */
for_each_child_of_node(node, child) {
pr_debug(" * %s\n", child->full_name);
if (!of_device_is_available(child))
continue;
reg = of_get_property(child, "reg", ®len);
if (reg == NULL || reglen < 20)
continue;
devfn = (reg[0] >> 8) & 0xff;
/* create a new pci_dev for this device */
dev = of_create_pci_dev(child, bus, devfn);
if (!dev)
continue;
pr_debug(" dev header type: %x\n", dev->hdr_type);
}
/* Apply all fixups necessary. We don't fixup the bus "self"
* for an existing bridge that is being rescanned
*/
if (!rescan_existing)
pcibios_setup_bus_self(bus);
pcibios_setup_bus_devices(bus);
/* Now scan child busses */
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
struct device_node *child = pci_device_to_OF_node(dev);
if (child)
of_scan_pci_bridge(child, dev);
}
}
}
/**
* of_scan_bus - given a PCI bus node, setup bus and scan for child devices
* @node: device tree node for the PCI bus
* @bus: pci_bus structure for the PCI bus
*/
void __devinit of_scan_bus(struct device_node *node,
struct pci_bus *bus)
{
__of_scan_bus(node, bus, 0);
}
EXPORT_SYMBOL_GPL(of_scan_bus);
/**
* of_rescan_bus - given a PCI bus node, scan for child devices
* @node: device tree node for the PCI bus
* @bus: pci_bus structure for the PCI bus
*
* Same as of_scan_bus, but for a pci_bus structure that has already been
* setup.
*/
void __devinit of_rescan_bus(struct device_node *node,
struct pci_bus *bus)
{
__of_scan_bus(node, bus, 1);
}
EXPORT_SYMBOL_GPL(of_rescan_bus);
| gpl-2.0 |
civato/Note8.0-StormBorn | arch/arm/mach-exynos/setup-fimd.c | 496 | 1662 | /* linux/arch/arm/mach-exynos/setup-fimd.c
*
* Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Base Exynos4 FIMD configuration
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fb.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
#include <plat/clock.h>
#include <mach/regs-clock.h>
#include <mach/map.h>
void exynos4_fimd_cfg_gpios(unsigned int base, unsigned int nr,
unsigned int cfg, s5p_gpio_drvstr_t drvstr)
{
s3c_gpio_cfgrange_nopull(base, nr, cfg);
for (; nr > 0; nr--, base++)
s5p_gpio_set_drvstr(base, drvstr);
}
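/* Reparent the FIMD special clock to the requested source and set its rate
 * (falling back to 87 MHz when the caller passes 0). */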
int __init exynos4_fimd_setup_clock(struct device *dev, const char *bus_clk,
const char *parent, unsigned long clk_rate)
{
struct clk *clk_parent;
struct clk *sclk;
sclk = clk_get(dev, bus_clk);
if (IS_ERR(sclk))
return PTR_ERR(sclk);
clk_parent = clk_get(NULL, parent);
if (IS_ERR(clk_parent)) {
clk_put(sclk);
return PTR_ERR(clk_parent);
}
if (clk_set_parent(sclk, clk_parent)) {
pr_err("Unable to set parent %s of clock %s.\n",
clk_parent->name, sclk->name);
clk_put(sclk);
clk_put(clk_parent);
return PTR_ERR(sclk);
}
if (!clk_rate)
clk_rate = 87000000UL;
if (clk_set_rate(sclk, clk_rate)) {
pr_err("%s rate change failed: %lu\n", sclk->name, clk_rate);
clk_put(sclk);
clk_put(clk_parent);
return PTR_ERR(sclk);
}
clk_put(sclk);
clk_put(clk_parent);
return 0;
}
| gpl-2.0 |
carz2/cm-kernel | arch/x86/kernel/module.c | 752 | 6451 | /* Kernel module help for x86.
Copyright (C) 2001 Rusty Russell.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif
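/* Module text/data is allocated from the dedicated [MODULES_VADDR,
 * MODULES_END) window and mapped executable (PAGE_KERNEL_EXEC). */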
void *module_alloc(unsigned long size)
{
struct vm_struct *area;
if (!size)
return NULL;
size = PAGE_ALIGN(size);
if (size > MODULES_LEN)
return NULL;
area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
if (!area)
return NULL;
return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
PAGE_KERNEL_EXEC);
}
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
}
/* We don't need anything special. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
char *secstrings,
struct module *mod)
{
return 0;
}
#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_386_32:
/* We add the value into the location given */
*location += sym->st_value;
break;
case R_386_PC32:
/* Add the value, subtract its position */
*location += sym->st_value - (uint32_t)location;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
me->name);
return -ENOEXEC;
}
#else /*X86_64*/
int apply_relocate_add(Elf64_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf64_Sym *sym;
void *loc;
u64 val;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rel[i].r_info);
DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info),
sym->st_value, rel[i].r_addend, (u64)loc);
val = sym->st_value + rel[i].r_addend;
switch (ELF64_R_TYPE(rel[i].r_info)) {
case R_X86_64_NONE:
break;
case R_X86_64_64:
*(u64 *)loc = val;
break;
case R_X86_64_32:
*(u32 *)loc = val;
if (val != *(u32 *)loc)
goto overflow;
break;
case R_X86_64_32S:
*(s32 *)loc = val;
if ((s64)val != *(s32 *)loc)
goto overflow;
break;
case R_X86_64_PC32:
val -= (u64)loc;
*(u32 *)loc = val;
#if 0
if ((s64)val != *(s32 *)loc)
goto overflow;
#endif
break;
default:
printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
me->name, ELF64_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
overflow:
printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info), val);
printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
me->name);
return -ENOEXEC;
}
int apply_relocate(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
printk(KERN_ERR "non add relocation not supported\n");
return -ENOSYS;
}
#endif
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
*para = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (!strcmp(".text", secstrings + s->sh_name))
text = s;
if (!strcmp(".altinstructions", secstrings + s->sh_name))
alt = s;
if (!strcmp(".smp_locks", secstrings + s->sh_name))
locks = s;
if (!strcmp(".parainstructions", secstrings + s->sh_name))
para = s;
}
if (alt) {
/* patch .altinstructions */
void *aseg = (void *)alt->sh_addr;
apply_alternatives(aseg, aseg + alt->sh_size);
}
if (locks && text) {
void *lseg = (void *)locks->sh_addr;
void *tseg = (void *)text->sh_addr;
alternatives_smp_module_add(me, me->name,
lseg, lseg + locks->sh_size,
tseg, tseg + text->sh_size);
}
if (para) {
void *pseg = (void *)para->sh_addr;
apply_paravirt(pseg, pseg + para->sh_size);
}
return module_bug_finalize(hdr, sechdrs, me);
}
void module_arch_cleanup(struct module *mod)
{
alternatives_smp_module_del(mod);
module_bug_cleanup(mod);
}
| gpl-2.0 |
rodero95/android_kernel_ti_omap | fs/drop_caches.c | 752 | 1304 | /*
* Implement the manual drop-all-pagecache function
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
struct inode *inode, *toput_inode = NULL;
spin_lock(&inode_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
continue;
if (inode->i_mapping->nrpages == 0)
continue;
__iget(inode);
spin_unlock(&inode_lock);
invalidate_mapping_pages(inode->i_mapping, 0, -1);
iput(toput_inode);
toput_inode = inode;
spin_lock(&inode_lock);
}
spin_unlock(&inode_lock);
iput(toput_inode);
}
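/* Shrink slab caches repeatedly until they stop releasing many objects. */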
static void drop_slab(void)
{
int nr_objects;
do {
nr_objects = shrink_slab(1000, GFP_KERNEL, 1000);
} while (nr_objects > 10);
}
int drop_caches_sysctl_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
proc_dointvec_minmax(table, write, buffer, length, ppos);
if (write) {
if (sysctl_drop_caches & 1)
iterate_supers(drop_pagecache_sb, NULL);
if (sysctl_drop_caches & 2)
drop_slab();
}
return 0;
}
| gpl-2.0 |
playfulgod/android_kernel_lge_ms910 | drivers/net/wireless/b43legacy/xmit.c | 1008 | 18481 | /*
Broadcom B43legacy wireless driver
Transmission (TX/RX) related functions.
Copyright (C) 2005 Martin Langer <martin-langer@gmx.de>
Copyright (C) 2005 Stefano Brivio <stefano.brivio@polimi.it>
Copyright (C) 2005, 2006 Michael Buesch <mb@bu3sch.de>
Copyright (C) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (C) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
Copyright (C) 2007 Larry Finger <Larry.Finger@lwfinger.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include <net/dst.h>
#include "xmit.h"
#include "phy.h"
#include "dma.h"
#include "pio.h"
/* Extract the bitrate out of a CCK PLCP header. */
static u8 b43legacy_plcp_get_bitrate_idx_cck(struct b43legacy_plcp_hdr6 *plcp)
{
switch (plcp->raw[0]) {
case 0x0A:
return 0;
case 0x14:
return 1;
case 0x37:
return 2;
case 0x6E:
return 3;
}
B43legacy_BUG_ON(1);
return -1;
}
/* Extract the bitrate out of an OFDM PLCP header. */
static u8 b43legacy_plcp_get_bitrate_idx_ofdm(struct b43legacy_plcp_hdr6 *plcp,
bool aphy)
{
int base = aphy ? 0 : 4;
switch (plcp->raw[0] & 0xF) {
case 0xB:
return base + 0;
case 0xF:
return base + 1;
case 0xA:
return base + 2;
case 0xE:
return base + 3;
case 0x9:
return base + 4;
case 0xD:
return base + 5;
case 0x8:
return base + 6;
case 0xC:
return base + 7;
}
B43legacy_BUG_ON(1);
return -1;
}
u8 b43legacy_plcp_get_ratecode_cck(const u8 bitrate)
{
switch (bitrate) {
case B43legacy_CCK_RATE_1MB:
return 0x0A;
case B43legacy_CCK_RATE_2MB:
return 0x14;
case B43legacy_CCK_RATE_5MB:
return 0x37;
case B43legacy_CCK_RATE_11MB:
return 0x6E;
}
B43legacy_BUG_ON(1);
return 0;
}
u8 b43legacy_plcp_get_ratecode_ofdm(const u8 bitrate)
{
switch (bitrate) {
case B43legacy_OFDM_RATE_6MB:
return 0xB;
case B43legacy_OFDM_RATE_9MB:
return 0xF;
case B43legacy_OFDM_RATE_12MB:
return 0xA;
case B43legacy_OFDM_RATE_18MB:
return 0xE;
case B43legacy_OFDM_RATE_24MB:
return 0x9;
case B43legacy_OFDM_RATE_36MB:
return 0xD;
case B43legacy_OFDM_RATE_48MB:
return 0x8;
case B43legacy_OFDM_RATE_54MB:
return 0xC;
}
B43legacy_BUG_ON(1);
return 0;
}
void b43legacy_generate_plcp_hdr(struct b43legacy_plcp_hdr4 *plcp,
const u16 octets, const u8 bitrate)
{
__le32 *data = &(plcp->data);
__u8 *raw = plcp->raw;
if (b43legacy_is_ofdm_rate(bitrate)) {
u16 d;
d = b43legacy_plcp_get_ratecode_ofdm(bitrate);
B43legacy_WARN_ON(octets & 0xF000);
d |= (octets << 5);
*data = cpu_to_le32(d);
} else {
u32 plen;
plen = octets * 16 / bitrate;
if ((octets * 16 % bitrate) > 0) {
plen++;
if ((bitrate == B43legacy_CCK_RATE_11MB)
&& ((octets * 8 % 11) < 4))
raw[1] = 0x84;
else
raw[1] = 0x04;
} else
raw[1] = 0x04;
*data |= cpu_to_le32(plen << 16);
raw[0] = b43legacy_plcp_get_ratecode_cck(bitrate);
}
}
static u8 b43legacy_calc_fallback_rate(u8 bitrate)
{
switch (bitrate) {
case B43legacy_CCK_RATE_1MB:
return B43legacy_CCK_RATE_1MB;
case B43legacy_CCK_RATE_2MB:
return B43legacy_CCK_RATE_1MB;
case B43legacy_CCK_RATE_5MB:
return B43legacy_CCK_RATE_2MB;
case B43legacy_CCK_RATE_11MB:
return B43legacy_CCK_RATE_5MB;
case B43legacy_OFDM_RATE_6MB:
return B43legacy_CCK_RATE_5MB;
case B43legacy_OFDM_RATE_9MB:
return B43legacy_OFDM_RATE_6MB;
case B43legacy_OFDM_RATE_12MB:
return B43legacy_OFDM_RATE_9MB;
case B43legacy_OFDM_RATE_18MB:
return B43legacy_OFDM_RATE_12MB;
case B43legacy_OFDM_RATE_24MB:
return B43legacy_OFDM_RATE_18MB;
case B43legacy_OFDM_RATE_36MB:
return B43legacy_OFDM_RATE_24MB;
case B43legacy_OFDM_RATE_48MB:
return B43legacy_OFDM_RATE_36MB;
case B43legacy_OFDM_RATE_54MB:
return B43legacy_OFDM_RATE_48MB;
}
B43legacy_BUG_ON(1);
return 0;
}
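/* Build the firmware v3 TX header: PLCP headers for the primary and fallback
 * rates, an optional RTS/CTS-to-self frame, and the PHY/MAC control words. */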
static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
struct b43legacy_txhdr_fw3 *txhdr,
const unsigned char *fragment_data,
unsigned int fragment_len,
struct ieee80211_tx_info *info,
u16 cookie)
{
const struct ieee80211_hdr *wlhdr;
int use_encryption = !!info->control.hw_key;
u8 rate;
struct ieee80211_rate *rate_fb;
int rate_ofdm;
int rate_fb_ofdm;
unsigned int plcp_fragment_len;
u32 mac_ctl = 0;
u16 phy_ctl = 0;
struct ieee80211_rate *tx_rate;
struct ieee80211_tx_rate *rates;
wlhdr = (const struct ieee80211_hdr *)fragment_data;
memset(txhdr, 0, sizeof(*txhdr));
tx_rate = ieee80211_get_tx_rate(dev->wl->hw, info);
rate = tx_rate->hw_value;
rate_ofdm = b43legacy_is_ofdm_rate(rate);
rate_fb = ieee80211_get_alt_retry_rate(dev->wl->hw, info, 0) ? : tx_rate;
rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value);
txhdr->mac_frame_ctl = wlhdr->frame_control;
memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
/* Calculate duration for fallback rate */
if ((rate_fb->hw_value == rate) ||
(wlhdr->duration_id & cpu_to_le16(0x8000)) ||
(wlhdr->duration_id == cpu_to_le16(0))) {
/* If the fallback rate equals the normal rate or the
* dur_id field contains an AID, CFP magic or 0,
* use the original dur_id field. */
txhdr->dur_fb = wlhdr->duration_id;
} else {
txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw,
info->control.vif,
fragment_len,
rate_fb);
}
plcp_fragment_len = fragment_len + FCS_LEN;
if (use_encryption) {
u8 key_idx = info->control.hw_key->hw_key_idx;
struct b43legacy_key *key;
int wlhdr_len;
size_t iv_len;
B43legacy_WARN_ON(key_idx >= dev->max_nr_keys);
key = &(dev->key[key_idx]);
if (key->enabled) {
/* Hardware appends ICV. */
plcp_fragment_len += info->control.hw_key->icv_len;
key_idx = b43legacy_kidx_to_fw(dev, key_idx);
mac_ctl |= (key_idx << B43legacy_TX4_MAC_KEYIDX_SHIFT) &
B43legacy_TX4_MAC_KEYIDX;
mac_ctl |= (key->algorithm <<
B43legacy_TX4_MAC_KEYALG_SHIFT) &
B43legacy_TX4_MAC_KEYALG;
wlhdr_len = ieee80211_hdrlen(wlhdr->frame_control);
iv_len = min((size_t)info->control.hw_key->iv_len,
ARRAY_SIZE(txhdr->iv));
memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
} else {
/* This key is invalid. This might only happen
* in a short timeframe after machine resume before
* we were able to reconfigure keys.
* Drop this packet completely. Do not transmit it
* unencrypted to avoid leaking information. */
return -ENOKEY;
}
}
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->plcp), plcp_fragment_len,
rate);
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->plcp_fb), plcp_fragment_len,
rate_fb->hw_value);
/* PHY TX Control word */
if (rate_ofdm)
phy_ctl |= B43legacy_TX4_PHY_ENC_OFDM;
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL;
switch (info->antenna_sel_tx) {
case 0:
phy_ctl |= B43legacy_TX4_PHY_ANTLAST;
break;
case 1:
phy_ctl |= B43legacy_TX4_PHY_ANT0;
break;
case 2:
phy_ctl |= B43legacy_TX4_PHY_ANT1;
break;
default:
B43legacy_BUG_ON(1);
}
/* MAC control */
rates = info->control.rates;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
mac_ctl |= B43legacy_TX4_MAC_ACK;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
mac_ctl |= B43legacy_TX4_MAC_HWSEQ;
if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
mac_ctl |= B43legacy_TX4_MAC_STMSDU;
if (rate_fb_ofdm)
mac_ctl |= B43legacy_TX4_MAC_FALLBACKOFDM;
/* Overwrite rates[0].count to make the retry calculation
* in the tx status easier. need the actual retry limit to
* detect whether the fallback rate was used.
*/
if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
(rates[0].count <= dev->wl->hw->conf.long_frame_max_tx_count)) {
rates[0].count = dev->wl->hw->conf.long_frame_max_tx_count;
mac_ctl |= B43legacy_TX4_MAC_LONGFRAME;
} else {
rates[0].count = dev->wl->hw->conf.short_frame_max_tx_count;
}
/* Generate the RTS or CTS-to-self frame */
if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
(rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) {
unsigned int len;
struct ieee80211_hdr *hdr;
int rts_rate;
int rts_rate_fb;
int rts_rate_ofdm;
int rts_rate_fb_ofdm;
rts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info)->hw_value;
rts_rate_ofdm = b43legacy_is_ofdm_rate(rts_rate);
rts_rate_fb = b43legacy_calc_fallback_rate(rts_rate);
rts_rate_fb_ofdm = b43legacy_is_ofdm_rate(rts_rate_fb);
if (rts_rate_fb_ofdm)
mac_ctl |= B43legacy_TX4_MAC_CTSFALLBACKOFDM;
if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
ieee80211_ctstoself_get(dev->wl->hw,
info->control.vif,
fragment_data,
fragment_len, info,
(struct ieee80211_cts *)
(txhdr->rts_frame));
mac_ctl |= B43legacy_TX4_MAC_SENDCTS;
len = sizeof(struct ieee80211_cts);
} else {
ieee80211_rts_get(dev->wl->hw,
info->control.vif,
fragment_data, fragment_len, info,
(struct ieee80211_rts *)
(txhdr->rts_frame));
mac_ctl |= B43legacy_TX4_MAC_SENDRTS;
len = sizeof(struct ieee80211_rts);
}
len += FCS_LEN;
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->rts_plcp),
len, rts_rate);
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->rts_plcp_fb),
len, rts_rate_fb);
hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame);
txhdr->rts_dur_fb = hdr->duration_id;
}
/* Magic cookie */
txhdr->cookie = cpu_to_le16(cookie);
/* Apply the bitfields */
txhdr->mac_ctl = cpu_to_le32(mac_ctl);
txhdr->phy_ctl = cpu_to_le16(phy_ctl);
return 0;
}
int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
u8 *txhdr,
const unsigned char *fragment_data,
unsigned int fragment_len,
struct ieee80211_tx_info *info,
u16 cookie)
{
return generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr,
fragment_data, fragment_len,
info, cookie);
}
static s8 b43legacy_rssi_postprocess(struct b43legacy_wldev *dev,
u8 in_rssi, int ofdm,
int adjust_2053, int adjust_2050)
{
struct b43legacy_phy *phy = &dev->phy;
s32 tmp;
switch (phy->radio_ver) {
case 0x2050:
if (ofdm) {
tmp = in_rssi;
if (tmp > 127)
tmp -= 256;
tmp *= 73;
tmp /= 64;
if (adjust_2050)
tmp += 25;
else
tmp -= 3;
} else {
if (dev->dev->bus->sprom.boardflags_lo
& B43legacy_BFL_RSSI) {
if (in_rssi > 63)
in_rssi = 63;
tmp = phy->nrssi_lt[in_rssi];
tmp = 31 - tmp;
tmp *= -131;
tmp /= 128;
tmp -= 57;
} else {
tmp = in_rssi;
tmp = 31 - tmp;
tmp *= -149;
tmp /= 128;
tmp -= 68;
}
if (phy->type == B43legacy_PHYTYPE_G &&
adjust_2050)
tmp += 25;
}
break;
case 0x2060:
if (in_rssi > 127)
tmp = in_rssi - 256;
else
tmp = in_rssi;
break;
default:
tmp = in_rssi;
tmp -= 11;
tmp *= 103;
tmp /= 64;
if (adjust_2053)
tmp -= 109;
else
tmp -= 83;
}
return (s8)tmp;
}
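/*
* Worked example for the 0x2050 OFDM branch above: a raw JSSI of 200 is
* sign-extended to -56, scaled by 73/64 to roughly -63, and then offset
* to about -38 (adjust_2050 set) or -66 (adjust_2050 clear), which is
* the value reported as the signal level.
*/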
void b43legacy_rx(struct b43legacy_wldev *dev,
struct sk_buff *skb,
const void *_rxhdr)
{
struct ieee80211_rx_status status;
struct b43legacy_plcp_hdr6 *plcp;
struct ieee80211_hdr *wlhdr;
const struct b43legacy_rxhdr_fw3 *rxhdr = _rxhdr;
__le16 fctl;
u16 phystat0;
u16 phystat3;
u16 chanstat;
u16 mactime;
u32 macstat;
u16 chanid;
u8 jssi;
int padding;
memset(&status, 0, sizeof(status));
/* Get metadata about the frame from the header. */
phystat0 = le16_to_cpu(rxhdr->phy_status0);
phystat3 = le16_to_cpu(rxhdr->phy_status3);
jssi = rxhdr->jssi;
macstat = le32_to_cpu(rxhdr->mac_status);
mactime = le16_to_cpu(rxhdr->mac_time);
chanstat = le16_to_cpu(rxhdr->channel);
if (macstat & B43legacy_RX_MAC_FCSERR)
dev->wl->ieee_stats.dot11FCSErrorCount++;
/* Skip PLCP and padding */
padding = (macstat & B43legacy_RX_MAC_PADDING) ? 2 : 0;
if (unlikely(skb->len < (sizeof(struct b43legacy_plcp_hdr6) +
padding))) {
b43legacydbg(dev->wl, "RX: Packet size underrun (1)\n");
goto drop;
}
plcp = (struct b43legacy_plcp_hdr6 *)(skb->data + padding);
skb_pull(skb, sizeof(struct b43legacy_plcp_hdr6) + padding);
/* The skb contains the Wireless Header + payload data now */
if (unlikely(skb->len < (2+2+6/*minimum hdr*/ + FCS_LEN))) {
b43legacydbg(dev->wl, "RX: Packet size underrun (2)\n");
goto drop;
}
wlhdr = (struct ieee80211_hdr *)(skb->data);
fctl = wlhdr->frame_control;
if ((macstat & B43legacy_RX_MAC_DEC) &&
!(macstat & B43legacy_RX_MAC_DECERR)) {
unsigned int keyidx;
int wlhdr_len;
int iv_len;
int icv_len;
keyidx = ((macstat & B43legacy_RX_MAC_KEYIDX)
>> B43legacy_RX_MAC_KEYIDX_SHIFT);
/* We must adjust the key index here. We want the "physical"
* key index, but the ucode passed it slightly differently.
*/
keyidx = b43legacy_kidx_to_raw(dev, keyidx);
B43legacy_WARN_ON(keyidx >= dev->max_nr_keys);
if (dev->key[keyidx].algorithm != B43legacy_SEC_ALGO_NONE) {
/* Remove PROTECTED flag to mark it as decrypted. */
B43legacy_WARN_ON(!ieee80211_has_protected(fctl));
fctl &= ~cpu_to_le16(IEEE80211_FCTL_PROTECTED);
wlhdr->frame_control = fctl;
wlhdr_len = ieee80211_hdrlen(fctl);
if (unlikely(skb->len < (wlhdr_len + 3))) {
b43legacydbg(dev->wl, "RX: Packet size"
" underrun3\n");
goto drop;
}
if (skb->data[wlhdr_len + 3] & (1 << 5)) {
/* The Ext-IV Bit is set in the "KeyID"
* octet of the IV.
*/
iv_len = 8;
icv_len = 8;
} else {
iv_len = 4;
icv_len = 4;
}
if (unlikely(skb->len < (wlhdr_len + iv_len +
icv_len))) {
b43legacydbg(dev->wl, "RX: Packet size"
" underrun4\n");
goto drop;
}
/* Remove the IV */
memmove(skb->data + iv_len, skb->data, wlhdr_len);
skb_pull(skb, iv_len);
/* Remove the ICV */
skb_trim(skb, skb->len - icv_len);
status.flag |= RX_FLAG_DECRYPTED;
}
}
status.signal = b43legacy_rssi_postprocess(dev, jssi,
(phystat0 & B43legacy_RX_PHYST0_OFDM),
(phystat0 & B43legacy_RX_PHYST0_GAINCTL),
(phystat3 & B43legacy_RX_PHYST3_TRSTATE));
/* change to support A PHY */
if (phystat0 & B43legacy_RX_PHYST0_OFDM)
status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false);
else
status.rate_idx = b43legacy_plcp_get_bitrate_idx_cck(plcp);
status.antenna = !!(phystat0 & B43legacy_RX_PHYST0_ANT);
/*
* All frames on monitor interfaces and beacons always need a full
* 64-bit timestamp. Monitor interfaces need it for diagnostic
* purposes and beacons for IBSS merging.
* This code assumes we get to process the packet within 16 bits
* of timestamp, i.e. about 65 milliseconds after the PHY received
* the first symbol.
*/
if (ieee80211_is_beacon(fctl) || dev->wl->radiotap_enabled) {
u16 low_mactime_now;
b43legacy_tsf_read(dev, &status.mactime);
low_mactime_now = status.mactime;
status.mactime = status.mactime & ~0xFFFFULL;
status.mactime += mactime;
if (low_mactime_now <= mactime)
status.mactime -= 0x10000;
status.flag |= RX_FLAG_TSFT;
}
chanid = (chanstat & B43legacy_RX_CHAN_ID) >>
B43legacy_RX_CHAN_ID_SHIFT;
switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) {
case B43legacy_PHYTYPE_B:
case B43legacy_PHYTYPE_G:
status.band = IEEE80211_BAND_2GHZ;
status.freq = chanid + 2400;
break;
default:
b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n",
chanstat);
}
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
ieee80211_rx_irqsafe(dev->wl->hw, skb);
return;
drop:
b43legacydbg(dev->wl, "RX: Packet dropped\n");
dev_kfree_skb_any(skb);
}
void b43legacy_handle_txstatus(struct b43legacy_wldev *dev,
const struct b43legacy_txstatus *status)
{
b43legacy_debugfs_log_txstat(dev, status);
if (status->intermediate)
return;
if (status->for_ampdu)
return;
if (!status->acked)
dev->wl->ieee_stats.dot11ACKFailureCount++;
if (status->rts_count) {
if (status->rts_count == 0xF) /* FIXME */
dev->wl->ieee_stats.dot11RTSFailureCount++;
else
dev->wl->ieee_stats.dot11RTSSuccessCount++;
}
if (b43legacy_using_pio(dev))
b43legacy_pio_handle_txstatus(dev, status);
else
b43legacy_dma_handle_txstatus(dev, status);
}
/* Handle TX status report as received through DMA/PIO queues */
void b43legacy_handle_hwtxstatus(struct b43legacy_wldev *dev,
const struct b43legacy_hwtxstatus *hw)
{
struct b43legacy_txstatus status;
u8 tmp;
status.cookie = le16_to_cpu(hw->cookie);
status.seq = le16_to_cpu(hw->seq);
status.phy_stat = hw->phy_stat;
tmp = hw->count;
status.frame_count = (tmp >> 4);
status.rts_count = (tmp & 0x0F);
tmp = hw->flags << 1;
status.supp_reason = ((tmp & 0x1C) >> 2);
status.pm_indicated = !!(tmp & 0x80);
status.intermediate = !!(tmp & 0x40);
status.for_ampdu = !!(tmp & 0x20);
status.acked = !!(tmp & 0x02);
b43legacy_handle_txstatus(dev, &status);
}
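/*
* Example of the count-byte split above: a hardware count of 0x31
* decodes to frame_count = 3 (upper nibble) and rts_count = 1
* (lower nibble).
*/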
/* Stop any TX operation on the device (suspend the hardware queues) */
void b43legacy_tx_suspend(struct b43legacy_wldev *dev)
{
if (b43legacy_using_pio(dev))
b43legacy_pio_freeze_txqueues(dev);
else
b43legacy_dma_tx_suspend(dev);
}
/* Resume any TX operation on the device (resume the hardware queues) */
void b43legacy_tx_resume(struct b43legacy_wldev *dev)
{
if (b43legacy_using_pio(dev))
b43legacy_pio_thaw_txqueues(dev);
else
b43legacy_dma_tx_resume(dev);
}
/* Initialize the QoS parameters */
void b43legacy_qos_init(struct b43legacy_wldev *dev)
{
/* FIXME: This function must probably be called from the mac80211
* config callback. */
return;
b43legacy_hf_write(dev, b43legacy_hf_read(dev) | B43legacy_HF_EDCF);
/* FIXME kill magic */
b43legacy_write16(dev, 0x688,
b43legacy_read16(dev, 0x688) | 0x4);
/*TODO: We might need some stack support here to get the values. */
}
| gpl-2.0 |
1N4148/kernel_golden | drivers/pci/pci.c | 2288 | 93105 | /*
* PCI Bus Services, see include/linux/pci.h for further explanation.
*
* Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
* David Mosberger-Tang
*
* Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm/setup.h>
#include "pci.h"
const char *pci_power_names[] = {
"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);
unsigned int pci_pm_d3_delay;
static void pci_pme_list_scan(struct work_struct *work);
static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
struct pci_pme_device {
struct list_head list;
struct pci_dev *dev;
};
#define PME_TIMEOUT 1000 /* How long between PME checks */
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
unsigned int delay = dev->d3_delay;
if (delay < pci_pm_d3_delay)
delay = pci_pm_d3_delay;
msleep(delay);
}
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif
#define DEFAULT_CARDBUS_IO_SIZE (256)
#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
#define DEFAULT_HOTPLUG_IO_SIZE (256)
#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
/*
* The default CLS is used if arch didn't set CLS explicitly and not
* all pci devices agree on the same value. Arch can override either
* the dfl or actual value as it sees fit. Don't forget this is
* measured in 32-bit words, not bytes.
*/
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
*
* Given a PCI bus, returns the highest PCI bus number present in the set
* including the given PCI bus and its list of child PCI buses.
*/
unsigned char pci_bus_max_busnr(struct pci_bus* bus)
{
struct list_head *tmp;
unsigned char max, n;
max = bus->subordinate;
list_for_each(tmp, &bus->children) {
n = pci_bus_max_busnr(pci_bus_b(tmp));
if(n > max)
max = n;
}
return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
/*
* Make sure the BAR is actually a memory resource, not an IO resource
*/
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
WARN_ON(1);
return NULL;
}
return ioremap_nocache(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
#if 0
/**
* pci_max_busnr - returns maximum PCI bus number
*
* Returns the highest PCI bus number present in the system global list of
* PCI buses.
*/
unsigned char __devinit
pci_max_busnr(void)
{
struct pci_bus *bus = NULL;
unsigned char max, n;
max = 0;
while ((bus = pci_find_next_bus(bus)) != NULL) {
n = pci_bus_max_busnr(bus);
if(n > max)
max = n;
}
return max;
}
#endif /* 0 */
#define PCI_FIND_CAP_TTL 48
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
u8 pos, int cap, int *ttl)
{
u8 id;
while ((*ttl)--) {
pci_bus_read_config_byte(bus, devfn, pos, &pos);
if (pos < 0x40)
break;
pos &= ~3;
pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
&id);
if (id == 0xff)
break;
if (id == cap)
return pos;
pos += PCI_CAP_LIST_NEXT;
}
return 0;
}
static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
u8 pos, int cap)
{
int ttl = PCI_FIND_CAP_TTL;
return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}
int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
return __pci_find_next_cap(dev->bus, dev->devfn,
pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);
static int __pci_bus_find_cap_start(struct pci_bus *bus,
unsigned int devfn, u8 hdr_type)
{
u16 status;
pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
if (!(status & PCI_STATUS_CAP_LIST))
return 0;
switch (hdr_type) {
case PCI_HEADER_TYPE_NORMAL:
case PCI_HEADER_TYPE_BRIDGE:
return PCI_CAPABILITY_LIST;
case PCI_HEADER_TYPE_CARDBUS:
return PCI_CB_CAPABILITY_LIST;
default:
return 0;
}
return 0;
}
/**
* pci_find_capability - query for devices' capabilities
* @dev: PCI device to query
* @cap: capability code
*
* Tell if a device supports a given PCI capability.
* Returns the address of the requested capability structure within the
* device's PCI configuration space or 0 in case the device does not
* support it. Possible values for @cap:
*
* %PCI_CAP_ID_PM Power Management
* %PCI_CAP_ID_AGP Accelerated Graphics Port
* %PCI_CAP_ID_VPD Vital Product Data
* %PCI_CAP_ID_SLOTID Slot Identification
* %PCI_CAP_ID_MSI Message Signalled Interrupts
* %PCI_CAP_ID_CHSWP CompactPCI HotSwap
* %PCI_CAP_ID_PCIX PCI-X
* %PCI_CAP_ID_EXP PCI Express
*/
int pci_find_capability(struct pci_dev *dev, int cap)
{
int pos;
pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
if (pos)
pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
return pos;
}
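/*
* Usage sketch: a driver that wants the standard PM capability might look
* it up and read the PMC register roughly like this, assuming "pdev" is
* its struct pci_dev:
*
*	u16 pmc;
*	int pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
*	if (pos)
*		pci_read_config_word(pdev, pos + PCI_PM_PMC, &pmc);
*/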
/**
* pci_bus_find_capability - query for devices' capabilities
* @bus: the PCI bus to query
* @devfn: PCI device to query
* @cap: capability code
*
* Like pci_find_capability() but works for pci devices that do not have a
* pci_dev structure set up yet.
*
* Returns the address of the requested capability structure within the
* device's PCI configuration space or 0 in case the device does not
* support it.
*/
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
int pos;
u8 hdr_type;
pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
if (pos)
pos = __pci_find_next_cap(bus, devfn, pos, cap);
return pos;
}
/**
* pci_find_ext_capability - Find an extended capability
* @dev: PCI device to query
* @cap: capability code
*
* Returns the address of the requested extended capability structure
* within the device's PCI configuration space or 0 if the device does
* not support it. Possible values for @cap:
*
* %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
* %PCI_EXT_CAP_ID_VC Virtual Channel
* %PCI_EXT_CAP_ID_DSN Device Serial Number
* %PCI_EXT_CAP_ID_PWR Power Budgeting
*/
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
u32 header;
int ttl;
int pos = PCI_CFG_SPACE_SIZE;
/* minimum 8 bytes per capability */
ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
return 0;
if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
return 0;
/*
* If we have no capabilities, this is indicated by cap ID,
* cap version and next pointer all being 0.
*/
if (header == 0)
return 0;
while (ttl-- > 0) {
if (PCI_EXT_CAP_ID(header) == cap)
return pos;
pos = PCI_EXT_CAP_NEXT(header);
if (pos < PCI_CFG_SPACE_SIZE)
break;
if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
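/*
* Usage sketch: locating the AER extended capability and reading its
* uncorrectable error status, assuming "pdev" is a PCIe device:
*
*	u32 uncor;
*	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
*	if (pos)
*		pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_STATUS, &uncor);
*/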
/**
* pci_bus_find_ext_capability - find an extended capability
* @bus: the PCI bus to query
* @devfn: PCI device to query
* @cap: capability code
*
* Like pci_find_ext_capability() but works for pci devices that do not have a
* pci_dev structure set up yet.
*
* Returns the address of the requested capability structure within the
* device's PCI configuration space or 0 in case the device does not
* support it.
*/
int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
int cap)
{
u32 header;
int ttl;
int pos = PCI_CFG_SPACE_SIZE;
/* minimum 8 bytes per capability */
ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
if (pci_bus_read_config_dword(bus, devfn, pos, &header) != PCIBIOS_SUCCESSFUL)
return 0;
if (header == 0xffffffff || header == 0)
return 0;
while (ttl-- > 0) {
if (PCI_EXT_CAP_ID(header) == cap)
return pos;
pos = PCI_EXT_CAP_NEXT(header);
if (pos < PCI_CFG_SPACE_SIZE)
break;
if (pci_bus_read_config_dword(bus, devfn, pos, &header) != PCIBIOS_SUCCESSFUL)
break;
}
return 0;
}
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
int rc, ttl = PCI_FIND_CAP_TTL;
u8 cap, mask;
if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
mask = HT_3BIT_CAP_MASK;
else
mask = HT_5BIT_CAP_MASK;
pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
PCI_CAP_ID_HT, &ttl);
while (pos) {
rc = pci_read_config_byte(dev, pos + 3, &cap);
if (rc != PCIBIOS_SUCCESSFUL)
return 0;
if ((cap & mask) == ht_cap)
return pos;
pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
pos + PCI_CAP_LIST_NEXT,
PCI_CAP_ID_HT, &ttl);
}
return 0;
}
/**
* pci_find_next_ht_capability - query a device's Hypertransport capabilities
* @dev: PCI device to query
* @pos: Position from which to continue searching
* @ht_cap: Hypertransport capability code
*
* To be used in conjunction with pci_find_ht_capability() to search for
* all capabilities matching @ht_cap. @pos should always be a value returned
* from pci_find_ht_capability().
*
* NB. To be 100% safe against broken PCI devices, the caller should take
* steps to avoid an infinite loop.
*/
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
/**
* pci_find_ht_capability - query a device's Hypertransport capabilities
* @dev: PCI device to query
* @ht_cap: Hypertransport capability code
*
* Tell if a device supports a given Hypertransport capability.
* Returns an address within the device's PCI configuration space
* or 0 in case the device does not support the requested capability.
* The address points to the PCI capability, of type PCI_CAP_ID_HT,
* which has a Hypertransport capability matching @ht_cap.
*/
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
int pos;
pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
if (pos)
pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
/**
* pci_find_parent_resource - return resource region of parent bus of given region
* @dev: PCI device structure contains resources to be searched
* @res: child resource record for which parent is sought
*
* For given resource region of given device, return the resource
* region of parent bus the given region is contained in or where
* it should be allocated from.
*/
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
const struct pci_bus *bus = dev->bus;
int i;
struct resource *best = NULL, *r;
pci_bus_for_each_resource(bus, r, i) {
if (!r)
continue;
if (res->start && !(res->start >= r->start && res->end <= r->end))
continue; /* Not contained */
if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
continue; /* Wrong type */
if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
return r; /* Exact match */
/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
if (r->flags & IORESOURCE_PREFETCH)
continue;
/* .. but we can put a prefetchable resource inside a non-prefetchable one */
if (!best)
best = r;
}
return best;
}
/**
* pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
*
* Restore the BAR values for a given device, so as to make it
* accessible by its driver.
*/
static void
pci_restore_bars(struct pci_dev *dev)
{
int i;
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
pci_update_resource(dev, i);
}
static struct pci_platform_pm_ops *pci_platform_pm;
int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
if (!ops->is_manageable || !ops->set_state || !ops->choose_state
|| !ops->sleep_wake || !ops->can_wakeup)
return -EINVAL;
pci_platform_pm = ops;
return 0;
}
static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}
static inline int platform_pci_set_power_state(struct pci_dev *dev,
pci_power_t t)
{
return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}
static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
return pci_platform_pm ?
pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}
static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}
static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
return pci_platform_pm ?
pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}
static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
return pci_platform_pm ?
pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}
/**
* pci_raw_set_power_state - Use PCI PM registers to set the power state of
* given PCI device
* @dev: PCI device to handle.
* @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
*
* RETURN VALUE:
* -EINVAL if the requested state is invalid.
* -EIO if device does not support PCI PM or its PM capabilities register has a
* wrong version, or device doesn't support the requested state.
* 0 if device already is in the requested state.
* 0 if device's power state has been successfully changed.
*/
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
u16 pmcsr;
bool need_restore = false;
/* Check if we're already there */
if (dev->current_state == state)
return 0;
if (!dev->pm_cap)
return -EIO;
if (state < PCI_D0 || state > PCI_D3hot)
return -EINVAL;
/* Validate current state:
* Can enter D0 from any state, but we can only go deeper
* into a sleep state if we're already in a low power state
*/
if (state != PCI_D0 && dev->current_state <= PCI_D3cold
&& dev->current_state > state) {
dev_err(&dev->dev, "invalid power transition "
"(from state %d to %d)\n", dev->current_state, state);
return -EINVAL;
}
/* check if this device supports the desired state */
if ((state == PCI_D1 && !dev->d1_support)
|| (state == PCI_D2 && !dev->d2_support))
return -EIO;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
/* If we're (effectively) in D3, force entire word to 0.
* This doesn't affect PME_Status, disables PME_En, and
* sets PowerState to 0.
*/
switch (dev->current_state) {
case PCI_D0:
case PCI_D1:
case PCI_D2:
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= state;
break;
case PCI_D3hot:
case PCI_D3cold:
case PCI_UNKNOWN: /* Boot-up */
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
&& !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
need_restore = true;
/* Fall-through: force to D0 */
default:
pmcsr = 0;
break;
}
/* enter specified state */
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
/* Mandatory power management transition delays */
/* see PCI PM 1.1 5.6.1 table 18 */
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
udelay(PCI_PM_D2_DELAY);
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
if (dev->current_state != state && printk_ratelimit())
dev_info(&dev->dev, "Refused to change power state, "
"currently in D%d\n", dev->current_state);
/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
* INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
* from D3hot to D0 _may_ perform an internal reset, thereby
* going to "D0 Uninitialized" rather than "D0 Initialized".
* For example, at least some versions of the 3c905B and the
* 3c556B exhibit this behaviour.
*
* At least some laptop BIOSen (e.g. the Thinkpad T21) leave
* devices in a D3hot state at boot. Consequently, we need to
* restore at least the BARs so that the device will be
* accessible to its driver.
*/
if (need_restore)
pci_restore_bars(dev);
if (dev->bus->self)
pcie_aspm_pm_state_change(dev->bus->self);
return 0;
}
/**
* pci_update_current_state - Read PCI power state of given device from its
* PCI PM registers and cache it
* @dev: PCI device to handle.
* @state: State to cache in case the device doesn't have the PM capability
*/
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
if (dev->pm_cap) {
u16 pmcsr;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
} else {
dev->current_state = state;
}
}
/**
* pci_platform_power_transition - Use platform to change device power state
* @dev: PCI device to handle.
* @state: State to put the device into.
*/
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
int error;
if (platform_pci_power_manageable(dev)) {
error = platform_pci_set_power_state(dev, state);
if (!error)
pci_update_current_state(dev, state);
} else {
error = -ENODEV;
/* Fall back to PCI_D0 if native PM is not supported */
if (!dev->pm_cap)
dev->current_state = PCI_D0;
}
return error;
}
/**
* __pci_start_power_transition - Start power transition of a PCI device
* @dev: PCI device to handle.
* @state: State to put the device into.
*/
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
if (state == PCI_D0)
pci_platform_power_transition(dev, PCI_D0);
}
/**
* __pci_complete_power_transition - Complete power transition of a PCI device
* @dev: PCI device to handle.
* @state: State to put the device into.
*
* This function should not be called directly by device drivers.
*/
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
return state >= PCI_D0 ?
pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
/**
* pci_set_power_state - Set the power state of a PCI device
* @dev: PCI device to handle.
* @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
*
* Transition a device to a new power state, using the platform firmware and/or
* the device's PCI PM registers.
*
* RETURN VALUE:
* -EINVAL if the requested state is invalid.
* -EIO if device does not support PCI PM or its PM capabilities register has a
* wrong version, or device doesn't support the requested state.
* 0 if device already is in the requested state.
* 0 if device's power state has been successfully changed.
*/
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
int error;
/* bound the state we're entering */
if (state > PCI_D3hot)
state = PCI_D3hot;
else if (state < PCI_D0)
state = PCI_D0;
else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
/*
* If the device or the parent bridge do not support PCI PM,
* ignore the request if we're doing anything other than putting
* it into D0 (which would only happen on boot).
*/
return 0;
__pci_start_power_transition(dev, state);
/* This device is quirked not to be put into D3, so
don't put it in D3 */
if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
return 0;
error = pci_raw_set_power_state(dev, state);
if (!__pci_complete_power_transition(dev, state))
error = 0;
/*
* When aspm_policy is "powersave" this call ensures
* that ASPM is configured.
*/
if (!error && dev->bus->self)
pcie_aspm_powersave_config_link(dev->bus->self);
return error;
}
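/*
* Usage sketch: the resume path is typically the mirror image of suspend,
* assuming config space was saved earlier with pci_save_state():
*
*	pci_set_power_state(pdev, PCI_D0);
*	pci_restore_state(pdev);
*/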
/**
* pci_choose_state - Choose the power state of a PCI device
* @dev: PCI device to be suspended
* @state: target sleep state for the whole system. This is the value
* that is passed to suspend() function.
*
* Returns PCI power state suitable for given device and given system
* message.
*/
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
pci_power_t ret;
if (!pci_find_capability(dev, PCI_CAP_ID_PM))
return PCI_D0;
ret = platform_pci_choose_state(dev);
if (ret != PCI_POWER_ERROR)
return ret;
switch (state.event) {
case PM_EVENT_ON:
return PCI_D0;
case PM_EVENT_FREEZE:
case PM_EVENT_PRETHAW:
/* REVISIT both freeze and pre-thaw "should" use D0 */
case PM_EVENT_SUSPEND:
case PM_EVENT_HIBERNATE:
return PCI_D3hot;
default:
dev_info(&dev->dev, "unrecognized suspend event %d\n",
state.event);
BUG();
}
return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
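/*
* Usage sketch: a legacy (non-dev_pm_ops) suspend hook would usually pair
* pci_choose_state() with pci_save_state(); "foo_suspend" below is only a
* placeholder name:
*
*	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
*	{
*		pci_save_state(pdev);
*		pci_set_power_state(pdev, pci_choose_state(pdev, state));
*		return 0;
*	}
*/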
#define PCI_EXP_SAVE_REGS 7
#define pcie_cap_has_devctl(type, flags) 1
#define pcie_cap_has_lnkctl(type, flags) \
((flags & PCI_EXP_FLAGS_VERS) > 1 || \
(type == PCI_EXP_TYPE_ROOT_PORT || \
type == PCI_EXP_TYPE_ENDPOINT || \
type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags) \
((flags & PCI_EXP_FLAGS_VERS) > 1 || \
((type == PCI_EXP_TYPE_ROOT_PORT) || \
(type == PCI_EXP_TYPE_DOWNSTREAM && \
(flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags) \
((flags & PCI_EXP_FLAGS_VERS) > 1 || \
(type == PCI_EXP_TYPE_ROOT_PORT || \
type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags) \
((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags) \
((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags) \
((flags & PCI_EXP_FLAGS_VERS) > 1)
static int pci_save_pcie_state(struct pci_dev *dev)
{
int pos, i = 0;
struct pci_cap_saved_state *save_state;
u16 *cap;
u16 flags;
pos = pci_pcie_cap(dev);
if (!pos)
return 0;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
if (!save_state) {
dev_err(&dev->dev, "buffer not found in %s\n", __func__);
return -ENOMEM;
}
cap = (u16 *)&save_state->cap.data[0];
pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
if (pcie_cap_has_devctl(dev->pcie_type, flags))
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
if (pcie_cap_has_sltctl(dev->pcie_type, flags))
pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
if (pcie_cap_has_rtctl(dev->pcie_type, flags))
pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
if (pcie_cap_has_devctl2(dev->pcie_type, flags))
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
return 0;
}
static void pci_restore_pcie_state(struct pci_dev *dev)
{
int i = 0, pos;
struct pci_cap_saved_state *save_state;
u16 *cap;
u16 flags;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
if (!save_state || pos <= 0)
return;
cap = (u16 *)&save_state->cap.data[0];
pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
if (pcie_cap_has_devctl(dev->pcie_type, flags))
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
if (pcie_cap_has_sltctl(dev->pcie_type, flags))
pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
if (pcie_cap_has_rtctl(dev->pcie_type, flags))
pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
if (pcie_cap_has_devctl2(dev->pcie_type, flags))
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}
static int pci_save_pcix_state(struct pci_dev *dev)
{
int pos;
struct pci_cap_saved_state *save_state;
pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (pos <= 0)
return 0;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
if (!save_state) {
dev_err(&dev->dev, "buffer not found in %s\n", __func__);
return -ENOMEM;
}
pci_read_config_word(dev, pos + PCI_X_CMD,
(u16 *)save_state->cap.data);
return 0;
}
static void pci_restore_pcix_state(struct pci_dev *dev)
{
int i = 0, pos;
struct pci_cap_saved_state *save_state;
u16 *cap;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!save_state || pos <= 0)
return;
cap = (u16 *)&save_state->cap.data[0];
pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
/**
* pci_save_state - save the PCI configuration space of a device before suspending
* @dev: - PCI device that we're dealing with
*/
int
pci_save_state(struct pci_dev *dev)
{
int i;
/* XXX: 100% dword access ok here? */
for (i = 0; i < 16; i++)
pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
dev->state_saved = true;
if ((i = pci_save_pcie_state(dev)) != 0)
return i;
if ((i = pci_save_pcix_state(dev)) != 0)
return i;
return 0;
}
/**
* pci_restore_state - Restore the saved state of a PCI device
* @dev: - PCI device that we're dealing with
*/
void pci_restore_state(struct pci_dev *dev)
{
int i;
u32 val;
if (!dev->state_saved)
return;
/* PCI Express register must be restored first */
pci_restore_pcie_state(dev);
/*
* The Base Address register should be programmed before the command
* register(s)
*/
for (i = 15; i >= 0; i--) {
pci_read_config_dword(dev, i * 4, &val);
if (val != dev->saved_config_space[i]) {
dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
"space at offset %#x (was %#x, writing %#x)\n",
i, val, (int)dev->saved_config_space[i]);
pci_write_config_dword(dev,i * 4,
dev->saved_config_space[i]);
}
}
pci_restore_pcix_state(dev);
pci_restore_msi_state(dev);
pci_restore_iov_state(dev);
dev->state_saved = false;
}
struct pci_saved_state {
u32 config_space[16];
struct pci_cap_saved_data cap[0];
};
/**
* pci_store_saved_state - Allocate and return an opaque struct containing
* the device saved state.
* @dev: PCI device that we're dealing with
*
* Return NULL if there is no saved state or on error.
*/
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
struct pci_saved_state *state;
struct pci_cap_saved_state *tmp;
struct pci_cap_saved_data *cap;
struct hlist_node *pos;
size_t size;
if (!dev->state_saved)
return NULL;
size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
state = kzalloc(size, GFP_KERNEL);
if (!state)
return NULL;
memcpy(state->config_space, dev->saved_config_space,
sizeof(state->config_space));
cap = state->cap;
hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
memcpy(cap, &tmp->cap, len);
cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
}
/* Empty cap_save terminates list */
return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);
/**
* pci_load_saved_state - Reload the provided save state into struct pci_dev.
* @dev: PCI device that we're dealing with
* @state: Saved state returned from pci_store_saved_state()
*/
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
struct pci_cap_saved_data *cap;
dev->state_saved = false;
if (!state)
return 0;
memcpy(dev->saved_config_space, state->config_space,
sizeof(state->config_space));
cap = state->cap;
while (cap->size) {
struct pci_cap_saved_state *tmp;
tmp = pci_find_saved_cap(dev, cap->cap_nr);
if (!tmp || tmp->cap.size != cap->size)
return -EINVAL;
memcpy(tmp->cap.data, cap->data, tmp->cap.size);
cap = (struct pci_cap_saved_data *)((u8 *)cap +
sizeof(struct pci_cap_saved_data) + cap->size);
}
dev->state_saved = true;
return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);
/**
* pci_load_and_free_saved_state - Reload the save state pointed to by state,
* and free the memory allocated for it.
* @dev: PCI device that we're dealing with
* @state: Pointer to saved state returned from pci_store_saved_state()
*/
int pci_load_and_free_saved_state(struct pci_dev *dev,
struct pci_saved_state **state)
{
int ret = pci_load_saved_state(dev, *state);
kfree(*state);
*state = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
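/*
* Usage sketch: callers such as device-assignment code can capture a
* known-good state once and re-apply it later, roughly like this:
*
*	struct pci_saved_state *state;
*
*	pci_save_state(pdev);
*	state = pci_store_saved_state(pdev);
*	...
*	pci_load_and_free_saved_state(pdev, &state);
*	pci_restore_state(pdev);
*/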
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
err = pci_set_power_state(dev, PCI_D0);
if (err < 0 && err != -EIO)
return err;
err = pcibios_enable_device(dev, bars);
if (err < 0)
return err;
pci_fixup_device(pci_fixup_enable, dev);
return 0;
}
/**
* pci_reenable_device - Resume abandoned device
* @dev: PCI device to be resumed
*
* Note this function is a backend of pci_default_resume and is not supposed
* to be called by normal code; write a proper resume handler and use it instead.
*/
int pci_reenable_device(struct pci_dev *dev)
{
if (pci_is_enabled(dev))
return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
return 0;
}
static int __pci_enable_device_flags(struct pci_dev *dev,
resource_size_t flags)
{
int err;
int i, bars = 0;
/*
* Power state could be unknown at this point, either due to a fresh
* boot or a device removal call. So get the current power state
* so that things like MSI message writing will behave as expected
* (e.g. if the device really is in D0 at enable time).
*/
if (dev->pm_cap) {
u16 pmcsr;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
}
if (atomic_add_return(1, &dev->enable_cnt) > 1)
return 0; /* already enabled */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
if (dev->resource[i].flags & flags)
bars |= (1 << i);
err = do_pci_enable_device(dev, bars);
if (err < 0)
atomic_dec(&dev->enable_cnt);
return err;
}
/**
* pci_enable_device_io - Initialize a device for use with IO space
* @dev: PCI device to be initialized
*
* Initialize device before it's used by a driver. Ask low-level code
* to enable I/O resources. Wake up the device if it was suspended.
* Beware, this function can fail.
*/
int pci_enable_device_io(struct pci_dev *dev)
{
return __pci_enable_device_flags(dev, IORESOURCE_IO);
}
/**
* pci_enable_device_mem - Initialize a device for use with Memory space
* @dev: PCI device to be initialized
*
* Initialize device before it's used by a driver. Ask low-level code
* to enable Memory resources. Wake up the device if it was suspended.
* Beware, this function can fail.
*/
int pci_enable_device_mem(struct pci_dev *dev)
{
return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}
/**
* pci_enable_device - Initialize device before it's used by a driver.
* @dev: PCI device to be initialized
*
* Initialize device before it's used by a driver. Ask low-level code
* to enable I/O and memory. Wake up the device if it was suspended.
* Beware, this function can fail.
*
* Note we don't actually enable the device many times if we call
* this function repeatedly (we just increment the count).
*/
int pci_enable_device(struct pci_dev *dev)
{
return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
/*
* Managed PCI resources. This manages device on/off, intx/msi/msix
* on/off and BAR regions. pci_dev itself records msi/msix status, so
* there's no need to track it separately. pci_devres is initialized
* when a device is enabled using managed PCI device enable interface.
*/
struct pci_devres {
unsigned int enabled:1;
unsigned int pinned:1;
unsigned int orig_intx:1;
unsigned int restore_intx:1;
u32 region_mask;
};
static void pcim_release(struct device *gendev, void *res)
{
struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
struct pci_devres *this = res;
int i;
if (dev->msi_enabled)
pci_disable_msi(dev);
if (dev->msix_enabled)
pci_disable_msix(dev);
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
if (this->region_mask & (1 << i))
pci_release_region(dev, i);
if (this->restore_intx)
pci_intx(dev, this->orig_intx);
if (this->enabled && !this->pinned)
pci_disable_device(dev);
}
static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
{
struct pci_devres *dr, *new_dr;
dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
if (dr)
return dr;
new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
if (!new_dr)
return NULL;
return devres_get(&pdev->dev, new_dr, NULL, NULL);
}
static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
{
if (pci_is_managed(pdev))
return devres_find(&pdev->dev, pcim_release, NULL, NULL);
return NULL;
}
/**
* pcim_enable_device - Managed pci_enable_device()
* @pdev: PCI device to be initialized
*
* Managed pci_enable_device().
*/
int pcim_enable_device(struct pci_dev *pdev)
{
struct pci_devres *dr;
int rc;
dr = get_pci_dr(pdev);
if (unlikely(!dr))
return -ENOMEM;
if (dr->enabled)
return 0;
rc = pci_enable_device(pdev);
if (!rc) {
pdev->is_managed = 1;
dr->enabled = 1;
}
return rc;
}
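/*
* Usage sketch: in a managed probe routine the enable is undone
* automatically on driver detach, so no matching pci_disable_device()
* call is needed:
*
*	rc = pcim_enable_device(pdev);
*	if (rc)
*		return rc;
*/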
/**
* pcim_pin_device - Pin managed PCI device
* @pdev: PCI device to pin
*
* Pin managed PCI device @pdev. Pinned device won't be disabled on
* driver detach. @pdev must have been enabled with
* pcim_enable_device().
*/
void pcim_pin_device(struct pci_dev *pdev)
{
struct pci_devres *dr;
dr = find_pci_dr(pdev);
WARN_ON(!dr || !dr->enabled);
if (dr)
dr->pinned = 1;
}
/**
* pcibios_disable_device - disable arch specific PCI resources for device dev
* @dev: the PCI device to disable
*
* Disables architecture specific PCI resources for the device. This
* is the default implementation. Architecture implementations can
* override this.
*/
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
static void do_pci_disable_device(struct pci_dev *dev)
{
u16 pci_command;
pci_read_config_word(dev, PCI_COMMAND, &pci_command);
if (pci_command & PCI_COMMAND_MASTER) {
pci_command &= ~PCI_COMMAND_MASTER;
pci_write_config_word(dev, PCI_COMMAND, pci_command);
}
pcibios_disable_device(dev);
}
/**
* pci_disable_enabled_device - Disable device without updating enable_cnt
* @dev: PCI device to disable
*
* NOTE: This function is a backend of PCI power management routines and is
* not supposed to be called by drivers.
*/
void pci_disable_enabled_device(struct pci_dev *dev)
{
if (pci_is_enabled(dev))
do_pci_disable_device(dev);
}
/**
* pci_disable_device - Disable PCI device after use
* @dev: PCI device to be disabled
*
* Signal to the system that the PCI device is not in use by the system
* anymore. This only involves disabling PCI bus-mastering, if active.
*
* Note we don't actually disable the device until all callers of
* pci_enable_device() have called pci_disable_device().
*/
void
pci_disable_device(struct pci_dev *dev)
{
struct pci_devres *dr;
dr = find_pci_dr(dev);
if (dr)
dr->enabled = 0;
if (atomic_sub_return(1, &dev->enable_cnt) != 0)
return;
do_pci_disable_device(dev);
dev->is_busmaster = 0;
}
/**
* pcibios_set_pcie_reset_state - set reset state for device dev
* @dev: the PCIe device reset
* @state: Reset state to enter into
*
*
* Sets the PCIe reset state for the device. This is the default
* implementation. Architecture implementations can override this.
*/
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
enum pcie_reset_state state)
{
return -EINVAL;
}
/**
* pci_set_pcie_reset_state - set reset state for device dev
* @dev: the PCIe device reset
* @state: Reset state to enter into
*
*
* Sets the PCI reset state for the device.
*/
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
return pcibios_set_pcie_reset_state(dev, state);
}
/**
* pci_check_pme_status - Check if given device has generated PME.
* @dev: Device to check.
*
* Check the PME status of the device and if set, clear it and clear PME enable
* (if set). Return 'true' if PME status and PME enable were both set or
* 'false' otherwise.
*/
bool pci_check_pme_status(struct pci_dev *dev)
{
int pmcsr_pos;
u16 pmcsr;
bool ret = false;
if (!dev->pm_cap)
return false;
pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
pci_read_config_word(dev, pmcsr_pos, &pmcsr);
if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
return false;
/* Clear PME status. */
pmcsr |= PCI_PM_CTRL_PME_STATUS;
if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
/* Disable PME to avoid interrupt flood. */
pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
ret = true;
}
pci_write_config_word(dev, pmcsr_pos, pmcsr);
return ret;
}
/**
* pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
* @dev: Device to handle.
* @ign: Ignored.
*
* Check if @dev has generated PME and queue a resume request for it in that
* case.
*/
static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
{
if (pci_check_pme_status(dev)) {
pci_wakeup_event(dev);
pm_request_resume(&dev->dev);
}
return 0;
}
/**
* pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
* @bus: Top bus of the subtree to walk.
*/
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
if (bus)
pci_walk_bus(bus, pci_pme_wakeup, NULL);
}
/**
* pci_pme_capable - check the capability of PCI device to generate PME#
* @dev: PCI device to handle.
* @state: PCI state from which device will issue PME#.
*/
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
if (!dev->pm_cap)
return false;
return !!(dev->pme_support & (1 << state));
}
static void pci_pme_list_scan(struct work_struct *work)
{
struct pci_pme_device *pme_dev;
mutex_lock(&pci_pme_list_mutex);
if (!list_empty(&pci_pme_list)) {
list_for_each_entry(pme_dev, &pci_pme_list, list)
pci_pme_wakeup(pme_dev->dev, NULL);
schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
}
mutex_unlock(&pci_pme_list_mutex);
}
/**
* pci_external_pme - is a device an external PCI PME source?
* @dev: PCI device to check
*
*/
static bool pci_external_pme(struct pci_dev *dev)
{
if (pci_is_pcie(dev) || dev->bus->number == 0)
return false;
return true;
}
/**
* pci_pme_active - enable or disable PCI device's PME# function
* @dev: PCI device to handle.
* @enable: 'true' to enable PME# generation; 'false' to disable it.
*
* The caller must verify that the device is capable of generating PME# before
* calling this function with @enable equal to 'true'.
*/
void pci_pme_active(struct pci_dev *dev, bool enable)
{
u16 pmcsr;
if (!dev->pm_cap)
return;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
/* Clear PME_Status by writing 1 to it and enable PME# */
pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
if (!enable)
pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
/* PCI (as opposed to PCIe) PME requires that the device have
its PME# line hooked up correctly. Not all hardware vendors
do this, so the PME never gets delivered and the device
remains asleep. The easiest way around this is to
periodically walk the list of suspended devices and check
whether any have their PME flag set. The assumption is that
we'll wake up often enough anyway that this won't be a huge
hit, and the power savings from the devices will still be a
win. */
if (pci_external_pme(dev)) {
struct pci_pme_device *pme_dev;
if (enable) {
pme_dev = kmalloc(sizeof(struct pci_pme_device),
GFP_KERNEL);
if (!pme_dev)
goto out;
pme_dev->dev = dev;
mutex_lock(&pci_pme_list_mutex);
list_add(&pme_dev->list, &pci_pme_list);
if (list_is_singular(&pci_pme_list))
schedule_delayed_work(&pci_pme_work,
msecs_to_jiffies(PME_TIMEOUT));
mutex_unlock(&pci_pme_list_mutex);
} else {
mutex_lock(&pci_pme_list_mutex);
list_for_each_entry(pme_dev, &pci_pme_list, list) {
if (pme_dev->dev == dev) {
list_del(&pme_dev->list);
kfree(pme_dev);
break;
}
}
mutex_unlock(&pci_pme_list_mutex);
}
}
out:
dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
enable ? "enabled" : "disabled");
}
/**
* __pci_enable_wake - enable PCI device as wakeup event source
* @dev: PCI device affected
* @state: PCI state from which device will issue wakeup events
* @runtime: True if the events are to be generated at run time
* @enable: True to enable event generation; false to disable
*
* This enables the device as a wakeup event source, or disables it.
* When such events involve platform-specific hooks, those hooks are
* called automatically by this routine.
*
* Devices with legacy power management (no standard PCI PM capabilities)
* always require such platform hooks.
*
* RETURN VALUE:
* 0 is returned on success
* -EINVAL is returned if device is not supposed to wake up the system
* Error code depending on the platform is returned if both the platform and
* the native mechanism fail to enable the generation of wake-up events
*/
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
bool runtime, bool enable)
{
int ret = 0;
if (enable && !runtime && !device_may_wakeup(&dev->dev))
return -EINVAL;
/* Don't do the same thing twice in a row for one device. */
if (!!enable == !!dev->wakeup_prepared)
return 0;
/*
* According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
* Anderson we should be doing PME# wake enable followed by ACPI wake
* enable. To disable wake-up we call the platform first, for symmetry.
*/
if (enable) {
int error;
if (pci_pme_capable(dev, state))
pci_pme_active(dev, true);
else
ret = 1;
error = runtime ? platform_pci_run_wake(dev, true) :
platform_pci_sleep_wake(dev, true);
if (ret)
ret = error;
if (!ret)
dev->wakeup_prepared = true;
} else {
if (runtime)
platform_pci_run_wake(dev, false);
else
platform_pci_sleep_wake(dev, false);
pci_pme_active(dev, false);
dev->wakeup_prepared = false;
}
return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
/**
* pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
* @dev: PCI device to prepare
* @enable: True to enable wake-up event generation; false to disable
*
* Many drivers want the device to wake up the system from D3_hot or D3_cold
* and this function allows them to set that up cleanly - pci_enable_wake()
* should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
* ordering constraints.
*
* This function only returns error code if the device is not capable of
* generating PME# from both D3_hot and D3_cold, and the platform is unable to
* enable wake-up power for it.
*/
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
return pci_pme_capable(dev, PCI_D3cold) ?
pci_enable_wake(dev, PCI_D3cold, enable) :
pci_enable_wake(dev, PCI_D3hot, enable);
}
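/*
* Usage sketch: a NIC driver with Wake-on-LAN support might arm wake-up
* in its suspend path, where "wol_enabled" stands in for the driver's
* own WoL setting:
*
*	pci_save_state(pdev);
*	pci_wake_from_d3(pdev, wol_enabled);
*	pci_set_power_state(pdev, pci_choose_state(pdev, state));
*/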
/**
* pci_target_state - find an appropriate low power state for a given PCI dev
* @dev: PCI device
*
* Use underlying platform code to find a supported low power state for @dev.
* If the platform can't manage @dev, return the deepest state from which it
* can generate wake events, based on any available PME info.
*/
pci_power_t pci_target_state(struct pci_dev *dev)
{
pci_power_t target_state = PCI_D3hot;
if (platform_pci_power_manageable(dev)) {
/*
* Call the platform to choose the target state of the device
* and enable wake-up from this state if supported.
*/
pci_power_t state = platform_pci_choose_state(dev);
switch (state) {
case PCI_POWER_ERROR:
case PCI_UNKNOWN:
break;
case PCI_D1:
case PCI_D2:
if (pci_no_d1d2(dev))
break;
default:
target_state = state;
}
} else if (!dev->pm_cap) {
target_state = PCI_D0;
} else if (device_may_wakeup(&dev->dev)) {
/*
* Find the deepest state from which the device can generate
* wake-up events, make it the target state and enable device
* to generate PME#.
*/
if (dev->pme_support) {
while (target_state
&& !(dev->pme_support & (1 << target_state)))
target_state--;
}
}
return target_state;
}
/**
* pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
* @dev: Device to handle.
*
* Choose the power state appropriate for the device depending on whether
* it can wake up the system and/or is power manageable by the platform
* (PCI_D3hot is the default) and put the device into that state.
*/
int pci_prepare_to_sleep(struct pci_dev *dev)
{
pci_power_t target_state = pci_target_state(dev);
int error;
if (target_state == PCI_POWER_ERROR)
return -EIO;
pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
error = pci_set_power_state(dev, target_state);
if (error)
pci_enable_wake(dev, target_state, false);
return error;
}
/**
* pci_back_from_sleep - turn PCI device on during system-wide transition into working state
* @dev: Device to handle.
*
* Disable device's system wake-up capability and put it into D0.
*/
int pci_back_from_sleep(struct pci_dev *dev)
{
pci_enable_wake(dev, PCI_D0, false);
return pci_set_power_state(dev, PCI_D0);
}
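/*
* Usage sketch: drivers without special wake-up needs can use the pair
* symmetrically, i.e. pci_prepare_to_sleep(pdev) on suspend and
* pci_back_from_sleep(pdev) on resume.
*/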
/**
* pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
* @dev: PCI device being suspended.
*
* Prepare @dev to generate wake-up events at run time and put it into a low
* power state.
*/
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
pci_power_t target_state = pci_target_state(dev);
int error;
if (target_state == PCI_POWER_ERROR)
return -EIO;
__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
error = pci_set_power_state(dev, target_state);
if (error)
__pci_enable_wake(dev, target_state, true, false);
return error;
}
/**
* pci_dev_run_wake - Check if device can generate run-time wake-up events.
* @dev: Device to check.
*
* Return true if the device itself is capable of generating wake-up events
* (through the platform or using the native PCIe PME) or if the device supports
* PME and one of its upstream bridges can generate wake-up events.
*/
bool pci_dev_run_wake(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
if (device_run_wake(&dev->dev))
return true;
if (!dev->pme_support)
return false;
while (bus->parent) {
struct pci_dev *bridge = bus->self;
if (device_run_wake(&bridge->dev))
return true;
bus = bus->parent;
}
/* We have reached the root bus. */
if (bus->bridge)
return device_run_wake(bus->bridge);
return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
*/
void pci_pm_init(struct pci_dev *dev)
{
int pm;
u16 pmc;
pm_runtime_forbid(&dev->dev);
device_enable_async_suspend(&dev->dev);
dev->wakeup_prepared = false;
dev->pm_cap = 0;
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
if (!pm)
return;
/* Check device's ability to generate PME# */
pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
pmc & PCI_PM_CAP_VER_MASK);
return;
}
dev->pm_cap = pm;
dev->d3_delay = PCI_PM_D3_WAIT;
dev->d1_support = false;
dev->d2_support = false;
if (!pci_no_d1d2(dev)) {
if (pmc & PCI_PM_CAP_D1)
dev->d1_support = true;
if (pmc & PCI_PM_CAP_D2)
dev->d2_support = true;
if (dev->d1_support || dev->d2_support)
dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
dev->d1_support ? " D1" : "",
dev->d2_support ? " D2" : "");
}
pmc &= PCI_PM_CAP_PME_MASK;
if (pmc) {
dev_printk(KERN_DEBUG, &dev->dev,
"PME# supported from%s%s%s%s%s\n",
(pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
(pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
(pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
(pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
(pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
/*
* Make device's PM flags reflect the wake-up capability, but
* let the user space enable it to wake up the system as needed.
*/
device_set_wakeup_capable(&dev->dev, true);
/* Disable the PME# generation functionality */
pci_pme_active(dev, false);
} else {
dev->pme_support = 0;
}
}
/**
* platform_pci_wakeup_init - init platform wakeup if present
* @dev: PCI device
*
* Some devices don't have PCI PM caps but can still generate wakeup
* events through platform methods (like ACPI events). If @dev supports
* platform wakeup events, set the device flag to indicate as much. This
* may be redundant if the device also supports PCI PM caps, but double
* initialization should be safe in that case.
*/
void platform_pci_wakeup_init(struct pci_dev *dev)
{
if (!platform_pci_can_wakeup(dev))
return;
device_set_wakeup_capable(&dev->dev, true);
platform_pci_sleep_wake(dev, false);
}
/**
* pci_add_cap_save_buffer - allocate buffer for saving given capability registers
* @dev: the PCI device
* @cap: the capability to allocate the buffer for
* @size: requested size of the buffer
*/
static int pci_add_cap_save_buffer(
struct pci_dev *dev, char cap, unsigned int size)
{
int pos;
struct pci_cap_saved_state *save_state;
pos = pci_find_capability(dev, cap);
if (pos <= 0)
return 0;
save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
if (!save_state)
return -ENOMEM;
save_state->cap.cap_nr = cap;
save_state->cap.size = size;
pci_add_saved_cap(dev, save_state);
return 0;
}
/**
* pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
* @dev: the PCI device
*/
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
int error;
error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
PCI_EXP_SAVE_REGS * sizeof(u16));
if (error)
dev_err(&dev->dev,
"unable to preallocate PCI Express save buffer\n");
error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
if (error)
dev_err(&dev->dev,
"unable to preallocate PCI-X save buffer\n");
}
/**
* pci_enable_ari - enable ARI forwarding if hardware supports it
* @dev: the PCI device
*/
void pci_enable_ari(struct pci_dev *dev)
{
int pos;
u32 cap;
u16 flags, ctrl;
struct pci_dev *bridge;
if (!pci_is_pcie(dev) || dev->devfn)
return;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
if (!pos)
return;
bridge = dev->bus->self;
if (!bridge || !pci_is_pcie(bridge))
return;
pos = pci_pcie_cap(bridge);
if (!pos)
return;
/* ARI is a PCIe v2 feature */
pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
if ((flags & PCI_EXP_FLAGS_VERS) < 2)
return;
pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
if (!(cap & PCI_EXP_DEVCAP2_ARI))
return;
pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
ctrl |= PCI_EXP_DEVCTL2_ARI;
pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
bridge->ari_enabled = 1;
}
/**
* pci_enable_ido - enable ID-based ordering on a device
* @dev: the PCI device
* @type: which types of IDO to enable
*
* Enable ID-based ordering on @dev. @type can contain the bits
* %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
* which types of transactions are allowed to be re-ordered.
*/
void pci_enable_ido(struct pci_dev *dev, unsigned long type)
{
int pos;
u16 ctrl;
pos = pci_pcie_cap(dev);
if (!pos)
return;
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
if (type & PCI_EXP_IDO_REQUEST)
ctrl |= PCI_EXP_IDO_REQ_EN;
if (type & PCI_EXP_IDO_COMPLETION)
ctrl |= PCI_EXP_IDO_CMP_EN;
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_enable_ido);
/**
* pci_disable_ido - disable ID-based ordering on a device
* @dev: the PCI device
* @type: which types of IDO to disable
*/
void pci_disable_ido(struct pci_dev *dev, unsigned long type)
{
int pos;
u16 ctrl;
if (!pci_is_pcie(dev))
return;
pos = pci_pcie_cap(dev);
if (!pos)
return;
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
if (type & PCI_EXP_IDO_REQUEST)
ctrl &= ~PCI_EXP_IDO_REQ_EN;
if (type & PCI_EXP_IDO_COMPLETION)
ctrl &= ~PCI_EXP_IDO_CMP_EN;
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ido);
/**
* pci_enable_obff - enable optimized buffer flush/fill
* @dev: PCI device
* @type: type of signaling to use
*
* Try to enable @type OBFF signaling on @dev. It will try using WAKE#
* signaling if possible, falling back to message signaling only if
* WAKE# isn't supported. @type should indicate whether the PCIe link
* should be brought out of L0s or L1 to send the message. It should be
* either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
*
* If your device can benefit from receiving all messages, even at the
* power cost of bringing the link back up from a low power state, use
* %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
* preferred type).
*
* RETURNS:
* Zero on success, appropriate error number on failure.
*/
int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
{
int pos;
u32 cap;
u16 ctrl;
int ret;
if (!pci_is_pcie(dev))
return -ENOTSUPP;
pos = pci_pcie_cap(dev);
if (!pos)
return -ENOTSUPP;
pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
if (!(cap & PCI_EXP_OBFF_MASK))
return -ENOTSUPP; /* no OBFF support at all */
/* Make sure the topology supports OBFF as well */
if (dev->bus) {
ret = pci_enable_obff(dev->bus->self, type);
if (ret)
return ret;
}
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
if (cap & PCI_EXP_OBFF_WAKE)
ctrl |= PCI_EXP_OBFF_WAKE_EN;
else {
switch (type) {
case PCI_EXP_OBFF_SIGNAL_L0:
if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
ctrl |= PCI_EXP_OBFF_MSGA_EN;
break;
case PCI_EXP_OBFF_SIGNAL_ALWAYS:
ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
ctrl |= PCI_EXP_OBFF_MSGB_EN;
break;
default:
WARN(1, "bad OBFF signal type\n");
return -ENOTSUPP;
}
}
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
return 0;
}
EXPORT_SYMBOL(pci_enable_obff);
/**
* pci_disable_obff - disable optimized buffer flush/fill
* @dev: PCI device
*
* Disable OBFF on @dev.
*/
void pci_disable_obff(struct pci_dev *dev)
{
int pos;
u16 ctrl;
if (!pci_is_pcie(dev))
return;
pos = pci_pcie_cap(dev);
if (!pos)
return;
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_obff);
/**
* pci_ltr_supported - check whether a device supports LTR
* @dev: PCI device
*
* RETURNS:
* True if @dev supports latency tolerance reporting, false otherwise.
*/
bool pci_ltr_supported(struct pci_dev *dev)
{
int pos;
u32 cap;
if (!pci_is_pcie(dev))
return false;
pos = pci_pcie_cap(dev);
if (!pos)
return false;
pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
return cap & PCI_EXP_DEVCAP2_LTR;
}
EXPORT_SYMBOL(pci_ltr_supported);
/**
* pci_enable_ltr - enable latency tolerance reporting
* @dev: PCI device
*
* Enable LTR on @dev if possible, which means enabling it first on
* upstream ports.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int pci_enable_ltr(struct pci_dev *dev)
{
int pos;
u16 ctrl;
int ret;
if (!pci_ltr_supported(dev))
return -ENOTSUPP;
pos = pci_pcie_cap(dev);
if (!pos)
return -ENOTSUPP;
/* Only primary function can enable/disable LTR */
if (PCI_FUNC(dev->devfn) != 0)
return -EINVAL;
/* Enable upstream ports first */
if (dev->bus) {
ret = pci_enable_ltr(dev->bus->self);
if (ret)
return ret;
}
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
ctrl |= PCI_EXP_LTR_EN;
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
return 0;
}
EXPORT_SYMBOL(pci_enable_ltr);
/**
* pci_disable_ltr - disable latency tolerance reporting
* @dev: PCI device
*/
void pci_disable_ltr(struct pci_dev *dev)
{
int pos;
u16 ctrl;
if (!pci_ltr_supported(dev))
return;
pos = pci_pcie_cap(dev);
if (!pos)
return;
/* Only primary function can enable/disable LTR */
if (PCI_FUNC(dev->devfn) != 0)
return;
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
ctrl &= ~PCI_EXP_LTR_EN;
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ltr);
static int __pci_ltr_scale(int *val)
{
int scale = 0;
while (*val > 1023) {
*val = (*val + 31) / 32;
scale++;
}
return scale;
}
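/*
* Worked example of the scale computation above (illustrative only): a
* requested snoop latency of 3000 ns is larger than 1023, so one
* iteration gives (3000 + 31) / 32 = 94 with scale = 1. The encoded
* pair (value 94, scale 1) therefore stands for 94 * 32 = 3008 ns,
* i.e. the requested latency rounded up to the next 32 ns step.
*/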
/**
* pci_set_ltr - set LTR latency values
* @dev: PCI device
* @snoop_lat_ns: snoop latency in nanoseconds
* @nosnoop_lat_ns: nosnoop latency in nanoseconds
*
* Figure out the scale and set the LTR values accordingly.
*/
int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
{
int pos, ret, snoop_scale, nosnoop_scale;
u16 val;
if (!pci_ltr_supported(dev))
return -ENOTSUPP;
snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
return -EINVAL;
if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
(nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
return -EINVAL;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
if (!pos)
return -ENOTSUPP;
val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
if (ret)
return -EIO;
val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
if (ret)
return -EIO;
return 0;
}
EXPORT_SYMBOL(pci_set_ltr);
static int pci_acs_enable;
/**
* pci_request_acs - ask for ACS to be enabled if supported
*/
void pci_request_acs(void)
{
pci_acs_enable = 1;
}
/**
* pci_enable_acs - enable ACS if hardware supports it
* @dev: the PCI device
*/
void pci_enable_acs(struct pci_dev *dev)
{
int pos;
u16 cap;
u16 ctrl;
if (!pci_acs_enable)
return;
if (!pci_is_pcie(dev))
return;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
if (!pos)
return;
pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
/* Source Validation */
ctrl |= (cap & PCI_ACS_SV);
/* P2P Request Redirect */
ctrl |= (cap & PCI_ACS_RR);
/* P2P Completion Redirect */
ctrl |= (cap & PCI_ACS_CR);
/* Upstream Forwarding */
ctrl |= (cap & PCI_ACS_UF);
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}
/**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device
* @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
*
* Perform INTx swizzling for a device behind one level of bridge. This is
* required by section 9.1 of the PCI-to-PCI bridge specification for devices
* behind bridges on add-in cards. For devices with ARI enabled, the slot
* number is always 0 (see the Implementation Note in section 2.2.8.1 of
* the PCI Express Base Specification, Revision 2.1)
*/
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
int slot;
if (pci_ari_enabled(dev->bus))
slot = 0;
else
slot = PCI_SLOT(dev->devfn);
return (((pin - 1) + slot) % 4) + 1;
}
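/*
* Worked example (illustrative only): a device in slot 3 asserting
* INTB (pin 2) behind a single bridge maps to
* (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA on the bridge's primary side.
*/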
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
u8 pin;
pin = dev->pin;
if (!pin)
return -1;
while (!pci_is_root_bus(dev->bus)) {
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
*bridge = dev;
return pin;
}
/**
* pci_common_swizzle - swizzle INTx all the way to root bridge
* @dev: the PCI device
* @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
*
* Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
* bridges all the way up to a PCI root bus.
*/
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
u8 pin = *pinp;
while (!pci_is_root_bus(dev->bus)) {
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
*pinp = pin;
return PCI_SLOT(dev->devfn);
}
/**
* pci_release_region - Release a PCI BAR
* @pdev: PCI device whose resources were previously reserved by pci_request_region
* @bar: BAR to release
*
* Releases the PCI I/O and memory resources previously reserved by a
* successful call to pci_request_region. Call this function only
* after all use of the PCI regions has ceased.
*/
void pci_release_region(struct pci_dev *pdev, int bar)
{
struct pci_devres *dr;
if (pci_resource_len(pdev, bar) == 0)
return;
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
release_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
release_mem_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
dr = find_pci_dr(pdev);
if (dr)
dr->region_mask &= ~(1 << bar);
}
/**
* __pci_request_region - Reserve PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
* @res_name: Name to be associated with resource.
* @exclusive: whether the region access is exclusive or not
*
* Mark the PCI region associated with PCI device @pdev BAR @bar as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
* successfully.
*
* If @exclusive is set, then the region is marked so that userspace
* is explicitly not allowed to map the resource via /dev/mem or
* sysfs MMIO access.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*/
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
int exclusive)
{
struct pci_devres *dr;
if (pci_resource_len(pdev, bar) == 0)
return 0;
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
if (!request_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar), res_name))
goto err_out;
}
else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
if (!__request_mem_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar), res_name,
exclusive))
goto err_out;
}
dr = find_pci_dr(pdev);
if (dr)
dr->region_mask |= 1 << bar;
return 0;
err_out:
dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
&pdev->resource[bar]);
return -EBUSY;
}
/**
* pci_request_region - Reserve PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
* @res_name: Name to be associated with resource
*
* Mark the PCI region associated with PCI device @pdev BAR @bar as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
* successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*/
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
return __pci_request_region(pdev, bar, res_name, 0);
}
/**
* pci_request_region_exclusive - Reserve PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
* @res_name: Name to be associated with resource.
*
* Mark the PCI region associated with PCI device @pdev BAR @bar as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
* successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*
* The key difference that _exclusive makes is that userspace is
* explicitly not allowed to map the resource via /dev/mem or
* sysfs.
*/
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
/**
* pci_release_selected_regions - Release selected PCI I/O and memory resources
* @pdev: PCI device whose resources were previously reserved
* @bars: Bitmask of BARs to be released
*
* Release selected PCI I/O and memory resources previously reserved.
* Call this function only after all use of the PCI regions has ceased.
*/
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
int i;
for (i = 0; i < 6; i++)
if (bars & (1 << i))
pci_release_region(pdev, i);
}
int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
const char *res_name, int excl)
{
int i;
for (i = 0; i < 6; i++)
if (bars & (1 << i))
if (__pci_request_region(pdev, i, res_name, excl))
goto err_out;
return 0;
err_out:
while (--i >= 0)
if (bars & (1 << i))
pci_release_region(pdev, i);
return -EBUSY;
}
/**
* pci_request_selected_regions - Reserve selected PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
* @bars: Bitmask of BARs to be requested
* @res_name: Name to be associated with resource
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
const char *res_name)
{
return __pci_request_selected_regions(pdev, bars, res_name, 0);
}
int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
int bars, const char *res_name)
{
return __pci_request_selected_regions(pdev, bars, res_name,
IORESOURCE_EXCLUSIVE);
}
/**
* pci_release_regions - Release reserved PCI I/O and memory resources
* @pdev: PCI device whose resources were previously reserved by pci_request_regions
*
* Releases all PCI I/O and memory resources previously reserved by a
* successful call to pci_request_regions. Call this function only
* after all use of the PCI regions has ceased.
*/
void pci_release_regions(struct pci_dev *pdev)
{
pci_release_selected_regions(pdev, (1 << 6) - 1);
}
/**
* pci_request_regions - Reserve PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
* @res_name: Name to be associated with resource.
*
* Mark all PCI regions associated with PCI device @pdev as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
* successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*/
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
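/*
* Typical use from a driver probe() path, as a minimal sketch only
* (the "foo" driver name, the ioremap size and the error label are
* hypothetical):
*
* err = pci_enable_device(pdev);
* if (err)
* return err;
* err = pci_request_regions(pdev, "foo");
* if (err)
* goto disable;
* regs = pci_iomap(pdev, 0, 0);
*
* with pci_release_regions() called from the matching error path and
* from remove().
*/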
/**
* pci_request_regions_exclusive - Reserve PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
* @res_name: Name to be associated with resource.
*
* Mark all PCI regions associated with PCI device @pdev as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
* successfully.
*
* pci_request_regions_exclusive() will mark the region so that
* /dev/mem and the sysfs MMIO access will not be allowed.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*/
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
return pci_request_selected_regions_exclusive(pdev,
((1 << 6) - 1), res_name);
}
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
if (enable)
cmd = old_cmd | PCI_COMMAND_MASTER;
else
cmd = old_cmd & ~PCI_COMMAND_MASTER;
if (cmd != old_cmd) {
dev_dbg(&dev->dev, "%s bus mastering\n",
enable ? "enabling" : "disabling");
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
dev->is_busmaster = enable;
}
/**
* pci_set_master - enables bus-mastering for device dev
* @dev: the PCI device to enable
*
* Enables bus-mastering on the device and calls pcibios_set_master()
* to do the needed arch specific settings.
*/
void pci_set_master(struct pci_dev *dev)
{
__pci_set_master(dev, true);
pcibios_set_master(dev);
}
/**
* pci_clear_master - disables bus-mastering for device dev
* @dev: the PCI device to disable
*/
void pci_clear_master(struct pci_dev *dev)
{
__pci_set_master(dev, false);
}
/**
* pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
* @dev: the PCI device for which MWI is to be enabled
*
* Helper function for pci_set_mwi.
* Originally copied from drivers/net/acenic.c.
* Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
int pci_set_cacheline_size(struct pci_dev *dev)
{
u8 cacheline_size;
if (!pci_cache_line_size)
return -EINVAL;
/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
equal to or multiple of the right value. */
pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
if (cacheline_size >= pci_cache_line_size &&
(cacheline_size % pci_cache_line_size) == 0)
return 0;
/* Write the correct value. */
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
/* Read it back. */
pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
if (cacheline_size == pci_cache_line_size)
return 0;
dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
"supported\n", pci_cache_line_size << 2);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
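/*
* Worked example (illustrative only): PCI_CACHE_LINE_SIZE counts
* 32-bit dwords, so a pci_cache_line_size of 16 corresponds to
* 16 * 4 = 64 bytes (hence the "<< 2" in the message above). A
* register already programmed to 32 (128 bytes) is also accepted,
* being a multiple of 16.
*/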
#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
return 0;
}
int pci_try_set_mwi(struct pci_dev *dev)
{
return 0;
}
void pci_clear_mwi(struct pci_dev *dev)
{
}
#else
/**
* pci_set_mwi - enables memory-write-invalidate PCI transaction
* @dev: the PCI device for which MWI is enabled
*
* Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
int
pci_set_mwi(struct pci_dev *dev)
{
int rc;
u16 cmd;
rc = pci_set_cacheline_size(dev);
if (rc)
return rc;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (!(cmd & PCI_COMMAND_INVALIDATE)) {
dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
cmd |= PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
/**
* pci_try_set_mwi - enables memory-write-invalidate PCI transaction
* @dev: the PCI device for which MWI is enabled
*
* Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
* Callers are not required to check the return value.
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
int pci_try_set_mwi(struct pci_dev *dev)
{
int rc = pci_set_mwi(dev);
return rc;
}
/**
* pci_clear_mwi - disables Memory-Write-Invalidate for device dev
* @dev: the PCI device to disable
*
* Disables PCI Memory-Write-Invalidate transaction on the device
*/
void
pci_clear_mwi(struct pci_dev *dev)
{
u16 cmd;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (cmd & PCI_COMMAND_INVALIDATE) {
cmd &= ~PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
}
#endif /* ! PCI_DISABLE_MWI */
/**
* pci_intx - enables/disables PCI INTx for device dev
* @pdev: the PCI device to operate on
* @enable: boolean: whether to enable or disable PCI INTx
*
* Enables/disables PCI INTx for device dev
*/
void
pci_intx(struct pci_dev *pdev, int enable)
{
u16 pci_command, new;
pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
if (enable) {
new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
} else {
new = pci_command | PCI_COMMAND_INTX_DISABLE;
}
if (new != pci_command) {
struct pci_devres *dr;
pci_write_config_word(pdev, PCI_COMMAND, new);
dr = find_pci_dr(pdev);
if (dr && !dr->restore_intx) {
dr->restore_intx = 1;
dr->orig_intx = !enable;
}
}
}
/**
* pci_msi_off - disables any MSI or MSI-X capabilities
* @dev: the PCI device to operate on
*
* If you want to use MSI, see pci_enable_msi() and friends.
* This is a lower-level primitive that allows us to disable
* MSI operation at the device level.
*/
void pci_msi_off(struct pci_dev *dev)
{
int pos;
u16 control;
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
if (pos) {
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
control &= ~PCI_MSI_FLAGS_ENABLE;
pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
if (pos) {
pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
control &= ~PCI_MSIX_FLAGS_ENABLE;
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}
}
EXPORT_SYMBOL_GPL(pci_msi_off);
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
static int pcie_flr(struct pci_dev *dev, int probe)
{
int i;
int pos;
u32 cap;
u16 status, control;
pos = pci_pcie_cap(dev);
if (!pos)
return -ENOTTY;
pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
if (!(cap & PCI_EXP_DEVCAP_FLR))
return -ENOTTY;
if (probe)
return 0;
/* Wait for the Transaction Pending bit to clear */
for (i = 0; i < 4; i++) {
if (i)
msleep((1 << (i - 1)) * 100);
pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
if (!(status & PCI_EXP_DEVSTA_TRPND))
goto clear;
}
dev_err(&dev->dev, "transaction is not cleared; "
"proceeding with reset anyway\n");
clear:
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
control |= PCI_EXP_DEVCTL_BCR_FLR;
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
msleep(100);
return 0;
}
static int pci_af_flr(struct pci_dev *dev, int probe)
{
int i;
int pos;
u8 cap;
u8 status;
pos = pci_find_capability(dev, PCI_CAP_ID_AF);
if (!pos)
return -ENOTTY;
pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
return -ENOTTY;
if (probe)
return 0;
/* Wait for the Transaction Pending bit to clear */
for (i = 0; i < 4; i++) {
if (i)
msleep((1 << (i - 1)) * 100);
pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
if (!(status & PCI_AF_STATUS_TP))
goto clear;
}
dev_err(&dev->dev, "transaction is not cleared; "
"proceeding with reset anyway\n");
clear:
pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
msleep(100);
return 0;
}
/**
* pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
* @dev: Device to reset.
* @probe: If set, only check if the device can be reset this way.
*
* If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
* unset, it will be reinitialized internally when going from PCI_D3hot to
* PCI_D0. If that's the case and the device is not in a low-power state
* already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
*
* NOTE: This causes the caller to sleep for twice the device power transition
* cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
* by default (i.e. unless the @dev's d3_delay field has a different value).
* Moreover, only devices in D0 can be reset by this function.
*/
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
u16 csr;
if (!dev->pm_cap)
return -ENOTTY;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
return -ENOTTY;
if (probe)
return 0;
if (dev->current_state != PCI_D0)
return -EINVAL;
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D3hot;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
pci_dev_d3_sleep(dev);
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D0;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
pci_dev_d3_sleep(dev);
return 0;
}
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
u16 ctrl;
struct pci_dev *pdev;
if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
return -ENOTTY;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
if (pdev != dev)
return -ENOTTY;
if (probe)
return 0;
pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
msleep(100);
ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
msleep(100);
return 0;
}
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
int rc;
might_sleep();
if (!probe) {
pci_block_user_cfg_access(dev);
/* block PM suspend, driver probe, etc. */
device_lock(&dev->dev);
}
rc = pci_dev_specific_reset(dev, probe);
if (rc != -ENOTTY)
goto done;
rc = pcie_flr(dev, probe);
if (rc != -ENOTTY)
goto done;
rc = pci_af_flr(dev, probe);
if (rc != -ENOTTY)
goto done;
rc = pci_pm_reset(dev, probe);
if (rc != -ENOTTY)
goto done;
rc = pci_parent_bus_reset(dev, probe);
done:
if (!probe) {
device_unlock(&dev->dev);
pci_unblock_user_cfg_access(dev);
}
return rc;
}
/**
* __pci_reset_function - reset a PCI device function
* @dev: PCI device to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
* to PCI config space in order to use this function.
*
* The device function is presumed to be unused when this function is called.
* Resetting the device will make the contents of PCI configuration space
* random, so any caller of this must be prepared to reinitialise the
* device including MSI, bus mastering, BARs, decoding IO and memory spaces,
* etc.
*
* Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
*/
int __pci_reset_function(struct pci_dev *dev)
{
return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);
/**
* pci_probe_reset_function - check whether the device can be safely reset
* @dev: PCI device to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
* to PCI config space in order to use this function.
*
* Returns 0 if the device function can be reset or negative if the
* device doesn't support resetting a single function.
*/
int pci_probe_reset_function(struct pci_dev *dev)
{
return pci_dev_reset(dev, 1);
}
/**
* pci_reset_function - quiesce and reset a PCI device function
* @dev: PCI device to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
* to PCI config space in order to use this function.
*
* This function does not just reset the PCI portion of a device, but
* clears all the state associated with the device. This function differs
* from __pci_reset_function in that it saves and restores device state
* over the reset.
*
* Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
*/
int pci_reset_function(struct pci_dev *dev)
{
int rc;
rc = pci_dev_reset(dev, 1);
if (rc)
return rc;
pci_save_state(dev);
/*
* both INTx and MSI are disabled after the Interrupt Disable bit
* is set and the Bus Master bit is cleared.
*/
pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
rc = pci_dev_reset(dev, 0);
pci_restore_state(dev);
return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
/**
* pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
* @dev: PCI device to query
*
* Returns mmrbc: maximum designed memory read count in bytes
* or appropriate error value.
*/
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
int cap;
u32 stat;
cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!cap)
return -EINVAL;
if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
return -EINVAL;
return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);
/**
* pcix_get_mmrbc - get PCI-X maximum memory read byte count
* @dev: PCI device to query
*
* Returns mmrbc: maximum memory read count in bytes
* or appropriate error value.
*/
int pcix_get_mmrbc(struct pci_dev *dev)
{
int cap;
u16 cmd;
cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!cap)
return -EINVAL;
if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
return -EINVAL;
return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);
/**
* pcix_set_mmrbc - set PCI-X maximum memory read byte count
* @dev: PCI device to query
* @mmrbc: maximum memory read count in bytes
* valid values are 512, 1024, 2048, 4096
*
* If possible, sets the maximum memory read byte count; some bridges have
* errata that prevent this.
*/
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
int cap;
u32 stat, v, o;
u16 cmd;
if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
return -EINVAL;
v = ffs(mmrbc) - 10;
cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!cap)
return -EINVAL;
if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
return -EINVAL;
if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
return -E2BIG;
if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
return -EINVAL;
o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
if (o != v) {
if (v > o && dev->bus &&
(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
return -EIO;
cmd &= ~PCI_X_CMD_MAX_READ;
cmd |= v << 2;
if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
return -EIO;
}
return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
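/*
* Worked example of the encoding above (illustrative only):
* mmrbc = 2048 gives ffs(2048) - 10 = 12 - 10 = 2, and
* pcix_get_mmrbc() decodes it back as 512 << 2 = 2048. The valid
* range 512..4096 thus maps to field values 0..3.
*/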
/**
* pcie_get_readrq - get PCI Express read request size
* @dev: PCI device to query
*
* Returns maximum memory read request in bytes
* or appropriate error value.
*/
int pcie_get_readrq(struct pci_dev *dev)
{
int ret, cap;
u16 ctl;
cap = pci_pcie_cap(dev);
if (!cap)
return -EINVAL;
ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
if (!ret)
ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
return ret;
}
EXPORT_SYMBOL(pcie_get_readrq);
/**
* pcie_set_readrq - set PCI Express maximum memory read request
* @dev: PCI device to query
* @rq: maximum memory read count in bytes
* valid values are 128, 256, 512, 1024, 2048, 4096
*
* If possible, sets the maximum memory read request size.
*/
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
int cap, err = -EINVAL;
u16 ctl, v;
if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
goto out;
v = (ffs(rq) - 8) << 12;
cap = pci_pcie_cap(dev);
if (!cap)
goto out;
err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
if (err)
goto out;
if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
ctl &= ~PCI_EXP_DEVCTL_READRQ;
ctl |= v;
err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
}
out:
return err;
}
EXPORT_SYMBOL(pcie_set_readrq);
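/*
* Worked example of the encoding above (illustrative only): rq = 512
* gives (ffs(512) - 8) << 12 = 2 << 12 = 0x2000, and pcie_get_readrq()
* decodes it back as 128 << 2 = 512. The valid range 128..4096 maps to
* field values 0..5.
*/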
/**
* pci_select_bars - Make BAR mask from the type of resource
* @dev: the PCI device for which BAR mask is made
* @flags: resource type mask to be selected
*
* This helper routine makes a BAR mask from the resource type.
*/
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
int i, bars = 0;
for (i = 0; i < PCI_NUM_RESOURCES; i++)
if (pci_resource_flags(dev, i) & flags)
bars |= (1 << i);
return bars;
}
/**
* pci_resource_bar - get position of the BAR associated with a resource
* @dev: the PCI device
* @resno: the resource number
* @type: the BAR type to be filled in
*
* Returns BAR position in config space, or 0 if the BAR is invalid.
*/
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
int reg;
if (resno < PCI_ROM_RESOURCE) {
*type = pci_bar_unknown;
return PCI_BASE_ADDRESS_0 + 4 * resno;
} else if (resno == PCI_ROM_RESOURCE) {
*type = pci_bar_mem32;
return dev->rom_base_reg;
} else if (resno < PCI_BRIDGE_RESOURCES) {
/* device specific resource */
reg = pci_iov_resource_bar(dev, resno, type);
if (reg)
return reg;
}
dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
return 0;
}
/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;
void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
arch_set_vga_state = func; /* NULL disables */
}
static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
unsigned int command_bits, u32 flags)
{
if (arch_set_vga_state)
return arch_set_vga_state(dev, decode, command_bits,
flags);
return 0;
}
/**
* pci_set_vga_state - set VGA decode state on device and parents if requested
* @dev: the PCI device
* @decode: true = enable decoding, false = disable decoding
* @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
* @flags: %PCI_VGA_STATE_CHANGE_DECODES and/or %PCI_VGA_STATE_CHANGE_BRIDGE
* (traverse ancestors and change bridges)
*/
int pci_set_vga_state(struct pci_dev *dev, bool decode,
unsigned int command_bits, u32 flags)
{
struct pci_bus *bus;
struct pci_dev *bridge;
u16 cmd;
int rc;
WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
/* ARCH specific VGA enables */
rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
if (rc)
return rc;
if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (decode == true)
cmd |= command_bits;
else
cmd &= ~command_bits;
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
return 0;
bus = dev->bus;
while (bus) {
bridge = bus->self;
if (bridge) {
pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
&cmd);
if (decode == true)
cmd |= PCI_BRIDGE_CTL_VGA;
else
cmd &= ~PCI_BRIDGE_CTL_VGA;
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
cmd);
}
bus = bus->parent;
}
return 0;
}
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
/**
* pci_specified_resource_alignment - get resource alignment specified by user.
* @dev: the PCI device to get
*
* RETURNS: Resource alignment if it is specified.
* Zero if it is not specified.
*/
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
int seg, bus, slot, func, align_order, count;
resource_size_t align = 0;
char *p;
spin_lock(&resource_alignment_lock);
p = resource_alignment_param;
while (*p) {
count = 0;
if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
p[count] == '@') {
p += count + 1;
} else {
align_order = -1;
}
if (sscanf(p, "%x:%x:%x.%x%n",
&seg, &bus, &slot, &func, &count) != 4) {
seg = 0;
if (sscanf(p, "%x:%x.%x%n",
&bus, &slot, &func, &count) != 3) {
/* Invalid format */
printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
p);
break;
}
}
p += count;
if (seg == pci_domain_nr(dev->bus) &&
bus == dev->bus->number &&
slot == PCI_SLOT(dev->devfn) &&
func == PCI_FUNC(dev->devfn)) {
if (align_order == -1) {
align = PAGE_SIZE;
} else {
align = 1 << align_order;
}
/* Found */
break;
}
if (*p != ';' && *p != ',') {
/* End of param or invalid format */
break;
}
p++;
}
spin_unlock(&resource_alignment_lock);
return align;
}
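/*
* The parameter parsed above is a list of
* [<order>@][<domain>:]<bus>:<slot>.<func> entries separated by ';' or
* ','. For example (illustrative only), "20@0000:00:02.0" requests
* 2^20 = 1 MiB alignment for that device, while "00:1f.2" alone falls
* back to PAGE_SIZE alignment.
*/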
/**
* pci_is_reassigndev - check if the specified PCI device is a target device to reassign
* @dev: the PCI device to check
*
* RETURNS: non-zero if the PCI device is a target device to reassign,
* zero otherwise.
*/
int pci_is_reassigndev(struct pci_dev *dev)
{
return (pci_specified_resource_alignment(dev) != 0);
}
ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
spin_lock(&resource_alignment_lock);
strncpy(resource_alignment_param, buf, count);
resource_alignment_param[count] = '\0';
spin_unlock(&resource_alignment_lock);
return count;
}
ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
size_t count;
spin_lock(&resource_alignment_lock);
count = snprintf(buf, size, "%s", resource_alignment_param);
spin_unlock(&resource_alignment_lock);
return count;
}
static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}
static ssize_t pci_resource_alignment_store(struct bus_type *bus,
const char *buf, size_t count)
{
return pci_set_resource_alignment_param(buf, count);
}
BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
pci_resource_alignment_store);
static int __init pci_resource_alignment_sysfs_init(void)
{
return bus_create_file(&pci_bus_type,
&bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);
static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
pci_domains_supported = 0;
#endif
}
/**
* pci_ext_cfg_avail - can we access extended PCI config space?
* @dev: The PCI device of the root bridge.
*
* Returns 1 if we can access PCI extended config space (offsets
* greater than 0xff). This is the default implementation. Architecture
* implementations can override this.
*/
int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
{
return 1;
}
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);
static int __init pci_setup(char *str)
{
while (str) {
char *k = strchr(str, ',');
if (k)
*k++ = 0;
if (*str && (str = pcibios_setup(str)) && *str) {
if (!strcmp(str, "nomsi")) {
pci_no_msi();
} else if (!strcmp(str, "noaer")) {
pci_no_aer();
} else if (!strncmp(str, "realloc", 7)) {
pci_realloc();
} else if (!strcmp(str, "nodomains")) {
pci_no_domains();
} else if (!strncmp(str, "cbiosize=", 9)) {
pci_cardbus_io_size = memparse(str + 9, &str);
} else if (!strncmp(str, "cbmemsize=", 10)) {
pci_cardbus_mem_size = memparse(str + 10, &str);
} else if (!strncmp(str, "resource_alignment=", 19)) {
pci_set_resource_alignment_param(str + 19,
strlen(str + 19));
} else if (!strncmp(str, "ecrc=", 5)) {
pcie_ecrc_get_policy(str + 5);
} else if (!strncmp(str, "hpiosize=", 9)) {
pci_hotplug_io_size = memparse(str + 9, &str);
} else if (!strncmp(str, "hpmemsize=", 10)) {
pci_hotplug_mem_size = memparse(str + 10, &str);
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
}
}
str = k;
}
return 0;
}
early_param("pci", pci_setup);
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);
EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
| gpl-2.0 |
mythos234/NB_ET_Kernel | drivers/gpu/drm/ttm/ttm_lock.c | 2544 | 7405 | /**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_module.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
#define TTM_WRITE_LOCK_PENDING (1 << 0)
#define TTM_VT_LOCK_PENDING (1 << 1)
#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
#define TTM_VT_LOCK (1 << 3)
#define TTM_SUSPEND_LOCK (1 << 4)
void ttm_lock_init(struct ttm_lock *lock)
{
spin_lock_init(&lock->lock);
init_waitqueue_head(&lock->queue);
lock->rw = 0;
lock->flags = 0;
lock->kill_takers = false;
lock->signal = SIGKILL;
}
EXPORT_SYMBOL(ttm_lock_init);
void ttm_read_unlock(struct ttm_lock *lock)
{
spin_lock(&lock->lock);
if (--lock->rw == 0)
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_read_unlock);
static bool __ttm_read_lock(struct ttm_lock *lock)
{
bool locked = false;
spin_lock(&lock->lock);
if (unlikely(lock->kill_takers)) {
send_sig(lock->signal, current, 0);
spin_unlock(&lock->lock);
return false;
}
if (lock->rw >= 0 && lock->flags == 0) {
++lock->rw;
locked = true;
}
spin_unlock(&lock->lock);
return locked;
}
int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
{
int ret = 0;
if (interruptible)
ret = wait_event_interruptible(lock->queue,
__ttm_read_lock(lock));
else
wait_event(lock->queue, __ttm_read_lock(lock));
return ret;
}
EXPORT_SYMBOL(ttm_read_lock);
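/*
* A minimal usage sketch (illustrative only): take the lock around the
* read-side access and drop it with ttm_read_unlock():
*
* ret = ttm_read_lock(lock, true);
* if (unlikely(ret != 0))
* return ret;
* ... access the shared resource ...
* ttm_read_unlock(lock);
*/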
static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{
bool block = true;
*locked = false;
spin_lock(&lock->lock);
if (unlikely(lock->kill_takers)) {
send_sig(lock->signal, current, 0);
spin_unlock(&lock->lock);
return false;
}
if (lock->rw >= 0 && lock->flags == 0) {
++lock->rw;
block = false;
*locked = true;
} else if (lock->flags == 0) {
block = false;
}
spin_unlock(&lock->lock);
return !block;
}
int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
{
int ret = 0;
bool locked;
if (interruptible)
ret = wait_event_interruptible
(lock->queue, __ttm_read_trylock(lock, &locked));
else
wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
if (unlikely(ret != 0)) {
BUG_ON(locked);
return ret;
}
return (locked) ? 0 : -EBUSY;
}
void ttm_write_unlock(struct ttm_lock *lock)
{
spin_lock(&lock->lock);
lock->rw = 0;
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_write_unlock);
static bool __ttm_write_lock(struct ttm_lock *lock)
{
bool locked = false;
spin_lock(&lock->lock);
if (unlikely(lock->kill_takers)) {
send_sig(lock->signal, current, 0);
spin_unlock(&lock->lock);
return false;
}
if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
lock->rw = -1;
lock->flags &= ~TTM_WRITE_LOCK_PENDING;
locked = true;
} else {
lock->flags |= TTM_WRITE_LOCK_PENDING;
}
spin_unlock(&lock->lock);
return locked;
}
int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
{
int ret = 0;
if (interruptible) {
ret = wait_event_interruptible(lock->queue,
__ttm_write_lock(lock));
if (unlikely(ret != 0)) {
spin_lock(&lock->lock);
lock->flags &= ~TTM_WRITE_LOCK_PENDING;
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
} else
wait_event(lock->queue, __ttm_write_lock(lock));
return ret;
}
EXPORT_SYMBOL(ttm_write_lock);
void ttm_write_lock_downgrade(struct ttm_lock *lock)
{
spin_lock(&lock->lock);
lock->rw = 1;
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
static int __ttm_vt_unlock(struct ttm_lock *lock)
{
int ret = 0;
spin_lock(&lock->lock);
if (unlikely(!(lock->flags & TTM_VT_LOCK)))
ret = -EINVAL;
lock->flags &= ~TTM_VT_LOCK;
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
return ret;
}
static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
int ret;
*p_base = NULL;
ret = __ttm_vt_unlock(lock);
BUG_ON(ret != 0);
}
static bool __ttm_vt_lock(struct ttm_lock *lock)
{
bool locked = false;
spin_lock(&lock->lock);
if (lock->rw == 0) {
lock->flags &= ~TTM_VT_LOCK_PENDING;
lock->flags |= TTM_VT_LOCK;
locked = true;
} else {
lock->flags |= TTM_VT_LOCK_PENDING;
}
spin_unlock(&lock->lock);
return locked;
}
int ttm_vt_lock(struct ttm_lock *lock,
bool interruptible,
struct ttm_object_file *tfile)
{
int ret = 0;
if (interruptible) {
ret = wait_event_interruptible(lock->queue,
__ttm_vt_lock(lock));
if (unlikely(ret != 0)) {
spin_lock(&lock->lock);
lock->flags &= ~TTM_VT_LOCK_PENDING;
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
return ret;
}
} else
wait_event(lock->queue, __ttm_vt_lock(lock));
/*
* Add a base-object, the destructor of which will
* make sure the lock is released if the client dies
* while holding it.
*/
ret = ttm_base_object_init(tfile, &lock->base, false,
ttm_lock_type, &ttm_vt_lock_remove, NULL);
if (ret)
(void)__ttm_vt_unlock(lock);
else
lock->vt_holder = tfile;
return ret;
}
EXPORT_SYMBOL(ttm_vt_lock);
int ttm_vt_unlock(struct ttm_lock *lock)
{
return ttm_ref_object_base_unref(lock->vt_holder,
lock->base.hash.key, TTM_REF_USAGE);
}
EXPORT_SYMBOL(ttm_vt_unlock);
void ttm_suspend_unlock(struct ttm_lock *lock)
{
spin_lock(&lock->lock);
lock->flags &= ~TTM_SUSPEND_LOCK;
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_suspend_unlock);
static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
bool locked = false;
spin_lock(&lock->lock);
if (lock->rw == 0) {
lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
lock->flags |= TTM_SUSPEND_LOCK;
locked = true;
} else {
lock->flags |= TTM_SUSPEND_LOCK_PENDING;
}
spin_unlock(&lock->lock);
return locked;
}
void ttm_suspend_lock(struct ttm_lock *lock)
{
wait_event(lock->queue, __ttm_suspend_lock(lock));
}
EXPORT_SYMBOL(ttm_suspend_lock);
| gpl-2.0 |
neobuddy89/vibrant_mackay_kernel | drivers/spi/dw_spi_mid.c | 2544 | 6082 | /*
* dw_spi_mid.c - special handling for DW core on Intel MID platform
*
* Copyright (c) 2009, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include "dw_spi.h"
#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/intel_mid_dma.h>
#include <linux/pci.h>
struct mid_dma {
struct intel_mid_dma_slave dmas_tx;
struct intel_mid_dma_slave dmas_rx;
};
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
struct dw_spi *dws = param;
return dws->dmac && (&dws->dmac->dev == chan->device->dev);
}
static int mid_spi_dma_init(struct dw_spi *dws)
{
struct mid_dma *dw_dma = dws->dma_priv;
struct intel_mid_dma_slave *rxs, *txs;
dma_cap_mask_t mask;
/*
* Get pci device for DMA controller, currently it could only
* be the DMA controller of either Moorestown or Medfield
*/
dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL);
if (!dws->dmac)
dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/* 1. Init rx channel */
dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
if (!dws->rxchan)
goto err_exit;
rxs = &dw_dma->dmas_rx;
rxs->hs_mode = LNW_DMA_HW_HS;
rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
dws->rxchan->private = rxs;
/* 2. Init tx channel */
dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
if (!dws->txchan)
goto free_rxchan;
txs = &dw_dma->dmas_tx;
txs->hs_mode = LNW_DMA_HW_HS;
txs->cfg_mode = LNW_DMA_MEM_TO_PER;
dws->txchan->private = txs;
dws->dma_inited = 1;
return 0;
free_rxchan:
dma_release_channel(dws->rxchan);
err_exit:
return -1;
}
static void mid_spi_dma_exit(struct dw_spi *dws)
{
dma_release_channel(dws->txchan);
dma_release_channel(dws->rxchan);
}
/*
* dws->dma_chan_done is cleared before the dma transfer starts,
* callback for rx/tx channel will each increment it by 1.
* Reaching 2 means the whole spi transaction is done.
*/
static void dw_spi_dma_done(void *arg)
{
struct dw_spi *dws = arg;
if (++dws->dma_chan_done != 2)
return;
dw_spi_xfer_done(dws);
}
static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
{
struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
struct dma_chan *txchan, *rxchan;
struct dma_slave_config txconf, rxconf;
u16 dma_ctrl = 0;
/* 1. setup DMA related registers */
if (cs_change) {
spi_enable_chip(dws, 0);
dw_writew(dws, dmardlr, 0xf);
dw_writew(dws, dmatdlr, 0x10);
if (dws->tx_dma)
dma_ctrl |= 0x2;
if (dws->rx_dma)
dma_ctrl |= 0x1;
dw_writew(dws, dmacr, dma_ctrl);
spi_enable_chip(dws, 1);
}
dws->dma_chan_done = 0;
txchan = dws->txchan;
rxchan = dws->rxchan;
/* 2. Prepare the TX dma transfer */
txconf.direction = DMA_TO_DEVICE;
txconf.dst_addr = dws->dma_addr;
txconf.dst_maxburst = LNW_DMA_MSIZE_16;
txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
(unsigned long) &txconf);
memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
dws->tx_sgl.dma_address = dws->tx_dma;
dws->tx_sgl.length = dws->len;
txdesc = txchan->device->device_prep_slave_sg(txchan,
&dws->tx_sgl,
1,
DMA_TO_DEVICE,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
txdesc->callback = dw_spi_dma_done;
txdesc->callback_param = dws;
/* 3. Prepare the RX dma transfer */
rxconf.direction = DMA_FROM_DEVICE;
rxconf.src_addr = dws->dma_addr;
rxconf.src_maxburst = LNW_DMA_MSIZE_16;
rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
(unsigned long) &rxconf);
memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
dws->rx_sgl.dma_address = dws->rx_dma;
dws->rx_sgl.length = dws->len;
rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
&dws->rx_sgl,
1,
DMA_FROM_DEVICE,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
rxdesc->callback = dw_spi_dma_done;
rxdesc->callback_param = dws;
/* rx must be started before tx due to spi instinct */
rxdesc->tx_submit(rxdesc);
txdesc->tx_submit(txdesc);
return 0;
}
static struct dw_spi_dma_ops mid_dma_ops = {
.dma_init = mid_spi_dma_init,
.dma_exit = mid_spi_dma_exit,
.dma_transfer = mid_spi_dma_transfer,
};
#endif
/* Some specific info for SPI0 controller on Moorestown */
/* HW info for MRST CLk Control Unit, one 32b reg */
#define MRST_SPI_CLK_BASE 100000000 /* 100m */
#define MRST_CLK_SPI0_REG 0xff11d86c
#define CLK_SPI_BDIV_OFFSET 0
#define CLK_SPI_BDIV_MASK 0x00000007
#define CLK_SPI_CDIV_OFFSET 9
#define CLK_SPI_CDIV_MASK 0x00000e00
#define CLK_SPI_DISABLE_OFFSET 8
int dw_spi_mid_init(struct dw_spi *dws)
{
u32 *clk_reg, clk_cdiv;
clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
if (!clk_reg)
return -ENOMEM;
/* get SPI controller operating freq info */
clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET;
dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
iounmap(clk_reg);
dws->num_cs = 16;
dws->fifo_len = 40; /* FIFO has 40 words buffer */
#ifdef CONFIG_SPI_DW_MID_DMA
dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
if (!dws->dma_priv)
return -ENOMEM;
dws->dma_ops = &mid_dma_ops;
#endif
return 0;
}
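/*
* Worked example (illustrative only): if the CDIV field read back from
* MRST_CLK_SPI0_REG is 3, max_freq = 100000000 / (3 + 1) = 25 MHz.
*/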
| gpl-2.0 |
evitareul/android_kernel_htc_evitareul | drivers/media/dvb/ttpci/budget.c | 3312 | 23510 | /*
* budget.c: driver for the SAA7146 based Budget DVB cards
*
* Compiled from various sources by Michael Hunold <michael@mihu.de>
*
* Copyright (C) 2002 Ralph Metzler <rjkm@metzlerbros.de>
*
* Copyright (C) 1999-2002 Ralph Metzler
* & Marcus Metzler for convergence integrated media GmbH
*
* 26feb2004 Support for FS Activy Card (Grundig tuner) by
* Michael Dreher <michael@5dot1.de>,
* Oliver Endriss <o.endriss@gmx.de> and
* Andreas 'randy' Weinberger
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at http://www.linuxtv.org/
*/
#include "budget.h"
#include "stv0299.h"
#include "ves1x93.h"
#include "ves1820.h"
#include "l64781.h"
#include "tda8083.h"
#include "s5h1420.h"
#include "tda10086.h"
#include "tda826x.h"
#include "lnbp21.h"
#include "bsru6.h"
#include "bsbe1.h"
#include "tdhd1.h"
#include "stv6110x.h"
#include "stv090x.h"
#include "isl6423.h"
static int diseqc_method;
module_param(diseqc_method, int, 0444);
MODULE_PARM_DESC(diseqc_method, "Select DiSEqC method for subsystem id 13c2:1003, 0: default, 1: more reliable (for newer revisions only)");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static void Set22K (struct budget *budget, int state)
{
struct saa7146_dev *dev=budget->dev;
dprintk(2, "budget: %p\n", budget);
saa7146_setgpio(dev, 3, (state ? SAA7146_GPIO_OUTHI : SAA7146_GPIO_OUTLO));
}
/* Diseqc functions only for TT Budget card */
/* taken from the Skyvision DVB driver by
Ralph Metzler <rjkm@metzlerbros.de> */
static void DiseqcSendBit (struct budget *budget, int data)
{
struct saa7146_dev *dev=budget->dev;
dprintk(2, "budget: %p\n", budget);
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI);
udelay(data ? 500 : 1000);
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
udelay(data ? 1000 : 500);
}
static void DiseqcSendByte (struct budget *budget, int data)
{
int i, par=1, d;
dprintk(2, "budget: %p\n", budget);
for (i=7; i>=0; i--) {
d = (data>>i)&1;
par ^= d;
DiseqcSendBit(budget, d);
}
DiseqcSendBit(budget, par);
}
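/*
 * Worked example (illustration only): for the DiSEqC framing byte 0xe0 the
 * loop above sends the bits 1,1,1,0,0,0,0,0; par starts at 1 and is toggled
 * by each 1 bit, ending at 0, so a 0 parity bit is appended and the nine
 * transmitted bits carry odd parity overall.
 */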
static int SendDiSEqCMsg (struct budget *budget, int len, u8 *msg, unsigned long burst)
{
struct saa7146_dev *dev=budget->dev;
int i;
dprintk(2, "budget: %p\n", budget);
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
mdelay(16);
for (i=0; i<len; i++)
DiseqcSendByte(budget, msg[i]);
mdelay(16);
if (burst!=-1) {
if (burst)
DiseqcSendByte(budget, 0xff);
else {
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI);
mdelay(12);
udelay(500);
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
}
msleep(20);
}
return 0;
}
/*
* Routines for the Fujitsu Siemens Activy budget card
* 22 kHz tone and DiSEqC are handled by the frontend.
* Voltage must be set here.
* GPIO 1: LNBP EN, GPIO 2: LNBP VSEL
*/
static int SetVoltage_Activy (struct budget *budget, fe_sec_voltage_t voltage)
{
struct saa7146_dev *dev=budget->dev;
dprintk(2, "budget: %p\n", budget);
switch (voltage) {
case SEC_VOLTAGE_13:
saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI);
saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTLO);
break;
case SEC_VOLTAGE_18:
saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI);
saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI);
break;
case SEC_VOLTAGE_OFF:
saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO);
break;
default:
return -EINVAL;
}
return 0;
}
static int siemens_budget_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage)
{
struct budget* budget = (struct budget*) fe->dvb->priv;
return SetVoltage_Activy (budget, voltage);
}
static int budget_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
{
struct budget* budget = (struct budget*) fe->dvb->priv;
switch (tone) {
case SEC_TONE_ON:
Set22K (budget, 1);
break;
case SEC_TONE_OFF:
Set22K (budget, 0);
break;
default:
return -EINVAL;
}
return 0;
}
static int budget_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd)
{
struct budget* budget = (struct budget*) fe->dvb->priv;
SendDiSEqCMsg (budget, cmd->msg_len, cmd->msg, 0);
return 0;
}
static int budget_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t minicmd)
{
struct budget* budget = (struct budget*) fe->dvb->priv;
SendDiSEqCMsg (budget, 0, NULL, minicmd);
return 0;
}
static int alps_bsrv2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
{
struct budget* budget = (struct budget*) fe->dvb->priv;
u8 pwr = 0;
u8 buf[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
u32 div = (params->frequency + 479500) / 125;
if (params->frequency > 2000000) pwr = 3;
else if (params->frequency > 1800000) pwr = 2;
else if (params->frequency > 1600000) pwr = 1;
else if (params->frequency > 1200000) pwr = 0;
else if (params->frequency >= 1100000) pwr = 1;
else pwr = 2;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = ((div & 0x18000) >> 10) | 0x95;
buf[3] = (pwr << 6) | 0x30;
// NOTE: since we're using a prescaler of 2, we set the
// divisor frequency to 62.5kHz and divide by 125 above
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
return 0;
}
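/*
 * Worked example (illustration only): with params->frequency = 1700000 kHz
 * the code above computes div = (1700000 + 479500) / 125 = 17436 = 0x441c,
 * so buf[0] = 0x44, buf[1] = 0x1c, buf[2] = 0x95 (bits 15/16 of div are 0)
 * and, with pwr = 1 for that band, buf[3] = (1 << 6) | 0x30 = 0x70.
 */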
static struct ves1x93_config alps_bsrv2_config =
{
.demod_address = 0x08,
.xin = 90100000UL,
.invert_pwm = 0,
};
static int alps_tdbe2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
{
struct budget* budget = (struct budget*) fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x62, .flags = 0, .buf = data, .len = sizeof(data) };
div = (params->frequency + 35937500 + 31250) / 62500;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0x85 | ((div >> 10) & 0x60);
data[3] = (params->frequency < 174000000 ? 0x88 : params->frequency < 470000000 ? 0x84 : 0x81);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
return 0;
}
static struct ves1820_config alps_tdbe2_config = {
.demod_address = 0x09,
.xin = 57840000UL,
.invert = 1,
.selagc = VES1820_SELAGC_SIGNAMPERR,
};
static int grundig_29504_401_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
{
struct budget *budget = fe->dvb->priv;
u8 *tuner_addr = fe->tuner_priv;
u32 div;
u8 cfg, cpump, band_select;
u8 data[4];
struct i2c_msg msg = { .flags = 0, .buf = data, .len = sizeof(data) };
if (tuner_addr)
msg.addr = *tuner_addr;
else
msg.addr = 0x61;
div = (36125000 + params->frequency) / 166666;
cfg = 0x88;
if (params->frequency < 175000000) cpump = 2;
else if (params->frequency < 390000000) cpump = 1;
else if (params->frequency < 470000000) cpump = 2;
else if (params->frequency < 750000000) cpump = 1;
else cpump = 3;
if (params->frequency < 175000000) band_select = 0x0e;
else if (params->frequency < 470000000) band_select = 0x05;
else band_select = 0x03;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = ((div >> 10) & 0x60) | cfg;
data[3] = (cpump << 6) | band_select;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
return 0;
}
static struct l64781_config grundig_29504_401_config = {
.demod_address = 0x55,
};
static struct l64781_config grundig_29504_401_config_activy = {
.demod_address = 0x54,
};
static u8 tuner_address_grundig_29504_401_activy = 0x60;
static int grundig_29504_451_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
{
struct budget* budget = (struct budget*) fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
div = params->frequency / 125;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0x8e;
data[3] = 0x00;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
return 0;
}
static struct tda8083_config grundig_29504_451_config = {
.demod_address = 0x68,
};
static int s5h1420_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
{
struct budget* budget = (struct budget*) fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
div = params->frequency / 1000;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0xc2;
if (div < 1450)
data[3] = 0x00;
else if (div < 1850)
data[3] = 0x40;
else if (div < 2000)
data[3] = 0x80;
else
data[3] = 0xc0;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
return 0;
}
static struct s5h1420_config s5h1420_config = {
.demod_address = 0x53,
.invert = 1,
.cdclk_polarity = 1,
};
static struct tda10086_config tda10086_config = {
.demod_address = 0x0e,
.invert = 0,
.diseqc_tone = 1,
.xtal_freq = TDA10086_XTAL_16M,
};
static struct stv0299_config alps_bsru6_config_activy = {
.demod_address = 0x68,
.inittab = alps_bsru6_inittab,
.mclk = 88000000UL,
.invert = 1,
.op0_off = 1,
.min_delay_ms = 100,
.set_symbol_rate = alps_bsru6_set_symbol_rate,
};
static struct stv0299_config alps_bsbe1_config_activy = {
.demod_address = 0x68,
.inittab = alps_bsbe1_inittab,
.mclk = 88000000UL,
.invert = 1,
.op0_off = 1,
.min_delay_ms = 100,
.set_symbol_rate = alps_bsbe1_set_symbol_rate,
};
static int alps_tdhd1_204_request_firmware(struct dvb_frontend *fe, const struct firmware **fw, char *name)
{
struct budget *budget = (struct budget *)fe->dvb->priv;
return request_firmware(fw, name, &budget->dev->pci->dev);
}
static int i2c_readreg(struct i2c_adapter *i2c, u8 adr, u8 reg)
{
u8 val;
struct i2c_msg msg[] = {
{ .addr = adr, .flags = 0, .buf = &reg, .len = 1 },
{ .addr = adr, .flags = I2C_M_RD, .buf = &val, .len = 1 }
};
return (i2c_transfer(i2c, msg, 2) != 2) ? -EIO : val;
}
static u8 read_pwm(struct budget* budget)
{
u8 b = 0xff;
u8 pwm;
struct i2c_msg msg[] = { { .addr = 0x50,.flags = 0,.buf = &b,.len = 1 },
{ .addr = 0x50,.flags = I2C_M_RD,.buf = &pwm,.len = 1} };
if ((i2c_transfer(&budget->i2c_adap, msg, 2) != 2) || (pwm == 0xff))
pwm = 0x48;
return pwm;
}
static struct stv090x_config tt1600_stv090x_config = {
.device = STV0903,
.demod_mode = STV090x_SINGLE,
.clk_mode = STV090x_CLK_EXT,
.xtal = 13500000,
.address = 0x68,
.ts1_mode = STV090x_TSMODE_DVBCI,
.ts2_mode = STV090x_TSMODE_SERIAL_CONTINUOUS,
.repeater_level = STV090x_RPTLEVEL_16,
.tuner_init = NULL,
.tuner_sleep = NULL,
.tuner_set_mode = NULL,
.tuner_set_frequency = NULL,
.tuner_get_frequency = NULL,
.tuner_set_bandwidth = NULL,
.tuner_get_bandwidth = NULL,
.tuner_set_bbgain = NULL,
.tuner_get_bbgain = NULL,
.tuner_set_refclk = NULL,
.tuner_get_status = NULL,
};
static struct stv6110x_config tt1600_stv6110x_config = {
.addr = 0x60,
.refclk = 27000000,
.clk_div = 2,
};
static struct isl6423_config tt1600_isl6423_config = {
.current_max = SEC_CURRENT_515m,
.curlim = SEC_CURRENT_LIM_ON,
.mod_extern = 1,
.addr = 0x08,
};
static void frontend_init(struct budget *budget)
{
(void)alps_bsbe1_config; /* avoid warning */
switch(budget->dev->pci->subsystem_device) {
case 0x1003: // Hauppauge/TT Nova budget (stv0299/ALPS BSRU6(tsa5059) OR ves1893/ALPS BSRV2(sp5659))
case 0x1013:
// try the ALPS BSRV2 first of all
budget->dvb_frontend = dvb_attach(ves1x93_attach, &alps_bsrv2_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsrv2_tuner_set_params;
budget->dvb_frontend->ops.diseqc_send_master_cmd = budget_diseqc_send_master_cmd;
budget->dvb_frontend->ops.diseqc_send_burst = budget_diseqc_send_burst;
budget->dvb_frontend->ops.set_tone = budget_set_tone;
break;
}
// try the ALPS BSRU6 now
budget->dvb_frontend = dvb_attach(stv0299_attach, &alps_bsru6_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params;
budget->dvb_frontend->tuner_priv = &budget->i2c_adap;
if (budget->dev->pci->subsystem_device == 0x1003 && diseqc_method == 0) {
budget->dvb_frontend->ops.diseqc_send_master_cmd = budget_diseqc_send_master_cmd;
budget->dvb_frontend->ops.diseqc_send_burst = budget_diseqc_send_burst;
budget->dvb_frontend->ops.set_tone = budget_set_tone;
}
break;
}
break;
case 0x1004: // Hauppauge/TT DVB-C budget (ves1820/ALPS TDBE2(sp5659))
budget->dvb_frontend = dvb_attach(ves1820_attach, &alps_tdbe2_config, &budget->i2c_adap, read_pwm(budget));
if (budget->dvb_frontend) {
budget->dvb_frontend->ops.tuner_ops.set_params = alps_tdbe2_tuner_set_params;
break;
}
break;
case 0x1005: // Hauppauge/TT Nova-T budget (L64781/Grundig 29504-401(tsa5060))
budget->dvb_frontend = dvb_attach(l64781_attach, &grundig_29504_401_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->ops.tuner_ops.set_params = grundig_29504_401_tuner_set_params;
budget->dvb_frontend->tuner_priv = NULL;
break;
}
break;
case 0x4f60: /* Fujitsu Siemens Activy Budget-S PCI rev AL (stv0299/tsa5059) */
{
int subtype = i2c_readreg(&budget->i2c_adap, 0x50, 0x67);
if (subtype < 0)
break;
/* fixme: find a better way to identify the card */
if (subtype < 0x36) {
/* assume ALPS BSRU6 */
budget->dvb_frontend = dvb_attach(stv0299_attach, &alps_bsru6_config_activy, &budget->i2c_adap);
if (budget->dvb_frontend) {
printk(KERN_INFO "budget: tuner ALPS BSRU6 detected\n");
budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params;
budget->dvb_frontend->tuner_priv = &budget->i2c_adap;
budget->dvb_frontend->ops.set_voltage = siemens_budget_set_voltage;
budget->dvb_frontend->ops.dishnetwork_send_legacy_command = NULL;
break;
}
} else {
/* assume ALPS BSBE1 */
/* reset tuner */
saa7146_setgpio(budget->dev, 3, SAA7146_GPIO_OUTLO);
msleep(50);
saa7146_setgpio(budget->dev, 3, SAA7146_GPIO_OUTHI);
msleep(250);
budget->dvb_frontend = dvb_attach(stv0299_attach, &alps_bsbe1_config_activy, &budget->i2c_adap);
if (budget->dvb_frontend) {
printk(KERN_INFO "budget: tuner ALPS BSBE1 detected\n");
budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsbe1_tuner_set_params;
budget->dvb_frontend->tuner_priv = &budget->i2c_adap;
budget->dvb_frontend->ops.set_voltage = siemens_budget_set_voltage;
budget->dvb_frontend->ops.dishnetwork_send_legacy_command = NULL;
break;
}
}
break;
}
case 0x4f61: // Fujitsu Siemens Activy Budget-S PCI rev GR (tda8083/Grundig 29504-451(tsa5522))
budget->dvb_frontend = dvb_attach(tda8083_attach, &grundig_29504_451_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->ops.tuner_ops.set_params = grundig_29504_451_tuner_set_params;
budget->dvb_frontend->ops.set_voltage = siemens_budget_set_voltage;
budget->dvb_frontend->ops.dishnetwork_send_legacy_command = NULL;
}
break;
case 0x5f60: /* Fujitsu Siemens Activy Budget-T PCI rev AL (tda10046/ALPS TDHD1-204A) */
budget->dvb_frontend = dvb_attach(tda10046_attach, &alps_tdhd1_204a_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->ops.tuner_ops.set_params = alps_tdhd1_204a_tuner_set_params;
budget->dvb_frontend->tuner_priv = &budget->i2c_adap;
}
break;
case 0x5f61: /* Fujitsu Siemens Activy Budget-T PCI rev GR (L64781/Grundig 29504-401(tsa5060)) */
budget->dvb_frontend = dvb_attach(l64781_attach, &grundig_29504_401_config_activy, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->tuner_priv = &tuner_address_grundig_29504_401_activy;
budget->dvb_frontend->ops.tuner_ops.set_params = grundig_29504_401_tuner_set_params;
}
break;
case 0x1016: // Hauppauge/TT Nova-S SE (samsung s5h1420/????(tda8260))
budget->dvb_frontend = dvb_attach(s5h1420_attach, &s5h1420_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->ops.tuner_ops.set_params = s5h1420_tuner_set_params;
if (dvb_attach(lnbp21_attach, budget->dvb_frontend, &budget->i2c_adap, 0, 0) == NULL) {
printk("%s: No LNBP21 found!\n", __func__);
goto error_out;
}
break;
}
case 0x1018: // TT Budget-S-1401 (philips tda10086/philips tda8262)
// gpio2 is connected to CLB - reset it + leave it high
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
msleep(1);
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
msleep(1);
budget->dvb_frontend = dvb_attach(tda10086_attach, &tda10086_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
if (dvb_attach(tda826x_attach, budget->dvb_frontend, 0x60, &budget->i2c_adap, 0) == NULL)
printk("%s: No tda826x found!\n", __func__);
if (dvb_attach(lnbp21_attach, budget->dvb_frontend, &budget->i2c_adap, 0, 0) == NULL) {
printk("%s: No LNBP21 found!\n", __func__);
goto error_out;
}
break;
}
case 0x101c: { /* TT S2-1600 */
struct stv6110x_devctl *ctl;
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
msleep(50);
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
msleep(250);
budget->dvb_frontend = dvb_attach(stv090x_attach,
&tt1600_stv090x_config,
&budget->i2c_adap,
STV090x_DEMODULATOR_0);
if (budget->dvb_frontend) {
ctl = dvb_attach(stv6110x_attach,
budget->dvb_frontend,
&tt1600_stv6110x_config,
&budget->i2c_adap);
if (ctl) {
tt1600_stv090x_config.tuner_init = ctl->tuner_init;
tt1600_stv090x_config.tuner_sleep = ctl->tuner_sleep;
tt1600_stv090x_config.tuner_set_mode = ctl->tuner_set_mode;
tt1600_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency;
tt1600_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency;
tt1600_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth;
tt1600_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth;
tt1600_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain;
tt1600_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain;
tt1600_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk;
tt1600_stv090x_config.tuner_get_status = ctl->tuner_get_status;
/* call the init function once to initialize
tuner's clock output divider and demod's
master clock */
if (budget->dvb_frontend->ops.init)
budget->dvb_frontend->ops.init(budget->dvb_frontend);
if (dvb_attach(isl6423_attach,
budget->dvb_frontend,
&budget->i2c_adap,
&tt1600_isl6423_config) == NULL) {
printk(KERN_ERR "%s: No Intersil ISL6423 found!\n", __func__);
goto error_out;
}
} else {
printk(KERN_ERR "%s: No STV6110(A) Silicon Tuner found!\n", __func__);
goto error_out;
}
}
}
break;
}
if (budget->dvb_frontend == NULL) {
printk("budget: A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
budget->dev->pci->vendor,
budget->dev->pci->device,
budget->dev->pci->subsystem_vendor,
budget->dev->pci->subsystem_device);
} else {
if (dvb_register_frontend(&budget->dvb_adapter, budget->dvb_frontend))
goto error_out;
}
return;
error_out:
printk("budget: Frontend registration failed!\n");
dvb_frontend_detach(budget->dvb_frontend);
budget->dvb_frontend = NULL;
return;
}
static int budget_attach (struct saa7146_dev* dev, struct saa7146_pci_extension_data *info)
{
struct budget *budget = NULL;
int err;
budget = kmalloc(sizeof(struct budget), GFP_KERNEL);
if( NULL == budget ) {
return -ENOMEM;
}
dprintk(2, "dev:%p, info:%p, budget:%p\n", dev, info, budget);
dev->ext_priv = budget;
err = ttpci_budget_init(budget, dev, info, THIS_MODULE, adapter_nr);
if (err) {
printk("==> failed\n");
kfree (budget);
return err;
}
budget->dvb_adapter.priv = budget;
frontend_init(budget);
ttpci_budget_init_hooks(budget);
return 0;
}
static int budget_detach (struct saa7146_dev* dev)
{
struct budget *budget = (struct budget*) dev->ext_priv;
int err;
if (budget->dvb_frontend) {
dvb_unregister_frontend(budget->dvb_frontend);
dvb_frontend_detach(budget->dvb_frontend);
}
err = ttpci_budget_deinit (budget);
kfree (budget);
dev->ext_priv = NULL;
return err;
}
static struct saa7146_extension budget_extension;
MAKE_BUDGET_INFO(ttbs, "TT-Budget/WinTV-NOVA-S PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbc, "TT-Budget/WinTV-NOVA-C PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbt, "TT-Budget/WinTV-NOVA-T PCI", BUDGET_TT);
MAKE_BUDGET_INFO(satel, "SATELCO Multimedia PCI", BUDGET_TT_HW_DISEQC);
MAKE_BUDGET_INFO(ttbs1401, "TT-Budget-S-1401 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(tt1600, "TT-Budget S2-1600 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(fsacs0, "Fujitsu Siemens Activy Budget-S PCI (rev GR/grundig frontend)", BUDGET_FS_ACTIVY);
MAKE_BUDGET_INFO(fsacs1, "Fujitsu Siemens Activy Budget-S PCI (rev AL/alps frontend)", BUDGET_FS_ACTIVY);
MAKE_BUDGET_INFO(fsact, "Fujitsu Siemens Activy Budget-T PCI (rev GR/Grundig frontend)", BUDGET_FS_ACTIVY);
MAKE_BUDGET_INFO(fsact1, "Fujitsu Siemens Activy Budget-T PCI (rev AL/ALPS TDHD1-204A)", BUDGET_FS_ACTIVY);
static struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1003),
MAKE_EXTENSION_PCI(ttbc, 0x13c2, 0x1004),
MAKE_EXTENSION_PCI(ttbt, 0x13c2, 0x1005),
MAKE_EXTENSION_PCI(satel, 0x13c2, 0x1013),
MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1016),
MAKE_EXTENSION_PCI(ttbs1401, 0x13c2, 0x1018),
MAKE_EXTENSION_PCI(tt1600, 0x13c2, 0x101c),
MAKE_EXTENSION_PCI(fsacs1,0x1131, 0x4f60),
MAKE_EXTENSION_PCI(fsacs0,0x1131, 0x4f61),
MAKE_EXTENSION_PCI(fsact1, 0x1131, 0x5f60),
MAKE_EXTENSION_PCI(fsact, 0x1131, 0x5f61),
{
.vendor = 0,
}
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
static struct saa7146_extension budget_extension = {
.name = "budget dvb",
.flags = SAA7146_USE_I2C_IRQ,
.module = THIS_MODULE,
.pci_tbl = pci_tbl,
.attach = budget_attach,
.detach = budget_detach,
.irq_mask = MASK_10,
.irq_func = ttpci_budget_irq10_handler,
};
static int __init budget_init(void)
{
return saa7146_register_extension(&budget_extension);
}
static void __exit budget_exit(void)
{
saa7146_unregister_extension(&budget_extension);
}
module_init(budget_init);
module_exit(budget_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, Michael Hunold, others");
MODULE_DESCRIPTION("driver for the SAA7146 based so-called "
"budget PCI DVB cards by Siemens, Technotrend, Hauppauge");
| gpl-2.0 |
jmw7912/wat-0016-kernel-2.6.37 | fs/jffs2/symlink.c | 3568 | 1873 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include "nodelist.h"
static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd);
const struct inode_operations jffs2_symlink_inode_operations =
{
.readlink = generic_readlink,
.follow_link = jffs2_follow_link,
.check_acl = jffs2_check_acl,
.setattr = jffs2_setattr,
.setxattr = jffs2_setxattr,
.getxattr = jffs2_getxattr,
.listxattr = jffs2_listxattr,
.removexattr = jffs2_removexattr
};
static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode);
char *p = (char *)f->target;
/*
* We don't acquire the f->sem mutex here since the only data we
* use is f->target.
*
* 1. If we are here the inode has already been built and f->target
* has to point to the target path.
* 2. Nobody uses f->target (if the inode is symlink's inode). The
* exception is inode freeing function which frees f->target. But
* it can't be called while we are here and before VFS has
* stopped using our f->target string, which we provide by means of
* the nd_set_link() call.
*/
if (!p) {
printk(KERN_ERR "jffs2_follow_link(): can't find symlink target\n");
p = ERR_PTR(-EIO);
}
D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->target));
nd_set_link(nd, p);
/*
* We will unlock the f->sem mutex but VFS will use the f->target string. This is safe
* since the only way that may cause f->target to be changed is iput() operation.
* But VFS will not use f->target after iput() has been called.
*/
return NULL;
}
| gpl-2.0 |
mickael-guene/kernel | arch/mips/lantiq/falcon/reset.c | 4336 | 2133 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
* Copyright (C) 2012 John Crispin <blogic@openwrt.org>
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <asm/reboot.h>
#include <linux/export.h>
#include <lantiq_soc.h>
/* CPU0 Reset Source Register */
#define SYS1_CPU0RS 0x0040
/* reset cause mask */
#define CPU0RS_MASK 0x0003
/* CPU0 Boot Mode Register */
#define SYS1_BM 0x00a0
/* boot mode mask */
#define BM_MASK 0x0005
/* allow platform code to find out what source we booted from */
unsigned char ltq_boot_select(void)
{
return ltq_sys1_r32(SYS1_BM) & BM_MASK;
}
/* allow the watchdog driver to find out what the boot reason was */
int ltq_reset_cause(void)
{
return ltq_sys1_r32(SYS1_CPU0RS) & CPU0RS_MASK;
}
EXPORT_SYMBOL_GPL(ltq_reset_cause);
#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
#define BOOT_PW1_REG (BOOT_REG_BASE | 0x20)
#define BOOT_PW2_REG (BOOT_REG_BASE | 0x24)
#define BOOT_PW1 0x4C545100
#define BOOT_PW2 0x0051544C
#define WDT_REG_BASE (KSEG1 | 0x1F8803F0)
#define WDT_PW1 0x00BE0000
#define WDT_PW2 0x00DC0000
static void machine_restart(char *command)
{
local_irq_disable();
/* reboot magic */
ltq_w32(BOOT_PW1, (void *)BOOT_PW1_REG); /* 'LTQ\0' */
ltq_w32(BOOT_PW2, (void *)BOOT_PW2_REG); /* '\0QTL' */
ltq_w32(0, (void *)BOOT_REG_BASE); /* reset Bootreg RVEC */
/* watchdog magic */
ltq_w32(WDT_PW1, (void *)WDT_REG_BASE);
ltq_w32(WDT_PW2 |
(0x3 << 26) | /* PWL */
(0x2 << 24) | /* CLKDIV */
(0x1 << 31) | /* enable */
(1), /* reload */
(void *)WDT_REG_BASE);
unreachable();
}
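/*
 * Illustrative note (added, not from the original source): the watchdog word
 * composed above is WDT_PW2 | (0x3 << 26) | (0x2 << 24) | (0x1 << 31) | 1
 * = 0x00dc0000 | 0x0c000000 | 0x02000000 | 0x80000000 | 0x1 = 0x8edc0001,
 * i.e. password, PWL, CLKDIV, enable and reload packed into one 32-bit write.
 */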
static void machine_halt(void)
{
local_irq_disable();
unreachable();
}
static void machine_power_off(void)
{
local_irq_disable();
unreachable();
}
static int __init mips_reboot_setup(void)
{
_machine_restart = machine_restart;
_machine_halt = machine_halt;
pm_power_off = machine_power_off;
return 0;
}
arch_initcall(mips_reboot_setup);
| gpl-2.0 |
vic3t3chn0/kernel_ubuntu_togari | drivers/net/wireless/b43legacy/ilt.c | 4336 | 10742 | /*
Broadcom B43legacy wireless driver
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Stefano Brivio <stefano.brivio@polimi.it>
Michael Buesch <mbuesch@freenet.de>
Danny van Dyk <kugelfang@gentoo.org>
Andreas Jaggi <andreas.jaggi@waterwave.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include "b43legacy.h"
#include "ilt.h"
#include "phy.h"
/**** Initial Internal Lookup Tables ****/
const u32 b43legacy_ilt_rotor[B43legacy_ILT_ROTOR_SIZE] = {
0xFEB93FFD, 0xFEC63FFD, /* 0 */
0xFED23FFD, 0xFEDF3FFD,
0xFEEC3FFE, 0xFEF83FFE,
0xFF053FFE, 0xFF113FFE,
0xFF1E3FFE, 0xFF2A3FFF, /* 8 */
0xFF373FFF, 0xFF443FFF,
0xFF503FFF, 0xFF5D3FFF,
0xFF693FFF, 0xFF763FFF,
0xFF824000, 0xFF8F4000, /* 16 */
0xFF9B4000, 0xFFA84000,
0xFFB54000, 0xFFC14000,
0xFFCE4000, 0xFFDA4000,
0xFFE74000, 0xFFF34000, /* 24 */
0x00004000, 0x000D4000,
0x00194000, 0x00264000,
0x00324000, 0x003F4000,
0x004B4000, 0x00584000, /* 32 */
0x00654000, 0x00714000,
0x007E4000, 0x008A3FFF,
0x00973FFF, 0x00A33FFF,
0x00B03FFF, 0x00BC3FFF, /* 40 */
0x00C93FFF, 0x00D63FFF,
0x00E23FFE, 0x00EF3FFE,
0x00FB3FFE, 0x01083FFE,
0x01143FFE, 0x01213FFD, /* 48 */
0x012E3FFD, 0x013A3FFD,
0x01473FFD,
};
const u32 b43legacy_ilt_retard[B43legacy_ILT_RETARD_SIZE] = {
0xDB93CB87, 0xD666CF64, /* 0 */
0xD1FDD358, 0xCDA6D826,
0xCA38DD9F, 0xC729E2B4,
0xC469E88E, 0xC26AEE2B,
0xC0DEF46C, 0xC073FA62, /* 8 */
0xC01D00D5, 0xC0760743,
0xC1560D1E, 0xC2E51369,
0xC4ED18FF, 0xC7AC1ED7,
0xCB2823B2, 0xCEFA28D9, /* 16 */
0xD2F62D3F, 0xD7BB3197,
0xDCE53568, 0xE1FE3875,
0xE7D13B35, 0xED663D35,
0xF39B3EC4, 0xF98E3FA7, /* 24 */
0x00004000, 0x06723FA7,
0x0C653EC4, 0x129A3D35,
0x182F3B35, 0x1E023875,
0x231B3568, 0x28453197, /* 32 */
0x2D0A2D3F, 0x310628D9,
0x34D823B2, 0x38541ED7,
0x3B1318FF, 0x3D1B1369,
0x3EAA0D1E, 0x3F8A0743, /* 40 */
0x3FE300D5, 0x3F8DFA62,
0x3F22F46C, 0x3D96EE2B,
0x3B97E88E, 0x38D7E2B4,
0x35C8DD9F, 0x325AD826, /* 48 */
0x2E03D358, 0x299ACF64,
0x246DCB87,
};
const u16 b43legacy_ilt_finefreqa[B43legacy_ILT_FINEFREQA_SIZE] = {
0x0082, 0x0082, 0x0102, 0x0182, /* 0 */
0x0202, 0x0282, 0x0302, 0x0382,
0x0402, 0x0482, 0x0502, 0x0582,
0x05E2, 0x0662, 0x06E2, 0x0762,
0x07E2, 0x0842, 0x08C2, 0x0942, /* 16 */
0x09C2, 0x0A22, 0x0AA2, 0x0B02,
0x0B82, 0x0BE2, 0x0C62, 0x0CC2,
0x0D42, 0x0DA2, 0x0E02, 0x0E62,
0x0EE2, 0x0F42, 0x0FA2, 0x1002, /* 32 */
0x1062, 0x10C2, 0x1122, 0x1182,
0x11E2, 0x1242, 0x12A2, 0x12E2,
0x1342, 0x13A2, 0x1402, 0x1442,
0x14A2, 0x14E2, 0x1542, 0x1582, /* 48 */
0x15E2, 0x1622, 0x1662, 0x16C1,
0x1701, 0x1741, 0x1781, 0x17E1,
0x1821, 0x1861, 0x18A1, 0x18E1,
0x1921, 0x1961, 0x19A1, 0x19E1, /* 64 */
0x1A21, 0x1A61, 0x1AA1, 0x1AC1,
0x1B01, 0x1B41, 0x1B81, 0x1BA1,
0x1BE1, 0x1C21, 0x1C41, 0x1C81,
0x1CA1, 0x1CE1, 0x1D01, 0x1D41, /* 80 */
0x1D61, 0x1DA1, 0x1DC1, 0x1E01,
0x1E21, 0x1E61, 0x1E81, 0x1EA1,
0x1EE1, 0x1F01, 0x1F21, 0x1F41,
0x1F81, 0x1FA1, 0x1FC1, 0x1FE1, /* 96 */
0x2001, 0x2041, 0x2061, 0x2081,
0x20A1, 0x20C1, 0x20E1, 0x2101,
0x2121, 0x2141, 0x2161, 0x2181,
0x21A1, 0x21C1, 0x21E1, 0x2201, /* 112 */
0x2221, 0x2241, 0x2261, 0x2281,
0x22A1, 0x22C1, 0x22C1, 0x22E1,
0x2301, 0x2321, 0x2341, 0x2361,
0x2361, 0x2381, 0x23A1, 0x23C1, /* 128 */
0x23E1, 0x23E1, 0x2401, 0x2421,
0x2441, 0x2441, 0x2461, 0x2481,
0x2481, 0x24A1, 0x24C1, 0x24C1,
0x24E1, 0x2501, 0x2501, 0x2521, /* 144 */
0x2541, 0x2541, 0x2561, 0x2561,
0x2581, 0x25A1, 0x25A1, 0x25C1,
0x25C1, 0x25E1, 0x2601, 0x2601,
0x2621, 0x2621, 0x2641, 0x2641, /* 160 */
0x2661, 0x2661, 0x2681, 0x2681,
0x26A1, 0x26A1, 0x26C1, 0x26C1,
0x26E1, 0x26E1, 0x2701, 0x2701,
0x2721, 0x2721, 0x2740, 0x2740, /* 176 */
0x2760, 0x2760, 0x2780, 0x2780,
0x2780, 0x27A0, 0x27A0, 0x27C0,
0x27C0, 0x27E0, 0x27E0, 0x27E0,
0x2800, 0x2800, 0x2820, 0x2820, /* 192 */
0x2820, 0x2840, 0x2840, 0x2840,
0x2860, 0x2860, 0x2880, 0x2880,
0x2880, 0x28A0, 0x28A0, 0x28A0,
0x28C0, 0x28C0, 0x28C0, 0x28E0, /* 208 */
0x28E0, 0x28E0, 0x2900, 0x2900,
0x2900, 0x2920, 0x2920, 0x2920,
0x2940, 0x2940, 0x2940, 0x2960,
0x2960, 0x2960, 0x2960, 0x2980, /* 224 */
0x2980, 0x2980, 0x29A0, 0x29A0,
0x29A0, 0x29A0, 0x29C0, 0x29C0,
0x29C0, 0x29E0, 0x29E0, 0x29E0,
0x29E0, 0x2A00, 0x2A00, 0x2A00, /* 240 */
0x2A00, 0x2A20, 0x2A20, 0x2A20,
0x2A20, 0x2A40, 0x2A40, 0x2A40,
0x2A40, 0x2A60, 0x2A60, 0x2A60,
};
const u16 b43legacy_ilt_finefreqg[B43legacy_ILT_FINEFREQG_SIZE] = {
0x0089, 0x02E9, 0x0409, 0x04E9, /* 0 */
0x05A9, 0x0669, 0x0709, 0x0789,
0x0829, 0x08A9, 0x0929, 0x0989,
0x0A09, 0x0A69, 0x0AC9, 0x0B29,
0x0BA9, 0x0BE9, 0x0C49, 0x0CA9, /* 16 */
0x0D09, 0x0D69, 0x0DA9, 0x0E09,
0x0E69, 0x0EA9, 0x0F09, 0x0F49,
0x0FA9, 0x0FE9, 0x1029, 0x1089,
0x10C9, 0x1109, 0x1169, 0x11A9, /* 32 */
0x11E9, 0x1229, 0x1289, 0x12C9,
0x1309, 0x1349, 0x1389, 0x13C9,
0x1409, 0x1449, 0x14A9, 0x14E9,
0x1529, 0x1569, 0x15A9, 0x15E9, /* 48 */
0x1629, 0x1669, 0x16A9, 0x16E8,
0x1728, 0x1768, 0x17A8, 0x17E8,
0x1828, 0x1868, 0x18A8, 0x18E8,
0x1928, 0x1968, 0x19A8, 0x19E8, /* 64 */
0x1A28, 0x1A68, 0x1AA8, 0x1AE8,
0x1B28, 0x1B68, 0x1BA8, 0x1BE8,
0x1C28, 0x1C68, 0x1CA8, 0x1CE8,
0x1D28, 0x1D68, 0x1DC8, 0x1E08, /* 80 */
0x1E48, 0x1E88, 0x1EC8, 0x1F08,
0x1F48, 0x1F88, 0x1FE8, 0x2028,
0x2068, 0x20A8, 0x2108, 0x2148,
0x2188, 0x21C8, 0x2228, 0x2268, /* 96 */
0x22C8, 0x2308, 0x2348, 0x23A8,
0x23E8, 0x2448, 0x24A8, 0x24E8,
0x2548, 0x25A8, 0x2608, 0x2668,
0x26C8, 0x2728, 0x2787, 0x27E7, /* 112 */
0x2847, 0x28C7, 0x2947, 0x29A7,
0x2A27, 0x2AC7, 0x2B47, 0x2BE7,
0x2CA7, 0x2D67, 0x2E47, 0x2F67,
0x3247, 0x3526, 0x3646, 0x3726, /* 128 */
0x3806, 0x38A6, 0x3946, 0x39E6,
0x3A66, 0x3AE6, 0x3B66, 0x3BC6,
0x3C45, 0x3CA5, 0x3D05, 0x3D85,
0x3DE5, 0x3E45, 0x3EA5, 0x3EE5, /* 144 */
0x3F45, 0x3FA5, 0x4005, 0x4045,
0x40A5, 0x40E5, 0x4145, 0x4185,
0x41E5, 0x4225, 0x4265, 0x42C5,
0x4305, 0x4345, 0x43A5, 0x43E5, /* 160 */
0x4424, 0x4464, 0x44C4, 0x4504,
0x4544, 0x4584, 0x45C4, 0x4604,
0x4644, 0x46A4, 0x46E4, 0x4724,
0x4764, 0x47A4, 0x47E4, 0x4824, /* 176 */
0x4864, 0x48A4, 0x48E4, 0x4924,
0x4964, 0x49A4, 0x49E4, 0x4A24,
0x4A64, 0x4AA4, 0x4AE4, 0x4B23,
0x4B63, 0x4BA3, 0x4BE3, 0x4C23, /* 192 */
0x4C63, 0x4CA3, 0x4CE3, 0x4D23,
0x4D63, 0x4DA3, 0x4DE3, 0x4E23,
0x4E63, 0x4EA3, 0x4EE3, 0x4F23,
0x4F63, 0x4FC3, 0x5003, 0x5043, /* 208 */
0x5083, 0x50C3, 0x5103, 0x5143,
0x5183, 0x51E2, 0x5222, 0x5262,
0x52A2, 0x52E2, 0x5342, 0x5382,
0x53C2, 0x5402, 0x5462, 0x54A2, /* 224 */
0x5502, 0x5542, 0x55A2, 0x55E2,
0x5642, 0x5682, 0x56E2, 0x5722,
0x5782, 0x57E1, 0x5841, 0x58A1,
0x5901, 0x5961, 0x59C1, 0x5A21, /* 240 */
0x5AA1, 0x5B01, 0x5B81, 0x5BE1,
0x5C61, 0x5D01, 0x5D80, 0x5E20,
0x5EE0, 0x5FA0, 0x6080, 0x61C0,
};
const u16 b43legacy_ilt_noisea2[B43legacy_ILT_NOISEA2_SIZE] = {
0x0001, 0x0001, 0x0001, 0xFFFE,
0xFFFE, 0x3FFF, 0x1000, 0x0393,
};
const u16 b43legacy_ilt_noisea3[B43legacy_ILT_NOISEA3_SIZE] = {
0x4C4C, 0x4C4C, 0x4C4C, 0x2D36,
0x4C4C, 0x4C4C, 0x4C4C, 0x2D36,
};
const u16 b43legacy_ilt_noiseg1[B43legacy_ILT_NOISEG1_SIZE] = {
0x013C, 0x01F5, 0x031A, 0x0631,
0x0001, 0x0001, 0x0001, 0x0001,
};
const u16 b43legacy_ilt_noiseg2[B43legacy_ILT_NOISEG2_SIZE] = {
0x5484, 0x3C40, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000,
};
const u16 b43legacy_ilt_noisescaleg1[B43legacy_ILT_NOISESCALEG_SIZE] = {
0x6C77, 0x5162, 0x3B40, 0x3335, /* 0 */
0x2F2D, 0x2A2A, 0x2527, 0x1F21,
0x1A1D, 0x1719, 0x1616, 0x1414,
0x1414, 0x1400, 0x1414, 0x1614,
0x1716, 0x1A19, 0x1F1D, 0x2521, /* 16 */
0x2A27, 0x2F2A, 0x332D, 0x3B35,
0x5140, 0x6C62, 0x0077,
};
const u16 b43legacy_ilt_noisescaleg2[B43legacy_ILT_NOISESCALEG_SIZE] = {
0xD8DD, 0xCBD4, 0xBCC0, 0xB6B7, /* 0 */
0xB2B0, 0xADAD, 0xA7A9, 0x9FA1,
0x969B, 0x9195, 0x8F8F, 0x8A8A,
0x8A8A, 0x8A00, 0x8A8A, 0x8F8A,
0x918F, 0x9695, 0x9F9B, 0xA7A1, /* 16 */
0xADA9, 0xB2AD, 0xB6B0, 0xBCB7,
0xCBC0, 0xD8D4, 0x00DD,
};
const u16 b43legacy_ilt_noisescaleg3[B43legacy_ILT_NOISESCALEG_SIZE] = {
0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 0 */
0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4,
0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4,
0xA4A4, 0xA400, 0xA4A4, 0xA4A4,
0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 16 */
0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4,
0xA4A4, 0xA4A4, 0x00A4,
};
const u16 b43legacy_ilt_sigmasqr1[B43legacy_ILT_SIGMASQR_SIZE] = {
0x007A, 0x0075, 0x0071, 0x006C, /* 0 */
0x0067, 0x0063, 0x005E, 0x0059,
0x0054, 0x0050, 0x004B, 0x0046,
0x0042, 0x003D, 0x003D, 0x003D,
0x003D, 0x003D, 0x003D, 0x003D, /* 16 */
0x003D, 0x003D, 0x003D, 0x003D,
0x003D, 0x003D, 0x0000, 0x003D,
0x003D, 0x003D, 0x003D, 0x003D,
0x003D, 0x003D, 0x003D, 0x003D, /* 32 */
0x003D, 0x003D, 0x003D, 0x003D,
0x0042, 0x0046, 0x004B, 0x0050,
0x0054, 0x0059, 0x005E, 0x0063,
0x0067, 0x006C, 0x0071, 0x0075, /* 48 */
0x007A,
};
const u16 b43legacy_ilt_sigmasqr2[B43legacy_ILT_SIGMASQR_SIZE] = {
0x00DE, 0x00DC, 0x00DA, 0x00D8, /* 0 */
0x00D6, 0x00D4, 0x00D2, 0x00CF,
0x00CD, 0x00CA, 0x00C7, 0x00C4,
0x00C1, 0x00BE, 0x00BE, 0x00BE,
0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 16 */
0x00BE, 0x00BE, 0x00BE, 0x00BE,
0x00BE, 0x00BE, 0x0000, 0x00BE,
0x00BE, 0x00BE, 0x00BE, 0x00BE,
0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 32 */
0x00BE, 0x00BE, 0x00BE, 0x00BE,
0x00C1, 0x00C4, 0x00C7, 0x00CA,
0x00CD, 0x00CF, 0x00D2, 0x00D4,
0x00D6, 0x00D8, 0x00DA, 0x00DC, /* 48 */
0x00DE,
};
/**** Helper functions to access the device Internal Lookup Tables ****/
void b43legacy_ilt_write(struct b43legacy_wldev *dev, u16 offset, u16 val)
{
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset);
mmiowb();
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, val);
}
void b43legacy_ilt_write32(struct b43legacy_wldev *dev, u16 offset, u32 val)
{
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset);
mmiowb();
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA2,
(val & 0xFFFF0000) >> 16);
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1,
val & 0x0000FFFF);
}
u16 b43legacy_ilt_read(struct b43legacy_wldev *dev, u16 offset)
{
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset);
return b43legacy_phy_read(dev, B43legacy_PHY_ILT_G_DATA1);
}
| gpl-2.0 |
CMRemix/android_kernel_samsung_hlte | tools/perf/util/top.c | 4848 | 3284 | /*
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Refactored from builtin-top.c, see that files for further copyright notes.
*
* Released under the GPL v2. (and only v2, not any later version)
*/
#include "cpumap.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "parse-events.h"
#include "symbol.h"
#include "top.h"
#include <inttypes.h>
#define SNPRINTF(buf, size, fmt, args...) \
({ \
size_t r = snprintf(buf, size, fmt, ## args); \
r > size ? size : r; \
})
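/*
 * Note (added): plain snprintf() returns the length that *would* have been
 * written, which can exceed 'size'; clamping the return value here keeps the
 * running "size - ret" arithmetic below from wrapping around once the buffer
 * fills up.
 */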
size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
{
float samples_per_sec = top->samples / top->delay_secs;
float ksamples_per_sec = top->kernel_samples / top->delay_secs;
float esamples_percent = (100.0 * top->exact_samples) / top->samples;
size_t ret = 0;
if (!perf_guest) {
ret = SNPRINTF(bf, size,
" PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
" exact: %4.1f%% [", samples_per_sec,
100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
samples_per_sec)),
esamples_percent);
} else {
float us_samples_per_sec = top->us_samples / top->delay_secs;
float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs;
ret = SNPRINTF(bf, size,
" PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
" guest kernel:%4.1f%% guest us:%4.1f%%"
" exact: %4.1f%% [", samples_per_sec,
100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
samples_per_sec)),
100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) /
samples_per_sec)),
100.0 - (100.0 * ((samples_per_sec -
guest_kernel_samples_per_sec) /
samples_per_sec)),
100.0 - (100.0 * ((samples_per_sec -
guest_us_samples_per_sec) /
samples_per_sec)),
esamples_percent);
}
if (top->evlist->nr_entries == 1) {
struct perf_evsel *first;
first = list_entry(top->evlist->entries.next, struct perf_evsel, node);
ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
(uint64_t)first->attr.sample_period,
top->freq ? "Hz" : "");
}
ret += SNPRINTF(bf + ret, size - ret, "%s", event_name(top->sym_evsel));
ret += SNPRINTF(bf + ret, size - ret, "], ");
if (top->target_pid)
ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s",
top->target_pid);
else if (top->target_tid)
ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s",
top->target_tid);
else if (top->uid_str != NULL)
ret += SNPRINTF(bf + ret, size - ret, " (uid: %s",
top->uid_str);
else
ret += SNPRINTF(bf + ret, size - ret, " (all");
if (top->cpu_list)
ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
top->evlist->cpus->nr > 1 ? "s" : "", top->cpu_list);
else {
if (top->target_tid)
ret += SNPRINTF(bf + ret, size - ret, ")");
else
ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
top->evlist->cpus->nr,
top->evlist->cpus->nr > 1 ? "s" : "");
}
return ret;
}
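/*
 * Note (added): 100.0 - 100.0 * ((samples_per_sec - ksamples_per_sec) /
 * samples_per_sec) algebraically reduces to 100.0 * ksamples_per_sec /
 * samples_per_sec, i.e. the kernel share of all samples; the same shape is
 * reused above for the user, guest-kernel and guest-user percentages.
 */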
void perf_top__reset_sample_counters(struct perf_top *top)
{
top->samples = top->us_samples = top->kernel_samples =
top->exact_samples = top->guest_kernel_samples =
top->guest_us_samples = 0;
}
| gpl-2.0 |
rachitrawat/Vengeance-Kernel-MSM7x27-JLO | block/blk-map.c | 7408 | 8428 | /*
* Functions related to mapping data to requests
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h> /* for struct sg_iovec */
#include "blk.h"
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio)
{
if (!rq->bio)
blk_rq_bio_prep(q, rq, bio);
else if (!ll_back_merge_fn(q, rq, bio))
return -EINVAL;
else {
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->__data_len += bio->bi_size;
}
return 0;
}
static int __blk_rq_unmap_user(struct bio *bio)
{
int ret = 0;
if (bio) {
if (bio_flagged(bio, BIO_USER_MAPPED))
bio_unmap_user(bio);
else
ret = bio_uncopy_user(bio);
}
return ret;
}
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data, void __user *ubuf,
unsigned int len, gfp_t gfp_mask)
{
unsigned long uaddr;
struct bio *bio, *orig_bio;
int reading, ret;
reading = rq_data_dir(rq) == READ;
/*
* if alignment requirement is satisfied, map in user pages for
* direct dma. else, set up kernel bounce buffers
*/
uaddr = (unsigned long) ubuf;
if (blk_rq_aligned(q, uaddr, len) && !map_data)
bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
else
bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
if (map_data && map_data->null_mapped)
bio->bi_flags |= (1 << BIO_NULL_MAPPED);
orig_bio = bio;
blk_queue_bounce(q, &bio);
/*
* We link the bounce buffer in and could have to traverse it
* later so we have to get a ref to prevent it from being freed
*/
bio_get(bio);
ret = blk_rq_append_bio(q, rq, bio);
if (!ret)
return bio->bi_size;
/* if it was bounced we must call the end io function */
bio_endio(bio, 0);
__blk_rq_unmap_user(orig_bio);
bio_put(bio);
return ret;
}
/**
* blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request structure to fill
* @map_data: pointer to the rq_map_data holding pages (if necessary)
* @ubuf: the user buffer
* @len: length of user data
* @gfp_mask: memory allocation flags
*
* Description:
* Data will be mapped directly for zero copy I/O, if possible. Otherwise
* a kernel bounce buffer is used.
*
* A matching blk_rq_unmap_user() must be issued at the end of I/O, while
* still in process context.
*
* Note: The mapped bio may need to be bounced through blk_queue_bounce()
* before being submitted to the device, as pages mapped may be out of
* reach. It's the caller's responsibility to make sure this happens. The
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
int blk_rq_map_user(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data, void __user *ubuf,
unsigned long len, gfp_t gfp_mask)
{
unsigned long bytes_read = 0;
struct bio *bio = NULL;
int ret;
if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len)
return -EINVAL;
if (!ubuf && (!map_data || !map_data->null_mapped))
return -EINVAL;
while (bytes_read != len) {
unsigned long map_len, end, start;
map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
>> PAGE_SHIFT;
start = (unsigned long)ubuf >> PAGE_SHIFT;
/*
* A bad offset could cause us to require BIO_MAX_PAGES + 1
* pages. If this happens we just lower the requested
* mapping len by a page so that we can fit
*/
if (end - start > BIO_MAX_PAGES)
map_len -= PAGE_SIZE;
ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
gfp_mask);
if (ret < 0)
goto unmap_rq;
if (!bio)
bio = rq->bio;
bytes_read += ret;
ubuf += ret;
if (map_data)
map_data->offset += ret;
}
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
rq->buffer = NULL;
return 0;
unmap_rq:
blk_rq_unmap_user(bio);
rq->bio = NULL;
return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
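/*
 * Hedged usage sketch (added, not part of this file): an SG_IO-style caller
 * of this kernel generation would typically pair the call with
 * blk_rq_unmap_user(), roughly:
 *
 *	struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	struct bio *bio;
 *
 *	if (blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL))
 *		goto out;
 *	bio = rq->bio;			/- remember the original bio -/
 *	blk_execute_rq(q, bdev->bd_disk, rq, 0);
 *	blk_rq_unmap_user(bio);		/- must pass the saved bio back -/
 *	blk_put_request(rq);
 *
 * The helper names and exact signatures are assumptions based on the
 * 2.6/3.x block API and may differ in other trees.
 */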
/**
* blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request to map data to
* @map_data: pointer to the rq_map_data holding pages (if necessary)
* @iov: pointer to the iovec
* @iov_count: number of elements in the iovec
* @len: I/O byte count
* @gfp_mask: memory allocation flags
*
* Description:
* Data will be mapped directly for zero copy I/O, if possible. Otherwise
* a kernel bounce buffer is used.
*
* A matching blk_rq_unmap_user() must be issued at the end of I/O, while
* still in process context.
*
* Note: The mapped bio may need to be bounced through blk_queue_bounce()
* before being submitted to the device, as pages mapped may be out of
* reach. It's the caller's responsibility to make sure this happens. The
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data, struct sg_iovec *iov,
int iov_count, unsigned int len, gfp_t gfp_mask)
{
struct bio *bio;
int i, read = rq_data_dir(rq) == READ;
int unaligned = 0;
if (!iov || iov_count <= 0)
return -EINVAL;
for (i = 0; i < iov_count; i++) {
unsigned long uaddr = (unsigned long)iov[i].iov_base;
if (!iov[i].iov_len)
return -EINVAL;
/*
* Keep going so we check length of all segments
*/
if (uaddr & queue_dma_alignment(q))
unaligned = 1;
}
if (unaligned || (q->dma_pad_mask & len) || map_data)
bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
gfp_mask);
else
bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
if (bio->bi_size != len) {
/*
* Grab an extra reference to this bio, as bio_unmap_user()
* expects to be able to drop it twice as it happens on the
* normal IO completion path
*/
bio_get(bio);
bio_endio(bio, 0);
__blk_rq_unmap_user(bio);
return -EINVAL;
}
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
blk_queue_bounce(q, &bio);
bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
/**
* blk_rq_unmap_user - unmap a request with user data
* @bio: start of bio list
*
* Description:
* Unmap a rq previously mapped by blk_rq_map_user(). The caller must
* supply the original rq->bio from the blk_rq_map_user() return, since
* the I/O completion may have changed rq->bio.
*/
int blk_rq_unmap_user(struct bio *bio)
{
struct bio *mapped_bio;
int ret = 0, ret2;
while (bio) {
mapped_bio = bio;
if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
mapped_bio = bio->bi_private;
ret2 = __blk_rq_unmap_user(mapped_bio);
if (ret2 && !ret)
ret = ret2;
mapped_bio = bio;
bio = bio->bi_next;
bio_put(mapped_bio);
}
return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
/**
* blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request to fill
* @kbuf: the kernel buffer
* @len: length of user data
* @gfp_mask: memory allocation flags
*
* Description:
* Data will be mapped directly if possible. Otherwise a bounce
* buffer is used. Can be called multiple times to append multiple
* buffers.
*/
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
{
int reading = rq_data_dir(rq) == READ;
unsigned long addr = (unsigned long) kbuf;
int do_copy = 0;
struct bio *bio;
int ret;
if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
if (do_copy)
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
else
bio = bio_map_kern(q, kbuf, len, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
if (!reading)
bio->bi_rw |= REQ_WRITE;
if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;
ret = blk_rq_append_bio(q, rq, bio);
if (unlikely(ret)) {
/* request is too big */
bio_put(bio);
return ret;
}
blk_queue_bounce(q, &rq->bio);
rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
| gpl-2.0 |
detule/linux-msm-d2 | sound/pci/echoaudio/indigoiox_dsp.c | 12528 | 2197 | /************************************************************************
This file is part of Echo Digital Audio's generic driver library.
Copyright Echo Digital Audio Corporation (c) 1998 - 2005
All rights reserved
www.echoaudio.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*************************************************************************
Translation from C++ and adaptation for use in ALSA-Driver
were made by Giuliano Pochini <pochini@shiny.it>
*************************************************************************/
static int update_vmixer_level(struct echoaudio *chip);
static int set_vmixer_gain(struct echoaudio *chip, u16 output,
u16 pipe, int gain);
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
DE_INIT(("init_hw() - Indigo IOx\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO_IOX))
return -ENODEV;
err = init_dsp_comm_page(chip);
if (err < 0) {
DE_INIT(("init_hw - could not initialize DSP comm page\n"));
return err;
}
chip->device_id = device_id;
chip->subdevice_id = subdevice_id;
chip->bad_board = TRUE;
chip->dsp_code_to_load = FW_INDIGO_IOX_DSP;
/* Since this card has no ASIC, mark it as loaded so everything
works OK */
chip->asic_loaded = TRUE;
chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
err = load_firmware(chip);
if (err < 0)
return err;
chip->bad_board = FALSE;
DE_INIT(("init_hw done\n"));
return err;
}
static int set_mixer_defaults(struct echoaudio *chip)
{
return init_line_levels(chip);
}
| gpl-2.0 |
Altaf-Mahdi/i9505 | sound/pci/echoaudio/indigoiox_dsp.c | 12528 | 2197 | /************************************************************************
This file is part of Echo Digital Audio's generic driver library.
Copyright Echo Digital Audio Corporation (c) 1998 - 2005
All rights reserved
www.echoaudio.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*************************************************************************
Translation from C++ and adaptation for use in ALSA-Driver
were made by Giuliano Pochini <pochini@shiny.it>
*************************************************************************/
static int update_vmixer_level(struct echoaudio *chip);
static int set_vmixer_gain(struct echoaudio *chip, u16 output,
u16 pipe, int gain);
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
DE_INIT(("init_hw() - Indigo IOx\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO_IOX))
return -ENODEV;
err = init_dsp_comm_page(chip);
if (err < 0) {
DE_INIT(("init_hw - could not initialize DSP comm page\n"));
return err;
}
chip->device_id = device_id;
chip->subdevice_id = subdevice_id;
chip->bad_board = TRUE;
chip->dsp_code_to_load = FW_INDIGO_IOX_DSP;
/* Since this card has no ASIC, mark it as loaded so everything
works OK */
chip->asic_loaded = TRUE;
chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
err = load_firmware(chip);
if (err < 0)
return err;
chip->bad_board = FALSE;
DE_INIT(("init_hw done\n"));
return err;
}
static int set_mixer_defaults(struct echoaudio *chip)
{
return init_line_levels(chip);
}
| gpl-2.0 |
arco/samsung-kernel-msm7x30 | sound/aoa/core/core.c | 14832 | 3387 | /*
* Apple Onboard Audio driver core
*
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
*
* GPL v2, can be found in COPYING.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include "../aoa.h"
#include "alsa.h"
MODULE_DESCRIPTION("Apple Onboard Audio Sound Driver");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_LICENSE("GPL");
/* We allow only one fabric. This simplifies things,
* and more don't really make that much sense */
static struct aoa_fabric *fabric;
static LIST_HEAD(codec_list);
static int attach_codec_to_fabric(struct aoa_codec *c)
{
int err;
if (!try_module_get(c->owner))
return -EBUSY;
/* found_codec has to be assigned */
err = -ENOENT;
if (fabric->found_codec)
err = fabric->found_codec(c);
if (err) {
module_put(c->owner);
printk(KERN_ERR "snd-aoa: fabric didn't like codec %s\n",
c->name);
return err;
}
c->fabric = fabric;
err = 0;
if (c->init)
err = c->init(c);
if (err) {
printk(KERN_ERR "snd-aoa: codec %s didn't init\n", c->name);
c->fabric = NULL;
if (fabric->remove_codec)
fabric->remove_codec(c);
module_put(c->owner);
return err;
}
if (fabric->attached_codec)
fabric->attached_codec(c);
return 0;
}
int aoa_codec_register(struct aoa_codec *codec)
{
int err = 0;
/* if there's a fabric already, we can tell if we
* will want to have this codec, so propagate error
* through. Otherwise, this will happen later... */
if (fabric)
err = attach_codec_to_fabric(codec);
if (!err)
list_add(&codec->list, &codec_list);
return err;
}
EXPORT_SYMBOL_GPL(aoa_codec_register);
void aoa_codec_unregister(struct aoa_codec *codec)
{
list_del(&codec->list);
if (codec->fabric && codec->exit)
codec->exit(codec);
if (fabric && fabric->remove_codec)
fabric->remove_codec(codec);
codec->fabric = NULL;
module_put(codec->owner);
}
EXPORT_SYMBOL_GPL(aoa_codec_unregister);
int aoa_fabric_register(struct aoa_fabric *new_fabric, struct device *dev)
{
struct aoa_codec *c;
int err;
/* allow querying for presence of fabric
* (i.e. do this test first!) */
if (new_fabric == fabric) {
err = -EALREADY;
goto attach;
}
if (fabric)
return -EEXIST;
if (!new_fabric)
return -EINVAL;
err = aoa_alsa_init(new_fabric->name, new_fabric->owner, dev);
if (err)
return err;
fabric = new_fabric;
attach:
list_for_each_entry(c, &codec_list, list) {
if (c->fabric != fabric)
attach_codec_to_fabric(c);
}
return err;
}
EXPORT_SYMBOL_GPL(aoa_fabric_register);
void aoa_fabric_unregister(struct aoa_fabric *old_fabric)
{
struct aoa_codec *c;
if (fabric != old_fabric)
return;
list_for_each_entry(c, &codec_list, list) {
if (c->fabric)
aoa_fabric_unlink_codec(c);
}
aoa_alsa_cleanup();
fabric = NULL;
}
EXPORT_SYMBOL_GPL(aoa_fabric_unregister);
void aoa_fabric_unlink_codec(struct aoa_codec *codec)
{
if (!codec->fabric) {
printk(KERN_ERR "snd-aoa: fabric unassigned "
"in aoa_fabric_unlink_codec\n");
dump_stack();
return;
}
if (codec->exit)
codec->exit(codec);
if (codec->fabric->remove_codec)
codec->fabric->remove_codec(codec);
codec->fabric = NULL;
module_put(codec->owner);
}
EXPORT_SYMBOL_GPL(aoa_fabric_unlink_codec);
static int __init aoa_init(void)
{
return 0;
}
static void __exit aoa_exit(void)
{
aoa_alsa_cleanup();
}
module_init(aoa_init);
module_exit(aoa_exit);
| gpl-2.0 |
Zenfone2-Dev/kernel-FlareM | drivers/video/kyro/STG4000Ramdac.c | 15600 | 3931 | /*
* linux/drivers/video/kyro/STG4000Ramdac.c
*
* Copyright (C) 2002 STMicroelectronics
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <video/kyro.h>
#include "STG4000Reg.h"
#include "STG4000Interface.h"
static u32 STG_PIXEL_BUS_WIDTH = 128; /* 128 bit bus width */
static u32 REF_CLOCK = 14318;
int InitialiseRamdac(volatile STG4000REG __iomem * pSTGReg,
u32 displayDepth,
u32 displayWidth,
u32 displayHeight,
s32 HSyncPolarity,
s32 VSyncPolarity, u32 * pixelClock)
{
u32 tmp = 0;
u32 F = 0, R = 0, P = 0;
u32 stride = 0;
u32 ulPdiv = 0;
u32 physicalPixelDepth = 0;
/* Make sure DAC is in Reset */
tmp = STG_READ_REG(SoftwareReset);
if (tmp & 0x1) {
CLEAR_BIT(1);
STG_WRITE_REG(SoftwareReset, tmp);
}
/* Set Pixel Format */
tmp = STG_READ_REG(DACPixelFormat);
CLEAR_BITS_FRM_TO(0, 2);
/* Set LUT not used from 16bpp to 32 bpp ??? */
CLEAR_BITS_FRM_TO(8, 9);
switch (displayDepth) {
case 16:
{
physicalPixelDepth = 16;
tmp |= _16BPP;
break;
}
case 32:
{
/* Set for 32 bits per pixel */
physicalPixelDepth = 32;
tmp |= _32BPP;
break;
}
default:
return -EINVAL;
}
STG_WRITE_REG(DACPixelFormat, tmp);
/* Workout Bus transfer bandwidth according to pixel format */
ulPdiv = STG_PIXEL_BUS_WIDTH / physicalPixelDepth;
/* Get Screen Stride in pixels */
stride = displayWidth;
/* Set Primary size info */
tmp = STG_READ_REG(DACPrimSize);
CLEAR_BITS_FRM_TO(0, 10);
CLEAR_BITS_FRM_TO(12, 31);
tmp |=
((((displayHeight - 1) << 12) | (((displayWidth / ulPdiv) -
1) << 23))
| (stride / ulPdiv));
STG_WRITE_REG(DACPrimSize, tmp);
/* Set Pixel Clock */
*pixelClock = ProgramClock(REF_CLOCK, *pixelClock, &F, &R, &P);
/* Set DAC PLL Mode */
tmp = STG_READ_REG(DACPLLMode);
CLEAR_BITS_FRM_TO(0, 15);
/* tmp |= ((P-1) | ((F-2) << 2) | ((R-2) << 11)); */
tmp |= ((P) | ((F - 2) << 2) | ((R - 2) << 11));
STG_WRITE_REG(DACPLLMode, tmp);
/* Set Prim Address */
tmp = STG_READ_REG(DACPrimAddress);
CLEAR_BITS_FRM_TO(0, 20);
CLEAR_BITS_FRM_TO(20, 31);
STG_WRITE_REG(DACPrimAddress, tmp);
/* Set Cursor details with HW Cursor disabled */
tmp = STG_READ_REG(DACCursorCtrl);
tmp &= ~SET_BIT(31);
STG_WRITE_REG(DACCursorCtrl, tmp);
tmp = STG_READ_REG(DACCursorAddr);
CLEAR_BITS_FRM_TO(0, 20);
STG_WRITE_REG(DACCursorAddr, tmp);
/* Set Video Window */
tmp = STG_READ_REG(DACVidWinStart);
CLEAR_BITS_FRM_TO(0, 10);
CLEAR_BITS_FRM_TO(16, 26);
STG_WRITE_REG(DACVidWinStart, tmp);
tmp = STG_READ_REG(DACVidWinEnd);
CLEAR_BITS_FRM_TO(0, 10);
CLEAR_BITS_FRM_TO(16, 26);
STG_WRITE_REG(DACVidWinEnd, tmp);
/* Set DAC Border Color to default */
tmp = STG_READ_REG(DACBorderColor);
CLEAR_BITS_FRM_TO(0, 23);
STG_WRITE_REG(DACBorderColor, tmp);
/* Set Graphics and Overlay Burst Control */
STG_WRITE_REG(DACBurstCtrl, 0x0404);
/* Set CRC Trigger to default */
tmp = STG_READ_REG(DACCrcTrigger);
CLEAR_BIT(0);
STG_WRITE_REG(DACCrcTrigger, tmp);
/* Set Video Port Control to default */
tmp = STG_READ_REG(DigVidPortCtrl);
CLEAR_BIT(8);
CLEAR_BITS_FRM_TO(16, 27);
CLEAR_BITS_FRM_TO(1, 3);
CLEAR_BITS_FRM_TO(10, 11);
STG_WRITE_REG(DigVidPortCtrl, tmp);
return 0;
}
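/*
 * Worked example (illustration only): at 32 bpp the code above computes
 * ulPdiv = STG_PIXEL_BUS_WIDTH / 32 = 4, so for a 1024x768 mode the
 * DACPrimSize width field becomes (1024 / 4) - 1 = 255, the stride field
 * 1024 / 4 = 256, and the height field 768 - 1 = 767.
 */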
/* Ramdac control, turning output to the screen on and off */
void DisableRamdacOutput(volatile STG4000REG __iomem * pSTGReg)
{
u32 tmp;
/* Disable DAC for Graphics Stream Control */
tmp = (STG_READ_REG(DACStreamCtrl)) & ~SET_BIT(0);
STG_WRITE_REG(DACStreamCtrl, tmp);
}
void EnableRamdacOutput(volatile STG4000REG __iomem * pSTGReg)
{
u32 tmp;
/* Enable DAC for Graphics Stream Control */
tmp = (STG_READ_REG(DACStreamCtrl)) | SET_BIT(0);
STG_WRITE_REG(DACStreamCtrl, tmp);
}
| gpl-2.0 |
pacerom/kernel_oneplus_msm8974 | drivers/staging/prima/CORE/SME/src/oemData/oemDataApi.c | 497 | 15435 | /*
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef FEATURE_OEM_DATA_SUPPORT
/** ------------------------------------------------------------------------- *
------------------------------------------------------------------------- *
\file oemDataApi.c
Implementation for the OEM DATA REQ/RSP interfaces.
========================================================================== */
#include "aniGlobal.h"
#include "oemDataApi.h"
#include "palApi.h"
#include "smeInside.h"
#include "smsDebug.h"
#include "csrSupport.h"
#include "wlan_qct_tl.h"
#include "vos_diag_core_log.h"
#include "vos_diag_core_event.h"
/* ---------------------------------------------------------------------------
\fn oemData_OemDataReqOpen
\brief This function must be called before any API call to (OEM DATA REQ/RSP module)
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus oemData_OemDataReqOpen(tHalHandle hHal)
{
eHalStatus status = eHAL_STATUS_SUCCESS;
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
do
{
//initialize all the variables to null
vos_mem_set(&(pMac->oemData), sizeof(tOemDataStruct), 0);
if(!HAL_STATUS_SUCCESS(status))
{
smsLog(pMac, LOGE, "oemData_OemDataReqOpen: Cannot allocate memory for the timer function");
break;
}
} while(0);
return status;
}
/* ---------------------------------------------------------------------------
\fn oemData_OemDataReqClose
\brief This function must be called before closing the csr module
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus oemData_OemDataReqClose(tHalHandle hHal)
{
eHalStatus status = eHAL_STATUS_SUCCESS;
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
do
{
if(!HAL_STATUS_SUCCESS(status))
{
smsLog(pMac, LOGE, "oemData_OemDataReqClose: Failed in oemData_OemDataReqClose at StopTimers");
break;
}
if(pMac->oemData.pOemDataRsp != NULL)
{
vos_mem_free(pMac->oemData.pOemDataRsp);
}
//initialize all the variables to null
vos_mem_set(&(pMac->oemData), sizeof(tOemDataStruct), 0);
} while(0);
return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn oemData_ReleaseOemDataReqCommand
\brief This function removes the oemDataCommand from the active list
and frees up any memory occupied by it
\return None
-------------------------------------------------------------------------------*/
void oemData_ReleaseOemDataReqCommand(tpAniSirGlobal pMac, tSmeCmd *pOemDataCmd, eOemDataReqStatus oemDataReqStatus)
{
//Do the callback
pOemDataCmd->u.oemDataCmd.callback(pMac, pOemDataCmd->u.oemDataCmd.pContext, pOemDataCmd->u.oemDataCmd.oemDataReqID, oemDataReqStatus);
//First take this command out of the active list
if(csrLLRemoveEntry(&pMac->sme.smeCmdActiveList, &pOemDataCmd->Link, LL_ACCESS_LOCK))
{
vos_mem_set(&(pOemDataCmd->u.oemDataCmd), sizeof(tOemDataCmd), 0);
//Now put this command back on the available command list
smeReleaseCommand(pMac, pOemDataCmd);
}
else
{
smsLog(pMac, LOGE, "OEM_DATA: **************** oemData_ReleaseOemDataReqCommand cannot release the command");
}
}
/* ---------------------------------------------------------------------------
\fn oemData_OemDataReq
\brief Request an OEM DATA RSP
\param sessionId - Id of session to be used
\param pOemDataReqID - pointer to an object to get back the request ID
\param callback - a callback function that is called upon finish
\param pContext - a pointer passed in for the callback
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus oemData_OemDataReq(tHalHandle hHal,
tANI_U8 sessionId,
tOemDataReqConfig *oemDataReqConfig,
tANI_U32 *pOemDataReqID,
oemData_OemDataReqCompleteCallback callback,
void *pContext)
{
eHalStatus status = eHAL_STATUS_SUCCESS;
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
tSmeCmd *pOemDataCmd = NULL;
do
{
if( !CSR_IS_SESSION_VALID( pMac, sessionId ) )
{
status = eHAL_STATUS_FAILURE;
break;
}
pMac->oemData.oemDataReqConfig.sessionId = sessionId;
pMac->oemData.callback = callback;
pMac->oemData.pContext = pContext;
pMac->oemData.oemDataReqID = *(pOemDataReqID);
vos_mem_copy((v_VOID_t*)(pMac->oemData.oemDataReqConfig.oemDataReq), (v_VOID_t*)(oemDataReqConfig->oemDataReq), OEM_DATA_REQ_SIZE);
pMac->oemData.oemDataReqActive = eANI_BOOLEAN_FALSE;
pOemDataCmd = smeGetCommandBuffer(pMac);
//fill up the command before posting it.
if(pOemDataCmd)
{
pOemDataCmd->command = eSmeCommandOemDataReq;
pOemDataCmd->u.oemDataCmd.callback = callback;
pOemDataCmd->u.oemDataCmd.pContext = pContext;
pOemDataCmd->u.oemDataCmd.oemDataReqID = pMac->oemData.oemDataReqID;
//set the oem data request
pOemDataCmd->u.oemDataCmd.oemDataReq.sessionId = pMac->oemData.oemDataReqConfig.sessionId;
vos_mem_copy((v_VOID_t*)(pOemDataCmd->u.oemDataCmd.oemDataReq.oemDataReq),
(v_VOID_t*)(pMac->oemData.oemDataReqConfig.oemDataReq), OEM_DATA_REQ_SIZE);
}
else
{
status = eHAL_STATUS_FAILURE;
break;
}
//now queue this command in the sme command queue
//Here since this is not interacting with the csr just push the command
//into the sme queue. Also push this command with the normal priority
smePushCommand(pMac, pOemDataCmd, eANI_BOOLEAN_FALSE);
} while(0);
if(!HAL_STATUS_SUCCESS(status) && pOemDataCmd)
{
oemData_ReleaseOemDataReqCommand(pMac, pOemDataCmd, eOEM_DATA_REQ_FAILURE);
pMac->oemData.oemDataReqActive = eANI_BOOLEAN_FALSE;
}
return status;
}
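/*
 * Illustrative sketch only (not part of the driver): a minimal caller of
 * oemData_OemDataReq() might look roughly like the snippet below. The names
 * myOemDataDoneCb, myConfig, myCtx and the exact type of the callback's
 * first parameter are assumptions; the argument order follows the callback
 * invocation in oemData_ReleaseOemDataReqCommand() above.
 *
 *   static void myOemDataDoneCb(tHalHandle hHal, void *pContext,
 *                               tANI_U32 reqId, eOemDataReqStatus oemStatus)
 *   {
 *       //inspect oemStatus; the response, if any, is stored by
 *       //sme_HandleOemDataRsp() in pMac->oemData.pOemDataRsp
 *   }
 *
 *   tANI_U32 reqId = 1;                 //caller-chosen request ID
 *   tOemDataReqConfig myConfig;         //oemDataReq[] filled in by caller
 *   eHalStatus halStatus = oemData_OemDataReq(hHal, sessionId, &myConfig,
 *                                             &reqId, myOemDataDoneCb,
 *                                             myCtx);
 */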
/* ---------------------------------------------------------------------------
\fn oemData_SendMBOemDataReq
\brief Request an OEM DATA REQ to be passed down to PE
\param pMac:
\param pOemDataReq: Pointer to the oem data request
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus oemData_SendMBOemDataReq(tpAniSirGlobal pMac, tOemDataReq *pOemDataReq)
{
eHalStatus status = eHAL_STATUS_SUCCESS;
tSirOemDataReq* pMsg;
tANI_U16 msgLen;
tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, pOemDataReq->sessionId );
smsLog(pMac, LOGW, "OEM_DATA: entering Function %s", __func__);
msgLen = (tANI_U16)(sizeof(tSirOemDataReq));
status = palAllocateMemory(pMac->hHdd, (void**)&pMsg, msgLen);
if(HAL_STATUS_SUCCESS(status))
{
palZeroMemory(pMac->hHdd, pMsg, msgLen);
pMsg->messageType = pal_cpu_to_be16((tANI_U16)eWNI_SME_OEM_DATA_REQ);
palCopyMemory(pMac->hHdd, pMsg->selfMacAddr, pSession->selfMacAddr, sizeof(tSirMacAddr) );
status = palCopyMemory(pMac->hHdd, pMsg->oemDataReq, pOemDataReq->oemDataReq, OEM_DATA_REQ_SIZE);
if(HAL_STATUS_SUCCESS(status))
{
smsLog(pMac, LOGW, "OEM_DATA: sending message to PE %s", __func__);
status = palSendMBMessage(pMac->hHdd, pMsg);
}
else
{
palFreeMemory(pMac->hHdd, pMsg);
}
}
smsLog(pMac, LOGW, "OEM_DATA: exiting Function %s", __func__);
return status;
}
/* ---------------------------------------------------------------------------
\fn oemData_ProcessOemDataReqCommand
\brief This function is called by the smeProcessCommand when the case hits
eSmeCommandOemDataReq
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus oemData_ProcessOemDataReqCommand(tpAniSirGlobal pMac, tSmeCmd *pOemDataReqCmd)
{
eHalStatus status = eHAL_STATUS_SUCCESS;
//check if the system is in proper mode of operation for
//oem data req/rsp to be functional. Currently, concurrency is not
//supported and the driver must be operational only as
//STA for oem data req/rsp to be functional. We return an invalid
//mode flag if it is operational as any one of the following
//in any of the active sessions
//1. AP Mode
//2. IBSS Mode
//3. BTAMP Mode ...
if(eHAL_STATUS_SUCCESS == oemData_IsOemDataReqAllowed(pMac))
{
smsLog(pMac, LOG1, "%s: OEM_DATA REQ allowed in the current mode", __func__);
pMac->oemData.oemDataReqActive = eANI_BOOLEAN_TRUE;
status = oemData_SendMBOemDataReq(pMac, &(pOemDataReqCmd->u.oemDataCmd.oemDataReq));
}
else
{
smsLog(pMac, LOG1, "%s: OEM_DATA REQ not allowed in the current mode", __func__);
oemData_ReleaseOemDataReqCommand(pMac, pOemDataReqCmd, eOEM_DATA_REQ_INVALID_MODE);
pMac->oemData.oemDataReqActive = eANI_BOOLEAN_FALSE;
}
return status;
}
/* ---------------------------------------------------------------------------
\fn sme_HandleOemDataRsp
\brief This function processes the oem data response obtained from the PE
\param pMsg - Pointer to the pSirOemDataRsp
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_HandleOemDataRsp(tHalHandle hHal, tANI_U8* pMsg)
{
eHalStatus status = eHAL_STATUS_SUCCESS;
tpAniSirGlobal pMac;
tListElem *pEntry = NULL;
tSmeCmd *pCommand = NULL;
tSirOemDataRsp* pOemDataRsp = NULL;
pMac = PMAC_STRUCT(hHal);
smsLog(pMac, LOG1, "%s: OEM_DATA Entering", __func__);
do
{
if(pMsg == NULL)
{
smsLog(pMac, LOGE, "in %s msg ptr is NULL", __func__);
status = eHAL_STATUS_FAILURE;
break;
}
pEntry = csrLLPeekHead( &pMac->sme.smeCmdActiveList, LL_ACCESS_LOCK );
if(pEntry)
{
pCommand = GET_BASE_ADDR( pEntry, tSmeCmd, Link );
if(eSmeCommandOemDataReq == pCommand->command)
{
pOemDataRsp = (tSirOemDataRsp*)pMsg;
//make sure to acquire the lock before modifying the data
status = sme_AcquireGlobalLock(&pMac->sme);
if(!HAL_STATUS_SUCCESS(status))
{
break;
}
if(pMac->oemData.pOemDataRsp != NULL)
{
vos_mem_free(pMac->oemData.pOemDataRsp);
}
pMac->oemData.pOemDataRsp = (tOemDataRsp*)vos_mem_malloc(sizeof(tOemDataRsp));
if(pMac->oemData.pOemDataRsp == NULL)
{
sme_ReleaseGlobalLock(&pMac->sme);
smsLog(pMac, LOGE, "in %s vos_mem_malloc failed for pMac->oemData.pOemDataRsp", __func__);
status = eHAL_STATUS_FAILURE;
break;
}
smsLog(pMac, LOGE, "Before memory copy");
vos_mem_copy((v_VOID_t*)(pMac->oemData.pOemDataRsp), (v_VOID_t*)(&pOemDataRsp->oemDataRsp), sizeof(tOemDataRsp));
smsLog(pMac, LOGE, "after memory copy");
sme_ReleaseGlobalLock(&pMac->sme);
}
else
{
smsLog(pMac, LOGE, "in %s eWNI_SME_OEM_DATA_RSP Received but NO REQs are ACTIVE ...",
__func__);
status = eHAL_STATUS_FAILURE;
break;
}
}
else
{
smsLog(pMac, LOGE, "in %s eWNI_SME_OEM_DATA_RSP Received but NO commands are ACTIVE ...", __func__);
status = eHAL_STATUS_FAILURE;
break;
}
oemData_ReleaseOemDataReqCommand(pMac, pCommand, eOEM_DATA_REQ_SUCCESS);
pMac->oemData.oemDataReqActive = eANI_BOOLEAN_FALSE;
} while(0);
return status;
}
/* ---------------------------------------------------------------------------
\fn oemData_IsOemDataReqAllowed
\brief This function checks if OEM DATA REQs can be performed in the
current driver state
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus oemData_IsOemDataReqAllowed(tHalHandle hHal)
{
eHalStatus status = eHAL_STATUS_SUCCESS;
tANI_U32 sessionId;
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
for(sessionId = 0; sessionId < CSR_ROAM_SESSION_MAX; sessionId++)
{
if(CSR_IS_SESSION_VALID(pMac, sessionId))
{
//co-exist with IBSS or BT-AMP mode is not supported
if(csrIsConnStateIbss(pMac, sessionId) || csrIsBTAMP(pMac, sessionId) )
{
//co-exist with IBSS or BT-AMP mode is not supported
smsLog(pMac, LOGW, "OEM DATA REQ is not allowed due to IBSS|BTAMP exist in session %d", sessionId);
status = eHAL_STATUS_CSR_WRONG_STATE;
break;
}
}
}
smsLog(pMac, LOG1, "Exiting oemData_IsOemDataReqAllowed with status %d", status);
return (status);
}
#endif /*FEATURE_OEM_DATA_SUPPORT*/
| gpl-2.0 |
Clouded/linux-rt-rpi2 | kernel/power/main.c | 497 | 15521 | /*
* kernel/power/main.c - PM subsystem core functionality.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*
* This file is released under the GPLv2
*
*/
#include <linux/export.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/resume-trace.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "power.h"
DEFINE_MUTEX(pm_mutex);
#ifdef CONFIG_PM_SLEEP
/* Routines for PM-transition notifications */
static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
int register_pm_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_pm_notifier);
int unregister_pm_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);
int pm_notifier_call_chain(unsigned long val)
{
int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
return notifier_to_errno(ret);
}
/* If set, devices may be suspended and resumed asynchronously. */
int pm_async_enabled = 1;
static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", pm_async_enabled);
}
static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
if (val > 1)
return -EINVAL;
pm_async_enabled = val;
return n;
}
power_attr(pm_async);
#ifdef CONFIG_PM_DEBUG
int pm_test_level = TEST_NONE;
static const char * const pm_tests[__TEST_AFTER_LAST] = {
[TEST_NONE] = "none",
[TEST_CORE] = "core",
[TEST_CPUS] = "processors",
[TEST_PLATFORM] = "platform",
[TEST_DEVICES] = "devices",
[TEST_FREEZER] = "freezer",
};
static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char *s = buf;
int level;
for (level = TEST_FIRST; level <= TEST_MAX; level++)
if (pm_tests[level]) {
if (level == pm_test_level)
s += sprintf(s, "[%s] ", pm_tests[level]);
else
s += sprintf(s, "%s ", pm_tests[level]);
}
if (s != buf)
/* convert the last space to a newline */
*(s-1) = '\n';
return (s - buf);
}
static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
const char * const *s;
int level;
char *p;
int len;
int error = -EINVAL;
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
lock_system_sleep();
level = TEST_FIRST;
for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
pm_test_level = level;
error = 0;
break;
}
unlock_system_sleep();
return error ? error : n;
}
power_attr(pm_test);
#endif /* CONFIG_PM_DEBUG */
#ifdef CONFIG_DEBUG_FS
static char *suspend_step_name(enum suspend_stat_step step)
{
switch (step) {
case SUSPEND_FREEZE:
return "freeze";
case SUSPEND_PREPARE:
return "prepare";
case SUSPEND_SUSPEND:
return "suspend";
case SUSPEND_SUSPEND_NOIRQ:
return "suspend_noirq";
case SUSPEND_RESUME_NOIRQ:
return "resume_noirq";
case SUSPEND_RESUME:
return "resume";
default:
return "";
}
}
static int suspend_stats_show(struct seq_file *s, void *unused)
{
int i, index, last_dev, last_errno, last_step;
last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
last_dev %= REC_FAILED_NUM;
last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
last_errno %= REC_FAILED_NUM;
last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
last_step %= REC_FAILED_NUM;
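/*
 * The three index computations above walk small circular history buffers:
 * each last_failed_* counter is advanced after a record is stored, so it
 * points at the next slot to be written. Adding REC_FAILED_NUM - 1 and
 * taking the modulus therefore yields the most recently written slot,
 * e.g. (assuming REC_FAILED_NUM were 2) last_failed_dev == 1 gives
 * last_dev == (1 + 2 - 1) % 2 == 0.
 */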
seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
"%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
"success", suspend_stats.success,
"fail", suspend_stats.fail,
"failed_freeze", suspend_stats.failed_freeze,
"failed_prepare", suspend_stats.failed_prepare,
"failed_suspend", suspend_stats.failed_suspend,
"failed_suspend_late",
suspend_stats.failed_suspend_late,
"failed_suspend_noirq",
suspend_stats.failed_suspend_noirq,
"failed_resume", suspend_stats.failed_resume,
"failed_resume_early",
suspend_stats.failed_resume_early,
"failed_resume_noirq",
suspend_stats.failed_resume_noirq);
seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
suspend_stats.failed_devs[last_dev]);
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_dev + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
seq_printf(s, "\t\t\t%-s\n",
suspend_stats.failed_devs[index]);
}
seq_printf(s, " last_failed_errno:\t%-d\n",
suspend_stats.errno[last_errno]);
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_errno + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
seq_printf(s, "\t\t\t%-d\n",
suspend_stats.errno[index]);
}
seq_printf(s, " last_failed_step:\t%-s\n",
suspend_step_name(
suspend_stats.failed_steps[last_step]));
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_step + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
seq_printf(s, "\t\t\t%-s\n",
suspend_step_name(
suspend_stats.failed_steps[index]));
}
return 0;
}
static int suspend_stats_open(struct inode *inode, struct file *file)
{
return single_open(file, suspend_stats_show, NULL);
}
static const struct file_operations suspend_stats_operations = {
.open = suspend_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init pm_debugfs_init(void)
{
debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
NULL, NULL, &suspend_stats_operations);
return 0;
}
late_initcall(pm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_SLEEP_DEBUG
/*
* pm_print_times: print time taken by devices to suspend and resume.
*
* show() returns whether printing of suspend and resume times is enabled.
* store() accepts 0 or 1. 0 disables printing and 1 enables it.
*/
bool pm_print_times_enabled;
static ssize_t pm_print_times_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", pm_print_times_enabled);
}
static ssize_t pm_print_times_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
if (val > 1)
return -EINVAL;
pm_print_times_enabled = !!val;
return n;
}
power_attr(pm_print_times);
static inline void pm_print_times_init(void)
{
pm_print_times_enabled = !!initcall_debug;
}
#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */
struct kobject *power_kobj;
/**
* state - control system sleep states.
*
* show() returns available sleep state labels, which may be "mem", "standby",
* "freeze" and "disk" (hibernation). See Documentation/power/states.txt for a
* description of what they mean.
*
* store() accepts one of those strings, translates it into the proper
* enumerated value, and initiates a suspend transition.
*/
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char *s = buf;
#ifdef CONFIG_SUSPEND
suspend_state_t i;
for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
if (pm_states[i])
s += sprintf(s,"%s ", pm_states[i]);
#endif
if (hibernation_available())
s += sprintf(s, "disk ");
if (s != buf)
/* convert the last space to a newline */
*(s-1) = '\n';
return (s - buf);
}
static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
suspend_state_t state;
#endif
char *p;
int len;
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
/* Check hibernation first. */
if (len == 4 && !strncmp(buf, "disk", len))
return PM_SUSPEND_MAX;
#ifdef CONFIG_SUSPEND
for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
const char *label = pm_states[state];
if (label && len == strlen(label) && !strncmp(buf, label, len))
return state;
}
#endif
return PM_SUSPEND_ON;
}
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
suspend_state_t state;
int error;
error = pm_autosleep_lock();
if (error)
return error;
if (pm_autosleep_state() > PM_SUSPEND_ON) {
error = -EBUSY;
goto out;
}
state = decode_state(buf, n);
if (state < PM_SUSPEND_MAX)
error = pm_suspend(state);
else if (state == PM_SUSPEND_MAX)
error = hibernate();
else
error = -EINVAL;
out:
pm_autosleep_unlock();
return error ? error : n;
}
power_attr(state);
#ifdef CONFIG_PM_SLEEP
/*
* The 'wakeup_count' attribute, along with the functions defined in
* drivers/base/power/wakeup.c, provides a means by which wakeup events can be
* handled in a non-racy way.
*
* If a wakeup event occurs when the system is in a sleep state, it simply is
* woken up. In turn, if an event that would wake the system up from a sleep
* state occurs when it is undergoing a transition to that sleep state, the
* transition should be aborted. Moreover, if such an event occurs when the
* system is in the working state, an attempt to start a transition to the
given sleep state should fail during a certain period after the detection of
* the event. Using the 'state' attribute alone is not sufficient to satisfy
* these requirements, because a wakeup event may occur exactly when 'state'
* is being written to and may be delivered to user space right before it is
* frozen, so the event will remain only partially processed until the system is
* woken up by another event. In particular, it won't cause the transition to
* a sleep state to be aborted.
*
* This difficulty may be overcome if user space uses 'wakeup_count' before
* writing to 'state'. It first should read from 'wakeup_count' and store
* the read value. Then, after carrying out its own preparations for the system
* transition to a sleep state, it should write the stored value to
* 'wakeup_count'. If that fails, at least one wakeup event has occurred since
* 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
* is allowed to write to 'state', but the transition will be aborted if there
* are any wakeup events detected after 'wakeup_count' was written to.
*/
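/*
 * Illustrative user-space sketch of the protocol described above (not part
 * of this file; error handling omitted, standard sysfs mount assumed):
 *
 *   int wc = open("/sys/power/wakeup_count", O_RDWR);
 *   int st = open("/sys/power/state", O_WRONLY);
 *   char buf[32];
 *   ssize_t len = read(wc, buf, sizeof(buf) - 1);    //1. read the count
 *   //... finish user-space suspend preparations here ...
 *   if (write(wc, buf, len) == len)                  //2. write it back
 *       write(st, "mem", 3);                         //3. safe to suspend
 *   //a short write means a wakeup event arrived; abort this attempt
 */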
static ssize_t wakeup_count_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
unsigned int val;
return pm_get_wakeup_count(&val, true) ?
sprintf(buf, "%u\n", val) : -EINTR;
}
static ssize_t wakeup_count_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned int val;
int error;
error = pm_autosleep_lock();
if (error)
return error;
if (pm_autosleep_state() > PM_SUSPEND_ON) {
error = -EBUSY;
goto out;
}
error = -EINVAL;
if (sscanf(buf, "%u", &val) == 1) {
if (pm_save_wakeup_count(val))
error = n;
else
pm_print_active_wakeup_sources();
}
out:
pm_autosleep_unlock();
return error;
}
power_attr(wakeup_count);
#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t autosleep_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
suspend_state_t state = pm_autosleep_state();
if (state == PM_SUSPEND_ON)
return sprintf(buf, "off\n");
#ifdef CONFIG_SUSPEND
if (state < PM_SUSPEND_MAX)
return sprintf(buf, "%s\n", pm_states[state] ?
pm_states[state] : "error");
#endif
#ifdef CONFIG_HIBERNATION
return sprintf(buf, "disk\n");
#else
return sprintf(buf, "error");
#endif
}
static ssize_t autosleep_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
suspend_state_t state = decode_state(buf, n);
int error;
if (state == PM_SUSPEND_ON
&& strcmp(buf, "off") && strcmp(buf, "off\n"))
return -EINVAL;
error = pm_autosleep_set_state(state);
return error ? error : n;
}
power_attr(autosleep);
#endif /* CONFIG_PM_AUTOSLEEP */
#ifdef CONFIG_PM_WAKELOCKS
static ssize_t wake_lock_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return pm_show_wakelocks(buf, true);
}
static ssize_t wake_lock_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
int error = pm_wake_lock(buf);
return error ? error : n;
}
power_attr(wake_lock);
static ssize_t wake_unlock_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return pm_show_wakelocks(buf, false);
}
static ssize_t wake_unlock_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
int error = pm_wake_unlock(buf);
return error ? error : n;
}
power_attr(wake_unlock);
#endif /* CONFIG_PM_WAKELOCKS */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;
static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", pm_trace_enabled);
}
static ssize_t
pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
int val;
if (sscanf(buf, "%d", &val) == 1) {
pm_trace_enabled = !!val;
if (pm_trace_enabled) {
pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n"
"PM: Correct system time has to be restored manually after resume.\n");
}
return n;
}
return -EINVAL;
}
power_attr(pm_trace);
static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return show_trace_dev_match(buf, PAGE_SIZE);
}
static ssize_t
pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
return -EINVAL;
}
power_attr(pm_trace_dev_match);
#endif /* CONFIG_PM_TRACE */
#ifdef CONFIG_FREEZER
static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", freeze_timeout_msecs);
}
static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
freeze_timeout_msecs = val;
return n;
}
power_attr(pm_freeze_timeout);
#endif /* CONFIG_FREEZER*/
static struct attribute * g[] = {
&state_attr.attr,
#ifdef CONFIG_PM_TRACE
&pm_trace_attr.attr,
&pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
&pm_async_attr.attr,
&wakeup_count_attr.attr,
#ifdef CONFIG_PM_AUTOSLEEP
&autosleep_attr.attr,
#endif
#ifdef CONFIG_PM_WAKELOCKS
&wake_lock_attr.attr,
&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_DEBUG
&pm_test_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
&pm_print_times_attr.attr,
#endif
#endif
#ifdef CONFIG_FREEZER
&pm_freeze_timeout_attr.attr,
#endif
NULL,
};
static struct attribute_group attr_group = {
.attrs = g,
};
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);
static int __init pm_start_workqueue(void)
{
pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
return pm_wq ? 0 : -ENOMEM;
}
static int __init pm_init(void)
{
int error = pm_start_workqueue();
if (error)
return error;
hibernate_image_size_init();
hibernate_reserved_size_init();
power_kobj = kobject_create_and_add("power", NULL);
if (!power_kobj)
return -ENOMEM;
error = sysfs_create_group(power_kobj, &attr_group);
if (error)
return error;
pm_print_times_init();
return pm_autosleep_init();
}
core_initcall(pm_init);
| gpl-2.0 |
ZolaIII/android_kernel_synopsis_nightly | drivers/video/msm/mipi_toshiba.c | 497 | 11412 | /* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "msm_fb.h"
#include "mipi_dsi.h"
#include "mipi_toshiba.h"
static struct pwm_device *bl_lpm;
static struct mipi_dsi_panel_platform_data *mipi_toshiba_pdata;
#define TM_GET_PID(id) (((id) & 0xff00)>>8)
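/*
 * The platform device id packs the panel type and channel as
 * (panel << 8) | channel (see mipi_toshiba_device_register() below), so
 * TM_GET_PID() recovers the panel byte: for example, an id of 0x0102
 * yields panel 0x01, while the low byte 0x02 is the channel.
 */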
static struct dsi_buf toshiba_tx_buf;
static struct dsi_buf toshiba_rx_buf;
static int mipi_toshiba_lcd_init(void);
#ifdef TOSHIBA_CMDS_UNUSED
static char one_lane[3] = {0xEF, 0x60, 0x62};
static char dmode_wqvga[2] = {0xB3, 0x01};
static char intern_wr_clk1_wqvga[3] = {0xef, 0x2f, 0x22};
static char intern_wr_clk2_wqvga[3] = {0xef, 0x6e, 0x33};
static char hor_addr_2A_wqvga[5] = {0x2A, 0x00, 0x00, 0x00, 0xef};
static char hor_addr_2B_wqvga[5] = {0x2B, 0x00, 0x00, 0x01, 0xaa};
static char if_sel_cmd[2] = {0x53, 0x00};
#endif
static char exit_sleep[2] = {0x11, 0x00};
static char display_on[2] = {0x29, 0x00};
static char display_off[2] = {0x28, 0x00};
static char enter_sleep[2] = {0x10, 0x00};
static char mcap_off[2] = {0xb2, 0x00};
static char ena_test_reg[3] = {0xEF, 0x01, 0x01};
static char two_lane[3] = {0xEF, 0x60, 0x63};
static char non_burst_sync_pulse[3] = {0xef, 0x61, 0x09};
static char dmode_wvga[2] = {0xB3, 0x00};
static char intern_wr_clk1_wvga[3] = {0xef, 0x2f, 0xcc};
static char intern_wr_clk2_wvga[3] = {0xef, 0x6e, 0xdd};
static char hor_addr_2A_wvga[5] = {0x2A, 0x00, 0x00, 0x01, 0xdf};
static char hor_addr_2B_wvga[5] = {0x2B, 0x00, 0x00, 0x03, 0x55};
static char if_sel_video[2] = {0x53, 0x01};
static struct dsi_cmd_desc toshiba_wvga_display_on_cmds[] = {
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(mcap_off), mcap_off},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(ena_test_reg), ena_test_reg},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(two_lane), two_lane},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(non_burst_sync_pulse),
non_burst_sync_pulse},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(dmode_wvga), dmode_wvga},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(intern_wr_clk1_wvga),
intern_wr_clk1_wvga},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(intern_wr_clk2_wvga),
intern_wr_clk2_wvga},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(hor_addr_2A_wvga),
hor_addr_2A_wvga},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(hor_addr_2B_wvga),
hor_addr_2B_wvga},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(if_sel_video), if_sel_video},
{DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(exit_sleep), exit_sleep},
{DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(display_on), display_on}
};
static char mcap_start[2] = {0xb0, 0x04};
static char num_out_pixelform[3] = {0xb3, 0x00, 0x87};
static char dsi_ctrl[3] = {0xb6, 0x30, 0x83};
static char panel_driving[7] = {0xc0, 0x01, 0x00, 0x85, 0x00, 0x00, 0x00};
static char dispV_timing[5] = {0xc1, 0x00, 0x10, 0x00, 0x01};
static char dispCtrl[3] = {0xc3, 0x00, 0x19};
static char test_mode_c4[2] = {0xc4, 0x03};
static char dispH_timing[15] = {
/* TYPE_DCS_LWRITE */
0xc5, 0x00, 0x01, 0x05,
0x04, 0x5e, 0x00, 0x00,
0x00, 0x00, 0x0b, 0x17,
0x05, 0x00, 0x00
};
static char test_mode_c6[2] = {0xc6, 0x00};
static char gamma_setA[13] = {
0xc8, 0x0a, 0x15, 0x18,
0x1b, 0x1c, 0x0d, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00
};
static char gamma_setB[13] = {
0xc9, 0x0d, 0x1d, 0x1f,
0x1f, 0x1f, 0x10, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00
};
static char gamma_setC[13] = {
0xca, 0x1e, 0x1f, 0x1e,
0x1d, 0x1d, 0x10, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00
};
static char powerSet_ChrgPmp[5] = {0xd0, 0x02, 0x00, 0xa3, 0xb8};
static char testMode_d1[6] = {0xd1, 0x10, 0x14, 0x53, 0x64, 0x00};
static char powerSet_SrcAmp[3] = {0xd2, 0xb3, 0x00};
static char powerInt_PS[3] = {0xd3, 0x33, 0x03};
static char vreg[2] = {0xd5, 0x00};
static char test_mode_d6[2] = {0xd6, 0x01};
static char timingCtrl_d7[9] = {
0xd7, 0x09, 0x00, 0x84,
0x81, 0x61, 0xbc, 0xb5,
0x05
};
static char timingCtrl_d8[7] = {
0xd8, 0x04, 0x25, 0x90,
0x4c, 0x92, 0x00
};
static char timingCtrl_d9[4] = {0xd9, 0x5b, 0x7f, 0x05};
static char white_balance[6] = {0xcb, 0x00, 0x00, 0x00, 0x1c, 0x00};
static char vcs_settings[2] = {0xdd, 0x53};
static char vcom_dc_settings[2] = {0xde, 0x43};
static char testMode_e3[5] = {0xe3, 0x00, 0x00, 0x00, 0x00};
static char testMode_e4[6] = {0xe4, 0x00, 0x00, 0x22, 0xaa, 0x00};
static char testMode_e5[2] = {0xe5, 0x00};
static char testMode_fa[4] = {0xfa, 0x00, 0x00, 0x00};
static char testMode_fd[5] = {0xfd, 0x00, 0x00, 0x00, 0x00};
static char testMode_fe[5] = {0xfe, 0x00, 0x00, 0x00, 0x00};
static char mcap_end[2] = {0xb0, 0x03};
static char set_add_mode[2] = {0x36, 0x0};
static char set_pixel_format[2] = {0x3a, 0x70};
static struct dsi_cmd_desc toshiba_wsvga_display_on_cmds[] = {
{DTYPE_GEN_WRITE2, 1, 0, 0, 10, sizeof(mcap_start), mcap_start},
{DTYPE_GEN_LWRITE, 1, 0, 0, 10, sizeof(num_out_pixelform),
num_out_pixelform},
{DTYPE_GEN_LWRITE, 1, 0, 0, 10, sizeof(dsi_ctrl), dsi_ctrl},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(panel_driving), panel_driving},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(dispV_timing), dispV_timing},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(dispCtrl), dispCtrl},
{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(test_mode_c4), test_mode_c4},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(dispH_timing), dispH_timing},
{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(test_mode_c6), test_mode_c6},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(gamma_setA), gamma_setA},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(gamma_setB), gamma_setB},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(gamma_setC), gamma_setC},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(powerSet_ChrgPmp),
powerSet_ChrgPmp},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(testMode_d1), testMode_d1},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(powerSet_SrcAmp),
powerSet_SrcAmp},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(powerInt_PS), powerInt_PS},
{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(vreg), vreg},
{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(test_mode_d6), test_mode_d6},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(timingCtrl_d7), timingCtrl_d7},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(timingCtrl_d8), timingCtrl_d8},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(timingCtrl_d9), timingCtrl_d9},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(white_balance), white_balance},
{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(vcs_settings), vcs_settings},
{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(vcom_dc_settings),
vcom_dc_settings},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(testMode_e3), testMode_e3},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(testMode_e4), testMode_e4},
{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(testMode_e5), testMode_e5},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(testMode_fa), testMode_fa},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(testMode_fd), testMode_fd},
{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(testMode_fe), testMode_fe},
{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(mcap_end), mcap_end},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_add_mode), set_add_mode},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_pixel_format),
set_pixel_format},
{DTYPE_DCS_WRITE, 1, 0, 0, 120, sizeof(exit_sleep), exit_sleep},
{DTYPE_DCS_WRITE, 1, 0, 0, 50, sizeof(display_on), display_on}
};
static struct dsi_cmd_desc toshiba_display_off_cmds[] = {
{DTYPE_DCS_WRITE, 1, 0, 0, 50, sizeof(display_off), display_off},
{DTYPE_DCS_WRITE, 1, 0, 0, 120, sizeof(enter_sleep), enter_sleep}
};
static int mipi_toshiba_lcd_on(struct platform_device *pdev)
{
struct msm_fb_data_type *mfd;
mfd = platform_get_drvdata(pdev);
if (!mfd)
return -ENODEV;
if (mfd->key != MFD_KEY)
return -EINVAL;
if (TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WVGA_PT)
mipi_dsi_cmds_tx(&toshiba_tx_buf,
toshiba_wvga_display_on_cmds,
ARRAY_SIZE(toshiba_wvga_display_on_cmds));
else if (TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WSVGA_PT ||
TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WUXGA)
mipi_dsi_cmds_tx(&toshiba_tx_buf,
toshiba_wsvga_display_on_cmds,
ARRAY_SIZE(toshiba_wsvga_display_on_cmds));
else
return -EINVAL;
return 0;
}
static int mipi_toshiba_lcd_off(struct platform_device *pdev)
{
struct msm_fb_data_type *mfd;
mfd = platform_get_drvdata(pdev);
if (!mfd)
return -ENODEV;
if (mfd->key != MFD_KEY)
return -EINVAL;
mipi_dsi_cmds_tx(&toshiba_tx_buf, toshiba_display_off_cmds,
ARRAY_SIZE(toshiba_display_off_cmds));
return 0;
}
void mipi_bklight_pwm_cfg(void)
{
if (mipi_toshiba_pdata && mipi_toshiba_pdata->dsi_pwm_cfg)
mipi_toshiba_pdata->dsi_pwm_cfg();
}
static void mipi_toshiba_set_backlight(struct msm_fb_data_type *mfd)
{
int ret;
static int bklight_pwm_cfg;
if (bklight_pwm_cfg == 0) {
mipi_bklight_pwm_cfg();
bklight_pwm_cfg++;
}
if (bl_lpm) {
ret = pwm_config(bl_lpm, MIPI_TOSHIBA_PWM_DUTY_LEVEL *
mfd->bl_level, MIPI_TOSHIBA_PWM_PERIOD_USEC);
if (ret) {
pr_err("pwm_config on lpm failed %d\n", ret);
return;
}
if (mfd->bl_level) {
ret = pwm_enable(bl_lpm);
if (ret)
pr_err("pwm enable/disable on lpm failed "
"for bl %d\n", mfd->bl_level);
} else {
pwm_disable(bl_lpm);
}
}
}
static int __devinit mipi_toshiba_lcd_probe(struct platform_device *pdev)
{
if (pdev->id == 0) {
mipi_toshiba_pdata = pdev->dev.platform_data;
return 0;
}
if (mipi_toshiba_pdata == NULL) {
pr_err("%s.invalid platform data.\n", __func__);
return -ENODEV;
}
if (mipi_toshiba_pdata != NULL)
bl_lpm = pwm_request(mipi_toshiba_pdata->gpio[0],
"backlight");
if (bl_lpm == NULL || IS_ERR(bl_lpm)) {
pr_err("%s pwm_request() failed\n", __func__);
bl_lpm = NULL;
}
pr_debug("bl_lpm = %p lpm = %d\n", bl_lpm,
mipi_toshiba_pdata->gpio[0]);
msm_fb_add_device(pdev);
return 0;
}
static struct platform_driver this_driver = {
.probe = mipi_toshiba_lcd_probe,
.driver = {
.name = "mipi_toshiba",
},
};
static struct msm_fb_panel_data toshiba_panel_data = {
.on = mipi_toshiba_lcd_on,
.off = mipi_toshiba_lcd_off,
.set_backlight = mipi_toshiba_set_backlight,
};
static int ch_used[3];
int mipi_toshiba_device_register(struct msm_panel_info *pinfo,
u32 channel, u32 panel)
{
struct platform_device *pdev = NULL;
int ret;
if ((channel >= 3) || ch_used[channel])
return -ENODEV;
ch_used[channel] = TRUE;
ret = mipi_toshiba_lcd_init();
if (ret) {
pr_err("mipi_toshiba_lcd_init() failed with ret %d\n", ret);
return ret;
}
pdev = platform_device_alloc("mipi_toshiba", (panel << 8)|channel);
if (!pdev)
return -ENOMEM;
toshiba_panel_data.panel_info = *pinfo;
ret = platform_device_add_data(pdev, &toshiba_panel_data,
sizeof(toshiba_panel_data));
if (ret) {
printk(KERN_ERR
"%s: platform_device_add_data failed!\n", __func__);
goto err_device_put;
}
ret = platform_device_add(pdev);
if (ret) {
printk(KERN_ERR
"%s: platform_device_register failed!\n", __func__);
goto err_device_put;
}
return 0;
err_device_put:
platform_device_put(pdev);
return ret;
}
static int mipi_toshiba_lcd_init(void)
{
mipi_dsi_buf_alloc(&toshiba_tx_buf, DSI_BUF_SIZE);
mipi_dsi_buf_alloc(&toshiba_rx_buf, DSI_BUF_SIZE);
return platform_driver_register(&this_driver);
}
| gpl-2.0 |
mcqaissi/galaxy-s3-kernel | drivers/tty/serial/atmel_serial.c | 753 | 45961 | /*
* Driver for Atmel AT91 / AT32 Serial ports
* Copyright (C) 2003 Rick Bronson
*
* Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* DMA support added by Chip Coldwell.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/atmel_pdc.h>
#include <linux/atmel_serial.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/ioctls.h>
#include <asm/mach/serial_at91.h>
#include <mach/board.h>
#ifdef CONFIG_ARM
#include <mach/cpu.h>
#include <mach/gpio.h>
#endif
#define PDC_BUFFER_SIZE 512
/* Revisit: We should calculate this based on the actual port settings */
#define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */
#if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/serial_core.h>
static void atmel_start_rx(struct uart_port *port);
static void atmel_stop_rx(struct uart_port *port);
#ifdef CONFIG_SERIAL_ATMEL_TTYAT
/* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
* should coexist with the 8250 driver, such as if we have an external 16C550
* UART. */
#define SERIAL_ATMEL_MAJOR 204
#define MINOR_START 154
#define ATMEL_DEVICENAME "ttyAT"
#else
/* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
* name, but it is legally reserved for the 8250 driver. */
#define SERIAL_ATMEL_MAJOR TTY_MAJOR
#define MINOR_START 64
#define ATMEL_DEVICENAME "ttyS"
#endif
#define ATMEL_ISR_PASS_LIMIT 256
/* UART registers. CR is write-only, hence no GET macro */
#define UART_PUT_CR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_CR)
#define UART_GET_MR(port) __raw_readl((port)->membase + ATMEL_US_MR)
#define UART_PUT_MR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_MR)
#define UART_PUT_IER(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IER)
#define UART_PUT_IDR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IDR)
#define UART_GET_IMR(port) __raw_readl((port)->membase + ATMEL_US_IMR)
#define UART_GET_CSR(port) __raw_readl((port)->membase + ATMEL_US_CSR)
#define UART_GET_CHAR(port) __raw_readl((port)->membase + ATMEL_US_RHR)
#define UART_PUT_CHAR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_THR)
#define UART_GET_BRGR(port) __raw_readl((port)->membase + ATMEL_US_BRGR)
#define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR)
#define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR)
#define UART_PUT_TTGR(port, v) __raw_writel(v, (port)->membase + ATMEL_US_TTGR)
/* PDC registers */
#define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
#define UART_GET_PTSR(port) __raw_readl((port)->membase + ATMEL_PDC_PTSR)
#define UART_PUT_RPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RPR)
#define UART_GET_RPR(port) __raw_readl((port)->membase + ATMEL_PDC_RPR)
#define UART_PUT_RCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RCR)
#define UART_PUT_RNPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RNPR)
#define UART_PUT_RNCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RNCR)
#define UART_PUT_TPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TPR)
#define UART_PUT_TCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TCR)
#define UART_GET_TCR(port) __raw_readl((port)->membase + ATMEL_PDC_TCR)
static int (*atmel_open_hook)(struct uart_port *);
static void (*atmel_close_hook)(struct uart_port *);
struct atmel_dma_buffer {
unsigned char *buf;
dma_addr_t dma_addr;
unsigned int dma_size;
unsigned int ofs;
};
struct atmel_uart_char {
u16 status;
u16 ch;
};
#define ATMEL_SERIAL_RINGSIZE 1024
/*
* We wrap our port structure around the generic uart_port.
*/
struct atmel_uart_port {
struct uart_port uart; /* uart */
struct clk *clk; /* uart clock */
int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */
u32 backup_imr; /* IMR saved during suspend */
int break_active; /* break being received */
short use_dma_rx; /* enable PDC receiver */
short pdc_rx_idx; /* current PDC RX buffer */
struct atmel_dma_buffer pdc_rx[2]; /* PDC receiver */
short use_dma_tx; /* enable PDC transmitter */
struct atmel_dma_buffer pdc_tx; /* PDC transmitter */
struct tasklet_struct tasklet;
unsigned int irq_status;
unsigned int irq_status_prev;
struct circ_buf rx_ring;
struct serial_rs485 rs485; /* rs485 settings */
unsigned int tx_done_mask;
};
static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
#ifdef SUPPORT_SYSRQ
static struct console atmel_console;
#endif
static inline struct atmel_uart_port *
to_atmel_uart_port(struct uart_port *uart)
{
return container_of(uart, struct atmel_uart_port, uart);
}
#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_dma_rx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
return atmel_port->use_dma_rx;
}
static bool atmel_use_dma_tx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
return atmel_port->use_dma_tx;
}
#else
static bool atmel_use_dma_rx(struct uart_port *port)
{
return false;
}
static bool atmel_use_dma_tx(struct uart_port *port)
{
return false;
}
#endif
/* Enable or disable the rs485 support */
void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int mode;
spin_lock(&port->lock);
/* Disable interrupts */
UART_PUT_IDR(port, atmel_port->tx_done_mask);
mode = UART_GET_MR(port);
/* Resetting serial mode to RS232 (0x0) */
mode &= ~ATMEL_US_USMODE;
atmel_port->rs485 = *rs485conf;
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
if (atmel_use_dma_tx(port))
atmel_port->tx_done_mask = ATMEL_US_ENDTX |
ATMEL_US_TXBUFE;
else
atmel_port->tx_done_mask = ATMEL_US_TXRDY;
}
UART_PUT_MR(port, mode);
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
spin_unlock(&port->lock);
}
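/*
 * Illustrative sketch only: user space normally requests RS485 mode via
 * the generic serial_rs485 ioctl rather than by calling this function
 * directly (assuming the usual TIOCSRS485 plumbing; fd and the delay
 * value below are hypothetical):
 *
 *   struct serial_rs485 rs485conf = { 0 };
 *   rs485conf.flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND;
 *   rs485conf.delay_rts_after_send = 1;
 *   ioctl(fd, TIOCSRS485, &rs485conf);
 */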
/*
* Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty.
*/
static u_int atmel_tx_empty(struct uart_port *port)
{
return (UART_GET_CSR(port) & ATMEL_US_TXEMPTY) ? TIOCSER_TEMT : 0;
}
/*
* Set state of the modem control output lines
*/
static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
{
unsigned int control = 0;
unsigned int mode;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
#ifdef CONFIG_ARCH_AT91RM9200
if (cpu_is_at91rm9200()) {
/*
* AT91RM9200 Errata #39: RTS0 is not internally connected
* to PA21. We need to drive the pin manually.
*/
if (port->mapbase == AT91RM9200_BASE_US0) {
if (mctrl & TIOCM_RTS)
at91_set_gpio_value(AT91_PIN_PA21, 0);
else
at91_set_gpio_value(AT91_PIN_PA21, 1);
}
}
#endif
if (mctrl & TIOCM_RTS)
control |= ATMEL_US_RTSEN;
else
control |= ATMEL_US_RTSDIS;
if (mctrl & TIOCM_DTR)
control |= ATMEL_US_DTREN;
else
control |= ATMEL_US_DTRDIS;
UART_PUT_CR(port, control);
/* Local loopback mode? */
mode = UART_GET_MR(port) & ~ATMEL_US_CHMODE;
if (mctrl & TIOCM_LOOP)
mode |= ATMEL_US_CHMODE_LOC_LOOP;
else
mode |= ATMEL_US_CHMODE_NORMAL;
/* Resetting serial mode to RS232 (0x0) */
mode &= ~ATMEL_US_USMODE;
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
UART_PUT_TTGR(port,
atmel_port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
}
UART_PUT_MR(port, mode);
}
/*
* Get state of the modem control input lines
*/
static u_int atmel_get_mctrl(struct uart_port *port)
{
unsigned int status, ret = 0;
status = UART_GET_CSR(port);
/*
* The control signals are active low.
*/
if (!(status & ATMEL_US_DCD))
ret |= TIOCM_CD;
if (!(status & ATMEL_US_CTS))
ret |= TIOCM_CTS;
if (!(status & ATMEL_US_DSR))
ret |= TIOCM_DSR;
if (!(status & ATMEL_US_RI))
ret |= TIOCM_RI;
return ret;
}
/*
* Stop transmitting.
*/
static void atmel_stop_tx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_dma_tx(port)) {
/* disable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
}
/* Disable interrupts */
UART_PUT_IDR(port, atmel_port->tx_done_mask);
if (atmel_port->rs485.flags & SER_RS485_ENABLED)
atmel_start_rx(port);
}
/*
* Start transmitting.
*/
static void atmel_start_tx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_dma_tx(port)) {
if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN)
/* The transmitter is already running. Yes, we
really need this.*/
return;
if (atmel_port->rs485.flags & SER_RS485_ENABLED)
atmel_stop_rx(port);
/* re-enable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
}
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
}
/*
* start receiving - port is in process of being opened.
*/
static void atmel_start_rx(struct uart_port *port)
{
UART_PUT_CR(port, ATMEL_US_RSTSTA); /* reset status and receiver */
if (atmel_use_dma_rx(port)) {
/* enable PDC controller */
UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
port->read_status_mask);
UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
} else {
UART_PUT_IER(port, ATMEL_US_RXRDY);
}
}
/*
* Stop receiving - port is in process of being closed.
*/
static void atmel_stop_rx(struct uart_port *port)
{
if (atmel_use_dma_rx(port)) {
/* disable PDC receive */
UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
port->read_status_mask);
} else {
UART_PUT_IDR(port, ATMEL_US_RXRDY);
}
}
/*
* Enable modem status interrupts
*/
static void atmel_enable_ms(struct uart_port *port)
{
UART_PUT_IER(port, ATMEL_US_RIIC | ATMEL_US_DSRIC
| ATMEL_US_DCDIC | ATMEL_US_CTSIC);
}
/*
* Control the transmission of a break signal
*/
static void atmel_break_ctl(struct uart_port *port, int break_state)
{
if (break_state != 0)
UART_PUT_CR(port, ATMEL_US_STTBRK); /* start break */
else
UART_PUT_CR(port, ATMEL_US_STPBRK); /* stop break */
}
/*
* Stores the incoming character in the ring buffer
*/
static void
atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
unsigned int ch)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *ring = &atmel_port->rx_ring;
struct atmel_uart_char *c;
if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
/* Buffer overflow, ignore char */
return;
c = &((struct atmel_uart_char *)ring->buf)[ring->head];
c->status = status;
c->ch = ch;
/* Make sure the character is stored before we update head. */
smp_wmb();
ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
}
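/*
 * ATMEL_SERIAL_RINGSIZE is a power of two (1024), so masking the index
 * with size - 1 wraps it cheaply: for example, a head of 1023 advances
 * to (1023 + 1) & 1023 == 0.
 */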
/*
* Deal with parity, framing and overrun errors.
*/
static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
{
/* clear error */
UART_PUT_CR(port, ATMEL_US_RSTSTA);
if (status & ATMEL_US_RXBRK) {
/* ignore side-effect */
status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
port->icount.brk++;
}
if (status & ATMEL_US_PARE)
port->icount.parity++;
if (status & ATMEL_US_FRAME)
port->icount.frame++;
if (status & ATMEL_US_OVRE)
port->icount.overrun++;
}
/*
* Characters received (called from interrupt handler)
*/
static void atmel_rx_chars(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status, ch;
status = UART_GET_CSR(port);
while (status & ATMEL_US_RXRDY) {
ch = UART_GET_CHAR(port);
/*
* note that the error handling code is
* out of the main execution path
*/
if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
| ATMEL_US_OVRE | ATMEL_US_RXBRK)
|| atmel_port->break_active)) {
/* clear error */
UART_PUT_CR(port, ATMEL_US_RSTSTA);
if (status & ATMEL_US_RXBRK
&& !atmel_port->break_active) {
atmel_port->break_active = 1;
UART_PUT_IER(port, ATMEL_US_RXBRK);
} else {
/*
* This is either the end-of-break
* condition or we've received at
* least one character without RXBRK
* being set. In both cases, the next
* RXBRK will indicate start-of-break.
*/
UART_PUT_IDR(port, ATMEL_US_RXBRK);
status &= ~ATMEL_US_RXBRK;
atmel_port->break_active = 0;
}
}
atmel_buffer_rx_char(port, status, ch);
status = UART_GET_CSR(port);
}
tasklet_schedule(&atmel_port->tasklet);
}
/*
* Transmit characters (called from tasklet with TXRDY interrupt
* disabled)
*/
static void atmel_tx_chars(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (port->x_char && UART_GET_CSR(port) & atmel_port->tx_done_mask) {
UART_PUT_CHAR(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return;
while (UART_GET_CSR(port) & atmel_port->tx_done_mask) {
UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
if (uart_circ_empty(xmit))
break;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (!uart_circ_empty(xmit))
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
}
/*
* receive interrupt handler.
*/
static void
atmel_handle_receive(struct uart_port *port, unsigned int pending)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_dma_rx(port)) {
/*
* PDC receive. Just schedule the tasklet and let it
* figure out the details.
*
* TODO: We're not handling error flags correctly at
* the moment.
*/
if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
UART_PUT_IDR(port, (ATMEL_US_ENDRX
| ATMEL_US_TIMEOUT));
tasklet_schedule(&atmel_port->tasklet);
}
if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
ATMEL_US_FRAME | ATMEL_US_PARE))
atmel_pdc_rxerr(port, pending);
}
/* Interrupt receive */
if (pending & ATMEL_US_RXRDY)
atmel_rx_chars(port);
else if (pending & ATMEL_US_RXBRK) {
/*
* End of break detected. If it came along with a
* character, atmel_rx_chars will handle it.
*/
UART_PUT_CR(port, ATMEL_US_RSTSTA);
UART_PUT_IDR(port, ATMEL_US_RXBRK);
atmel_port->break_active = 0;
}
}
/*
* transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
*/
static void
atmel_handle_transmit(struct uart_port *port, unsigned int pending)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (pending & atmel_port->tx_done_mask) {
/* Either PDC or interrupt transmission */
UART_PUT_IDR(port, atmel_port->tx_done_mask);
tasklet_schedule(&atmel_port->tasklet);
}
}
/*
* status flags interrupt handler.
*/
static void
atmel_handle_status(struct uart_port *port, unsigned int pending,
unsigned int status)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
| ATMEL_US_CTSIC)) {
atmel_port->irq_status = status;
tasklet_schedule(&atmel_port->tasklet);
}
}
/*
* Interrupt handler
*/
static irqreturn_t atmel_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
unsigned int status, pending, pass_counter = 0;
do {
status = UART_GET_CSR(port);
pending = status & UART_GET_IMR(port);
if (!pending)
break;
atmel_handle_receive(port, pending);
atmel_handle_status(port, pending, status);
atmel_handle_transmit(port, pending);
} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
return pass_counter ? IRQ_HANDLED : IRQ_NONE;
}
/*
* Called from tasklet with ENDTX and TXBUFE interrupts disabled.
*/
static void atmel_tx_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *xmit = &port->state->xmit;
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
int count;
/* nothing left to transmit? */
if (UART_GET_TCR(port))
return;
xmit->tail += pdc->ofs;
xmit->tail &= UART_XMIT_SIZE - 1;
port->icount.tx += pdc->ofs;
pdc->ofs = 0;
/* more to transmit - setup next transfer */
/* disable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
dma_sync_single_for_device(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_TO_DEVICE);
count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
pdc->ofs = count;
UART_PUT_TPR(port, pdc->dma_addr + xmit->tail);
UART_PUT_TCR(port, count);
/* re-enable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
} else {
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
/* DMA done, stop TX, start RX for RS485 */
atmel_start_rx(port);
}
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
}
static void atmel_rx_from_ring(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *ring = &atmel_port->rx_ring;
unsigned int flg;
unsigned int status;
while (ring->head != ring->tail) {
struct atmel_uart_char c;
/* Make sure c is loaded after head. */
smp_rmb();
c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
port->icount.rx++;
status = c.status;
flg = TTY_NORMAL;
/*
* note that the error handling code is
* out of the main execution path
*/
if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
| ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
if (status & ATMEL_US_RXBRK) {
/* ignore side-effect */
status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
port->icount.brk++;
if (uart_handle_break(port))
continue;
}
if (status & ATMEL_US_PARE)
port->icount.parity++;
if (status & ATMEL_US_FRAME)
port->icount.frame++;
if (status & ATMEL_US_OVRE)
port->icount.overrun++;
status &= port->read_status_mask;
if (status & ATMEL_US_RXBRK)
flg = TTY_BREAK;
else if (status & ATMEL_US_PARE)
flg = TTY_PARITY;
else if (status & ATMEL_US_FRAME)
flg = TTY_FRAME;
}
if (uart_handle_sysrq_char(port, c.ch))
continue;
uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
}
/*
* Drop the lock here since it might end up calling
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
tty_flip_buffer_push(port->state->port.tty);
spin_lock(&port->lock);
}
static void atmel_rx_from_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct tty_struct *tty = port->state->port.tty;
struct atmel_dma_buffer *pdc;
int rx_idx = atmel_port->pdc_rx_idx;
unsigned int head;
unsigned int tail;
unsigned int count;
do {
/* Reset the UART timeout early so that we don't miss one */
UART_PUT_CR(port, ATMEL_US_STTTO);
pdc = &atmel_port->pdc_rx[rx_idx];
head = UART_GET_RPR(port) - pdc->dma_addr;
tail = pdc->ofs;
/* If the PDC has switched buffers, RPR won't contain
* any address within the current buffer. Since head
* is unsigned, we just need a one-way comparison to
* find out.
*
* In this case, we just need to consume the entire
* buffer and resubmit it for DMA. This will clear the
* ENDRX bit as well, so that we can safely re-enable
* all interrupts below.
*/
head = min(head, pdc->dma_size);
if (likely(head != tail)) {
dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
pdc->dma_size, DMA_FROM_DEVICE);
/*
* head will only wrap around when we recycle
* the DMA buffer, and when that happens, we
* explicitly set tail to 0. So head will
* always be greater than tail.
*/
count = head - tail;
tty_insert_flip_string(tty, pdc->buf + pdc->ofs, count);
dma_sync_single_for_device(port->dev, pdc->dma_addr,
pdc->dma_size, DMA_FROM_DEVICE);
port->icount.rx += count;
pdc->ofs = head;
}
/*
* If the current buffer is full, we need to check if
* the next one contains any additional data.
*/
if (head >= pdc->dma_size) {
pdc->ofs = 0;
UART_PUT_RNPR(port, pdc->dma_addr);
UART_PUT_RNCR(port, pdc->dma_size);
rx_idx = !rx_idx;
atmel_port->pdc_rx_idx = rx_idx;
}
} while (head >= pdc->dma_size);
/*
* Drop the lock here since it might end up calling
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
tty_flip_buffer_push(tty);
spin_lock(&port->lock);
UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}
/*
* tasklet handling tty stuff outside the interrupt handler.
*/
static void atmel_tasklet_func(unsigned long data)
{
struct uart_port *port = (struct uart_port *)data;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status;
unsigned int status_change;
/* The interrupt handler does not take the lock */
spin_lock(&port->lock);
if (atmel_use_dma_tx(port))
atmel_tx_dma(port);
else
atmel_tx_chars(port);
status = atmel_port->irq_status;
status_change = status ^ atmel_port->irq_status_prev;
if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
| ATMEL_US_DCD | ATMEL_US_CTS)) {
/* TODO: All reads to CSR will clear these interrupts! */
if (status_change & ATMEL_US_RI)
port->icount.rng++;
if (status_change & ATMEL_US_DSR)
port->icount.dsr++;
if (status_change & ATMEL_US_DCD)
uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
if (status_change & ATMEL_US_CTS)
uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
wake_up_interruptible(&port->state->port.delta_msr_wait);
atmel_port->irq_status_prev = status;
}
if (atmel_use_dma_rx(port))
atmel_rx_from_dma(port);
else
atmel_rx_from_ring(port);
spin_unlock(&port->lock);
}
/*
* Perform initialization and enable port for reception
*/
static int atmel_startup(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct tty_struct *tty = port->state->port.tty;
int retval;
/*
* Ensure that no interrupts are enabled otherwise when
* request_irq() is called we could get stuck trying to
* handle an unexpected interrupt
*/
UART_PUT_IDR(port, -1);
/*
* Allocate the IRQ
*/
retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED,
tty ? tty->name : "atmel_serial", port);
if (retval) {
printk("atmel_serial: atmel_startup - Can't get irq\n");
return retval;
}
/*
* Initialize DMA (if necessary)
*/
if (atmel_use_dma_rx(port)) {
int i;
for (i = 0; i < 2; i++) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
if (pdc->buf == NULL) {
if (i != 0) {
dma_unmap_single(port->dev,
atmel_port->pdc_rx[0].dma_addr,
PDC_BUFFER_SIZE,
DMA_FROM_DEVICE);
kfree(atmel_port->pdc_rx[0].buf);
}
free_irq(port->irq, port);
return -ENOMEM;
}
pdc->dma_addr = dma_map_single(port->dev,
pdc->buf,
PDC_BUFFER_SIZE,
DMA_FROM_DEVICE);
pdc->dma_size = PDC_BUFFER_SIZE;
pdc->ofs = 0;
}
atmel_port->pdc_rx_idx = 0;
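/*
 * Hand both buffers to the PDC: buffer 0 as the current receive
 * pointer/counter, buffer 1 as the "next" pair, so the controller can
 * switch buffers without dropping characters.
 */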
UART_PUT_RPR(port, atmel_port->pdc_rx[0].dma_addr);
UART_PUT_RCR(port, PDC_BUFFER_SIZE);
UART_PUT_RNPR(port, atmel_port->pdc_rx[1].dma_addr);
UART_PUT_RNCR(port, PDC_BUFFER_SIZE);
}
if (atmel_use_dma_tx(port)) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
struct circ_buf *xmit = &port->state->xmit;
pdc->buf = xmit->buf;
pdc->dma_addr = dma_map_single(port->dev,
pdc->buf,
UART_XMIT_SIZE,
DMA_TO_DEVICE);
pdc->dma_size = UART_XMIT_SIZE;
pdc->ofs = 0;
}
/*
* If there is a specific "open" function (to register
* control line interrupts)
*/
if (atmel_open_hook) {
retval = atmel_open_hook(port);
if (retval) {
free_irq(port->irq, port);
return retval;
}
}
/* Save current CSR for comparison in atmel_tasklet_func() */
atmel_port->irq_status_prev = UART_GET_CSR(port);
atmel_port->irq_status = atmel_port->irq_status_prev;
/*
* Finally, enable the serial port
*/
UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
/* enable xmit & rcvr */
UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
if (atmel_use_dma_rx(port)) {
/* set UART timeout */
UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
UART_PUT_CR(port, ATMEL_US_STTTO);
UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
/* enable PDC controller */
UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
} else {
/* enable receive only */
UART_PUT_IER(port, ATMEL_US_RXRDY);
}
return 0;
}
/*
* Disable the port
*/
static void atmel_shutdown(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/*
* Ensure everything is stopped.
*/
atmel_stop_rx(port);
atmel_stop_tx(port);
/*
* Shut-down the DMA.
*/
if (atmel_use_dma_rx(port)) {
int i;
for (i = 0; i < 2; i++) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
dma_unmap_single(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_FROM_DEVICE);
kfree(pdc->buf);
}
}
if (atmel_use_dma_tx(port)) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
dma_unmap_single(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_TO_DEVICE);
}
/*
* Disable all interrupts, port and break condition.
*/
UART_PUT_CR(port, ATMEL_US_RSTSTA);
UART_PUT_IDR(port, -1);
/*
* Free the interrupt
*/
free_irq(port->irq, port);
/*
* If there is a specific "close" function (to unregister
* control line interrupts)
*/
if (atmel_close_hook)
atmel_close_hook(port);
}
/*
* Flush any TX data submitted for DMA. Called when the TX circular
* buffer is reset.
*/
static void atmel_flush_buffer(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_dma_tx(port)) {
UART_PUT_TCR(port, 0);
atmel_port->pdc_tx.ofs = 0;
}
}
/*
* Power / Clock management.
*/
static void atmel_serial_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
switch (state) {
case 0:
/*
* Enable the peripheral clock for this serial port.
* This is called on uart_open() or a resume event.
*/
clk_enable(atmel_port->clk);
/* re-enable interrupts if we disabled some on suspend */
UART_PUT_IER(port, atmel_port->backup_imr);
break;
case 3:
/* Back up the interrupt mask and disable all interrupts */
atmel_port->backup_imr = UART_GET_IMR(port);
UART_PUT_IDR(port, -1);
/*
* Disable the peripheral clock for this serial port.
* This is called on uart_close() or a suspend event.
*/
clk_disable(atmel_port->clk);
break;
default:
printk(KERN_ERR "atmel_serial: unknown pm %d\n", state);
}
}
/*
* Change the port parameters
*/
static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
unsigned long flags;
unsigned int mode, imr, quot, baud;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/* Get current mode register */
mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL
| ATMEL_US_NBSTOP | ATMEL_US_PAR
| ATMEL_US_USMODE);
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
quot = uart_get_divisor(port, baud);
if (quot > 65535) { /* BRGR is 16-bit, so switch to slower clock */
quot /= 8;
mode |= ATMEL_US_USCLKS_MCK_DIV8;
}
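/*
 * Illustrative example (made-up numbers): with a 100 MHz peripheral
 * clock and a 75 baud request, the divisor is roughly
 * 100000000 / (16 * 75) = 83333, which overflows the 16-bit BRGR, so
 * it is divided by 8 and the MCK/8 baud rate clock is selected instead.
 */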
/* byte size */
switch (termios->c_cflag & CSIZE) {
case CS5:
mode |= ATMEL_US_CHRL_5;
break;
case CS6:
mode |= ATMEL_US_CHRL_6;
break;
case CS7:
mode |= ATMEL_US_CHRL_7;
break;
default:
mode |= ATMEL_US_CHRL_8;
break;
}
/* stop bits */
if (termios->c_cflag & CSTOPB)
mode |= ATMEL_US_NBSTOP_2;
/* parity */
if (termios->c_cflag & PARENB) {
/* Mark or Space parity */
if (termios->c_cflag & CMSPAR) {
if (termios->c_cflag & PARODD)
mode |= ATMEL_US_PAR_MARK;
else
mode |= ATMEL_US_PAR_SPACE;
} else if (termios->c_cflag & PARODD)
mode |= ATMEL_US_PAR_ODD;
else
mode |= ATMEL_US_PAR_EVEN;
} else
mode |= ATMEL_US_PAR_NONE;
/* hardware handshake (RTS/CTS) */
if (termios->c_cflag & CRTSCTS)
mode |= ATMEL_US_USMODE_HWHS;
else
mode |= ATMEL_US_USMODE_NORMAL;
spin_lock_irqsave(&port->lock, flags);
port->read_status_mask = ATMEL_US_OVRE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
if (termios->c_iflag & (BRKINT | PARMRK))
port->read_status_mask |= ATMEL_US_RXBRK;
if (atmel_use_dma_rx(port))
/* need to enable error interrupts */
UART_PUT_IER(port, port->read_status_mask);
/*
* Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= ATMEL_US_RXBRK;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= ATMEL_US_OVRE;
}
/* TODO: Ignore all characters if CREAD is not set. */
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
/*
* save/disable interrupts. The tty layer will ensure that the
* transmitter is empty if requested by the caller, so there's
* no need to wait for it here.
*/
imr = UART_GET_IMR(port);
UART_PUT_IDR(port, -1);
/* disable receiver and transmitter */
UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
/* Resetting serial mode to RS232 (0x0) */
mode &= ~ATMEL_US_USMODE;
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
UART_PUT_TTGR(port,
atmel_port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
}
/* set the parity, stop bits and data size */
UART_PUT_MR(port, mode);
/* set the baud rate */
UART_PUT_BRGR(port, quot);
UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
/* restore interrupts */
UART_PUT_IER(port, imr);
/* CTS flow-control and modem-status interrupts */
if (UART_ENABLE_MS(port, termios->c_cflag))
port->ops->enable_ms(port);
spin_unlock_irqrestore(&port->lock, flags);
}
static void atmel_set_ldisc(struct uart_port *port, int new)
{
int line = port->line;
if (line >= port->state->port.tty->driver->num)
return;
if (port->state->port.tty->ldisc->ops->num == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
atmel_enable_ms(port);
} else {
port->flags &= ~UPF_HARDPPS_CD;
}
}
/*
* Return string describing the specified port
*/
static const char *atmel_type(struct uart_port *port)
{
return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
}
/*
* Release the memory region(s) being used by 'port'.
*/
static void atmel_release_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
int size = pdev->resource[0].end - pdev->resource[0].start + 1;
release_mem_region(port->mapbase, size);
if (port->flags & UPF_IOREMAP) {
iounmap(port->membase);
port->membase = NULL;
}
}
/*
* Request the memory region(s) being used by 'port'.
*/
static int atmel_request_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
int size = pdev->resource[0].end - pdev->resource[0].start + 1;
if (!request_mem_region(port->mapbase, size, "atmel_serial"))
return -EBUSY;
if (port->flags & UPF_IOREMAP) {
port->membase = ioremap(port->mapbase, size);
if (port->membase == NULL) {
release_mem_region(port->mapbase, size);
return -ENOMEM;
}
}
return 0;
}
/*
* Configure/autoconfigure the port.
*/
static void atmel_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_ATMEL;
atmel_request_port(port);
}
}
/*
* Verify the new serial_struct (for TIOCSSERIAL).
*/
static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
{
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
ret = -EINVAL;
if (port->irq != ser->irq)
ret = -EINVAL;
if (ser->io_type != SERIAL_IO_MEM)
ret = -EINVAL;
if (port->uartclk / 16 != ser->baud_base)
ret = -EINVAL;
if ((void *)port->mapbase != ser->iomem_base)
ret = -EINVAL;
if (port->iobase != ser->port)
ret = -EINVAL;
if (ser->hub6 != 0)
ret = -EINVAL;
return ret;
}
#ifdef CONFIG_CONSOLE_POLL
static int atmel_poll_get_char(struct uart_port *port)
{
while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
cpu_relax();
return UART_GET_CHAR(port);
}
static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
{
while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
cpu_relax();
UART_PUT_CHAR(port, ch);
}
#endif
static int
atmel_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg)
{
struct serial_rs485 rs485conf;
switch (cmd) {
case TIOCSRS485:
if (copy_from_user(&rs485conf, (struct serial_rs485 *) arg,
sizeof(rs485conf)))
return -EFAULT;
atmel_config_rs485(port, &rs485conf);
break;
case TIOCGRS485:
if (copy_to_user((struct serial_rs485 *) arg,
&(to_atmel_uart_port(port)->rs485),
sizeof(rs485conf)))
return -EFAULT;
break;
default:
return -ENOIOCTLCMD;
}
return 0;
}
static struct uart_ops atmel_pops = {
.tx_empty = atmel_tx_empty,
.set_mctrl = atmel_set_mctrl,
.get_mctrl = atmel_get_mctrl,
.stop_tx = atmel_stop_tx,
.start_tx = atmel_start_tx,
.stop_rx = atmel_stop_rx,
.enable_ms = atmel_enable_ms,
.break_ctl = atmel_break_ctl,
.startup = atmel_startup,
.shutdown = atmel_shutdown,
.flush_buffer = atmel_flush_buffer,
.set_termios = atmel_set_termios,
.set_ldisc = atmel_set_ldisc,
.type = atmel_type,
.release_port = atmel_release_port,
.request_port = atmel_request_port,
.config_port = atmel_config_port,
.verify_port = atmel_verify_port,
.pm = atmel_serial_pm,
.ioctl = atmel_ioctl,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = atmel_poll_get_char,
.poll_put_char = atmel_poll_put_char,
#endif
};
/*
* Configure the port from the platform device resource info.
*/
static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
struct platform_device *pdev)
{
struct uart_port *port = &atmel_port->uart;
struct atmel_uart_data *data = pdev->dev.platform_data;
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &atmel_pops;
port->fifosize = 1;
port->line = data->num;
port->dev = &pdev->dev;
port->mapbase = pdev->resource[0].start;
port->irq = pdev->resource[1].start;
tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
(unsigned long)port);
memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
if (data->regs)
/* Already mapped by setup code */
port->membase = data->regs;
else {
port->flags |= UPF_IOREMAP;
port->membase = NULL;
}
/* for console, the clock could already be configured */
if (!atmel_port->clk) {
atmel_port->clk = clk_get(&pdev->dev, "usart");
clk_enable(atmel_port->clk);
port->uartclk = clk_get_rate(atmel_port->clk);
clk_disable(atmel_port->clk);
/* only enable clock when USART is in use */
}
atmel_port->use_dma_rx = data->use_dma_rx;
atmel_port->use_dma_tx = data->use_dma_tx;
atmel_port->rs485 = data->rs485;
/* Use TXEMPTY for the interrupt in RS485 mode, otherwise TXRDY or ENDTX|TXBUFE */
if (atmel_port->rs485.flags & SER_RS485_ENABLED)
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
else if (atmel_use_dma_tx(port)) {
port->fifosize = PDC_BUFFER_SIZE;
atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
} else {
atmel_port->tx_done_mask = ATMEL_US_TXRDY;
}
}
/*
* Register board-specific modem-control line handlers.
*/
void __init atmel_register_uart_fns(struct atmel_port_fns *fns)
{
if (fns->enable_ms)
atmel_pops.enable_ms = fns->enable_ms;
if (fns->get_mctrl)
atmel_pops.get_mctrl = fns->get_mctrl;
if (fns->set_mctrl)
atmel_pops.set_mctrl = fns->set_mctrl;
atmel_open_hook = fns->open;
atmel_close_hook = fns->close;
atmel_pops.pm = fns->pm;
atmel_pops.set_wake = fns->set_wake;
}
#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
static void atmel_console_putchar(struct uart_port *port, int ch)
{
while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
cpu_relax();
UART_PUT_CHAR(port, ch);
}
/*
* Interrupts are disabled on entering
*/
static void atmel_console_write(struct console *co, const char *s, u_int count)
{
struct uart_port *port = &atmel_ports[co->index].uart;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status, imr;
unsigned int pdc_tx;
/*
* First, save IMR and then disable interrupts
*/
imr = UART_GET_IMR(port);
UART_PUT_IDR(port, ATMEL_US_RXRDY | atmel_port->tx_done_mask);
/* Store PDC transmit status and disable it */
pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN;
UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
uart_console_write(port, s, count, atmel_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore IMR
*/
do {
status = UART_GET_CSR(port);
} while (!(status & ATMEL_US_TXRDY));
/* Restore PDC transmit status */
if (pdc_tx)
UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
/* set interrupts back the way they were */
UART_PUT_IER(port, imr);
}
/*
* If the port was already initialised (eg, by a boot loader),
* try to determine the current setup.
*/
static void __init atmel_console_get_options(struct uart_port *port, int *baud,
int *parity, int *bits)
{
unsigned int mr, quot;
/*
* If the baud rate generator isn't running, the port wasn't
* initialized by the boot loader.
*/
quot = UART_GET_BRGR(port) & ATMEL_US_CD;
if (!quot)
return;
mr = UART_GET_MR(port) & ATMEL_US_CHRL;
if (mr == ATMEL_US_CHRL_8)
*bits = 8;
else
*bits = 7;
mr = UART_GET_MR(port) & ATMEL_US_PAR;
if (mr == ATMEL_US_PAR_EVEN)
*parity = 'e';
else if (mr == ATMEL_US_PAR_ODD)
*parity = 'o';
/*
* The serial core only rounds down when matching this to a
* supported baud rate. Make sure we don't end up slightly
* lower than one of those, as it would make us fall through
* to a much lower baud rate than we really want.
*/
*baud = port->uartclk / (16 * (quot - 1));
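/*
 * Illustrative example (made-up numbers): with an 18.432 MHz clock and
 * CD = 10 the real rate is 115200; reporting uartclk / (16 * 9) = 128000
 * instead lets the serial core round down to 115200 rather than below it.
 */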
}
static int __init atmel_console_setup(struct console *co, char *options)
{
struct uart_port *port = &atmel_ports[co->index].uart;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (port->membase == NULL) {
/* Port not initialized yet - delay setup */
return -ENODEV;
}
clk_enable(atmel_ports[co->index].clk);
UART_PUT_IDR(port, -1);
UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
atmel_console_get_options(port, &baud, &parity, &bits);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct uart_driver atmel_uart;
static struct console atmel_console = {
.name = ATMEL_DEVICENAME,
.write = atmel_console_write,
.device = uart_console_device,
.setup = atmel_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &atmel_uart,
};
#define ATMEL_CONSOLE_DEVICE (&atmel_console)
/*
* Early console initialization (before VM subsystem initialized).
*/
static int __init atmel_console_init(void)
{
if (atmel_default_console_device) {
add_preferred_console(ATMEL_DEVICENAME,
atmel_default_console_device->id, NULL);
atmel_init_port(&atmel_ports[atmel_default_console_device->id],
atmel_default_console_device);
register_console(&atmel_console);
}
return 0;
}
console_initcall(atmel_console_init);
/*
* Late console initialization.
*/
static int __init atmel_late_console_init(void)
{
if (atmel_default_console_device
&& !(atmel_console.flags & CON_ENABLED))
register_console(&atmel_console);
return 0;
}
core_initcall(atmel_late_console_init);
static inline bool atmel_is_console_port(struct uart_port *port)
{
return port->cons && port->cons->index == port->line;
}
#else
#define ATMEL_CONSOLE_DEVICE NULL
static inline bool atmel_is_console_port(struct uart_port *port)
{
return false;
}
#endif
static struct uart_driver atmel_uart = {
.owner = THIS_MODULE,
.driver_name = "atmel_serial",
.dev_name = ATMEL_DEVICENAME,
.major = SERIAL_ATMEL_MAJOR,
.minor = MINOR_START,
.nr = ATMEL_MAX_UART,
.cons = ATMEL_CONSOLE_DEVICE,
};
#ifdef CONFIG_PM
static bool atmel_serial_clk_will_stop(void)
{
#ifdef CONFIG_ARCH_AT91
return at91_suspend_entering_slow_clock();
#else
return false;
#endif
}
static int atmel_serial_suspend(struct platform_device *pdev,
pm_message_t state)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_is_console_port(port) && console_suspend_enabled) {
/* Drain the TX shifter */
while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY))
cpu_relax();
}
/* we can not wake up if we're running on slow clock */
atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
if (atmel_serial_clk_will_stop())
device_set_wakeup_enable(&pdev->dev, 0);
uart_suspend_port(&atmel_uart, port);
return 0;
}
static int atmel_serial_resume(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
uart_resume_port(&atmel_uart, port);
device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
return 0;
}
#else
#define atmel_serial_suspend NULL
#define atmel_serial_resume NULL
#endif
static int __devinit atmel_serial_probe(struct platform_device *pdev)
{
struct atmel_uart_port *port;
struct atmel_uart_data *pdata = pdev->dev.platform_data;
void *data;
int ret;
BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
port = &atmel_ports[pdata->num];
port->backup_imr = 0;
atmel_init_port(port, pdev);
if (!atmel_use_dma_rx(&port->uart)) {
ret = -ENOMEM;
data = kmalloc(sizeof(struct atmel_uart_char)
* ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
if (!data)
goto err_alloc_ring;
port->rx_ring.buf = data;
}
ret = uart_add_one_port(&atmel_uart, &port->uart);
if (ret)
goto err_add_port;
#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
if (atmel_is_console_port(&port->uart)
&& ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
/*
* The serial core enabled the clock for us, so undo
* the clk_enable() in atmel_console_setup()
*/
clk_disable(port->clk);
}
#endif
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, port);
if (port->rs485.flags & SER_RS485_ENABLED) {
UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
}
return 0;
err_add_port:
kfree(port->rx_ring.buf);
port->rx_ring.buf = NULL;
err_alloc_ring:
if (!atmel_is_console_port(&port->uart)) {
clk_put(port->clk);
port->clk = NULL;
}
return ret;
}
static int __devexit atmel_serial_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int ret = 0;
device_init_wakeup(&pdev->dev, 0);
platform_set_drvdata(pdev, NULL);
ret = uart_remove_one_port(&atmel_uart, port);
tasklet_kill(&atmel_port->tasklet);
kfree(atmel_port->rx_ring.buf);
/* "port" is allocated statically, so we shouldn't free it */
clk_put(atmel_port->clk);
return ret;
}
static struct platform_driver atmel_serial_driver = {
.probe = atmel_serial_probe,
.remove = __devexit_p(atmel_serial_remove),
.suspend = atmel_serial_suspend,
.resume = atmel_serial_resume,
.driver = {
.name = "atmel_usart",
.owner = THIS_MODULE,
},
};
static int __init atmel_serial_init(void)
{
int ret;
ret = uart_register_driver(&atmel_uart);
if (ret)
return ret;
ret = platform_driver_register(&atmel_serial_driver);
if (ret)
uart_unregister_driver(&atmel_uart);
return ret;
}
static void __exit atmel_serial_exit(void)
{
platform_driver_unregister(&atmel_serial_driver);
uart_unregister_driver(&atmel_uart);
}
module_init(atmel_serial_init);
module_exit(atmel_serial_exit);
MODULE_AUTHOR("Rick Bronson");
MODULE_DESCRIPTION("Atmel AT91 / AT32 serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_usart");
| gpl-2.0 |
hexiaolong2008/linux-arm | drivers/gpio/gpio-ich.c | 753 | 13978 | /*
* Intel ICH6-10, Series 5 and 6, Atom C2000 (Avoton/Rangeley) GPIO driver
*
* Copyright (C) 2010 Extreme Engineering Solutions.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/mfd/lpc_ich.h>
#define DRV_NAME "gpio_ich"
/*
* GPIO register offsets in GPIO I/O space.
* Each chunk of 32 GPIOs is manipulated via its own USE_SELx, IO_SELx, and
* LVLx registers. Logic in the read/write functions takes a register and
* an absolute bit number and determines the proper register offset and bit
* number in that register. For example, to read the value of GPIO bit 50
* the code would access offset ichx_regs[2(=GPIO_LVL)][1(=50/32)],
* bit 18 (50%32).
*/
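/*
 * Illustrative sketch only (not part of the original driver): split an
 * absolute GPIO number into the register group index and the bit
 * position within that 32-bit register, mirroring the arithmetic the
 * read/write helpers below perform. The helper name is made up.
 */
static inline void ichx_nr_to_reg_bit(unsigned nr, int *reg_nr, int *bit)
{
*reg_nr = nr / 32; /* e.g. GPIO 50 -> group 1 */
*bit = nr & 0x1f; /* e.g. GPIO 50 -> bit 18 */
}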
enum GPIO_REG {
GPIO_USE_SEL = 0,
GPIO_IO_SEL,
GPIO_LVL,
GPO_BLINK
};
static const u8 ichx_regs[4][3] = {
{0x00, 0x30, 0x40}, /* USE_SEL[1-3] offsets */
{0x04, 0x34, 0x44}, /* IO_SEL[1-3] offsets */
{0x0c, 0x38, 0x48}, /* LVL[1-3] offsets */
{0x18, 0x18, 0x18}, /* BLINK offset */
};
static const u8 ichx_reglen[3] = {
0x30, 0x10, 0x10,
};
static const u8 avoton_regs[4][3] = {
{0x00, 0x80, 0x00},
{0x04, 0x84, 0x00},
{0x08, 0x88, 0x00},
};
static const u8 avoton_reglen[3] = {
0x10, 0x10, 0x00,
};
#define ICHX_WRITE(val, reg, base_res) outl(val, (reg) + (base_res)->start)
#define ICHX_READ(reg, base_res) inl((reg) + (base_res)->start)
struct ichx_desc {
/* Max GPIO pins the chipset can have */
uint ngpio;
/* chipset registers */
const u8 (*regs)[3];
const u8 *reglen;
/* GPO_BLINK is available on this chipset */
bool have_blink;
/* Whether the chipset has GPIO in GPE0_STS in the PM IO region */
bool uses_gpe0;
/* USE_SEL is bogus on some chipsets, eg 3100 */
u32 use_sel_ignore[3];
/* Some chipsets have quirks, let these use their own request/get */
int (*request)(struct gpio_chip *chip, unsigned offset);
int (*get)(struct gpio_chip *chip, unsigned offset);
/*
* Some chipsets don't let reading output values on GPIO_LVL register
* this option allows driver caching written output values
*/
bool use_outlvl_cache;
};
static struct {
spinlock_t lock;
struct platform_device *dev;
struct gpio_chip chip;
struct resource *gpio_base; /* GPIO IO base */
struct resource *pm_base; /* Power Management IO base */
struct ichx_desc *desc; /* Pointer to chipset-specific description */
u32 orig_gpio_ctrl; /* Orig CTRL value, used to restore on exit */
u8 use_gpio; /* Which GPIO groups are usable */
int outlvl_cache[3]; /* cached output values */
} ichx_priv;
static int modparam_gpiobase = -1; /* dynamic */
module_param_named(gpiobase, modparam_gpiobase, int, 0444);
MODULE_PARM_DESC(gpiobase, "The GPIO number base. -1 means dynamic, "
"which is the default.");
static int ichx_write_bit(int reg, unsigned nr, int val, int verify)
{
unsigned long flags;
u32 data, tmp;
int reg_nr = nr / 32;
int bit = nr & 0x1f;
int ret = 0;
spin_lock_irqsave(&ichx_priv.lock, flags);
if (reg == GPIO_LVL && ichx_priv.desc->use_outlvl_cache)
data = ichx_priv.outlvl_cache[reg_nr];
else
data = ICHX_READ(ichx_priv.desc->regs[reg][reg_nr],
ichx_priv.gpio_base);
if (val)
data |= 1 << bit;
else
data &= ~(1 << bit);
ICHX_WRITE(data, ichx_priv.desc->regs[reg][reg_nr],
ichx_priv.gpio_base);
if (reg == GPIO_LVL && ichx_priv.desc->use_outlvl_cache)
ichx_priv.outlvl_cache[reg_nr] = data;
tmp = ICHX_READ(ichx_priv.desc->regs[reg][reg_nr],
ichx_priv.gpio_base);
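/*
 * Read the register back: on pins that are input-only or output-only a
 * direction change will not stick, and callers pass verify=1 so they
 * can detect and report the failure.
 */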
if (verify && data != tmp)
ret = -EPERM;
spin_unlock_irqrestore(&ichx_priv.lock, flags);
return ret;
}
static int ichx_read_bit(int reg, unsigned nr)
{
unsigned long flags;
u32 data;
int reg_nr = nr / 32;
int bit = nr & 0x1f;
spin_lock_irqsave(&ichx_priv.lock, flags);
data = ICHX_READ(ichx_priv.desc->regs[reg][reg_nr],
ichx_priv.gpio_base);
if (reg == GPIO_LVL && ichx_priv.desc->use_outlvl_cache)
data = ichx_priv.outlvl_cache[reg_nr] | data;
spin_unlock_irqrestore(&ichx_priv.lock, flags);
return data & (1 << bit) ? 1 : 0;
}
static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
{
return !!(ichx_priv.use_gpio & (1 << (nr / 32)));
}
static int ichx_gpio_get_direction(struct gpio_chip *gpio, unsigned nr)
{
return ichx_read_bit(GPIO_IO_SEL, nr) ? GPIOF_DIR_IN : GPIOF_DIR_OUT;
}
static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
{
/*
* Try setting pin as an input and verify it worked since many pins
* are output-only.
*/
if (ichx_write_bit(GPIO_IO_SEL, nr, 1, 1))
return -EINVAL;
return 0;
}
static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
int val)
{
/* Disable blink hardware which is available for GPIOs from 0 to 31. */
if (nr < 32 && ichx_priv.desc->have_blink)
ichx_write_bit(GPO_BLINK, nr, 0, 0);
/* Set GPIO output value. */
ichx_write_bit(GPIO_LVL, nr, val, 0);
/*
* Try setting pin as an output and verify it worked since many pins
* are input-only.
*/
if (ichx_write_bit(GPIO_IO_SEL, nr, 0, 1))
return -EINVAL;
return 0;
}
static int ichx_gpio_get(struct gpio_chip *chip, unsigned nr)
{
return ichx_read_bit(GPIO_LVL, nr);
}
static int ich6_gpio_get(struct gpio_chip *chip, unsigned nr)
{
unsigned long flags;
u32 data;
/*
* GPI 0 - 15 need to be read from the power management registers on
* a ICH6/3100 bridge.
*/
if (nr < 16) {
if (!ichx_priv.pm_base)
return -ENXIO;
spin_lock_irqsave(&ichx_priv.lock, flags);
/* GPI 0 - 15 are latched, write 1 to clear*/
ICHX_WRITE(1 << (16 + nr), 0, ichx_priv.pm_base);
data = ICHX_READ(0, ichx_priv.pm_base);
spin_unlock_irqrestore(&ichx_priv.lock, flags);
return (data >> 16) & (1 << nr) ? 1 : 0;
} else {
return ichx_gpio_get(chip, nr);
}
}
static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
{
if (!ichx_gpio_check_available(chip, nr))
return -ENXIO;
/*
* Note we assume the BIOS properly set a bridge's USE value. Some
* chips (eg Intel 3100) have bogus USE values though, so first see if
* the chipset's USE value can be trusted for this specific bit.
* If it can't be trusted, assume that the pin can be used as a GPIO.
*/
if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f)))
return 0;
return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV;
}
static int ich6_gpio_request(struct gpio_chip *chip, unsigned nr)
{
/*
* Fixups for bits 16 and 17 are necessary on the Intel ICH6/3100
* bridge as they are controlled by USE register bits 0 and 1. See
* "Table 704 GPIO_USE_SEL1 register" in the i3100 datasheet for
* additional info.
*/
if (nr == 16 || nr == 17)
nr -= 16;
return ichx_gpio_request(chip, nr);
}
static void ichx_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
{
ichx_write_bit(GPIO_LVL, nr, val, 0);
}
static void ichx_gpiolib_setup(struct gpio_chip *chip)
{
chip->owner = THIS_MODULE;
chip->label = DRV_NAME;
chip->dev = &ichx_priv.dev->dev;
/* Allow chip-specific overrides of request()/get() */
chip->request = ichx_priv.desc->request ?
ichx_priv.desc->request : ichx_gpio_request;
chip->get = ichx_priv.desc->get ?
ichx_priv.desc->get : ichx_gpio_get;
chip->set = ichx_gpio_set;
chip->get_direction = ichx_gpio_get_direction;
chip->direction_input = ichx_gpio_direction_input;
chip->direction_output = ichx_gpio_direction_output;
chip->base = modparam_gpiobase;
chip->ngpio = ichx_priv.desc->ngpio;
chip->can_sleep = false;
chip->dbg_show = NULL;
}
/* ICH6-based, 631xesb-based */
static struct ichx_desc ich6_desc = {
/* Bridges using the ICH6 controller need fixups for GPIO 0 - 17 */
.request = ich6_gpio_request,
.get = ich6_gpio_get,
/* GPIO 0-15 are read in the GPE0_STS PM register */
.uses_gpe0 = true,
.ngpio = 50,
.have_blink = true,
.regs = ichx_regs,
.reglen = ichx_reglen,
};
/* Intel 3100 */
static struct ichx_desc i3100_desc = {
/*
* Bits 16, 17 and 20 of USE_SEL and bit 16 of USE_SEL2 always read 0 on
* the Intel 3100. See "Table 712. GPIO Summary Table" of 3100
* Datasheet for more info.
*/
.use_sel_ignore = {0x00130000, 0x00010000, 0x0},
/* The 3100 needs fixups for GPIO 0 - 17 */
.request = ich6_gpio_request,
.get = ich6_gpio_get,
/* GPIO 0-15 are read in the GPE0_STS PM register */
.uses_gpe0 = true,
.ngpio = 50,
.regs = ichx_regs,
.reglen = ichx_reglen,
};
/* ICH7 and ICH8-based */
static struct ichx_desc ich7_desc = {
.ngpio = 50,
.have_blink = true,
.regs = ichx_regs,
.reglen = ichx_reglen,
};
/* ICH9-based */
static struct ichx_desc ich9_desc = {
.ngpio = 61,
.have_blink = true,
.regs = ichx_regs,
.reglen = ichx_reglen,
};
/* ICH10-based - Consumer/corporate versions have different amount of GPIO */
static struct ichx_desc ich10_cons_desc = {
.ngpio = 61,
.have_blink = true,
.regs = ichx_regs,
.reglen = ichx_reglen,
};
static struct ichx_desc ich10_corp_desc = {
.ngpio = 72,
.have_blink = true,
.regs = ichx_regs,
.reglen = ichx_reglen,
};
/* Intel 5 series, 6 series, 3400 series, and C200 series */
static struct ichx_desc intel5_desc = {
.ngpio = 76,
.regs = ichx_regs,
.reglen = ichx_reglen,
};
/* Avoton */
static struct ichx_desc avoton_desc = {
/* Avoton has only 59 GPIOs, but we assume the first register set
* (Core) has 32 instead of 31 to keep the gpio-ich numbering consistent
*/
.ngpio = 60,
.regs = avoton_regs,
.reglen = avoton_reglen,
.use_outlvl_cache = true,
};
static int ichx_gpio_request_regions(struct resource *res_base,
const char *name, u8 use_gpio)
{
int i;
if (!res_base || !res_base->start || !res_base->end)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(ichx_priv.desc->regs[0]); i++) {
if (!(use_gpio & (1 << i)))
continue;
if (!request_region(
res_base->start + ichx_priv.desc->regs[0][i],
ichx_priv.desc->reglen[i], name))
goto request_err;
}
return 0;
request_err:
/* Clean up: release already requested regions, if any */
for (i--; i >= 0; i--) {
if (!(use_gpio & (1 << i)))
continue;
release_region(res_base->start + ichx_priv.desc->regs[0][i],
ichx_priv.desc->reglen[i]);
}
return -EBUSY;
}
static void ichx_gpio_release_regions(struct resource *res_base, u8 use_gpio)
{
int i;
for (i = 0; i < ARRAY_SIZE(ichx_priv.desc->regs[0]); i++) {
if (!(use_gpio & (1 << i)))
continue;
release_region(res_base->start + ichx_priv.desc->regs[0][i],
ichx_priv.desc->reglen[i]);
}
}
static int ichx_gpio_probe(struct platform_device *pdev)
{
struct resource *res_base, *res_pm;
int err;
struct lpc_ich_info *ich_info = dev_get_platdata(&pdev->dev);
if (!ich_info)
return -ENODEV;
ichx_priv.dev = pdev;
switch (ich_info->gpio_version) {
case ICH_I3100_GPIO:
ichx_priv.desc = &i3100_desc;
break;
case ICH_V5_GPIO:
ichx_priv.desc = &intel5_desc;
break;
case ICH_V6_GPIO:
ichx_priv.desc = &ich6_desc;
break;
case ICH_V7_GPIO:
ichx_priv.desc = &ich7_desc;
break;
case ICH_V9_GPIO:
ichx_priv.desc = &ich9_desc;
break;
case ICH_V10CORP_GPIO:
ichx_priv.desc = &ich10_corp_desc;
break;
case ICH_V10CONS_GPIO:
ichx_priv.desc = &ich10_cons_desc;
break;
case AVOTON_GPIO:
ichx_priv.desc = &avoton_desc;
break;
default:
return -ENODEV;
}
spin_lock_init(&ichx_priv.lock);
res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO);
ichx_priv.use_gpio = ich_info->use_gpio;
err = ichx_gpio_request_regions(res_base, pdev->name,
ichx_priv.use_gpio);
if (err)
return err;
ichx_priv.gpio_base = res_base;
/*
* If necessary, determine the I/O address of ACPI/power management
* registers which are needed to read the GPE0 register for GPI pins
* 0 - 15 on some chipsets.
*/
if (!ichx_priv.desc->uses_gpe0)
goto init;
res_pm = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPE0);
if (!res_pm) {
pr_warn("ACPI BAR is unavailable, GPI 0 - 15 unavailable\n");
goto init;
}
if (!request_region(res_pm->start, resource_size(res_pm),
pdev->name)) {
pr_warn("ACPI BAR is busy, GPI 0 - 15 unavailable\n");
goto init;
}
ichx_priv.pm_base = res_pm;
init:
ichx_gpiolib_setup(&ichx_priv.chip);
err = gpiochip_add(&ichx_priv.chip);
if (err) {
pr_err("Failed to register GPIOs\n");
goto add_err;
}
pr_info("GPIO from %d to %d on %s\n", ichx_priv.chip.base,
ichx_priv.chip.base + ichx_priv.chip.ngpio - 1, DRV_NAME);
return 0;
add_err:
ichx_gpio_release_regions(ichx_priv.gpio_base, ichx_priv.use_gpio);
if (ichx_priv.pm_base)
release_region(ichx_priv.pm_base->start,
resource_size(ichx_priv.pm_base));
return err;
}
static int ichx_gpio_remove(struct platform_device *pdev)
{
gpiochip_remove(&ichx_priv.chip);
ichx_gpio_release_regions(ichx_priv.gpio_base, ichx_priv.use_gpio);
if (ichx_priv.pm_base)
release_region(ichx_priv.pm_base->start,
resource_size(ichx_priv.pm_base));
return 0;
}
static struct platform_driver ichx_gpio_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = ichx_gpio_probe,
.remove = ichx_gpio_remove,
};
module_platform_driver(ichx_gpio_driver);
MODULE_AUTHOR("Peter Tyser <ptyser@xes-inc.com>");
MODULE_DESCRIPTION("GPIO interface for Intel ICH series");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:"DRV_NAME);
| gpl-2.0 |
cleaton/liquid_kernel | drivers/mtd/maps/ichxrom.c | 753 | 9954 | /*
* ichxrom.c
*
* Normal mappings of chips in physical memory
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/flashchip.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/list.h>
#define xstr(s) str(s)
#define str(s) #s
#define MOD_NAME xstr(KBUILD_BASENAME)
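/*
 * Illustrative note: the two-step str()/xstr() indirection expands
 * KBUILD_BASENAME first, so MOD_NAME becomes the string literal of the
 * basename (e.g. "ichxrom") rather than the text "KBUILD_BASENAME".
 */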
#define ADDRESS_NAME_LEN 18
#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
#define BIOS_CNTL 0x4e
#define FWH_DEC_EN1 0xE3
#define FWH_DEC_EN2 0xF0
#define FWH_SEL1 0xE8
#define FWH_SEL2 0xEE
struct ichxrom_window {
void __iomem* virt;
unsigned long phys;
unsigned long size;
struct list_head maps;
struct resource rsrc;
struct pci_dev *pdev;
};
struct ichxrom_map_info {
struct list_head list;
struct map_info map;
struct mtd_info *mtd;
struct resource rsrc;
char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
};
static struct ichxrom_window ichxrom_window = {
.maps = LIST_HEAD_INIT(ichxrom_window.maps),
};
static void ichxrom_cleanup(struct ichxrom_window *window)
{
struct ichxrom_map_info *map, *scratch;
u16 word;
/* Disable writes through the rom window */
pci_read_config_word(window->pdev, BIOS_CNTL, &word);
pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
pci_dev_put(window->pdev);
/* Free all of the mtd devices */
list_for_each_entry_safe(map, scratch, &window->maps, list) {
if (map->rsrc.parent)
release_resource(&map->rsrc);
del_mtd_device(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
}
if (window->rsrc.parent)
release_resource(&window->rsrc);
if (window->virt) {
iounmap(window->virt);
window->virt = NULL;
window->phys = 0;
window->size = 0;
window->pdev = NULL;
}
}
static int __devinit ichxrom_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct ichxrom_window *window = &ichxrom_window;
struct ichxrom_map_info *map = NULL;
unsigned long map_top;
u8 byte;
u16 word;
/* For now I just handle the ichx and I assume there
* are not a lot of resources up at the top of the address
* space. It is possible to handle other devices in the
* top 16MB but it is very painful. Also since
* you can only really attach a FWH to an ICHX, there are
* a number of simplifications you can make.
*
* Also you can page firmware hubs if an 8MB window isn't enough
* but we don't currently handle that case either.
*/
window->pdev = pdev;
/* Find a region contiguous with the end of the ROM window */
window->phys = 0;
pci_read_config_byte(pdev, FWH_DEC_EN1, &byte);
if (byte == 0xff) {
window->phys = 0xffc00000;
pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
if ((byte & 0x0f) == 0x0f) {
window->phys = 0xff400000;
}
else if ((byte & 0x0e) == 0x0e) {
window->phys = 0xff500000;
}
else if ((byte & 0x0c) == 0x0c) {
window->phys = 0xff600000;
}
else if ((byte & 0x08) == 0x08) {
window->phys = 0xff700000;
}
}
else if ((byte & 0xfe) == 0xfe) {
window->phys = 0xffc80000;
}
else if ((byte & 0xfc) == 0xfc) {
window->phys = 0xffd00000;
}
else if ((byte & 0xf8) == 0xf8) {
window->phys = 0xffd80000;
}
else if ((byte & 0xf0) == 0xf0) {
window->phys = 0xffe00000;
}
else if ((byte & 0xe0) == 0xe0) {
window->phys = 0xffe80000;
}
else if ((byte & 0xc0) == 0xc0) {
window->phys = 0xfff00000;
}
else if ((byte & 0x80) == 0x80) {
window->phys = 0xfff80000;
}
if (window->phys == 0) {
printk(KERN_ERR MOD_NAME ": Rom window is closed\n");
goto out;
}
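/*
 * Widen the window: back the start off by 4 MiB and size it up to the
 * top of the 32-bit address space, so the probe loop below can walk the
 * whole decoded ROM region.
 */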
window->phys -= 0x400000UL;
window->size = (0xffffffffUL - window->phys) + 1UL;
/* Enable writes through the rom window */
pci_read_config_word(pdev, BIOS_CNTL, &word);
if (!(word & 1) && (word & (1<<1))) {
/* The BIOS will generate an error if I enable
* this device, so don't even try.
*/
printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n");
goto out;
}
pci_write_config_word(pdev, BIOS_CNTL, word | 1);
/*
* Try to reserve the window mem region. If this fails then
* it is likely due to the window being "reserved" by the BIOS.
*/
window->rsrc.name = MOD_NAME;
window->rsrc.start = window->phys;
window->rsrc.end = window->phys + window->size - 1;
window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
printk(KERN_DEBUG MOD_NAME
": %s(): Unable to register resource"
" 0x%.16llx-0x%.16llx - kernel bug?\n",
__func__,
(unsigned long long)window->rsrc.start,
(unsigned long long)window->rsrc.end);
}
/* Map the firmware hub into my address space. */
window->virt = ioremap_nocache(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
goto out;
}
/* Get the first address to look for a ROM chip at */
map_top = window->phys;
if ((window->phys & 0x3fffff) != 0) {
map_top = window->phys + 0x400000;
}
#if 1
/* The probe sequence, when run over the firmware hub lock
* registers, sets them to 0x7 (no access).
* Probe at most the last 4M of the address space.
*/
if (map_top < 0xffc00000) {
map_top = 0xffc00000;
}
#endif
/* Loop through and look for rom chips */
while((map_top - 1) < 0xffffffffUL) {
struct cfi_private *cfi;
unsigned long offset;
int i;
if (!map) {
map = kmalloc(sizeof(*map), GFP_KERNEL);
}
if (!map) {
printk(KERN_ERR MOD_NAME ": kmalloc failed");
goto out;
}
memset(map, 0, sizeof(*map));
INIT_LIST_HEAD(&map->list);
map->map.name = map->map_name;
map->map.phys = map_top;
offset = map_top - window->phys;
map->map.virt = (void __iomem *)
(((unsigned long)(window->virt)) + offset);
map->map.size = 0xffffffffUL - map_top + 1UL;
/* Set the name of the map to the address I am trying */
sprintf(map->map_name, "%s @%08Lx",
MOD_NAME, (unsigned long long)map->map.phys);
/* Firmware hubs only use vpp when being programmed
* in a factory setting. So in-place programming
* needs to use a different method.
*/
for(map->map.bankwidth = 32; map->map.bankwidth;
map->map.bankwidth >>= 1)
{
char **probe_type;
/* Skip bankwidths that are not supported */
if (!map_bankwidth_supported(map->map.bankwidth))
continue;
/* Setup the map methods */
simple_map_init(&map->map);
/* Try all of the probe methods */
probe_type = rom_probe_types;
for(; *probe_type; probe_type++) {
map->mtd = do_map_probe(*probe_type, &map->map);
if (map->mtd)
goto found;
}
}
map_top += ROM_PROBE_STEP_SIZE;
continue;
found:
/* Trim the size if we are larger than the map */
if (map->mtd->size > map->map.size) {
printk(KERN_WARNING MOD_NAME
" rom(%llu) larger than window(%lu). fixing...\n",
(unsigned long long)map->mtd->size, map->map.size);
map->mtd->size = map->map.size;
}
if (window->rsrc.parent) {
/*
* Registering the MTD device in iomem may not be possible
* if there is a BIOS "reserved" and BUSY range. If this
* fails then continue anyway.
*/
map->rsrc.name = map->map_name;
map->rsrc.start = map->map.phys;
map->rsrc.end = map->map.phys + map->mtd->size - 1;
map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&window->rsrc, &map->rsrc)) {
printk(KERN_ERR MOD_NAME
": cannot reserve MTD resource\n");
map->rsrc.parent = NULL;
}
}
/* Make the whole region visible in the map */
map->map.virt = window->virt;
map->map.phys = window->phys;
cfi = map->map.fldrv_priv;
for(i = 0; i < cfi->numchips; i++) {
cfi->chips[i].start += offset;
}
/* Now that the mtd device is complete, claim and export it */
map->mtd->owner = THIS_MODULE;
if (add_mtd_device(map->mtd)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
}
/* Calculate the new value of map_top */
map_top += map->mtd->size;
/* File away the map structure */
list_add(&map->list, &window->maps);
map = NULL;
}
out:
/* Free any left over map structures */
kfree(map);
/* See if I have any map structures */
if (list_empty(&window->maps)) {
ichxrom_cleanup(window);
return -ENODEV;
}
return 0;
}
static void __devexit ichxrom_remove_one (struct pci_dev *pdev)
{
struct ichxrom_window *window = &ichxrom_window;
ichxrom_cleanup(window);
}
static struct pci_device_id ichxrom_pci_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0, },
};
#if 0
MODULE_DEVICE_TABLE(pci, ichxrom_pci_tbl);
static struct pci_driver ichxrom_driver = {
.name = MOD_NAME,
.id_table = ichxrom_pci_tbl,
.probe = ichxrom_init_one,
.remove = ichxrom_remove_one,
};
#endif
static int __init init_ichxrom(void)
{
struct pci_dev *pdev;
struct pci_device_id *id;
pdev = NULL;
for (id = ichxrom_pci_tbl; id->vendor; id++) {
pdev = pci_get_device(id->vendor, id->device, NULL);
if (pdev) {
break;
}
}
if (pdev) {
return ichxrom_init_one(pdev, &ichxrom_pci_tbl[0]);
}
return -ENXIO;
#if 0
return pci_register_driver(&ichxrom_driver);
#endif
}
static void __exit cleanup_ichxrom(void)
{
ichxrom_remove_one(ichxrom_window.pdev);
}
module_init(init_ichxrom);
module_exit(cleanup_ichxrom);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ICHX southbridge");
| gpl-2.0 |
agat63/E4GT_ICS_kernel | drivers/tty/serial/atmel_serial.c | 753 | 45961 | /*
* Driver for Atmel AT91 / AT32 Serial ports
* Copyright (C) 2003 Rick Bronson
*
* Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* DMA support added by Chip Coldwell.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/atmel_pdc.h>
#include <linux/atmel_serial.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/ioctls.h>
#include <asm/mach/serial_at91.h>
#include <mach/board.h>
#ifdef CONFIG_ARM
#include <mach/cpu.h>
#include <mach/gpio.h>
#endif
#define PDC_BUFFER_SIZE 512
/* Revisit: We should calculate this based on the actual port settings */
#define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */
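/*
 * Illustrative note: the receiver time-out counts bit periods, so
 * 3 * 10 is roughly three character times at 8N1 (one start, eight data
 * and one stop bit per character).
 */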
#if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/serial_core.h>
static void atmel_start_rx(struct uart_port *port);
static void atmel_stop_rx(struct uart_port *port);
#ifdef CONFIG_SERIAL_ATMEL_TTYAT
/* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
* need to coexist with the 8250 driver, such as if we have an external 16C550
* UART. */
#define SERIAL_ATMEL_MAJOR 204
#define MINOR_START 154
#define ATMEL_DEVICENAME "ttyAT"
#else
/* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
* name, but it is legally reserved for the 8250 driver. */
#define SERIAL_ATMEL_MAJOR TTY_MAJOR
#define MINOR_START 64
#define ATMEL_DEVICENAME "ttyS"
#endif
#define ATMEL_ISR_PASS_LIMIT 256
/* UART registers. CR is write-only, hence no GET macro */
#define UART_PUT_CR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_CR)
#define UART_GET_MR(port) __raw_readl((port)->membase + ATMEL_US_MR)
#define UART_PUT_MR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_MR)
#define UART_PUT_IER(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IER)
#define UART_PUT_IDR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IDR)
#define UART_GET_IMR(port) __raw_readl((port)->membase + ATMEL_US_IMR)
#define UART_GET_CSR(port) __raw_readl((port)->membase + ATMEL_US_CSR)
#define UART_GET_CHAR(port) __raw_readl((port)->membase + ATMEL_US_RHR)
#define UART_PUT_CHAR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_THR)
#define UART_GET_BRGR(port) __raw_readl((port)->membase + ATMEL_US_BRGR)
#define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR)
#define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR)
#define UART_PUT_TTGR(port, v) __raw_writel(v, (port)->membase + ATMEL_US_TTGR)
/* PDC registers */
#define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
#define UART_GET_PTSR(port) __raw_readl((port)->membase + ATMEL_PDC_PTSR)
#define UART_PUT_RPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RPR)
#define UART_GET_RPR(port) __raw_readl((port)->membase + ATMEL_PDC_RPR)
#define UART_PUT_RCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RCR)
#define UART_PUT_RNPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RNPR)
#define UART_PUT_RNCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RNCR)
#define UART_PUT_TPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TPR)
#define UART_PUT_TCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TCR)
#define UART_GET_TCR(port) __raw_readl((port)->membase + ATMEL_PDC_TCR)
static int (*atmel_open_hook)(struct uart_port *);
static void (*atmel_close_hook)(struct uart_port *);
struct atmel_dma_buffer {
unsigned char *buf;
dma_addr_t dma_addr;
unsigned int dma_size;
unsigned int ofs;
};
struct atmel_uart_char {
u16 status;
u16 ch;
};
#define ATMEL_SERIAL_RINGSIZE 1024
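/*
 * Illustrative note: the ring size must stay a power of two so that
 * "(head + 1) & (ATMEL_SERIAL_RINGSIZE - 1)" wraps correctly; the probe
 * code enforces this with a BUILD_BUG_ON().
 */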
/*
* We wrap our port structure around the generic uart_port.
*/
struct atmel_uart_port {
struct uart_port uart; /* uart */
struct clk *clk; /* uart clock */
int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */
u32 backup_imr; /* IMR saved during suspend */
int break_active; /* break being received */
short use_dma_rx; /* enable PDC receiver */
short pdc_rx_idx; /* current PDC RX buffer */
struct atmel_dma_buffer pdc_rx[2]; /* PDC receiver */
short use_dma_tx; /* enable PDC transmitter */
struct atmel_dma_buffer pdc_tx; /* PDC transmitter */
struct tasklet_struct tasklet;
unsigned int irq_status;
unsigned int irq_status_prev;
struct circ_buf rx_ring;
struct serial_rs485 rs485; /* rs485 settings */
unsigned int tx_done_mask;
};
static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
#ifdef SUPPORT_SYSRQ
static struct console atmel_console;
#endif
static inline struct atmel_uart_port *
to_atmel_uart_port(struct uart_port *uart)
{
return container_of(uart, struct atmel_uart_port, uart);
}
#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_dma_rx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
return atmel_port->use_dma_rx;
}
static bool atmel_use_dma_tx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
return atmel_port->use_dma_tx;
}
#else
static bool atmel_use_dma_rx(struct uart_port *port)
{
return false;
}
static bool atmel_use_dma_tx(struct uart_port *port)
{
return false;
}
#endif
/* Enable or disable the rs485 support */
void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int mode;
spin_lock(&port->lock);
/* Disable interrupts */
UART_PUT_IDR(port, atmel_port->tx_done_mask);
mode = UART_GET_MR(port);
/* Resetting serial mode to RS232 (0x0) */
mode &= ~ATMEL_US_USMODE;
atmel_port->rs485 = *rs485conf;
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
if (atmel_use_dma_tx(port))
atmel_port->tx_done_mask = ATMEL_US_ENDTX |
ATMEL_US_TXBUFE;
else
atmel_port->tx_done_mask = ATMEL_US_TXRDY;
}
UART_PUT_MR(port, mode);
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
spin_unlock(&port->lock);
}
/*
* Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty.
*/
static u_int atmel_tx_empty(struct uart_port *port)
{
return (UART_GET_CSR(port) & ATMEL_US_TXEMPTY) ? TIOCSER_TEMT : 0;
}
/*
* Set state of the modem control output lines
*/
static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
{
unsigned int control = 0;
unsigned int mode;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
#ifdef CONFIG_ARCH_AT91RM9200
if (cpu_is_at91rm9200()) {
/*
* AT91RM9200 Errata #39: RTS0 is not internally connected
* to PA21. We need to drive the pin manually.
*/
if (port->mapbase == AT91RM9200_BASE_US0) {
if (mctrl & TIOCM_RTS)
at91_set_gpio_value(AT91_PIN_PA21, 0);
else
at91_set_gpio_value(AT91_PIN_PA21, 1);
}
}
#endif
if (mctrl & TIOCM_RTS)
control |= ATMEL_US_RTSEN;
else
control |= ATMEL_US_RTSDIS;
if (mctrl & TIOCM_DTR)
control |= ATMEL_US_DTREN;
else
control |= ATMEL_US_DTRDIS;
UART_PUT_CR(port, control);
/* Local loopback mode? */
mode = UART_GET_MR(port) & ~ATMEL_US_CHMODE;
if (mctrl & TIOCM_LOOP)
mode |= ATMEL_US_CHMODE_LOC_LOOP;
else
mode |= ATMEL_US_CHMODE_NORMAL;
/* Resetting serial mode to RS232 (0x0) */
mode &= ~ATMEL_US_USMODE;
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
UART_PUT_TTGR(port,
atmel_port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
}
UART_PUT_MR(port, mode);
}
/*
* Get state of the modem control input lines
*/
static u_int atmel_get_mctrl(struct uart_port *port)
{
unsigned int status, ret = 0;
status = UART_GET_CSR(port);
/*
* The control signals are active low.
*/
if (!(status & ATMEL_US_DCD))
ret |= TIOCM_CD;
if (!(status & ATMEL_US_CTS))
ret |= TIOCM_CTS;
if (!(status & ATMEL_US_DSR))
ret |= TIOCM_DSR;
if (!(status & ATMEL_US_RI))
ret |= TIOCM_RI;
return ret;
}
/*
* Stop transmitting.
*/
static void atmel_stop_tx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_dma_tx(port)) {
/* disable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
}
/* Disable interrupts */
UART_PUT_IDR(port, atmel_port->tx_done_mask);
if (atmel_port->rs485.flags & SER_RS485_ENABLED)
atmel_start_rx(port);
}
/*
* Start transmitting.
*/
static void atmel_start_tx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_dma_tx(port)) {
if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN)
/* The transmitter is already running. Yes, we
really need this.*/
return;
if (atmel_port->rs485.flags & SER_RS485_ENABLED)
atmel_stop_rx(port);
/* re-enable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
}
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
}
/*
* Start receiving - port is in the process of being opened.
*/
static void atmel_start_rx(struct uart_port *port)
{
UART_PUT_CR(port, ATMEL_US_RSTSTA); /* reset status and receiver */
if (atmel_use_dma_rx(port)) {
/* enable PDC controller */
UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
port->read_status_mask);
UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
} else {
UART_PUT_IER(port, ATMEL_US_RXRDY);
}
}
/*
* Stop receiving - port is in the process of being closed.
*/
static void atmel_stop_rx(struct uart_port *port)
{
if (atmel_use_dma_rx(port)) {
/* disable PDC receive */
UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
port->read_status_mask);
} else {
UART_PUT_IDR(port, ATMEL_US_RXRDY);
}
}
/*
* Enable modem status interrupts
*/
static void atmel_enable_ms(struct uart_port *port)
{
UART_PUT_IER(port, ATMEL_US_RIIC | ATMEL_US_DSRIC
| ATMEL_US_DCDIC | ATMEL_US_CTSIC);
}
/*
* Control the transmission of a break signal
*/
static void atmel_break_ctl(struct uart_port *port, int break_state)
{
if (break_state != 0)
UART_PUT_CR(port, ATMEL_US_STTBRK); /* start break */
else
UART_PUT_CR(port, ATMEL_US_STPBRK); /* stop break */
}
/*
* Stores the incoming character in the ring buffer
*/
static void
atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
unsigned int ch)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *ring = &atmel_port->rx_ring;
struct atmel_uart_char *c;
if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
/* Buffer overflow, ignore char */
return;
c = &((struct atmel_uart_char *)ring->buf)[ring->head];
c->status = status;
c->ch = ch;
/* Make sure the character is stored before we update head. */
smp_wmb();
ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
}
/*
* Deal with parity, framing and overrun errors.
*/
static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
{
/* clear error */
UART_PUT_CR(port, ATMEL_US_RSTSTA);
if (status & ATMEL_US_RXBRK) {
/* ignore side-effect */
status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
port->icount.brk++;
}
if (status & ATMEL_US_PARE)
port->icount.parity++;
if (status & ATMEL_US_FRAME)
port->icount.frame++;
if (status & ATMEL_US_OVRE)
port->icount.overrun++;
}
/*
* Characters received (called from interrupt handler)
*/
static void atmel_rx_chars(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status, ch;
status = UART_GET_CSR(port);
while (status & ATMEL_US_RXRDY) {
ch = UART_GET_CHAR(port);
/*
* note that the error handling code is
* out of the main execution path
*/
if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
| ATMEL_US_OVRE | ATMEL_US_RXBRK)
|| atmel_port->break_active)) {
/* clear error */
UART_PUT_CR(port, ATMEL_US_RSTSTA);
if (status & ATMEL_US_RXBRK
&& !atmel_port->break_active) {
atmel_port->break_active = 1;
UART_PUT_IER(port, ATMEL_US_RXBRK);
} else {
/*
* This is either the end-of-break
* condition or we've received at
* least one character without RXBRK
* being set. In both cases, the next
* RXBRK will indicate start-of-break.
*/
UART_PUT_IDR(port, ATMEL_US_RXBRK);
status &= ~ATMEL_US_RXBRK;
atmel_port->break_active = 0;
}
}
atmel_buffer_rx_char(port, status, ch);
status = UART_GET_CSR(port);
}
tasklet_schedule(&atmel_port->tasklet);
}
/*
* Transmit characters (called from tasklet with TXRDY interrupt
* disabled)
*/
static void atmel_tx_chars(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (port->x_char && UART_GET_CSR(port) & atmel_port->tx_done_mask) {
UART_PUT_CHAR(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return;
while (UART_GET_CSR(port) & atmel_port->tx_done_mask) {
UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
if (uart_circ_empty(xmit))
break;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (!uart_circ_empty(xmit))
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
}
/*
* receive interrupt handler.
*/
static void
atmel_handle_receive(struct uart_port *port, unsigned int pending)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_dma_rx(port)) {
/*
* PDC receive. Just schedule the tasklet and let it
* figure out the details.
*
* TODO: We're not handling error flags correctly at
* the moment.
*/
if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
UART_PUT_IDR(port, (ATMEL_US_ENDRX
| ATMEL_US_TIMEOUT));
tasklet_schedule(&atmel_port->tasklet);
}
if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
ATMEL_US_FRAME | ATMEL_US_PARE))
atmel_pdc_rxerr(port, pending);
}
/* Interrupt receive */
if (pending & ATMEL_US_RXRDY)
atmel_rx_chars(port);
else if (pending & ATMEL_US_RXBRK) {
/*
* End of break detected. If it came along with a
* character, atmel_rx_chars will handle it.
*/
UART_PUT_CR(port, ATMEL_US_RSTSTA);
UART_PUT_IDR(port, ATMEL_US_RXBRK);
atmel_port->break_active = 0;
}
}
/*
* transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
*/
static void
atmel_handle_transmit(struct uart_port *port, unsigned int pending)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (pending & atmel_port->tx_done_mask) {
/* Either PDC or interrupt transmission */
UART_PUT_IDR(port, atmel_port->tx_done_mask);
tasklet_schedule(&atmel_port->tasklet);
}
}
/*
* status flags interrupt handler.
*/
static void
atmel_handle_status(struct uart_port *port, unsigned int pending,
unsigned int status)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
| ATMEL_US_CTSIC)) {
atmel_port->irq_status = status;
tasklet_schedule(&atmel_port->tasklet);
}
}
/*
* Interrupt handler
*/
static irqreturn_t atmel_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
unsigned int status, pending, pass_counter = 0;
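/*
* Dispatch every pending, unmasked status bit to the receive, status
* and transmit handlers. ATMEL_ISR_PASS_LIMIT bounds the loop so a
* stuck status bit cannot keep us in the handler indefinitely.
*/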
do {
status = UART_GET_CSR(port);
pending = status & UART_GET_IMR(port);
if (!pending)
break;
atmel_handle_receive(port, pending);
atmel_handle_status(port, pending, status);
atmel_handle_transmit(port, pending);
} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
return pass_counter ? IRQ_HANDLED : IRQ_NONE;
}
/*
* Called from tasklet with ENDTX and TXBUFE interrupts disabled.
*/
static void atmel_tx_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *xmit = &port->state->xmit;
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
int count;
/* is the PDC still busy with the previous transfer? */
if (UART_GET_TCR(port))
return;
xmit->tail += pdc->ofs;
xmit->tail &= UART_XMIT_SIZE - 1;
port->icount.tx += pdc->ofs;
pdc->ofs = 0;
/* more to transmit - setup next transfer */
/* disable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
dma_sync_single_for_device(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_TO_DEVICE);
count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
pdc->ofs = count;
UART_PUT_TPR(port, pdc->dma_addr + xmit->tail);
UART_PUT_TCR(port, count);
/* re-enable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
} else {
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
/* DMA done, stop TX, start RX for RS485 */
atmel_start_rx(port);
}
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
}
static void atmel_rx_from_ring(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *ring = &atmel_port->rx_ring;
unsigned int flg;
unsigned int status;
while (ring->head != ring->tail) {
struct atmel_uart_char c;
/* Make sure c is loaded after head. */
smp_rmb();
c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
port->icount.rx++;
status = c.status;
flg = TTY_NORMAL;
/*
* note that the error handling code is
* out of the main execution path
*/
if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
| ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
if (status & ATMEL_US_RXBRK) {
/* ignore side-effect */
status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
port->icount.brk++;
if (uart_handle_break(port))
continue;
}
if (status & ATMEL_US_PARE)
port->icount.parity++;
if (status & ATMEL_US_FRAME)
port->icount.frame++;
if (status & ATMEL_US_OVRE)
port->icount.overrun++;
status &= port->read_status_mask;
if (status & ATMEL_US_RXBRK)
flg = TTY_BREAK;
else if (status & ATMEL_US_PARE)
flg = TTY_PARITY;
else if (status & ATMEL_US_FRAME)
flg = TTY_FRAME;
}
if (uart_handle_sysrq_char(port, c.ch))
continue;
uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
}
/*
* Drop the lock here since it might end up calling
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
tty_flip_buffer_push(port->state->port.tty);
spin_lock(&port->lock);
}
static void atmel_rx_from_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct tty_struct *tty = port->state->port.tty;
struct atmel_dma_buffer *pdc;
int rx_idx = atmel_port->pdc_rx_idx;
unsigned int head;
unsigned int tail;
unsigned int count;
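/* drain the PDC buffers, switching to the other one whenever the current buffer fills up */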
do {
/* Reset the UART timeout early so that we don't miss one */
UART_PUT_CR(port, ATMEL_US_STTTO);
pdc = &atmel_port->pdc_rx[rx_idx];
head = UART_GET_RPR(port) - pdc->dma_addr;
tail = pdc->ofs;
/* If the PDC has switched buffers, RPR won't contain
* any address within the current buffer. Since head
* is unsigned, we just need a one-way comparison to
* find out.
*
* In this case, we just need to consume the entire
* buffer and resubmit it for DMA. This will clear the
* ENDRX bit as well, so that we can safely re-enable
* all interrupts below.
*/
head = min(head, pdc->dma_size);
if (likely(head != tail)) {
dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
pdc->dma_size, DMA_FROM_DEVICE);
/*
* head will only wrap around when we recycle
* the DMA buffer, and when that happens, we
* explicitly set tail to 0. So head will
* always be greater than tail.
*/
count = head - tail;
tty_insert_flip_string(tty, pdc->buf + pdc->ofs, count);
dma_sync_single_for_device(port->dev, pdc->dma_addr,
pdc->dma_size, DMA_FROM_DEVICE);
port->icount.rx += count;
pdc->ofs = head;
}
/*
* If the current buffer is full, we need to check if
* the next one contains any additional data.
*/
if (head >= pdc->dma_size) {
pdc->ofs = 0;
UART_PUT_RNPR(port, pdc->dma_addr);
UART_PUT_RNCR(port, pdc->dma_size);
rx_idx = !rx_idx;
atmel_port->pdc_rx_idx = rx_idx;
}
} while (head >= pdc->dma_size);
/*
* Drop the lock here since it might end up calling
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
tty_flip_buffer_push(tty);
spin_lock(&port->lock);
UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}
/*
* tasklet handling tty stuff outside the interrupt handler.
*/
static void atmel_tasklet_func(unsigned long data)
{
struct uart_port *port = (struct uart_port *)data;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status;
unsigned int status_change;
/* The interrupt handler does not take the lock */
spin_lock(&port->lock);
if (atmel_use_dma_tx(port))
atmel_tx_dma(port);
else
atmel_tx_chars(port);
status = atmel_port->irq_status;
status_change = status ^ atmel_port->irq_status_prev;
if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
| ATMEL_US_DCD | ATMEL_US_CTS)) {
/* TODO: All reads to CSR will clear these interrupts! */
if (status_change & ATMEL_US_RI)
port->icount.rng++;
if (status_change & ATMEL_US_DSR)
port->icount.dsr++;
if (status_change & ATMEL_US_DCD)
uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
if (status_change & ATMEL_US_CTS)
uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
wake_up_interruptible(&port->state->port.delta_msr_wait);
atmel_port->irq_status_prev = status;
}
if (atmel_use_dma_rx(port))
atmel_rx_from_dma(port);
else
atmel_rx_from_ring(port);
spin_unlock(&port->lock);
}
/*
* Perform initialization and enable port for reception
*/
static int atmel_startup(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct tty_struct *tty = port->state->port.tty;
int retval;
/*
* Ensure that no interrupts are enabled; otherwise, when
* request_irq() is called, we could get stuck trying to
* handle an unexpected interrupt.
*/
UART_PUT_IDR(port, -1);
/*
* Allocate the IRQ
*/
retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED,
tty ? tty->name : "atmel_serial", port);
if (retval) {
printk("atmel_serial: atmel_startup - Can't get irq\n");
return retval;
}
/*
* Initialize DMA (if necessary)
*/
if (atmel_use_dma_rx(port)) {
int i;
for (i = 0; i < 2; i++) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
if (pdc->buf == NULL) {
if (i != 0) {
dma_unmap_single(port->dev,
atmel_port->pdc_rx[0].dma_addr,
PDC_BUFFER_SIZE,
DMA_FROM_DEVICE);
kfree(atmel_port->pdc_rx[0].buf);
}
free_irq(port->irq, port);
return -ENOMEM;
}
pdc->dma_addr = dma_map_single(port->dev,
pdc->buf,
PDC_BUFFER_SIZE,
DMA_FROM_DEVICE);
pdc->dma_size = PDC_BUFFER_SIZE;
pdc->ofs = 0;
}
atmel_port->pdc_rx_idx = 0;
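/* program the current (RPR/RCR) and next (RNPR/RNCR) PDC receive buffers */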
UART_PUT_RPR(port, atmel_port->pdc_rx[0].dma_addr);
UART_PUT_RCR(port, PDC_BUFFER_SIZE);
UART_PUT_RNPR(port, atmel_port->pdc_rx[1].dma_addr);
UART_PUT_RNCR(port, PDC_BUFFER_SIZE);
}
if (atmel_use_dma_tx(port)) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
struct circ_buf *xmit = &port->state->xmit;
pdc->buf = xmit->buf;
pdc->dma_addr = dma_map_single(port->dev,
pdc->buf,
UART_XMIT_SIZE,
DMA_TO_DEVICE);
pdc->dma_size = UART_XMIT_SIZE;
pdc->ofs = 0;
}
/*
* If there is a specific "open" function (to register
* control line interrupts)
*/
if (atmel_open_hook) {
retval = atmel_open_hook(port);
if (retval) {
free_irq(port->irq, port);
return retval;
}
}
/* Save current CSR for comparison in atmel_tasklet_func() */
atmel_port->irq_status_prev = UART_GET_CSR(port);
atmel_port->irq_status = atmel_port->irq_status_prev;
/*
* Finally, enable the serial port
*/
UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
/* enable xmit & rcvr */
UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
if (atmel_use_dma_rx(port)) {
/* set UART timeout */
UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
UART_PUT_CR(port, ATMEL_US_STTTO);
UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
/* enable PDC controller */
UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
} else {
/* enable receive only */
UART_PUT_IER(port, ATMEL_US_RXRDY);
}
return 0;
}
/*
* Disable the port
*/
static void atmel_shutdown(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/*
* Ensure everything is stopped.
*/
atmel_stop_rx(port);
atmel_stop_tx(port);
/*
* Shut-down the DMA.
*/
if (atmel_use_dma_rx(port)) {
int i;
for (i = 0; i < 2; i++) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
dma_unmap_single(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_FROM_DEVICE);
kfree(pdc->buf);
}
}
if (atmel_use_dma_tx(port)) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
dma_unmap_single(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_TO_DEVICE);
}
/*
* Disable all interrupts, port and break condition.
*/
UART_PUT_CR(port, ATMEL_US_RSTSTA);
UART_PUT_IDR(port, -1);
/*
* Free the interrupt
*/
free_irq(port->irq, port);
/*
* If there is a specific "close" function (to unregister
* control line interrupts)
*/
if (atmel_close_hook)
atmel_close_hook(port);
}
/*
* Flush any TX data submitted for DMA. Called when the TX circular
* buffer is reset.
*/
static void atmel_flush_buffer(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_dma_tx(port)) {
UART_PUT_TCR(port, 0);
atmel_port->pdc_tx.ofs = 0;
}
}
/*
* Power / Clock management.
*/
static void atmel_serial_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
switch (state) {
case 0:
/*
* Enable the peripheral clock for this serial port.
* This is called on uart_open() or a resume event.
*/
clk_enable(atmel_port->clk);
/* re-enable interrupts if we disabled some on suspend */
UART_PUT_IER(port, atmel_port->backup_imr);
break;
case 3:
/* Back up the interrupt mask and disable all interrupts */
atmel_port->backup_imr = UART_GET_IMR(port);
UART_PUT_IDR(port, -1);
/*
* Disable the peripheral clock for this serial port.
* This is called on uart_close() or a suspend event.
*/
clk_disable(atmel_port->clk);
break;
default:
printk(KERN_ERR "atmel_serial: unknown pm %d\n", state);
}
}
/*
* Change the port parameters
*/
static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
unsigned long flags;
unsigned int mode, imr, quot, baud;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/* Get current mode register */
mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL
| ATMEL_US_NBSTOP | ATMEL_US_PAR
| ATMEL_US_USMODE);
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
quot = uart_get_divisor(port, baud);
if (quot > 65535) { /* BRGR is 16-bit, so switch to slower clock */
quot /= 8;
mode |= ATMEL_US_USCLKS_MCK_DIV8;
}
/* byte size */
switch (termios->c_cflag & CSIZE) {
case CS5:
mode |= ATMEL_US_CHRL_5;
break;
case CS6:
mode |= ATMEL_US_CHRL_6;
break;
case CS7:
mode |= ATMEL_US_CHRL_7;
break;
default:
mode |= ATMEL_US_CHRL_8;
break;
}
/* stop bits */
if (termios->c_cflag & CSTOPB)
mode |= ATMEL_US_NBSTOP_2;
/* parity */
if (termios->c_cflag & PARENB) {
/* Mark or Space parity */
if (termios->c_cflag & CMSPAR) {
if (termios->c_cflag & PARODD)
mode |= ATMEL_US_PAR_MARK;
else
mode |= ATMEL_US_PAR_SPACE;
} else if (termios->c_cflag & PARODD)
mode |= ATMEL_US_PAR_ODD;
else
mode |= ATMEL_US_PAR_EVEN;
} else
mode |= ATMEL_US_PAR_NONE;
/* hardware handshake (RTS/CTS) */
if (termios->c_cflag & CRTSCTS)
mode |= ATMEL_US_USMODE_HWHS;
else
mode |= ATMEL_US_USMODE_NORMAL;
spin_lock_irqsave(&port->lock, flags);
port->read_status_mask = ATMEL_US_OVRE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
if (termios->c_iflag & (BRKINT | PARMRK))
port->read_status_mask |= ATMEL_US_RXBRK;
if (atmel_use_dma_rx(port))
/* need to enable error interrupts */
UART_PUT_IER(port, port->read_status_mask);
/*
* Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= ATMEL_US_RXBRK;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= ATMEL_US_OVRE;
}
/* TODO: Ignore all characters if CREAD is set.*/
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
/*
* save/disable interrupts. The tty layer will ensure that the
* transmitter is empty if requested by the caller, so there's
* no need to wait for it here.
*/
imr = UART_GET_IMR(port);
UART_PUT_IDR(port, -1);
/* disable receiver and transmitter */
UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
/* Resetting serial mode to RS232 (0x0) */
mode &= ~ATMEL_US_USMODE;
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
UART_PUT_TTGR(port,
atmel_port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
}
/* set the parity, stop bits and data size */
UART_PUT_MR(port, mode);
/* set the baud rate */
UART_PUT_BRGR(port, quot);
UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
/* restore interrupts */
UART_PUT_IER(port, imr);
/* CTS flow-control and modem-status interrupts */
if (UART_ENABLE_MS(port, termios->c_cflag))
port->ops->enable_ms(port);
spin_unlock_irqrestore(&port->lock, flags);
}
static void atmel_set_ldisc(struct uart_port *port, int new)
{
int line = port->line;
if (line >= port->state->port.tty->driver->num)
return;
if (port->state->port.tty->ldisc->ops->num == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
atmel_enable_ms(port);
} else {
port->flags &= ~UPF_HARDPPS_CD;
}
}
/*
* Return string describing the specified port
*/
static const char *atmel_type(struct uart_port *port)
{
return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
}
/*
* Release the memory region(s) being used by 'port'.
*/
static void atmel_release_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
int size = pdev->resource[0].end - pdev->resource[0].start + 1;
release_mem_region(port->mapbase, size);
if (port->flags & UPF_IOREMAP) {
iounmap(port->membase);
port->membase = NULL;
}
}
/*
* Request the memory region(s) being used by 'port'.
*/
static int atmel_request_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
int size = pdev->resource[0].end - pdev->resource[0].start + 1;
if (!request_mem_region(port->mapbase, size, "atmel_serial"))
return -EBUSY;
if (port->flags & UPF_IOREMAP) {
port->membase = ioremap(port->mapbase, size);
if (port->membase == NULL) {
release_mem_region(port->mapbase, size);
return -ENOMEM;
}
}
return 0;
}
/*
* Configure/autoconfigure the port.
*/
static void atmel_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_ATMEL;
atmel_request_port(port);
}
}
/*
* Verify the new serial_struct (for TIOCSSERIAL).
*/
static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
{
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
ret = -EINVAL;
if (port->irq != ser->irq)
ret = -EINVAL;
if (ser->io_type != SERIAL_IO_MEM)
ret = -EINVAL;
if (port->uartclk / 16 != ser->baud_base)
ret = -EINVAL;
if ((void *)port->mapbase != ser->iomem_base)
ret = -EINVAL;
if (port->iobase != ser->port)
ret = -EINVAL;
if (ser->hub6 != 0)
ret = -EINVAL;
return ret;
}
#ifdef CONFIG_CONSOLE_POLL
static int atmel_poll_get_char(struct uart_port *port)
{
while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
cpu_relax();
return UART_GET_CHAR(port);
}
static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
{
while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
cpu_relax();
UART_PUT_CHAR(port, ch);
}
#endif
static int
atmel_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg)
{
struct serial_rs485 rs485conf;
switch (cmd) {
case TIOCSRS485:
if (copy_from_user(&rs485conf, (struct serial_rs485 *) arg,
sizeof(rs485conf)))
return -EFAULT;
atmel_config_rs485(port, &rs485conf);
break;
case TIOCGRS485:
if (copy_to_user((struct serial_rs485 *) arg,
&(to_atmel_uart_port(port)->rs485),
sizeof(rs485conf)))
return -EFAULT;
break;
default:
return -ENOIOCTLCMD;
}
return 0;
}
static struct uart_ops atmel_pops = {
.tx_empty = atmel_tx_empty,
.set_mctrl = atmel_set_mctrl,
.get_mctrl = atmel_get_mctrl,
.stop_tx = atmel_stop_tx,
.start_tx = atmel_start_tx,
.stop_rx = atmel_stop_rx,
.enable_ms = atmel_enable_ms,
.break_ctl = atmel_break_ctl,
.startup = atmel_startup,
.shutdown = atmel_shutdown,
.flush_buffer = atmel_flush_buffer,
.set_termios = atmel_set_termios,
.set_ldisc = atmel_set_ldisc,
.type = atmel_type,
.release_port = atmel_release_port,
.request_port = atmel_request_port,
.config_port = atmel_config_port,
.verify_port = atmel_verify_port,
.pm = atmel_serial_pm,
.ioctl = atmel_ioctl,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = atmel_poll_get_char,
.poll_put_char = atmel_poll_put_char,
#endif
};
/*
* Configure the port from the platform device resource info.
*/
static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
struct platform_device *pdev)
{
struct uart_port *port = &atmel_port->uart;
struct atmel_uart_data *data = pdev->dev.platform_data;
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &atmel_pops;
port->fifosize = 1;
port->line = data->num;
port->dev = &pdev->dev;
port->mapbase = pdev->resource[0].start;
port->irq = pdev->resource[1].start;
tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
(unsigned long)port);
memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
if (data->regs)
/* Already mapped by setup code */
port->membase = data->regs;
else {
port->flags |= UPF_IOREMAP;
port->membase = NULL;
}
/* for console, the clock could already be configured */
if (!atmel_port->clk) {
atmel_port->clk = clk_get(&pdev->dev, "usart");
clk_enable(atmel_port->clk);
port->uartclk = clk_get_rate(atmel_port->clk);
clk_disable(atmel_port->clk);
/* only enable clock when USART is in use */
}
atmel_port->use_dma_rx = data->use_dma_rx;
atmel_port->use_dma_tx = data->use_dma_tx;
atmel_port->rs485 = data->rs485;
/* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
if (atmel_port->rs485.flags & SER_RS485_ENABLED)
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
else if (atmel_use_dma_tx(port)) {
port->fifosize = PDC_BUFFER_SIZE;
atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
} else {
atmel_port->tx_done_mask = ATMEL_US_TXRDY;
}
}
/*
* Register board-specific modem-control line handlers.
*/
void __init atmel_register_uart_fns(struct atmel_port_fns *fns)
{
if (fns->enable_ms)
atmel_pops.enable_ms = fns->enable_ms;
if (fns->get_mctrl)
atmel_pops.get_mctrl = fns->get_mctrl;
if (fns->set_mctrl)
atmel_pops.set_mctrl = fns->set_mctrl;
atmel_open_hook = fns->open;
atmel_close_hook = fns->close;
atmel_pops.pm = fns->pm;
atmel_pops.set_wake = fns->set_wake;
}
#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
static void atmel_console_putchar(struct uart_port *port, int ch)
{
while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
cpu_relax();
UART_PUT_CHAR(port, ch);
}
/*
* Interrupts are disabled on entering
*/
static void atmel_console_write(struct console *co, const char *s, u_int count)
{
struct uart_port *port = &atmel_ports[co->index].uart;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status, imr;
unsigned int pdc_tx;
/*
* First, save IMR and then disable interrupts
*/
imr = UART_GET_IMR(port);
UART_PUT_IDR(port, ATMEL_US_RXRDY | atmel_port->tx_done_mask);
/* Store PDC transmit status and disable it */
pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN;
UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
uart_console_write(port, s, count, atmel_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore IMR
*/
do {
status = UART_GET_CSR(port);
} while (!(status & ATMEL_US_TXRDY));
/* Restore PDC transmit status */
if (pdc_tx)
UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
/* set interrupts back the way they were */
UART_PUT_IER(port, imr);
}
/*
* If the port was already initialised (eg, by a boot loader),
* try to determine the current setup.
*/
static void __init atmel_console_get_options(struct uart_port *port, int *baud,
int *parity, int *bits)
{
unsigned int mr, quot;
/*
* If the baud rate generator isn't running, the port wasn't
* initialized by the boot loader.
*/
quot = UART_GET_BRGR(port) & ATMEL_US_CD;
if (!quot)
return;
mr = UART_GET_MR(port) & ATMEL_US_CHRL;
if (mr == ATMEL_US_CHRL_8)
*bits = 8;
else
*bits = 7;
mr = UART_GET_MR(port) & ATMEL_US_PAR;
if (mr == ATMEL_US_PAR_EVEN)
*parity = 'e';
else if (mr == ATMEL_US_PAR_ODD)
*parity = 'o';
/*
* The serial core only rounds down when matching this to a
* supported baud rate. Make sure we don't end up slightly
* lower than one of those, as it would make us fall through
* to a much lower baud rate than we really want.
*/
*baud = port->uartclk / (16 * (quot - 1));
}
static int __init atmel_console_setup(struct console *co, char *options)
{
struct uart_port *port = &atmel_ports[co->index].uart;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (port->membase == NULL) {
/* Port not initialized yet - delay setup */
return -ENODEV;
}
clk_enable(atmel_ports[co->index].clk);
UART_PUT_IDR(port, -1);
UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
atmel_console_get_options(port, &baud, &parity, &bits);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct uart_driver atmel_uart;
static struct console atmel_console = {
.name = ATMEL_DEVICENAME,
.write = atmel_console_write,
.device = uart_console_device,
.setup = atmel_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &atmel_uart,
};
#define ATMEL_CONSOLE_DEVICE (&atmel_console)
/*
* Early console initialization (before VM subsystem initialized).
*/
static int __init atmel_console_init(void)
{
if (atmel_default_console_device) {
add_preferred_console(ATMEL_DEVICENAME,
atmel_default_console_device->id, NULL);
atmel_init_port(&atmel_ports[atmel_default_console_device->id],
atmel_default_console_device);
register_console(&atmel_console);
}
return 0;
}
console_initcall(atmel_console_init);
/*
* Late console initialization.
*/
static int __init atmel_late_console_init(void)
{
if (atmel_default_console_device
&& !(atmel_console.flags & CON_ENABLED))
register_console(&atmel_console);
return 0;
}
core_initcall(atmel_late_console_init);
static inline bool atmel_is_console_port(struct uart_port *port)
{
return port->cons && port->cons->index == port->line;
}
#else
#define ATMEL_CONSOLE_DEVICE NULL
static inline bool atmel_is_console_port(struct uart_port *port)
{
return false;
}
#endif
static struct uart_driver atmel_uart = {
.owner = THIS_MODULE,
.driver_name = "atmel_serial",
.dev_name = ATMEL_DEVICENAME,
.major = SERIAL_ATMEL_MAJOR,
.minor = MINOR_START,
.nr = ATMEL_MAX_UART,
.cons = ATMEL_CONSOLE_DEVICE,
};
#ifdef CONFIG_PM
static bool atmel_serial_clk_will_stop(void)
{
#ifdef CONFIG_ARCH_AT91
return at91_suspend_entering_slow_clock();
#else
return false;
#endif
}
static int atmel_serial_suspend(struct platform_device *pdev,
pm_message_t state)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_is_console_port(port) && console_suspend_enabled) {
/* Drain the TX shifter */
while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY))
cpu_relax();
}
/* we cannot wake up if we're running on the slow clock */
atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
if (atmel_serial_clk_will_stop())
device_set_wakeup_enable(&pdev->dev, 0);
uart_suspend_port(&atmel_uart, port);
return 0;
}
static int atmel_serial_resume(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
uart_resume_port(&atmel_uart, port);
device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
return 0;
}
#else
#define atmel_serial_suspend NULL
#define atmel_serial_resume NULL
#endif
static int __devinit atmel_serial_probe(struct platform_device *pdev)
{
struct atmel_uart_port *port;
struct atmel_uart_data *pdata = pdev->dev.platform_data;
void *data;
int ret;
BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
port = &atmel_ports[pdata->num];
port->backup_imr = 0;
atmel_init_port(port, pdev);
if (!atmel_use_dma_rx(&port->uart)) {
ret = -ENOMEM;
data = kmalloc(sizeof(struct atmel_uart_char)
* ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
if (!data)
goto err_alloc_ring;
port->rx_ring.buf = data;
}
ret = uart_add_one_port(&atmel_uart, &port->uart);
if (ret)
goto err_add_port;
#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
if (atmel_is_console_port(&port->uart)
&& ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
/*
* The serial core enabled the clock for us, so undo
* the clk_enable() in atmel_console_setup()
*/
clk_disable(port->clk);
}
#endif
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, port);
if (port->rs485.flags & SER_RS485_ENABLED) {
UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
}
return 0;
err_add_port:
kfree(port->rx_ring.buf);
port->rx_ring.buf = NULL;
err_alloc_ring:
if (!atmel_is_console_port(&port->uart)) {
clk_put(port->clk);
port->clk = NULL;
}
return ret;
}
static int __devexit atmel_serial_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int ret = 0;
device_init_wakeup(&pdev->dev, 0);
platform_set_drvdata(pdev, NULL);
ret = uart_remove_one_port(&atmel_uart, port);
tasklet_kill(&atmel_port->tasklet);
kfree(atmel_port->rx_ring.buf);
/* "port" is allocated statically, so we shouldn't free it */
clk_put(atmel_port->clk);
return ret;
}
static struct platform_driver atmel_serial_driver = {
.probe = atmel_serial_probe,
.remove = __devexit_p(atmel_serial_remove),
.suspend = atmel_serial_suspend,
.resume = atmel_serial_resume,
.driver = {
.name = "atmel_usart",
.owner = THIS_MODULE,
},
};
static int __init atmel_serial_init(void)
{
int ret;
ret = uart_register_driver(&atmel_uart);
if (ret)
return ret;
ret = platform_driver_register(&atmel_serial_driver);
if (ret)
uart_unregister_driver(&atmel_uart);
return ret;
}
static void __exit atmel_serial_exit(void)
{
platform_driver_unregister(&atmel_serial_driver);
uart_unregister_driver(&atmel_uart);
}
module_init(atmel_serial_init);
module_exit(atmel_serial_exit);
MODULE_AUTHOR("Rick Bronson");
MODULE_DESCRIPTION("Atmel AT91 / AT32 serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_usart");
| gpl-2.0 |
caplio/valente_wx-ics | drivers/net/usb/kaweth.c | 1009 | 38194 | /****************************************************************
*
* kaweth.c - driver for KL5KUSB101 based USB->Ethernet
*
* (c) 2000 Interlan Communications
* (c) 2000 Stephane Alnet
* (C) 2001 Brad Hards
* (C) 2002 Oliver Neukum
*
* Original author: The Zapman <zapman@interlan.net>
* Inspired by, and much credit goes to Michael Rothwell
* <rothwell@interlan.net> for the test equipment, help, and patience
* Based on (and with thanks to) Petko Manolov's pegasus.c driver.
* Also many thanks to Joel Silverman and Ed Surprenant at Kawasaki
* for providing the firmware and driver resources.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
****************************************************************/
/* TODO:
* Develop test procedures for USB net interfaces
* Run test procedures
* Fix bugs from previous two steps
* Snoop other OSs for any tricks we're not doing
* Reduce arbitrary timeouts
* Smart multicast support
* Temporary MAC change support
* Tunable SOFs parameter - ioctl()?
* Ethernet stats collection
* Code formatting improvements
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/usb.h>
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/firmware.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#undef DEBUG
#define KAWETH_MTU 1514
#define KAWETH_BUF_SIZE 1664
#define KAWETH_TX_TIMEOUT (5 * HZ)
#define KAWETH_SCRATCH_SIZE 32
#define KAWETH_FIRMWARE_BUF_SIZE 4096
#define KAWETH_CONTROL_TIMEOUT (30000)
#define KAWETH_STATUS_BROKEN 0x0000001
#define KAWETH_STATUS_CLOSING 0x0000002
#define KAWETH_STATUS_SUSPENDING 0x0000004
#define KAWETH_STATUS_BLOCKED (KAWETH_STATUS_CLOSING | KAWETH_STATUS_SUSPENDING)
#define KAWETH_PACKET_FILTER_PROMISCUOUS 0x01
#define KAWETH_PACKET_FILTER_ALL_MULTICAST 0x02
#define KAWETH_PACKET_FILTER_DIRECTED 0x04
#define KAWETH_PACKET_FILTER_BROADCAST 0x08
#define KAWETH_PACKET_FILTER_MULTICAST 0x10
/* Table 7 */
#define KAWETH_COMMAND_GET_ETHERNET_DESC 0x00
#define KAWETH_COMMAND_MULTICAST_FILTERS 0x01
#define KAWETH_COMMAND_SET_PACKET_FILTER 0x02
#define KAWETH_COMMAND_STATISTICS 0x03
#define KAWETH_COMMAND_SET_TEMP_MAC 0x06
#define KAWETH_COMMAND_GET_TEMP_MAC 0x07
#define KAWETH_COMMAND_SET_URB_SIZE 0x08
#define KAWETH_COMMAND_SET_SOFS_WAIT 0x09
#define KAWETH_COMMAND_SCAN 0xFF
#define KAWETH_SOFS_TO_WAIT 0x05
#define INTBUFFERSIZE 4
#define STATE_OFFSET 0
#define STATE_MASK 0x40
#define STATE_SHIFT 5
#define IS_BLOCKED(s) (s & KAWETH_STATUS_BLOCKED)
MODULE_AUTHOR("Michael Zappe <zapman@interlan.net>, Stephane Alnet <stephane@u-picardie.fr>, Brad Hards <bhards@bigpond.net.au> and Oliver Neukum <oliver@neukum.org>");
MODULE_DESCRIPTION("KL5USB101 USB Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("kaweth/new_code.bin");
MODULE_FIRMWARE("kaweth/new_code_fix.bin");
MODULE_FIRMWARE("kaweth/trigger_code.bin");
MODULE_FIRMWARE("kaweth/trigger_code_fix.bin");
static const char driver_name[] = "kaweth";
static int kaweth_probe(
struct usb_interface *intf,
const struct usb_device_id *id /* from id_table */
);
static void kaweth_disconnect(struct usb_interface *intf);
static int kaweth_internal_control_msg(struct usb_device *usb_dev,
unsigned int pipe,
struct usb_ctrlrequest *cmd, void *data,
int len, int timeout);
static int kaweth_suspend(struct usb_interface *intf, pm_message_t message);
static int kaweth_resume(struct usb_interface *intf);
/****************************************************************
* usb_device_id
****************************************************************/
static struct usb_device_id usb_klsi_table[] = {
{ USB_DEVICE(0x03e8, 0x0008) }, /* AOX Endpoints USB Ethernet */
{ USB_DEVICE(0x04bb, 0x0901) }, /* I-O DATA USB-ET/T */
{ USB_DEVICE(0x0506, 0x03e8) }, /* 3Com 3C19250 */
{ USB_DEVICE(0x0506, 0x11f8) }, /* 3Com 3C460 */
{ USB_DEVICE(0x0557, 0x2002) }, /* ATEN USB Ethernet */
{ USB_DEVICE(0x0557, 0x4000) }, /* D-Link DSB-650C */
{ USB_DEVICE(0x0565, 0x0002) }, /* Peracom Enet */
{ USB_DEVICE(0x0565, 0x0003) }, /* Optus@Home UEP1045A */
{ USB_DEVICE(0x0565, 0x0005) }, /* Peracom Enet2 */
{ USB_DEVICE(0x05e9, 0x0008) }, /* KLSI KL5KUSB101B */
{ USB_DEVICE(0x05e9, 0x0009) }, /* KLSI KL5KUSB101B (Board change) */
{ USB_DEVICE(0x066b, 0x2202) }, /* Linksys USB10T */
{ USB_DEVICE(0x06e1, 0x0008) }, /* ADS USB-10BT */
{ USB_DEVICE(0x06e1, 0x0009) }, /* ADS USB-10BT */
{ USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */
{ USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */
{ USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */
{ USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */
{ USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */
{ USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */
{ USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */
{ USB_DEVICE(0x085a, 0x0009) }, /* PortGear Ethernet Adapter */
{ USB_DEVICE(0x087d, 0x5704) }, /* Jaton USB Ethernet Device Adapter */
{ USB_DEVICE(0x0951, 0x0008) }, /* Kingston Technology USB Ethernet Adapter */
{ USB_DEVICE(0x095a, 0x3003) }, /* Portsmith Express Ethernet Adapter */
{ USB_DEVICE(0x10bd, 0x1427) }, /* ASANTE USB To Ethernet Adapter */
{ USB_DEVICE(0x1342, 0x0204) }, /* Mobility USB-Ethernet Adapter */
{ USB_DEVICE(0x13d2, 0x0400) }, /* Shark Pocket Adapter */
{ USB_DEVICE(0x1485, 0x0001) }, /* Silicom U2E */
{ USB_DEVICE(0x1485, 0x0002) }, /* Psion Dacom Gold Port Ethernet */
{ USB_DEVICE(0x1645, 0x0005) }, /* Entrega E45 */
{ USB_DEVICE(0x1645, 0x0008) }, /* Entrega USB Ethernet Adapter */
{ USB_DEVICE(0x1645, 0x8005) }, /* PortGear Ethernet Adapter */
{ USB_DEVICE(0x1668, 0x0323) }, /* Actiontec USB Ethernet */
{ USB_DEVICE(0x2001, 0x4000) }, /* D-link DSB-650C */
{} /* Null terminator */
};
MODULE_DEVICE_TABLE (usb, usb_klsi_table);
/****************************************************************
* kaweth_driver
****************************************************************/
static struct usb_driver kaweth_driver = {
.name = driver_name,
.probe = kaweth_probe,
.disconnect = kaweth_disconnect,
.suspend = kaweth_suspend,
.resume = kaweth_resume,
.id_table = usb_klsi_table,
.supports_autosuspend = 1,
};
typedef __u8 eth_addr_t[6];
/****************************************************************
* usb_eth_dev
****************************************************************/
struct usb_eth_dev {
char *name;
__u16 vendor;
__u16 device;
void *pdata;
};
/****************************************************************
* kaweth_ethernet_configuration
* Refer Table 8
****************************************************************/
struct kaweth_ethernet_configuration
{
__u8 size;
__u8 reserved1;
__u8 reserved2;
eth_addr_t hw_addr;
__u32 statistics_mask;
__le16 segment_size;
__u16 max_multicast_filters;
__u8 reserved3;
} __packed;
/****************************************************************
* kaweth_device
****************************************************************/
struct kaweth_device
{
spinlock_t device_lock;
__u32 status;
int end;
int suspend_lowmem_rx;
int suspend_lowmem_ctrl;
int linkstate;
int opened;
struct delayed_work lowmem_work;
struct usb_device *dev;
struct usb_interface *intf;
struct net_device *net;
wait_queue_head_t term_wait;
struct urb *rx_urb;
struct urb *tx_urb;
struct urb *irq_urb;
dma_addr_t intbufferhandle;
__u8 *intbuffer;
dma_addr_t rxbufferhandle;
__u8 *rx_buf;
struct sk_buff *tx_skb;
__u8 *firmware_buf;
__u8 scratch[KAWETH_SCRATCH_SIZE];
__u16 packet_filter_bitmap;
struct kaweth_ethernet_configuration configuration;
struct net_device_stats stats;
};
/****************************************************************
* kaweth_control
****************************************************************/
static int kaweth_control(struct kaweth_device *kaweth,
unsigned int pipe,
__u8 request,
__u8 requesttype,
__u16 value,
__u16 index,
void *data,
__u16 size,
int timeout)
{
struct usb_ctrlrequest *dr;
int retval;
dbg("kaweth_control()");
if(in_interrupt()) {
dbg("in_interrupt()");
return -EBUSY;
}
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
if (!dr) {
dbg("kmalloc() failed");
return -ENOMEM;
}
dr->bRequestType = requesttype;
dr->bRequest = request;
dr->wValue = cpu_to_le16(value);
dr->wIndex = cpu_to_le16(index);
dr->wLength = cpu_to_le16(size);
retval = kaweth_internal_control_msg(kaweth->dev,
pipe,
dr,
data,
size,
timeout);
kfree(dr);
return retval;
}
/****************************************************************
* kaweth_read_configuration
****************************************************************/
static int kaweth_read_configuration(struct kaweth_device *kaweth)
{
int retval;
dbg("Reading kaweth configuration");
retval = kaweth_control(kaweth,
usb_rcvctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_GET_ETHERNET_DESC,
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
0,
0,
(void *)&kaweth->configuration,
sizeof(kaweth->configuration),
KAWETH_CONTROL_TIMEOUT);
return retval;
}
/****************************************************************
* kaweth_set_urb_size
****************************************************************/
static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size)
{
int retval;
dbg("Setting URB size to %d", (unsigned)urb_size);
retval = kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_SET_URB_SIZE,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
urb_size,
0,
(void *)&kaweth->scratch,
0,
KAWETH_CONTROL_TIMEOUT);
return retval;
}
/****************************************************************
* kaweth_set_sofs_wait
****************************************************************/
static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait)
{
int retval;
dbg("Set SOFS wait to %d", (unsigned)sofs_wait);
retval = kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_SET_SOFS_WAIT,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
sofs_wait,
0,
(void *)&kaweth->scratch,
0,
KAWETH_CONTROL_TIMEOUT);
return retval;
}
/****************************************************************
* kaweth_set_receive_filter
****************************************************************/
static int kaweth_set_receive_filter(struct kaweth_device *kaweth,
__u16 receive_filter)
{
int retval;
dbg("Set receive filter to %d", (unsigned)receive_filter);
retval = kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_SET_PACKET_FILTER,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
receive_filter,
0,
(void *)&kaweth->scratch,
0,
KAWETH_CONTROL_TIMEOUT);
return retval;
}
/****************************************************************
* kaweth_download_firmware
****************************************************************/
static int kaweth_download_firmware(struct kaweth_device *kaweth,
const char *fwname,
__u8 interrupt,
__u8 type)
{
const struct firmware *fw;
int data_len;
int ret;
ret = request_firmware(&fw, fwname, &kaweth->dev->dev);
if (ret) {
err("Firmware request failed\n");
return ret;
}
if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) {
err("Firmware too big: %zu", fw->size);
release_firmware(fw);
return -ENOSPC;
}
data_len = fw->size;
memcpy(kaweth->firmware_buf, fw->data, fw->size);
release_firmware(fw);
kaweth->firmware_buf[2] = (data_len & 0xFF) - 7;
kaweth->firmware_buf[3] = data_len >> 8;
kaweth->firmware_buf[4] = type;
kaweth->firmware_buf[5] = interrupt;
dbg("High: %i, Low:%i", kaweth->firmware_buf[3],
kaweth->firmware_buf[2]);
dbg("Downloading firmware at %p to kaweth device at %p",
fw->data, kaweth);
dbg("Firmware length: %d", data_len);
return kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_SCAN,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
0,
0,
(void *)kaweth->firmware_buf,
data_len,
KAWETH_CONTROL_TIMEOUT);
}
/****************************************************************
* kaweth_trigger_firmware
****************************************************************/
static int kaweth_trigger_firmware(struct kaweth_device *kaweth,
__u8 interrupt)
{
kaweth->firmware_buf[0] = 0xB6;
kaweth->firmware_buf[1] = 0xC3;
kaweth->firmware_buf[2] = 0x01;
kaweth->firmware_buf[3] = 0x00;
kaweth->firmware_buf[4] = 0x06;
kaweth->firmware_buf[5] = interrupt;
kaweth->firmware_buf[6] = 0x00;
kaweth->firmware_buf[7] = 0x00;
dbg("Triggering firmware");
return kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_SCAN,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
0,
0,
(void *)kaweth->firmware_buf,
8,
KAWETH_CONTROL_TIMEOUT);
}
/****************************************************************
* kaweth_reset
****************************************************************/
static int kaweth_reset(struct kaweth_device *kaweth)
{
int result;
dbg("kaweth_reset(%p)", kaweth);
result = usb_reset_configuration(kaweth->dev);
mdelay(10);
dbg("kaweth_reset() returns %d.",result);
return result;
}
static void kaweth_usb_receive(struct urb *);
static int kaweth_resubmit_rx_urb(struct kaweth_device *, gfp_t);
/****************************************************************
int_callback
*****************************************************************/
static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, gfp_t mf)
{
int status;
status = usb_submit_urb (kaweth->irq_urb, mf);
if (unlikely(status == -ENOMEM)) {
kaweth->suspend_lowmem_ctrl = 1;
schedule_delayed_work(&kaweth->lowmem_work, HZ/4);
} else {
kaweth->suspend_lowmem_ctrl = 0;
}
if (status)
err ("can't resubmit intr, %s-%s, status %d",
kaweth->dev->bus->bus_name,
kaweth->dev->devpath, status);
}
static void int_callback(struct urb *u)
{
struct kaweth_device *kaweth = u->context;
int act_state;
int status = u->status;
switch (status) {
case 0: /* success */
break;
case -ECONNRESET: /* unlink */
case -ENOENT:
case -ESHUTDOWN:
return;
/* -EPIPE: should clear the halt */
default: /* error */
goto resubmit;
}
/* we check the link state to report changes */
if (kaweth->linkstate != (act_state = ( kaweth->intbuffer[STATE_OFFSET] | STATE_MASK) >> STATE_SHIFT)) {
if (act_state)
netif_carrier_on(kaweth->net);
else
netif_carrier_off(kaweth->net);
kaweth->linkstate = act_state;
}
resubmit:
kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC);
}
static void kaweth_resubmit_tl(struct work_struct *work)
{
struct kaweth_device *kaweth =
container_of(work, struct kaweth_device, lowmem_work.work);
if (IS_BLOCKED(kaweth->status))
return;
if (kaweth->suspend_lowmem_rx)
kaweth_resubmit_rx_urb(kaweth, GFP_NOIO);
if (kaweth->suspend_lowmem_ctrl)
kaweth_resubmit_int_urb(kaweth, GFP_NOIO);
}
/****************************************************************
* kaweth_resubmit_rx_urb
****************************************************************/
static int kaweth_resubmit_rx_urb(struct kaweth_device *kaweth,
gfp_t mem_flags)
{
int result;
usb_fill_bulk_urb(kaweth->rx_urb,
kaweth->dev,
usb_rcvbulkpipe(kaweth->dev, 1),
kaweth->rx_buf,
KAWETH_BUF_SIZE,
kaweth_usb_receive,
kaweth);
kaweth->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
kaweth->rx_urb->transfer_dma = kaweth->rxbufferhandle;
if((result = usb_submit_urb(kaweth->rx_urb, mem_flags))) {
if (result == -ENOMEM) {
kaweth->suspend_lowmem_rx = 1;
schedule_delayed_work(&kaweth->lowmem_work, HZ/4);
}
err("resubmitting rx_urb %d failed", result);
} else {
kaweth->suspend_lowmem_rx = 0;
}
return result;
}
static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth);
/****************************************************************
* kaweth_usb_receive
****************************************************************/
static void kaweth_usb_receive(struct urb *urb)
{
struct kaweth_device *kaweth = urb->context;
struct net_device *net = kaweth->net;
int status = urb->status;
int count = urb->actual_length;
int count2 = urb->transfer_buffer_length;
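/* the adapter prepends a 2-byte little-endian frame length to each received packet */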
__u16 pkt_len = le16_to_cpup((__le16 *)kaweth->rx_buf);
struct sk_buff *skb;
if (unlikely(status == -EPIPE)) {
kaweth->stats.rx_errors++;
kaweth->end = 1;
wake_up(&kaweth->term_wait);
dbg("Status was -EPIPE.");
return;
}
if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) {
/* we are killed - set a flag and wake the disconnect handler */
kaweth->end = 1;
wake_up(&kaweth->term_wait);
dbg("Status was -ECONNRESET or -ESHUTDOWN.");
return;
}
if (unlikely(status == -EPROTO || status == -ETIME ||
status == -EILSEQ)) {
kaweth->stats.rx_errors++;
dbg("Status was -EPROTO, -ETIME, or -EILSEQ.");
return;
}
if (unlikely(status == -EOVERFLOW)) {
kaweth->stats.rx_errors++;
dbg("Status was -EOVERFLOW.");
}
spin_lock(&kaweth->device_lock);
if (IS_BLOCKED(kaweth->status)) {
spin_unlock(&kaweth->device_lock);
return;
}
spin_unlock(&kaweth->device_lock);
if(status && status != -EREMOTEIO && count != 1) {
err("%s RX status: %d count: %d packet_len: %d",
net->name,
status,
count,
(int)pkt_len);
kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC);
return;
}
if(kaweth->net && (count > 2)) {
if(pkt_len > (count - 2)) {
err("Packet length too long for USB frame (pkt_len: %x, count: %x)",pkt_len, count);
err("Packet len & 2047: %x", pkt_len & 2047);
err("Count 2: %x", count2);
kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC);
return;
}
if(!(skb = dev_alloc_skb(pkt_len+2))) {
kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC);
return;
}
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, net);
netif_rx(skb);
kaweth->stats.rx_packets++;
kaweth->stats.rx_bytes += pkt_len;
}
kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC);
}
/****************************************************************
* kaweth_open
****************************************************************/
static int kaweth_open(struct net_device *net)
{
struct kaweth_device *kaweth = netdev_priv(net);
int res;
dbg("Opening network device.");
res = usb_autopm_get_interface(kaweth->intf);
if (res) {
err("Interface cannot be resumed.");
return -EIO;
}
res = kaweth_resubmit_rx_urb(kaweth, GFP_KERNEL);
if (res)
goto err_out;
usb_fill_int_urb(
kaweth->irq_urb,
kaweth->dev,
usb_rcvintpipe(kaweth->dev, 3),
kaweth->intbuffer,
INTBUFFERSIZE,
int_callback,
kaweth,
250); /* overriding the descriptor */
kaweth->irq_urb->transfer_dma = kaweth->intbufferhandle;
kaweth->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
res = usb_submit_urb(kaweth->irq_urb, GFP_KERNEL);
if (res) {
usb_kill_urb(kaweth->rx_urb);
goto err_out;
}
kaweth->opened = 1;
netif_start_queue(net);
kaweth_async_set_rx_mode(kaweth);
return 0;
err_out:
usb_autopm_put_interface(kaweth->intf);
return -EIO;
}
/****************************************************************
* kaweth_kill_urbs
****************************************************************/
static void kaweth_kill_urbs(struct kaweth_device *kaweth)
{
usb_kill_urb(kaweth->irq_urb);
usb_kill_urb(kaweth->rx_urb);
usb_kill_urb(kaweth->tx_urb);
cancel_delayed_work_sync(&kaweth->lowmem_work);
/* a scheduled work item may have resubmitted the URBs,
so we kill them again */
usb_kill_urb(kaweth->irq_urb);
usb_kill_urb(kaweth->rx_urb);
}
/****************************************************************
* kaweth_close
****************************************************************/
static int kaweth_close(struct net_device *net)
{
struct kaweth_device *kaweth = netdev_priv(net);
netif_stop_queue(net);
kaweth->opened = 0;
kaweth->status |= KAWETH_STATUS_CLOSING;
kaweth_kill_urbs(kaweth);
kaweth->status &= ~KAWETH_STATUS_CLOSING;
usb_autopm_put_interface(kaweth->intf);
return 0;
}
static u32 kaweth_get_link(struct net_device *dev)
{
struct kaweth_device *kaweth = netdev_priv(dev);
return kaweth->linkstate;
}
static const struct ethtool_ops ops = {
.get_link = kaweth_get_link
};
/****************************************************************
* kaweth_usb_transmit_complete
****************************************************************/
static void kaweth_usb_transmit_complete(struct urb *urb)
{
struct kaweth_device *kaweth = urb->context;
struct sk_buff *skb = kaweth->tx_skb;
int status = urb->status;
if (unlikely(status != 0))
if (status != -ENOENT)
dbg("%s: TX status %d.", kaweth->net->name, status);
netif_wake_queue(kaweth->net);
dev_kfree_skb_irq(skb);
}
/****************************************************************
* kaweth_start_xmit
****************************************************************/
static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
struct net_device *net)
{
struct kaweth_device *kaweth = netdev_priv(net);
__le16 *private_header;
int res;
spin_lock_irq(&kaweth->device_lock);
kaweth_async_set_rx_mode(kaweth);
netif_stop_queue(net);
if (IS_BLOCKED(kaweth->status)) {
goto skip;
}
/* We now decide whether we can put our special header into the sk_buff */
if (skb_cloned(skb) || skb_headroom(skb) < 2) {
/* no such luck - we make our own */
struct sk_buff *copied_skb;
copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
dev_kfree_skb_irq(skb);
skb = copied_skb;
if (!copied_skb) {
kaweth->stats.tx_errors++;
netif_start_queue(net);
spin_unlock_irq(&kaweth->device_lock);
return NETDEV_TX_OK;
}
}
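/* prepend the 2-byte little-endian frame length the adapter expects */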
private_header = (__le16 *)__skb_push(skb, 2);
*private_header = cpu_to_le16(skb->len-2);
kaweth->tx_skb = skb;
usb_fill_bulk_urb(kaweth->tx_urb,
kaweth->dev,
usb_sndbulkpipe(kaweth->dev, 2),
private_header,
skb->len,
kaweth_usb_transmit_complete,
kaweth);
kaweth->end = 0;
if((res = usb_submit_urb(kaweth->tx_urb, GFP_ATOMIC)))
{
dev_warn(&net->dev, "kaweth failed tx_urb %d\n", res);
skip:
kaweth->stats.tx_errors++;
netif_start_queue(net);
dev_kfree_skb_irq(skb);
}
else
{
kaweth->stats.tx_packets++;
kaweth->stats.tx_bytes += skb->len;
}
spin_unlock_irq(&kaweth->device_lock);
return NETDEV_TX_OK;
}
/****************************************************************
* kaweth_set_rx_mode
****************************************************************/
static void kaweth_set_rx_mode(struct net_device *net)
{
struct kaweth_device *kaweth = netdev_priv(net);
__u16 packet_filter_bitmap = KAWETH_PACKET_FILTER_DIRECTED |
KAWETH_PACKET_FILTER_BROADCAST |
KAWETH_PACKET_FILTER_MULTICAST;
dbg("Setting Rx mode to %d", packet_filter_bitmap);
netif_stop_queue(net);
if (net->flags & IFF_PROMISC) {
packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS;
}
else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
packet_filter_bitmap |= KAWETH_PACKET_FILTER_ALL_MULTICAST;
}
kaweth->packet_filter_bitmap = packet_filter_bitmap;
netif_wake_queue(net);
}
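/*
 * Illustrative note (not part of the original source): the function above
 * only records the desired filter bitmap. The vendor control request that
 * actually programs the device (KAWETH_COMMAND_SET_PACKET_FILTER) is sent
 * later by kaweth_async_set_rx_mode(), which is invoked from open and xmit
 * and skips the request while in interrupt context.
 */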
/****************************************************************
* kaweth_async_set_rx_mode
****************************************************************/
static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
{
int result;
__u16 packet_filter_bitmap = kaweth->packet_filter_bitmap;
kaweth->packet_filter_bitmap = 0;
if (packet_filter_bitmap == 0)
return;
if (in_interrupt())
return;
result = kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_SET_PACKET_FILTER,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
packet_filter_bitmap,
0,
(void *)&kaweth->scratch,
0,
KAWETH_CONTROL_TIMEOUT);
if(result < 0) {
err("Failed to set Rx mode: %d", result);
}
else {
dbg("Set Rx mode to %d", packet_filter_bitmap);
}
}
/****************************************************************
* kaweth_netdev_stats
****************************************************************/
static struct net_device_stats *kaweth_netdev_stats(struct net_device *dev)
{
struct kaweth_device *kaweth = netdev_priv(dev);
return &kaweth->stats;
}
/****************************************************************
* kaweth_tx_timeout
****************************************************************/
static void kaweth_tx_timeout(struct net_device *net)
{
struct kaweth_device *kaweth = netdev_priv(net);
dev_warn(&net->dev, "%s: Tx timed out. Resetting.\n", net->name);
kaweth->stats.tx_errors++;
net->trans_start = jiffies;
usb_unlink_urb(kaweth->tx_urb);
}
/****************************************************************
* kaweth_suspend
****************************************************************/
static int kaweth_suspend(struct usb_interface *intf, pm_message_t message)
{
struct kaweth_device *kaweth = usb_get_intfdata(intf);
unsigned long flags;
dbg("Suspending device");
spin_lock_irqsave(&kaweth->device_lock, flags);
kaweth->status |= KAWETH_STATUS_SUSPENDING;
spin_unlock_irqrestore(&kaweth->device_lock, flags);
kaweth_kill_urbs(kaweth);
return 0;
}
/****************************************************************
* kaweth_resume
****************************************************************/
static int kaweth_resume(struct usb_interface *intf)
{
struct kaweth_device *kaweth = usb_get_intfdata(intf);
unsigned long flags;
dbg("Resuming device");
spin_lock_irqsave(&kaweth->device_lock, flags);
kaweth->status &= ~KAWETH_STATUS_SUSPENDING;
spin_unlock_irqrestore(&kaweth->device_lock, flags);
if (!kaweth->opened)
return 0;
kaweth_resubmit_rx_urb(kaweth, GFP_NOIO);
kaweth_resubmit_int_urb(kaweth, GFP_NOIO);
return 0;
}
/****************************************************************
* kaweth_probe
****************************************************************/
static const struct net_device_ops kaweth_netdev_ops = {
.ndo_open = kaweth_open,
.ndo_stop = kaweth_close,
.ndo_start_xmit = kaweth_start_xmit,
.ndo_tx_timeout = kaweth_tx_timeout,
.ndo_set_multicast_list = kaweth_set_rx_mode,
.ndo_get_stats = kaweth_netdev_stats,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int kaweth_probe(
struct usb_interface *intf,
const struct usb_device_id *id /* from id_table */
)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct kaweth_device *kaweth;
struct net_device *netdev;
const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
int result = 0;
dbg("Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x",
dev->devnum,
le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct),
le16_to_cpu(dev->descriptor.bcdDevice));
dbg("Device at %p", dev);
dbg("Descriptor length: %x type: %x",
(int)dev->descriptor.bLength,
(int)dev->descriptor.bDescriptorType);
netdev = alloc_etherdev(sizeof(*kaweth));
if (!netdev)
return -ENOMEM;
kaweth = netdev_priv(netdev);
kaweth->dev = dev;
kaweth->net = netdev;
spin_lock_init(&kaweth->device_lock);
init_waitqueue_head(&kaweth->term_wait);
dbg("Resetting.");
kaweth_reset(kaweth);
/*
* If high byte of bcdDevice is nonzero, firmware is already
* downloaded. Don't try to do it again, or we'll hang the device.
*/
if (le16_to_cpu(dev->descriptor.bcdDevice) >> 8) {
dev_info(&intf->dev, "Firmware present in device.\n");
} else {
/* Download the firmware */
dev_info(&intf->dev, "Downloading firmware...\n");
kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
if ((result = kaweth_download_firmware(kaweth,
"kaweth/new_code.bin",
100,
2)) < 0) {
err("Error downloading firmware (%d)", result);
goto err_fw;
}
if ((result = kaweth_download_firmware(kaweth,
"kaweth/new_code_fix.bin",
100,
3)) < 0) {
err("Error downloading firmware fix (%d)", result);
goto err_fw;
}
if ((result = kaweth_download_firmware(kaweth,
"kaweth/trigger_code.bin",
126,
2)) < 0) {
err("Error downloading trigger code (%d)", result);
goto err_fw;
}
if ((result = kaweth_download_firmware(kaweth,
"kaweth/trigger_code_fix.bin",
126,
3)) < 0) {
err("Error downloading trigger code fix (%d)", result);
goto err_fw;
}
if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) {
err("Error triggering firmware (%d)", result);
goto err_fw;
}
/* Device will now disappear for a moment... */
dev_info(&intf->dev, "Firmware loaded. I'll be back...\n");
err_fw:
free_page((unsigned long)kaweth->firmware_buf);
free_netdev(netdev);
return -EIO;
}
result = kaweth_read_configuration(kaweth);
if(result < 0) {
err("Error reading configuration (%d), no net device created", result);
goto err_free_netdev;
}
dev_info(&intf->dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask);
dev_info(&intf->dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
dev_info(&intf->dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size));
dev_info(&intf->dev, "Read MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
(int)kaweth->configuration.hw_addr[0],
(int)kaweth->configuration.hw_addr[1],
(int)kaweth->configuration.hw_addr[2],
(int)kaweth->configuration.hw_addr[3],
(int)kaweth->configuration.hw_addr[4],
(int)kaweth->configuration.hw_addr[5]);
if(!memcmp(&kaweth->configuration.hw_addr,
&bcast_addr,
sizeof(bcast_addr))) {
err("Firmware not functioning properly, no net device created");
goto err_free_netdev;
}
if(kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) {
dbg("Error setting URB size");
goto err_free_netdev;
}
if(kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) {
err("Error setting SOFS wait");
goto err_free_netdev;
}
result = kaweth_set_receive_filter(kaweth,
KAWETH_PACKET_FILTER_DIRECTED |
KAWETH_PACKET_FILTER_BROADCAST |
KAWETH_PACKET_FILTER_MULTICAST);
if(result < 0) {
err("Error setting receive filter");
goto err_free_netdev;
}
dbg("Initializing net device.");
kaweth->intf = intf;
kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kaweth->tx_urb)
goto err_free_netdev;
kaweth->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kaweth->rx_urb)
goto err_only_tx;
kaweth->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kaweth->irq_urb)
goto err_tx_and_rx;
kaweth->intbuffer = usb_alloc_coherent( kaweth->dev,
INTBUFFERSIZE,
GFP_KERNEL,
&kaweth->intbufferhandle);
if (!kaweth->intbuffer)
goto err_tx_and_rx_and_irq;
kaweth->rx_buf = usb_alloc_coherent( kaweth->dev,
KAWETH_BUF_SIZE,
GFP_KERNEL,
&kaweth->rxbufferhandle);
if (!kaweth->rx_buf)
goto err_all_but_rxbuf;
memcpy(netdev->broadcast, &bcast_addr, sizeof(bcast_addr));
memcpy(netdev->dev_addr, &kaweth->configuration.hw_addr,
sizeof(kaweth->configuration.hw_addr));
netdev->netdev_ops = &kaweth_netdev_ops;
netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size);
SET_ETHTOOL_OPS(netdev, &ops);
/* kaweth is zeroed as part of alloc_netdev */
INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
usb_set_intfdata(intf, kaweth);
#if 0
// dma_supported() is deeply broken on almost all architectures
if (dma_supported (&intf->dev, 0xffffffffffffffffULL))
kaweth->net->features |= NETIF_F_HIGHDMA;
#endif
SET_NETDEV_DEV(netdev, &intf->dev);
if (register_netdev(netdev) != 0) {
err("Error registering netdev.");
goto err_intfdata;
}
dev_info(&intf->dev, "kaweth interface created at %s\n",
kaweth->net->name);
dbg("Kaweth probe returning.");
return 0;
err_intfdata:
usb_set_intfdata(intf, NULL);
usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
err_all_but_rxbuf:
usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
err_tx_and_rx_and_irq:
usb_free_urb(kaweth->irq_urb);
err_tx_and_rx:
usb_free_urb(kaweth->rx_urb);
err_only_tx:
usb_free_urb(kaweth->tx_urb);
err_free_netdev:
free_netdev(netdev);
return -EIO;
}
/****************************************************************
* kaweth_disconnect
****************************************************************/
static void kaweth_disconnect(struct usb_interface *intf)
{
struct kaweth_device *kaweth = usb_get_intfdata(intf);
struct net_device *netdev;
dev_info(&intf->dev, "Unregistering\n");
usb_set_intfdata(intf, NULL);
if (!kaweth) {
dev_warn(&intf->dev, "unregistering non-existent device\n");
return;
}
netdev = kaweth->net;
dbg("Unregistering net device");
unregister_netdev(netdev);
usb_free_urb(kaweth->rx_urb);
usb_free_urb(kaweth->tx_urb);
usb_free_urb(kaweth->irq_urb);
usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
free_netdev(netdev);
}
// FIXME this completion stuff is a modified clone of
// an OLD version of some stuff in usb.c ...
struct usb_api_data {
wait_queue_head_t wqh;
int done;
};
/*-------------------------------------------------------------------*
* completion handler for compatibility wrappers (sync control/bulk) *
*-------------------------------------------------------------------*/
static void usb_api_blocking_completion(struct urb *urb)
{
struct usb_api_data *awd = (struct usb_api_data *)urb->context;
awd->done=1;
wake_up(&awd->wqh);
}
/*-------------------------------------------------------------------*
* COMPATIBILITY STUFF *
*-------------------------------------------------------------------*/
// Starts urb and waits for completion or timeout
static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
{
struct usb_api_data awd;
int status;
init_waitqueue_head(&awd.wqh);
awd.done = 0;
urb->context = &awd;
status = usb_submit_urb(urb, GFP_NOIO);
if (status) {
// something went wrong
usb_free_urb(urb);
return status;
}
if (!wait_event_timeout(awd.wqh, awd.done, timeout)) {
// timeout
dev_warn(&urb->dev->dev, "usb_control/bulk_msg: timeout\n");
usb_kill_urb(urb); // remove urb safely
status = -ETIMEDOUT;
}
else {
status = urb->status;
}
if (actual_length) {
*actual_length = urb->actual_length;
}
usb_free_urb(urb);
return status;
}
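/*
 * Illustrative note (not part of the original source): the URB is consumed
 * on every path above -- freed immediately when submission fails, and freed
 * after completion (or after usb_kill_urb() on timeout) otherwise -- so the
 * caller below never has to release it.
 */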
/*-------------------------------------------------------------------*/
// returns status (negative) or length (positive)
static int kaweth_internal_control_msg(struct usb_device *usb_dev,
unsigned int pipe,
struct usb_ctrlrequest *cmd, void *data,
int len, int timeout)
{
struct urb *urb;
int retv;
int length = 0; /* shut up GCC */
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
return -ENOMEM;
usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char*)cmd, data,
len, usb_api_blocking_completion, NULL);
retv = usb_start_wait_urb(urb, timeout, &length);
if (retv < 0) {
return retv;
}
else {
return length;
}
}
/****************************************************************
* kaweth_init
****************************************************************/
static int __init kaweth_init(void)
{
dbg("Driver loading");
return usb_register(&kaweth_driver);
}
/****************************************************************
* kaweth_exit
****************************************************************/
static void __exit kaweth_exit(void)
{
usb_deregister(&kaweth_driver);
}
module_init(kaweth_init);
module_exit(kaweth_exit);
| gpl-2.0 |
lucaspcamargo/litmus-rt | drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c | 1265 | 6706 | /*
* Copyright (c) 2014, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/list_sort.h>
#include <linux/interval_tree_generic.h>
#include "usnic_uiom_interval_tree.h"
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
#define MAKE_NODE(node, start, end, ref_cnt, flags, err, err_out) \
do { \
node = usnic_uiom_interval_node_alloc(start, \
end, ref_cnt, flags); \
if (!node) { \
err = -ENOMEM; \
goto err_out; \
} \
} while (0)
#define MARK_FOR_ADD(node, list) (list_add_tail(&node->link, list))
#define MAKE_NODE_AND_APPEND(node, start, end, ref_cnt, flags, err, \
err_out, list) \
do { \
MAKE_NODE(node, start, end, \
ref_cnt, flags, err, \
err_out); \
MARK_FOR_ADD(node, list); \
} while (0)
#define FLAGS_EQUAL(flags1, flags2, mask) \
(((flags1) & (mask)) == ((flags2) & (mask)))
static struct usnic_uiom_interval_node*
usnic_uiom_interval_node_alloc(long int start, long int last, int ref_cnt,
int flags)
{
struct usnic_uiom_interval_node *interval = kzalloc(sizeof(*interval),
GFP_ATOMIC);
if (!interval)
return NULL;
interval->start = start;
interval->last = last;
interval->flags = flags;
interval->ref_cnt = ref_cnt;
return interval;
}
static int interval_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct usnic_uiom_interval_node *node_a, *node_b;
node_a = list_entry(a, struct usnic_uiom_interval_node, link);
node_b = list_entry(b, struct usnic_uiom_interval_node, link);
/* compare explicitly instead of subtracting: a long difference would be truncated to the int return value */
if (node_a->start < node_b->start)
return -1;
else if (node_a->start > node_b->start)
return 1;
return 0;
}
static void
find_intervals_intersection_sorted(struct rb_root *root, unsigned long start,
unsigned long last,
struct list_head *list)
{
struct usnic_uiom_interval_node *node;
INIT_LIST_HEAD(list);
for (node = usnic_uiom_interval_tree_iter_first(root, start, last);
node;
node = usnic_uiom_interval_tree_iter_next(node, start, last))
list_add_tail(&node->link, list);
list_sort(NULL, list, interval_cmp);
}
int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last,
int flags, int flag_mask,
struct rb_root *root,
struct list_head *diff_set)
{
struct usnic_uiom_interval_node *interval, *tmp;
int err = 0;
long int pivot = start;
LIST_HEAD(intersection_set);
INIT_LIST_HEAD(diff_set);
find_intervals_intersection_sorted(root, start, last,
&intersection_set);
list_for_each_entry(interval, &intersection_set, link) {
if (pivot < interval->start) {
MAKE_NODE_AND_APPEND(tmp, pivot, interval->start - 1,
1, flags, err, err_out,
diff_set);
pivot = interval->start;
}
/*
* Invariant: Set [start, pivot] is either in diff_set or root,
* but not in both.
*/
if (pivot > interval->last) {
continue;
} else if (pivot <= interval->last &&
FLAGS_EQUAL(interval->flags, flags,
flag_mask)) {
pivot = interval->last + 1;
}
}
if (pivot <= last)
MAKE_NODE_AND_APPEND(tmp, pivot, last, 1, flags, err, err_out,
diff_set);
return 0;
err_out:
list_for_each_entry_safe(interval, tmp, diff_set, link) {
list_del(&interval->link);
kfree(interval);
}
return err;
}
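/*
 * Worked example (illustrative, not part of the original source): if the
 * tree already holds [5, 10] with matching flags and the caller asks for
 * the diff of [0, 15], diff_set receives the two nodes [0, 4] and [11, 15],
 * i.e. exactly the sub-ranges that are not yet covered.
 */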
void usnic_uiom_put_interval_set(struct list_head *intervals)
{
struct usnic_uiom_interval_node *interval, *tmp;
list_for_each_entry_safe(interval, tmp, intervals, link)
kfree(interval);
}
int usnic_uiom_insert_interval(struct rb_root *root, unsigned long start,
unsigned long last, int flags)
{
struct usnic_uiom_interval_node *interval, *tmp;
unsigned long istart, ilast;
int iref_cnt, iflags;
unsigned long lpivot = start;
int err = 0;
LIST_HEAD(to_add);
LIST_HEAD(intersection_set);
find_intervals_intersection_sorted(root, start, last,
&intersection_set);
list_for_each_entry(interval, &intersection_set, link) {
/*
* Invariant - lpivot is the left edge of next interval to be
* inserted
*/
istart = interval->start;
ilast = interval->last;
iref_cnt = interval->ref_cnt;
iflags = interval->flags;
if (istart < lpivot) {
MAKE_NODE_AND_APPEND(tmp, istart, lpivot - 1, iref_cnt,
iflags, err, err_out, &to_add);
} else if (istart > lpivot) {
MAKE_NODE_AND_APPEND(tmp, lpivot, istart - 1, 1, flags,
err, err_out, &to_add);
lpivot = istart;
} else {
lpivot = istart;
}
if (ilast > last) {
MAKE_NODE_AND_APPEND(tmp, lpivot, last, iref_cnt + 1,
iflags | flags, err, err_out,
&to_add);
MAKE_NODE_AND_APPEND(tmp, last + 1, ilast, iref_cnt,
iflags, err, err_out, &to_add);
} else {
MAKE_NODE_AND_APPEND(tmp, lpivot, ilast, iref_cnt + 1,
iflags | flags, err, err_out,
&to_add);
}
lpivot = ilast + 1;
}
if (lpivot <= last)
MAKE_NODE_AND_APPEND(tmp, lpivot, last, 1, flags, err, err_out,
&to_add);
list_for_each_entry_safe(interval, tmp, &intersection_set, link) {
usnic_uiom_interval_tree_remove(interval, root);
kfree(interval);
}
list_for_each_entry(interval, &to_add, link)
usnic_uiom_interval_tree_insert(interval, root);
return 0;
err_out:
list_for_each_entry_safe(interval, tmp, &to_add, link)
kfree(interval);
return err;
}
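/*
 * Worked example (illustrative, not part of the original source): inserting
 * [0, 15] with flags F into a tree holding [5, 10] (ref_cnt 1, flags G)
 * replaces that node with three nodes: [0, 4] (ref_cnt 1, F),
 * [5, 10] (ref_cnt 2, F | G) and [11, 15] (ref_cnt 1, F).
 */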
void usnic_uiom_remove_interval(struct rb_root *root, unsigned long start,
unsigned long last, struct list_head *removed)
{
struct usnic_uiom_interval_node *interval;
for (interval = usnic_uiom_interval_tree_iter_first(root, start, last);
interval;
interval = usnic_uiom_interval_tree_iter_next(interval,
start,
last)) {
if (--interval->ref_cnt == 0)
list_add_tail(&interval->link, removed);
}
list_for_each_entry(interval, removed, link)
usnic_uiom_interval_tree_remove(interval, root);
}
INTERVAL_TREE_DEFINE(struct usnic_uiom_interval_node, rb,
unsigned long, __subtree_last,
START, LAST, , usnic_uiom_interval_tree)
| gpl-2.0 |
alpscale/linux | drivers/usb/host/ehci-mem.c | 1265 | 6608 | /*
* Copyright (c) 2001 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
/*-------------------------------------------------------------------------*/
/*
* There are basically three types of memory:
* - data used only by the HCD ... kmalloc is fine
* - async and periodic schedules, shared by HC and HCD ... these
* need to use dma_pool or dma_alloc_coherent
* - driver buffers, read/written by HC ... single shot DMA mapped
*
* There's also "register" data (e.g. PCI or SOC), which is memory mapped.
* No memory seen by this driver is pageable.
*/
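/*
 * Rough sketch of the dma_pool pattern used below for the shared schedule
 * objects (illustrative, not part of the original source):
 *
 * pool = dma_pool_create("name", dev, size, align, boundary);
 * obj = dma_pool_alloc(pool, mem_flags, &dma_handle);
 * ... hand dma_handle to the controller ...
 * dma_pool_free(pool, obj, dma_handle);
 * dma_pool_destroy(pool);
 */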
/*-------------------------------------------------------------------------*/
/* Allocate the key transfer structures from the previously allocated pool */
static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
dma_addr_t dma)
{
memset (qtd, 0, sizeof *qtd);
qtd->qtd_dma = dma;
qtd->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
qtd->hw_next = EHCI_LIST_END(ehci);
qtd->hw_alt_next = EHCI_LIST_END(ehci);
INIT_LIST_HEAD (&qtd->qtd_list);
}
static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags)
{
struct ehci_qtd *qtd;
dma_addr_t dma;
qtd = dma_pool_alloc (ehci->qtd_pool, flags, &dma);
if (qtd != NULL) {
ehci_qtd_init(ehci, qtd, dma);
}
return qtd;
}
static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
dma_pool_free (ehci->qtd_pool, qtd, qtd->qtd_dma);
}
static void qh_destroy(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
/* clean qtds first, and know this is not linked */
if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
ehci_dbg (ehci, "unused qh not empty!\n");
BUG ();
}
if (qh->dummy)
ehci_qtd_free (ehci, qh->dummy);
dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
kfree(qh);
}
static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
{
struct ehci_qh *qh;
dma_addr_t dma;
qh = kzalloc(sizeof *qh, GFP_ATOMIC);
if (!qh)
goto done;
qh->hw = (struct ehci_qh_hw *)
dma_pool_alloc(ehci->qh_pool, flags, &dma);
if (!qh->hw)
goto fail;
memset(qh->hw, 0, sizeof *qh->hw);
qh->qh_dma = dma;
// INIT_LIST_HEAD (&qh->qh_list);
INIT_LIST_HEAD (&qh->qtd_list);
INIT_LIST_HEAD(&qh->unlink_node);
/* dummy td enables safe urb queuing */
qh->dummy = ehci_qtd_alloc (ehci, flags);
if (qh->dummy == NULL) {
ehci_dbg (ehci, "no dummy td\n");
goto fail1;
}
done:
return qh;
fail1:
dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
fail:
kfree(qh);
return NULL;
}
/*-------------------------------------------------------------------------*/
/* The queue heads and transfer descriptors are managed from pools tied
* to each of the "per device" structures.
* This is the initialisation and cleanup code.
*/
static void ehci_mem_cleanup (struct ehci_hcd *ehci)
{
if (ehci->async)
qh_destroy(ehci, ehci->async);
ehci->async = NULL;
if (ehci->dummy)
qh_destroy(ehci, ehci->dummy);
ehci->dummy = NULL;
/* DMA consistent memory and pools */
if (ehci->qtd_pool)
dma_pool_destroy (ehci->qtd_pool);
ehci->qtd_pool = NULL;
if (ehci->qh_pool) {
dma_pool_destroy (ehci->qh_pool);
ehci->qh_pool = NULL;
}
if (ehci->itd_pool)
dma_pool_destroy (ehci->itd_pool);
ehci->itd_pool = NULL;
if (ehci->sitd_pool)
dma_pool_destroy (ehci->sitd_pool);
ehci->sitd_pool = NULL;
if (ehci->periodic)
dma_free_coherent (ehci_to_hcd(ehci)->self.controller,
ehci->periodic_size * sizeof (u32),
ehci->periodic, ehci->periodic_dma);
ehci->periodic = NULL;
/* shadow periodic table */
kfree(ehci->pshadow);
ehci->pshadow = NULL;
}
/* remember to add cleanup code (above) if you add anything here */
static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
{
int i;
/* QTDs for control/bulk/intr transfers */
ehci->qtd_pool = dma_pool_create ("ehci_qtd",
ehci_to_hcd(ehci)->self.controller,
sizeof (struct ehci_qtd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->qtd_pool) {
goto fail;
}
/* QHs for control/bulk/intr transfers */
ehci->qh_pool = dma_pool_create ("ehci_qh",
ehci_to_hcd(ehci)->self.controller,
sizeof(struct ehci_qh_hw),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->qh_pool) {
goto fail;
}
ehci->async = ehci_qh_alloc (ehci, flags);
if (!ehci->async) {
goto fail;
}
/* ITD for high speed ISO transfers */
ehci->itd_pool = dma_pool_create ("ehci_itd",
ehci_to_hcd(ehci)->self.controller,
sizeof (struct ehci_itd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->itd_pool) {
goto fail;
}
/* SITD for full/low speed split ISO transfers */
ehci->sitd_pool = dma_pool_create ("ehci_sitd",
ehci_to_hcd(ehci)->self.controller,
sizeof (struct ehci_sitd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->sitd_pool) {
goto fail;
}
/* Hardware periodic table */
ehci->periodic = (__le32 *)
dma_alloc_coherent (ehci_to_hcd(ehci)->self.controller,
ehci->periodic_size * sizeof(__le32),
&ehci->periodic_dma, flags);
if (ehci->periodic == NULL) {
goto fail;
}
if (ehci->use_dummy_qh) {
struct ehci_qh_hw *hw;
ehci->dummy = ehci_qh_alloc(ehci, flags);
if (!ehci->dummy)
goto fail;
hw = ehci->dummy->hw;
hw->hw_next = EHCI_LIST_END(ehci);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
hw->hw_alt_next = EHCI_LIST_END(ehci);
ehci->dummy->hw = hw;
for (i = 0; i < ehci->periodic_size; i++)
ehci->periodic[i] = cpu_to_hc32(ehci,
ehci->dummy->qh_dma);
} else {
for (i = 0; i < ehci->periodic_size; i++)
ehci->periodic[i] = EHCI_LIST_END(ehci);
}
/* software shadow of hardware table */
ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
if (ehci->pshadow != NULL)
return 0;
fail:
ehci_dbg (ehci, "couldn't init memory\n");
ehci_mem_cleanup (ehci);
return -ENOMEM;
}
| gpl-2.0 |
nanata1115/linux | drivers/s390/char/tty3270.c | 1265 | 45899 | /*
* IBM/3270 Driver - tty functions.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
* -- Copyright IBM Corp. 2003
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
#include <asm/uaccess.h>
#include "raw3270.h"
#include "tty3270.h"
#include "keyboard.h"
#define TTY3270_CHAR_BUF_SIZE 256
#define TTY3270_OUTPUT_BUFFER_SIZE 1024
#define TTY3270_STRING_PAGES 5
struct tty_driver *tty3270_driver;
static int tty3270_max_index;
static struct raw3270_fn tty3270_fn;
struct tty3270_cell {
unsigned char character;
unsigned char highlight;
unsigned char f_color;
};
struct tty3270_line {
struct tty3270_cell *cells;
int len;
};
#define ESCAPE_NPAR 8
/*
* The main tty view data structure.
* FIXME:
* 1) describe line orientation & lines list concept against screen
* 2) describe conversion of screen to lines
* 3) describe line format.
*/
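/*
 * Rough summary, inferred from the code below (illustrative): "screen"
 * holds one tty3270_line of tty3270_cells (character, highlight,
 * foreground color) per output row; "lines" holds the same rows -- plus
 * scrollback history -- as ready-made 3270 datastream fragments;
 * tty3270_convert_line() regenerates a row's fragment from its cells, and
 * tty3270_update() sends the fragments queued on "update" to the device.
 */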
struct tty3270 {
struct raw3270_view view;
struct tty_port port;
void **freemem_pages; /* Array of pages used for freemem. */
struct list_head freemem; /* List of free memory for strings. */
/* Output stuff. */
struct list_head lines; /* List of lines. */
struct list_head update; /* List of lines to update. */
unsigned char wcc; /* Write control character. */
int nr_lines; /* # lines in list. */
int nr_up; /* # lines up in history. */
unsigned long update_flags; /* Update indication bits. */
struct string *status; /* Lower right of display. */
struct raw3270_request *write; /* Single write request. */
struct timer_list timer; /* Output delay timer. */
/* Current tty screen. */
unsigned int cx, cy; /* Current output position. */
unsigned int highlight; /* Blink/reverse/underscore */
unsigned int f_color; /* Foreground color */
struct tty3270_line *screen;
unsigned int n_model, n_cols, n_rows; /* New model & size */
struct work_struct resize_work;
/* Input stuff. */
struct string *prompt; /* Output string for input area. */
struct string *input; /* Input string for read request. */
struct raw3270_request *read; /* Single read request. */
struct raw3270_request *kreset; /* Single keyboard reset request. */
unsigned char inattr; /* Visible/invisible input. */
int throttle, attn; /* tty throttle/unthrottle. */
struct tasklet_struct readlet; /* Tasklet to issue read request. */
struct kbd_data *kbd; /* key_maps stuff. */
/* Escape sequence parsing. */
int esc_state, esc_ques, esc_npar;
int esc_par[ESCAPE_NPAR];
unsigned int saved_cx, saved_cy;
unsigned int saved_highlight, saved_f_color;
/* Command recalling. */
struct list_head rcl_lines; /* List of recallable lines. */
struct list_head *rcl_walk; /* Point in rcl_lines list. */
int rcl_nr, rcl_max; /* Number/max number of rcl_lines. */
/* Character array for put_char/flush_chars. */
unsigned int char_count;
char char_buf[TTY3270_CHAR_BUF_SIZE];
};
/* tty3270->update_flags. See tty3270_update for details. */
#define TTY_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */
#define TTY_UPDATE_LIST 2 /* Update lines in tty3270->update. */
#define TTY_UPDATE_INPUT 4 /* Update input line. */
#define TTY_UPDATE_STATUS 8 /* Update status line. */
#define TTY_UPDATE_ALL 16 /* Recreate screen. */
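/*
 * Illustrative note (not part of the original source): output is coalesced.
 * Code paths set update_flags and arm a short timer via tty3270_set_timer();
 * the timer callback tty3270_update() then builds a single WRITE (or, when
 * TTY_UPDATE_ERASE is set, EWRITEA) channel program from the status line,
 * the input prompt and the queued output lines.
 */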
static void tty3270_update(struct tty3270 *);
static void tty3270_resize_work(struct work_struct *work);
/*
* Setup timeout for a device. On timeout trigger an update.
*/
static void tty3270_set_timer(struct tty3270 *tp, int expires)
{
mod_timer(&tp->timer, jiffies + expires);
}
/*
* The input area occupies the last two lines of the screen.
*/
static void
tty3270_update_prompt(struct tty3270 *tp, char *input, int count)
{
struct string *line;
unsigned int off;
line = tp->prompt;
if (count != 0)
line->string[5] = TF_INMDT;
else
line->string[5] = tp->inattr;
if (count > tp->view.cols * 2 - 11)
count = tp->view.cols * 2 - 11;
memcpy(line->string + 6, input, count);
line->string[6 + count] = TO_IC;
/* Clear to end of input line. */
if (count < tp->view.cols * 2 - 11) {
line->string[7 + count] = TO_RA;
line->string[10 + count] = 0;
off = tp->view.cols * tp->view.rows - 9;
raw3270_buffer_address(tp->view.dev, line->string+count+8, off);
line->len = 11 + count;
} else
line->len = 7 + count;
tp->update_flags |= TTY_UPDATE_INPUT;
}
static void
tty3270_create_prompt(struct tty3270 *tp)
{
static const unsigned char blueprint[] =
{ TO_SBA, 0, 0, 0x6e, TO_SF, TF_INPUT,
/* empty input string */
TO_IC, TO_RA, 0, 0, 0 };
struct string *line;
unsigned int offset;
line = alloc_string(&tp->freemem,
sizeof(blueprint) + tp->view.cols * 2 - 9);
tp->prompt = line;
tp->inattr = TF_INPUT;
/* Copy blueprint to status line */
memcpy(line->string, blueprint, sizeof(blueprint));
line->len = sizeof(blueprint);
/* Set output offsets. */
offset = tp->view.cols * (tp->view.rows - 2);
raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
offset = tp->view.cols * tp->view.rows - 9;
raw3270_buffer_address(tp->view.dev, line->string + 8, offset);
/* Allocate input string for reading. */
tp->input = alloc_string(&tp->freemem, tp->view.cols * 2 - 9 + 6);
}
/*
* The status line is the last line of the screen. It shows the string
* "Running"/"Holding" in the lower right corner of the screen.
*/
static void
tty3270_update_status(struct tty3270 * tp)
{
char *str;
str = (tp->nr_up != 0) ? "History" : "Running";
memcpy(tp->status->string + 8, str, 7);
codepage_convert(tp->view.ascebc, tp->status->string + 8, 7);
tp->update_flags |= TTY_UPDATE_STATUS;
}
static void
tty3270_create_status(struct tty3270 * tp)
{
static const unsigned char blueprint[] =
{ TO_SBA, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR, TAC_GREEN,
0, 0, 0, 0, 0, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR,
TAC_RESET };
struct string *line;
unsigned int offset;
line = alloc_string(&tp->freemem,sizeof(blueprint));
tp->status = line;
/* Copy blueprint to status line */
memcpy(line->string, blueprint, sizeof(blueprint));
/* Set address to start of status string (= last 9 characters). */
offset = tp->view.cols * tp->view.rows - 9;
raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
}
/*
* Set output offsets to 3270 datastream fragment of a tty string.
* (TO_SBA offset at the start and TO_RA offset at the end of the string)
*/
static void
tty3270_update_string(struct tty3270 *tp, struct string *line, int nr)
{
unsigned char *cp;
raw3270_buffer_address(tp->view.dev, line->string + 1,
tp->view.cols * nr);
cp = line->string + line->len - 4;
if (*cp == TO_RA)
raw3270_buffer_address(tp->view.dev, cp + 1,
tp->view.cols * (nr + 1));
}
/*
* Rebuild update list to print all lines.
*/
static void
tty3270_rebuild_update(struct tty3270 *tp)
{
struct string *s, *n;
int line, nr_up;
/*
* Throw away update list and create a new one,
* containing all lines that will fit on the screen.
*/
list_for_each_entry_safe(s, n, &tp->update, update)
list_del_init(&s->update);
line = tp->view.rows - 3;
nr_up = tp->nr_up;
list_for_each_entry_reverse(s, &tp->lines, list) {
if (nr_up > 0) {
nr_up--;
continue;
}
tty3270_update_string(tp, s, line);
list_add(&s->update, &tp->update);
if (--line < 0)
break;
}
tp->update_flags |= TTY_UPDATE_LIST;
}
/*
* Alloc string for size bytes. If there is not enough room in
* freemem, free strings until there is room.
*/
static struct string *
tty3270_alloc_string(struct tty3270 *tp, size_t size)
{
struct string *s, *n;
s = alloc_string(&tp->freemem, size);
if (s)
return s;
list_for_each_entry_safe(s, n, &tp->lines, list) {
BUG_ON(tp->nr_lines <= tp->view.rows - 2);
list_del(&s->list);
if (!list_empty(&s->update))
list_del(&s->update);
tp->nr_lines--;
if (free_string(&tp->freemem, s) >= size)
break;
}
s = alloc_string(&tp->freemem, size);
BUG_ON(!s);
if (tp->nr_up != 0 &&
tp->nr_up + tp->view.rows - 2 >= tp->nr_lines) {
tp->nr_up = tp->nr_lines - tp->view.rows + 2;
tty3270_rebuild_update(tp);
tty3270_update_status(tp);
}
return s;
}
/*
* Add an empty line to the list.
*/
static void
tty3270_blank_line(struct tty3270 *tp)
{
static const unsigned char blueprint[] =
{ TO_SBA, 0, 0, TO_SA, TAT_EXTHI, TAX_RESET,
TO_SA, TAT_COLOR, TAC_RESET, TO_RA, 0, 0, 0 };
struct string *s;
s = tty3270_alloc_string(tp, sizeof(blueprint));
memcpy(s->string, blueprint, sizeof(blueprint));
s->len = sizeof(blueprint);
list_add_tail(&s->list, &tp->lines);
tp->nr_lines++;
if (tp->nr_up != 0)
tp->nr_up++;
}
/*
* Write request completion callback.
*/
static void
tty3270_write_callback(struct raw3270_request *rq, void *data)
{
struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
if (rq->rc != 0) {
/* Write wasn't successful. Refresh all. */
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
}
raw3270_request_reset(rq);
xchg(&tp->write, rq);
}
/*
* Update 3270 display.
*/
static void
tty3270_update(struct tty3270 *tp)
{
static char invalid_sba[2] = { 0xff, 0xff };
struct raw3270_request *wrq;
unsigned long updated;
struct string *s, *n;
char *sba, *str;
int rc, len;
wrq = xchg(&tp->write, 0);
if (!wrq) {
tty3270_set_timer(tp, 1);
return;
}
spin_lock(&tp->view.lock);
updated = 0;
if (tp->update_flags & TTY_UPDATE_ALL) {
tty3270_rebuild_update(tp);
tty3270_update_status(tp);
tp->update_flags = TTY_UPDATE_ERASE | TTY_UPDATE_LIST |
TTY_UPDATE_INPUT | TTY_UPDATE_STATUS;
}
if (tp->update_flags & TTY_UPDATE_ERASE) {
/* Use erase write alternate to erase display. */
raw3270_request_set_cmd(wrq, TC_EWRITEA);
updated |= TTY_UPDATE_ERASE;
} else
raw3270_request_set_cmd(wrq, TC_WRITE);
raw3270_request_add_data(wrq, &tp->wcc, 1);
tp->wcc = TW_NONE;
/*
* Update status line.
*/
if (tp->update_flags & TTY_UPDATE_STATUS)
if (raw3270_request_add_data(wrq, tp->status->string,
tp->status->len) == 0)
updated |= TTY_UPDATE_STATUS;
/*
* Write input line.
*/
if (tp->update_flags & TTY_UPDATE_INPUT)
if (raw3270_request_add_data(wrq, tp->prompt->string,
tp->prompt->len) == 0)
updated |= TTY_UPDATE_INPUT;
sba = invalid_sba;
if (tp->update_flags & TTY_UPDATE_LIST) {
/* Write strings in the update list to the screen. */
list_for_each_entry_safe(s, n, &tp->update, update) {
str = s->string;
len = s->len;
/*
* Skip TO_SBA at the start of the string if the
* last output position matches the start address
* of this line.
*/
if (s->string[1] == sba[0] && s->string[2] == sba[1])
str += 3, len -= 3;
if (raw3270_request_add_data(wrq, str, len) != 0)
break;
list_del_init(&s->update);
sba = s->string + s->len - 3;
}
if (list_empty(&tp->update))
updated |= TTY_UPDATE_LIST;
}
wrq->callback = tty3270_write_callback;
rc = raw3270_start(&tp->view, wrq);
if (rc == 0) {
tp->update_flags &= ~updated;
if (tp->update_flags)
tty3270_set_timer(tp, 1);
} else {
raw3270_request_reset(wrq);
xchg(&tp->write, wrq);
}
spin_unlock(&tp->view.lock);
}
/*
* Command recalling.
*/
static void
tty3270_rcl_add(struct tty3270 *tp, char *input, int len)
{
struct string *s;
tp->rcl_walk = NULL;
if (len <= 0)
return;
if (tp->rcl_nr >= tp->rcl_max) {
s = list_entry(tp->rcl_lines.next, struct string, list);
list_del(&s->list);
free_string(&tp->freemem, s);
tp->rcl_nr--;
}
s = tty3270_alloc_string(tp, len);
memcpy(s->string, input, len);
list_add_tail(&s->list, &tp->rcl_lines);
tp->rcl_nr++;
}
static void
tty3270_rcl_backward(struct kbd_data *kbd)
{
struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
struct string *s;
spin_lock_bh(&tp->view.lock);
if (tp->inattr == TF_INPUT) {
if (tp->rcl_walk && tp->rcl_walk->prev != &tp->rcl_lines)
tp->rcl_walk = tp->rcl_walk->prev;
else if (!list_empty(&tp->rcl_lines))
tp->rcl_walk = tp->rcl_lines.prev;
if (tp->rcl_walk) {
s = list_entry(tp->rcl_walk, struct string, list);
tty3270_update_prompt(tp, s->string, s->len);
} else
tty3270_update_prompt(tp, NULL, 0);
tty3270_set_timer(tp, 1);
}
spin_unlock_bh(&tp->view.lock);
}
/*
* Deactivate tty view.
*/
static void
tty3270_exit_tty(struct kbd_data *kbd)
{
struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
raw3270_deactivate_view(&tp->view);
}
/*
* Scroll forward in history.
*/
static void
tty3270_scroll_forward(struct kbd_data *kbd)
{
struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
int nr_up;
spin_lock_bh(&tp->view.lock);
nr_up = tp->nr_up - tp->view.rows + 2;
if (nr_up < 0)
nr_up = 0;
if (nr_up != tp->nr_up) {
tp->nr_up = nr_up;
tty3270_rebuild_update(tp);
tty3270_update_status(tp);
tty3270_set_timer(tp, 1);
}
spin_unlock_bh(&tp->view.lock);
}
/*
* Scroll backward in history.
*/
static void
tty3270_scroll_backward(struct kbd_data *kbd)
{
struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
int nr_up;
spin_lock_bh(&tp->view.lock);
nr_up = tp->nr_up + tp->view.rows - 2;
if (nr_up + tp->view.rows - 2 > tp->nr_lines)
nr_up = tp->nr_lines - tp->view.rows + 2;
if (nr_up != tp->nr_up) {
tp->nr_up = nr_up;
tty3270_rebuild_update(tp);
tty3270_update_status(tp);
tty3270_set_timer(tp, 1);
}
spin_unlock_bh(&tp->view.lock);
}
/*
* Pass input line to tty.
*/
static void
tty3270_read_tasklet(struct raw3270_request *rrq)
{
static char kreset_data = TW_KR;
struct tty3270 *tp = container_of(rrq->view, struct tty3270, view);
char *input;
int len;
spin_lock_bh(&tp->view.lock);
/*
* Two AID keys are special: For 0x7d (enter) the input line
* has to be emitted to the tty and for 0x6d the screen
* needs to be redrawn.
*/
input = NULL;
len = 0;
if (tp->input->string[0] == 0x7d) {
/* Enter: write input to tty. */
input = tp->input->string + 6;
len = tp->input->len - 6 - rrq->rescnt;
if (tp->inattr != TF_INPUTN)
tty3270_rcl_add(tp, input, len);
if (tp->nr_up > 0) {
tp->nr_up = 0;
tty3270_rebuild_update(tp);
tty3270_update_status(tp);
}
/* Clear input area. */
tty3270_update_prompt(tp, NULL, 0);
tty3270_set_timer(tp, 1);
} else if (tp->input->string[0] == 0x6d) {
/* Display has been cleared. Redraw. */
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
}
spin_unlock_bh(&tp->view.lock);
/* Start keyboard reset command. */
raw3270_request_reset(tp->kreset);
raw3270_request_set_cmd(tp->kreset, TC_WRITE);
raw3270_request_add_data(tp->kreset, &kreset_data, 1);
raw3270_start(&tp->view, tp->kreset);
while (len-- > 0)
kbd_keycode(tp->kbd, *input++);
/* Emit keycode for AID byte. */
kbd_keycode(tp->kbd, 256 + tp->input->string[0]);
raw3270_request_reset(rrq);
xchg(&tp->read, rrq);
raw3270_put_view(&tp->view);
}
/*
* Read request completion callback.
*/
static void
tty3270_read_callback(struct raw3270_request *rq, void *data)
{
struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
raw3270_get_view(rq->view);
/* Schedule tasklet to pass input to tty. */
tasklet_schedule(&tp->readlet);
}
/*
* Issue a read request. Call with device lock.
*/
static void
tty3270_issue_read(struct tty3270 *tp, int lock)
{
struct raw3270_request *rrq;
int rc;
rrq = xchg(&tp->read, 0);
if (!rrq)
/* Read already scheduled. */
return;
rrq->callback = tty3270_read_callback;
rrq->callback_data = tp;
raw3270_request_set_cmd(rrq, TC_READMOD);
raw3270_request_set_data(rrq, tp->input->string, tp->input->len);
/* Issue the read modified request. */
if (lock) {
rc = raw3270_start(&tp->view, rrq);
} else
rc = raw3270_start_irq(&tp->view, rrq);
if (rc) {
raw3270_request_reset(rrq);
xchg(&tp->read, rrq);
}
}
/*
* Switch to the tty view.
*/
static int
tty3270_activate(struct raw3270_view *view)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
return 0;
}
static void
tty3270_deactivate(struct raw3270_view *view)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
del_timer(&tp->timer);
}
static int
tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
if (!tp->throttle)
tty3270_issue_read(tp, 0);
else
tp->attn = 1;
}
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
rq->rc = -EIO;
else
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
return RAW3270_IO_DONE;
}
/*
* Allocate tty3270 structure.
*/
static struct tty3270 *
tty3270_alloc_view(void)
{
struct tty3270 *tp;
int pages;
tp = kzalloc(sizeof(struct tty3270), GFP_KERNEL);
if (!tp)
goto out_err;
tp->freemem_pages =
kmalloc(sizeof(void *) * TTY3270_STRING_PAGES, GFP_KERNEL);
if (!tp->freemem_pages)
goto out_tp;
INIT_LIST_HEAD(&tp->freemem);
INIT_LIST_HEAD(&tp->lines);
INIT_LIST_HEAD(&tp->update);
INIT_LIST_HEAD(&tp->rcl_lines);
tp->rcl_max = 20;
for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
tp->freemem_pages[pages] = (void *)
__get_free_pages(GFP_KERNEL|GFP_DMA, 0);
if (!tp->freemem_pages[pages])
goto out_pages;
add_string_memory(&tp->freemem,
tp->freemem_pages[pages], PAGE_SIZE);
}
tp->write = raw3270_request_alloc(TTY3270_OUTPUT_BUFFER_SIZE);
if (IS_ERR(tp->write))
goto out_pages;
tp->read = raw3270_request_alloc(0);
if (IS_ERR(tp->read))
goto out_write;
tp->kreset = raw3270_request_alloc(1);
if (IS_ERR(tp->kreset))
goto out_read;
tp->kbd = kbd_alloc();
if (!tp->kbd)
goto out_reset;
tty_port_init(&tp->port);
setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
(unsigned long) tp);
tasklet_init(&tp->readlet,
(void (*)(unsigned long)) tty3270_read_tasklet,
(unsigned long) tp->read);
INIT_WORK(&tp->resize_work, tty3270_resize_work);
return tp;
out_reset:
raw3270_request_free(tp->kreset);
out_read:
raw3270_request_free(tp->read);
out_write:
raw3270_request_free(tp->write);
out_pages:
while (pages--)
free_pages((unsigned long) tp->freemem_pages[pages], 0);
kfree(tp->freemem_pages);
tty_port_destroy(&tp->port);
out_tp:
kfree(tp);
out_err:
return ERR_PTR(-ENOMEM);
}
/*
* Free tty3270 structure.
*/
static void
tty3270_free_view(struct tty3270 *tp)
{
int pages;
kbd_free(tp->kbd);
raw3270_request_free(tp->kreset);
raw3270_request_free(tp->read);
raw3270_request_free(tp->write);
for (pages = 0; pages < TTY3270_STRING_PAGES; pages++)
free_pages((unsigned long) tp->freemem_pages[pages], 0);
kfree(tp->freemem_pages);
tty_port_destroy(&tp->port);
kfree(tp);
}
/*
* Allocate tty3270 screen.
*/
static struct tty3270_line *
tty3270_alloc_screen(unsigned int rows, unsigned int cols)
{
struct tty3270_line *screen;
unsigned long size;
int lines;
size = sizeof(struct tty3270_line) * (rows - 2);
screen = kzalloc(size, GFP_KERNEL);
if (!screen)
goto out_err;
for (lines = 0; lines < rows - 2; lines++) {
size = sizeof(struct tty3270_cell) * cols;
screen[lines].cells = kzalloc(size, GFP_KERNEL);
if (!screen[lines].cells)
goto out_screen;
}
return screen;
out_screen:
while (lines--)
kfree(screen[lines].cells);
kfree(screen);
out_err:
return ERR_PTR(-ENOMEM);
}
/*
* Free tty3270 screen.
*/
static void
tty3270_free_screen(struct tty3270_line *screen, unsigned int rows)
{
int lines;
for (lines = 0; lines < rows - 2; lines++)
kfree(screen[lines].cells);
kfree(screen);
}
/*
* Resize tty3270 screen
*/
static void tty3270_resize_work(struct work_struct *work)
{
struct tty3270 *tp = container_of(work, struct tty3270, resize_work);
struct tty3270_line *screen, *oscreen;
struct tty_struct *tty;
unsigned int orows;
struct winsize ws;
screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
if (IS_ERR(screen))
return;
/* Switch to new output size */
spin_lock_bh(&tp->view.lock);
oscreen = tp->screen;
orows = tp->view.rows;
tp->view.model = tp->n_model;
tp->view.rows = tp->n_rows;
tp->view.cols = tp->n_cols;
tp->screen = screen;
free_string(&tp->freemem, tp->prompt);
free_string(&tp->freemem, tp->status);
tty3270_create_prompt(tp);
tty3270_create_status(tp);
tp->nr_up = 0;
while (tp->nr_lines < tp->view.rows - 2)
tty3270_blank_line(tp);
tp->update_flags = TTY_UPDATE_ALL;
spin_unlock_bh(&tp->view.lock);
tty3270_free_screen(oscreen, orows);
tty3270_set_timer(tp, 1);
/* Inform the tty layer about the new size */
tty = tty_port_tty_get(&tp->port);
if (!tty)
return;
ws.ws_row = tp->view.rows - 2;
ws.ws_col = tp->view.cols;
tty_do_resize(tty, &ws);
}
static void
tty3270_resize(struct raw3270_view *view, int model, int rows, int cols)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
tp->n_model = model;
tp->n_rows = rows;
tp->n_cols = cols;
schedule_work(&tp->resize_work);
}
/*
* Unlink tty3270 data structure from tty.
*/
static void
tty3270_release(struct raw3270_view *view)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
struct tty_struct *tty = tty_port_tty_get(&tp->port);
if (tty) {
tty->driver_data = NULL;
tty_port_tty_set(&tp->port, NULL);
tty_hangup(tty);
raw3270_put_view(&tp->view);
tty_kref_put(tty);
}
}
/*
* Free tty3270 data structure
*/
static void
tty3270_free(struct raw3270_view *view)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
del_timer_sync(&tp->timer);
tty3270_free_screen(tp->screen, tp->view.rows);
tty3270_free_view(tp);
}
/*
* Delayed freeing of tty3270 views.
*/
static void
tty3270_del_views(void)
{
int i;
for (i = RAW3270_FIRSTMINOR; i <= tty3270_max_index; i++) {
struct raw3270_view *view = raw3270_find_view(&tty3270_fn, i);
if (!IS_ERR(view))
raw3270_del_view(view);
}
}
static struct raw3270_fn tty3270_fn = {
.activate = tty3270_activate,
.deactivate = tty3270_deactivate,
.intv = (void *) tty3270_irq,
.release = tty3270_release,
.free = tty3270_free,
.resize = tty3270_resize
};
/*
* This routine is called whenever a 3270 tty is opened first time.
*/
static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct raw3270_view *view;
struct tty3270 *tp;
int i, rc;
/* Check if the tty3270 is already there. */
view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR);
if (!IS_ERR(view)) {
tp = container_of(view, struct tty3270, view);
tty->driver_data = tp;
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
tp->port.low_latency = 0;
/* why reassign? */
tty_port_tty_set(&tp->port, tty);
tp->inattr = TF_INPUT;
return tty_port_install(&tp->port, driver, tty);
}
if (tty3270_max_index < tty->index + 1)
tty3270_max_index = tty->index + 1;
/* Allocate tty3270 structure on first open. */
tp = tty3270_alloc_view();
if (IS_ERR(tp))
return PTR_ERR(tp);
rc = raw3270_add_view(&tp->view, &tty3270_fn,
tty->index + RAW3270_FIRSTMINOR);
if (rc) {
tty3270_free_view(tp);
return rc;
}
tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
if (IS_ERR(tp->screen)) {
rc = PTR_ERR(tp->screen);
raw3270_put_view(&tp->view);
raw3270_del_view(&tp->view);
tty3270_free_view(tp);
return rc;
}
tty_port_tty_set(&tp->port, tty);
tp->port.low_latency = 0;
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
tty3270_create_prompt(tp);
tty3270_create_status(tp);
tty3270_update_status(tp);
/* Create blank line for every line in the tty output area. */
for (i = 0; i < tp->view.rows - 2; i++)
tty3270_blank_line(tp);
tp->kbd->port = &tp->port;
tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty;
tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward;
tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward;
tp->kbd->fn_handler[KVAL(K_CONS)] = tty3270_rcl_backward;
kbd_ascebc(tp->kbd, tp->view.ascebc);
raw3270_activate_view(&tp->view);
rc = tty_port_install(&tp->port, driver, tty);
if (rc) {
raw3270_put_view(&tp->view);
return rc;
}
tty->driver_data = tp;
return 0;
}
/*
* This routine is called whenever a 3270 tty is opened.
*/
static int
tty3270_open(struct tty_struct *tty, struct file *filp)
{
struct tty3270 *tp = tty->driver_data;
struct tty_port *port = &tp->port;
port->count++;
tty_port_tty_set(port, tty);
return 0;
}
/*
* This routine is called when the 3270 tty is closed. We wait
* for the remaining request to be completed. Then we clean up.
*/
static void
tty3270_close(struct tty_struct *tty, struct file * filp)
{
struct tty3270 *tp = tty->driver_data;
if (tty->count > 1)
return;
if (tp) {
tty->driver_data = NULL;
tty_port_tty_set(&tp->port, NULL);
}
}
static void tty3270_cleanup(struct tty_struct *tty)
{
struct tty3270 *tp = tty->driver_data;
if (tp)
raw3270_put_view(&tp->view);
}
/*
* We always have room.
*/
static int
tty3270_write_room(struct tty_struct *tty)
{
return INT_MAX;
}
/*
* Insert character into the screen at the current position with the
* current color and highlight. This function does NOT do cursor movement.
*/
static void tty3270_put_character(struct tty3270 *tp, char ch)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
line = tp->screen + tp->cy;
if (line->len <= tp->cx) {
while (line->len < tp->cx) {
cell = line->cells + line->len;
cell->character = tp->view.ascebc[' '];
cell->highlight = tp->highlight;
cell->f_color = tp->f_color;
line->len++;
}
line->len++;
}
cell = line->cells + tp->cx;
cell->character = tp->view.ascebc[(unsigned int) ch];
cell->highlight = tp->highlight;
cell->f_color = tp->f_color;
}
/*
* Convert a tty3270_line to a 3270 data fragment usable for output.
*/
static void
tty3270_convert_line(struct tty3270 *tp, int line_nr)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
struct string *s, *n;
unsigned char highlight;
unsigned char f_color;
char *cp;
int flen, i;
/* Determine how long the fragment will be. */
flen = 3; /* Prefix (TO_SBA). */
line = tp->screen + line_nr;
flen += line->len;
highlight = TAX_RESET;
f_color = TAC_RESET;
for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
if (cell->highlight != highlight) {
flen += 3; /* TO_SA to switch highlight. */
highlight = cell->highlight;
}
if (cell->f_color != f_color) {
flen += 3; /* TO_SA to switch color. */
f_color = cell->f_color;
}
}
if (highlight != TAX_RESET)
flen += 3; /* TO_SA to reset highlight. */
if (f_color != TAC_RESET)
flen += 3; /* TO_SA to reset color. */
if (line->len < tp->view.cols)
flen += 4; /* Postfix (TO_RA). */
/* Find the line in the list. */
i = tp->view.rows - 2 - line_nr;
list_for_each_entry_reverse(s, &tp->lines, list)
if (--i <= 0)
break;
/*
* Check if the line needs to get reallocated.
*/
if (s->len != flen) {
/* Reallocate string. */
n = tty3270_alloc_string(tp, flen);
list_add(&n->list, &s->list);
list_del_init(&s->list);
if (!list_empty(&s->update))
list_del_init(&s->update);
free_string(&tp->freemem, s);
s = n;
}
/* Write 3270 data fragment. */
cp = s->string;
*cp++ = TO_SBA;
*cp++ = 0;
*cp++ = 0;
highlight = TAX_RESET;
f_color = TAC_RESET;
for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
if (cell->highlight != highlight) {
*cp++ = TO_SA;
*cp++ = TAT_EXTHI;
*cp++ = cell->highlight;
highlight = cell->highlight;
}
if (cell->f_color != f_color) {
*cp++ = TO_SA;
*cp++ = TAT_COLOR;
*cp++ = cell->f_color;
f_color = cell->f_color;
}
*cp++ = cell->character;
}
if (highlight != TAX_RESET) {
*cp++ = TO_SA;
*cp++ = TAT_EXTHI;
*cp++ = TAX_RESET;
}
if (f_color != TAC_RESET) {
*cp++ = TO_SA;
*cp++ = TAT_COLOR;
*cp++ = TAC_RESET;
}
if (line->len < tp->view.cols) {
*cp++ = TO_RA;
*cp++ = 0;
*cp++ = 0;
*cp++ = 0;
}
if (tp->nr_up + line_nr < tp->view.rows - 2) {
/* Line is currently visible on screen. */
tty3270_update_string(tp, s, line_nr);
/* Add line to update list. */
if (list_empty(&s->update)) {
list_add_tail(&s->update, &tp->update);
tp->update_flags |= TTY_UPDATE_LIST;
}
}
}
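/*
 * Illustrative fragment layout (not part of the original source):
 * TO_SBA <2-byte row address>, the cell characters with TO_SA attribute
 * switches wherever highlight or color changes, closing TO_SA resets, and,
 * for rows shorter than the screen width, TO_RA <2-byte address of the
 * next row> 0 to blank the remainder.
 */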
/*
* Do carriage return.
*/
static void
tty3270_cr(struct tty3270 *tp)
{
tp->cx = 0;
}
/*
* Do line feed.
*/
static void
tty3270_lf(struct tty3270 *tp)
{
struct tty3270_line temp;
int i;
tty3270_convert_line(tp, tp->cy);
if (tp->cy < tp->view.rows - 3) {
tp->cy++;
return;
}
/* Last line just filled up. Add new, blank line. */
tty3270_blank_line(tp);
temp = tp->screen[0];
temp.len = 0;
for (i = 0; i < tp->view.rows - 3; i++)
tp->screen[i] = tp->screen[i+1];
tp->screen[tp->view.rows - 3] = temp;
tty3270_rebuild_update(tp);
}
static void
tty3270_ri(struct tty3270 *tp)
{
if (tp->cy > 0) {
tty3270_convert_line(tp, tp->cy);
tp->cy--;
}
}
/*
* Insert characters at current position.
*/
static void
tty3270_insert_characters(struct tty3270 *tp, int n)
{
struct tty3270_line *line;
int k;
line = tp->screen + tp->cy;
while (line->len < tp->cx) {
line->cells[line->len].character = tp->view.ascebc[' '];
line->cells[line->len].highlight = TAX_RESET;
line->cells[line->len].f_color = TAC_RESET;
line->len++;
}
if (n > tp->view.cols - tp->cx)
n = tp->view.cols - tp->cx;
k = min_t(int, line->len - tp->cx, tp->view.cols - tp->cx - n);
while (k--)
line->cells[tp->cx + n + k] = line->cells[tp->cx + k];
line->len += n;
if (line->len > tp->view.cols)
line->len = tp->view.cols;
while (n-- > 0) {
line->cells[tp->cx + n].character = tp->view.ascebc[' '];
line->cells[tp->cx + n].highlight = tp->highlight;
line->cells[tp->cx + n].f_color = tp->f_color;
}
}
/*
* Delete characters at current position.
*/
static void
tty3270_delete_characters(struct tty3270 *tp, int n)
{
struct tty3270_line *line;
int i;
line = tp->screen + tp->cy;
if (line->len <= tp->cx)
return;
if (line->len - tp->cx <= n) {
line->len = tp->cx;
return;
}
for (i = tp->cx; i + n < line->len; i++)
line->cells[i] = line->cells[i + n];
line->len -= n;
}
/*
* Erase characters at current position.
*/
static void
tty3270_erase_characters(struct tty3270 *tp, int n)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
line = tp->screen + tp->cy;
while (line->len > tp->cx && n-- > 0) {
cell = line->cells + tp->cx++;
cell->character = ' ';
cell->highlight = TAX_RESET;
cell->f_color = TAC_RESET;
}
tp->cx += n;
tp->cx = min_t(int, tp->cx, tp->view.cols - 1);
}
/*
* Erase line, 3 different cases:
* Esc [ 0 K Erase from current position to end of line inclusive
* Esc [ 1 K Erase from beginning of line to current position inclusive
* Esc [ 2 K Erase entire line (without moving cursor)
*/
static void
tty3270_erase_line(struct tty3270 *tp, int mode)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
int i;
line = tp->screen + tp->cy;
if (mode == 0)
line->len = tp->cx;
else if (mode == 1) {
for (i = 0; i < tp->cx; i++) {
cell = line->cells + i;
cell->character = ' ';
cell->highlight = TAX_RESET;
cell->f_color = TAC_RESET;
}
if (line->len <= tp->cx)
line->len = tp->cx + 1;
} else if (mode == 2)
line->len = 0;
tty3270_convert_line(tp, tp->cy);
}
/*
* Erase display, 3 different cases:
* Esc [ 0 J Erase from current position to bottom of screen inclusive
* Esc [ 1 J Erase from top of screen to current position inclusive
* Esc [ 2 J Erase entire screen (without moving the cursor)
*/
static void
tty3270_erase_display(struct tty3270 *tp, int mode)
{
int i;
if (mode == 0) {
tty3270_erase_line(tp, 0);
for (i = tp->cy + 1; i < tp->view.rows - 2; i++) {
tp->screen[i].len = 0;
tty3270_convert_line(tp, i);
}
} else if (mode == 1) {
for (i = 0; i < tp->cy; i++) {
tp->screen[i].len = 0;
tty3270_convert_line(tp, i);
}
tty3270_erase_line(tp, 1);
} else if (mode == 2) {
for (i = 0; i < tp->view.rows - 2; i++) {
tp->screen[i].len = 0;
tty3270_convert_line(tp, i);
}
}
tty3270_rebuild_update(tp);
}
/*
* Set attributes found in an escape sequence.
* Esc [ <attr> ; <attr> ; ... m
*/
static void
tty3270_set_attributes(struct tty3270 *tp)
{
static unsigned char f_colors[] = {
TAC_DEFAULT, TAC_RED, TAC_GREEN, TAC_YELLOW, TAC_BLUE,
TAC_PINK, TAC_TURQ, TAC_WHITE, 0, TAC_DEFAULT
};
int i, attr;
for (i = 0; i <= tp->esc_npar; i++) {
attr = tp->esc_par[i];
switch (attr) {
case 0: /* Reset */
tp->highlight = TAX_RESET;
tp->f_color = TAC_RESET;
break;
/* Highlight. */
case 4: /* Start underlining. */
tp->highlight = TAX_UNDER;
break;
case 5: /* Start blink. */
tp->highlight = TAX_BLINK;
break;
case 7: /* Start reverse. */
tp->highlight = TAX_REVER;
break;
case 24: /* End underlining */
if (tp->highlight == TAX_UNDER)
tp->highlight = TAX_RESET;
break;
case 25: /* End blink. */
if (tp->highlight == TAX_BLINK)
tp->highlight = TAX_RESET;
break;
case 27: /* End reverse. */
if (tp->highlight == TAX_REVER)
tp->highlight = TAX_RESET;
break;
/* Foreground color. */
case 30: /* Black */
case 31: /* Red */
case 32: /* Green */
case 33: /* Yellow */
case 34: /* Blue */
case 35: /* Magenta */
case 36: /* Cyan */
case 37: /* White */
case 39: /* Default */
tp->f_color = f_colors[attr - 30];
break;
}
}
}
static inline int
tty3270_getpar(struct tty3270 *tp, int ix)
{
return (tp->esc_par[ix] > 0) ? tp->esc_par[ix] : 1;
}
static void
tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
{
int max_cx = max(0, cx);
int max_cy = max(0, cy);
tp->cx = min_t(int, tp->view.cols - 1, max_cx);
cy = min_t(int, tp->view.rows - 3, max_cy);
if (cy != tp->cy) {
tty3270_convert_line(tp, tp->cy);
tp->cy = cy;
}
}
/*
* Process escape sequences. Known sequences:
* Esc 7 Save Cursor Position
* Esc 8 Restore Cursor Position
* Esc [ Pn ; Pn ; .. m Set attributes
* Esc [ Pn ; Pn H Cursor Position
* Esc [ Pn ; Pn f Cursor Position
* Esc [ Pn A Cursor Up
* Esc [ Pn B Cursor Down
* Esc [ Pn C Cursor Forward
* Esc [ Pn D Cursor Backward
* Esc [ Pn G Cursor Horizontal Absolute
* Esc [ Pn X Erase Characters
* Esc [ Ps J Erase in Display
* Esc [ Ps K Erase in Line
* // FIXME: add all the new ones.
*
* Pn is a numeric parameter, a string of zero or more decimal digits.
* Ps is a selective parameter.
*/
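/*
* Worked example (added for illustration): for the sequence
* "\033[12;5H" the code below moves ESnormal -> ESesc on 0x1b and
* ESesc -> ESsquare on '['; the first digit then switches to
* ESgetpars, '1' and '2' accumulate esc_par[0] = 12, ';' bumps
* esc_npar, '5' gives esc_par[1] = 5, and the final 'H' calls
* tty3270_goto_xy(tp, 5 - 1, 12 - 1), i.e. row 11, column 4 (0-based).
*/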
static void
tty3270_escape_sequence(struct tty3270 *tp, char ch)
{
enum { ESnormal, ESesc, ESsquare, ESgetpars };
if (tp->esc_state == ESnormal) {
if (ch == 0x1b)
/* Starting new escape sequence. */
tp->esc_state = ESesc;
return;
}
if (tp->esc_state == ESesc) {
tp->esc_state = ESnormal;
switch (ch) {
case '[':
tp->esc_state = ESsquare;
break;
case 'E':
tty3270_cr(tp);
tty3270_lf(tp);
break;
case 'M':
tty3270_ri(tp);
break;
case 'D':
tty3270_lf(tp);
break;
case 'Z': /* Respond ID. */
kbd_puts_queue(&tp->port, "\033[?6c");
break;
case '7': /* Save cursor position. */
tp->saved_cx = tp->cx;
tp->saved_cy = tp->cy;
tp->saved_highlight = tp->highlight;
tp->saved_f_color = tp->f_color;
break;
case '8': /* Restore cursor position. */
tty3270_convert_line(tp, tp->cy);
tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
tp->highlight = tp->saved_highlight;
tp->f_color = tp->saved_f_color;
break;
case 'c': /* Reset terminal. */
tp->cx = tp->saved_cx = 0;
tp->cy = tp->saved_cy = 0;
tp->highlight = tp->saved_highlight = TAX_RESET;
tp->f_color = tp->saved_f_color = TAC_RESET;
tty3270_erase_display(tp, 2);
break;
}
return;
}
if (tp->esc_state == ESsquare) {
tp->esc_state = ESgetpars;
memset(tp->esc_par, 0, sizeof(tp->esc_par));
tp->esc_npar = 0;
tp->esc_ques = (ch == '?');
if (tp->esc_ques)
return;
}
if (tp->esc_state == ESgetpars) {
if (ch == ';' && tp->esc_npar < ESCAPE_NPAR - 1) {
tp->esc_npar++;
return;
}
if (ch >= '0' && ch <= '9') {
tp->esc_par[tp->esc_npar] *= 10;
tp->esc_par[tp->esc_npar] += ch - '0';
return;
}
}
tp->esc_state = ESnormal;
if (ch == 'n' && !tp->esc_ques) {
if (tp->esc_par[0] == 5) /* Status report. */
kbd_puts_queue(&tp->port, "\033[0n");
else if (tp->esc_par[0] == 6) { /* Cursor report. */
char buf[40];
sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1);
kbd_puts_queue(&tp->port, buf);
}
return;
}
if (tp->esc_ques)
return;
switch (ch) {
case 'm':
tty3270_set_attributes(tp);
break;
case 'H': /* Set cursor position. */
case 'f':
tty3270_goto_xy(tp, tty3270_getpar(tp, 1) - 1,
tty3270_getpar(tp, 0) - 1);
break;
case 'd': /* Set y position. */
tty3270_goto_xy(tp, tp->cx, tty3270_getpar(tp, 0) - 1);
break;
case 'A': /* Cursor up. */
case 'F':
tty3270_goto_xy(tp, tp->cx, tp->cy - tty3270_getpar(tp, 0));
break;
case 'B': /* Cursor down. */
case 'e':
case 'E':
tty3270_goto_xy(tp, tp->cx, tp->cy + tty3270_getpar(tp, 0));
break;
case 'C': /* Cursor forward. */
case 'a':
tty3270_goto_xy(tp, tp->cx + tty3270_getpar(tp, 0), tp->cy);
break;
case 'D': /* Cursor backward. */
tty3270_goto_xy(tp, tp->cx - tty3270_getpar(tp, 0), tp->cy);
break;
case 'G': /* Set x position. */
case '`':
tty3270_goto_xy(tp, tty3270_getpar(tp, 0), tp->cy);
break;
case 'X': /* Erase Characters. */
tty3270_erase_characters(tp, tty3270_getpar(tp, 0));
break;
case 'J': /* Erase display. */
tty3270_erase_display(tp, tp->esc_par[0]);
break;
case 'K': /* Erase line. */
tty3270_erase_line(tp, tp->esc_par[0]);
break;
case 'P': /* Delete characters. */
tty3270_delete_characters(tp, tty3270_getpar(tp, 0));
break;
case '@': /* Insert characters. */
tty3270_insert_characters(tp, tty3270_getpar(tp, 0));
break;
case 's': /* Save cursor position. */
tp->saved_cx = tp->cx;
tp->saved_cy = tp->cy;
tp->saved_highlight = tp->highlight;
tp->saved_f_color = tp->f_color;
break;
case 'u': /* Restore cursor position. */
tty3270_convert_line(tp, tp->cy);
tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
tp->highlight = tp->saved_highlight;
tp->f_color = tp->saved_f_color;
break;
}
}
/*
* String write routine for 3270 ttys
*/
static void
tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty,
const unsigned char *buf, int count)
{
int i_msg, i;
spin_lock_bh(&tp->view.lock);
for (i_msg = 0; !tty->stopped && i_msg < count; i_msg++) {
if (tp->esc_state != 0) {
/* Continue escape sequence. */
tty3270_escape_sequence(tp, buf[i_msg]);
continue;
}
switch (buf[i_msg]) {
case 0x07: /* '\a' -- Alarm */
tp->wcc |= TW_PLUSALARM;
break;
case 0x08: /* Backspace. */
if (tp->cx > 0) {
tp->cx--;
tty3270_put_character(tp, ' ');
}
break;
case 0x09: /* '\t' -- Tabulate */
for (i = tp->cx % 8; i < 8; i++) {
if (tp->cx >= tp->view.cols) {
tty3270_cr(tp);
tty3270_lf(tp);
break;
}
tty3270_put_character(tp, ' ');
tp->cx++;
}
break;
case 0x0a: /* '\n' -- New Line */
tty3270_cr(tp);
tty3270_lf(tp);
break;
case 0x0c: /* '\f' -- Form Feed */
tty3270_erase_display(tp, 2);
tp->cx = tp->cy = 0;
break;
case 0x0d: /* '\r' -- Carriage Return */
tp->cx = 0;
break;
case 0x0f: /* SuSE "exit alternate mode" */
break;
case 0x1b: /* Start escape sequence. */
tty3270_escape_sequence(tp, buf[i_msg]);
break;
default: /* Insert normal character. */
if (tp->cx >= tp->view.cols) {
tty3270_cr(tp);
tty3270_lf(tp);
}
tty3270_put_character(tp, buf[i_msg]);
tp->cx++;
break;
}
}
/* Convert current line to 3270 data fragment. */
tty3270_convert_line(tp, tp->cy);
/* Setup timer to update display after 1/10 second */
if (!timer_pending(&tp->timer))
tty3270_set_timer(tp, HZ/10);
spin_unlock_bh(&tp->view.lock);
}
/*
* String write routine for 3270 ttys
*/
static int
tty3270_write(struct tty_struct * tty,
const unsigned char *buf, int count)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return 0;
if (tp->char_count > 0) {
tty3270_do_write(tp, tty, tp->char_buf, tp->char_count);
tp->char_count = 0;
}
tty3270_do_write(tp, tty, buf, count);
return count;
}
/*
* Put a single character into the tty's character buffer
*/
static int tty3270_put_char(struct tty_struct *tty, unsigned char ch)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp || tp->char_count >= TTY3270_CHAR_BUF_SIZE)
return 0;
tp->char_buf[tp->char_count++] = ch;
return 1;
}
/*
* Flush all characters from the tty's character buffer put there
* by tty3270_put_char.
*/
static void
tty3270_flush_chars(struct tty_struct *tty)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return;
if (tp->char_count > 0) {
tty3270_do_write(tp, tty, tp->char_buf, tp->char_count);
tp->char_count = 0;
}
}
/*
* Returns the number of characters in the output buffer. This is
* used in tty_wait_until_sent to wait until all characters have
* appeared on the screen.
*/
static int
tty3270_chars_in_buffer(struct tty_struct *tty)
{
return 0;
}
static void
tty3270_flush_buffer(struct tty_struct *tty)
{
}
/*
* Check for visible/invisible input switches
*/
static void
tty3270_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct tty3270 *tp;
int new;
tp = tty->driver_data;
if (!tp)
return;
spin_lock_bh(&tp->view.lock);
if (L_ICANON(tty)) {
new = L_ECHO(tty) ? TF_INPUT: TF_INPUTN;
if (new != tp->inattr) {
tp->inattr = new;
tty3270_update_prompt(tp, NULL, 0);
tty3270_set_timer(tp, 1);
}
}
spin_unlock_bh(&tp->view.lock);
}
/*
* Disable reading from a 3270 tty
*/
static void
tty3270_throttle(struct tty_struct * tty)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return;
tp->throttle = 1;
}
/*
* Enable reading from a 3270 tty
*/
static void
tty3270_unthrottle(struct tty_struct * tty)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return;
tp->throttle = 0;
if (tp->attn)
tty3270_issue_read(tp, 1);
}
/*
* Hang up the tty device.
*/
static void
tty3270_hangup(struct tty_struct *tty)
{
// FIXME: implement
}
static void
tty3270_wait_until_sent(struct tty_struct *tty, int timeout)
{
}
static int tty3270_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
return kbd_ioctl(tp->kbd, cmd, arg);
}
#ifdef CONFIG_COMPAT
static long tty3270_compat_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
return kbd_ioctl(tp->kbd, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct tty_operations tty3270_ops = {
.install = tty3270_install,
.cleanup = tty3270_cleanup,
.open = tty3270_open,
.close = tty3270_close,
.write = tty3270_write,
.put_char = tty3270_put_char,
.flush_chars = tty3270_flush_chars,
.write_room = tty3270_write_room,
.chars_in_buffer = tty3270_chars_in_buffer,
.flush_buffer = tty3270_flush_buffer,
.throttle = tty3270_throttle,
.unthrottle = tty3270_unthrottle,
.hangup = tty3270_hangup,
.wait_until_sent = tty3270_wait_until_sent,
.ioctl = tty3270_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = tty3270_compat_ioctl,
#endif
.set_termios = tty3270_set_termios
};
static void tty3270_create_cb(int minor)
{
tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
}
static void tty3270_destroy_cb(int minor)
{
tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
}
static struct raw3270_notifier tty3270_notifier =
{
.create = tty3270_create_cb,
.destroy = tty3270_destroy_cb,
};
/*
* 3270 tty registration code called from tty_init().
* Most kernel services (incl. kmalloc) are available at this point.
*/
static int __init tty3270_init(void)
{
struct tty_driver *driver;
int ret;
driver = tty_alloc_driver(RAW3270_MAXDEVS,
TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV |
TTY_DRIVER_RESET_TERMIOS);
if (IS_ERR(driver))
return PTR_ERR(driver);
/*
* Initialize the tty_driver structure
* Entries in tty3270_driver that are NOT initialized:
* proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
*/
driver->driver_name = "tty3270";
driver->name = "3270/tty";
driver->major = IBM_TTY3270_MAJOR;
driver->minor_start = RAW3270_FIRSTMINOR;
driver->name_base = RAW3270_FIRSTMINOR;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
tty_set_operations(driver, &tty3270_ops);
ret = tty_register_driver(driver);
if (ret) {
put_tty_driver(driver);
return ret;
}
tty3270_driver = driver;
raw3270_register_notifier(&tty3270_notifier);
return 0;
}
static void __exit
tty3270_exit(void)
{
struct tty_driver *driver;
raw3270_unregister_notifier(&tty3270_notifier);
driver = tty3270_driver;
tty3270_driver = NULL;
tty_unregister_driver(driver);
put_tty_driver(driver);
tty3270_del_views();
}
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(IBM_TTY3270_MAJOR);
module_init(tty3270_init);
module_exit(tty3270_exit);
| gpl-2.0 |
renzoolivares/android_kernel_htc_monarudo | drivers/hid/hid-wiimote-core.c | 2033 | 33857 | /*
* HID driver for Nintendo Wiimote devices
* Copyright (c) 2011 David Herrmann
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <linux/spinlock.h>
#include "hid-ids.h"
#include "hid-wiimote.h"
enum wiiproto_keys {
WIIPROTO_KEY_LEFT,
WIIPROTO_KEY_RIGHT,
WIIPROTO_KEY_UP,
WIIPROTO_KEY_DOWN,
WIIPROTO_KEY_PLUS,
WIIPROTO_KEY_MINUS,
WIIPROTO_KEY_ONE,
WIIPROTO_KEY_TWO,
WIIPROTO_KEY_A,
WIIPROTO_KEY_B,
WIIPROTO_KEY_HOME,
WIIPROTO_KEY_COUNT
};
static __u16 wiiproto_keymap[] = {
KEY_LEFT, /* WIIPROTO_KEY_LEFT */
KEY_RIGHT, /* WIIPROTO_KEY_RIGHT */
KEY_UP, /* WIIPROTO_KEY_UP */
KEY_DOWN, /* WIIPROTO_KEY_DOWN */
KEY_NEXT, /* WIIPROTO_KEY_PLUS */
KEY_PREVIOUS, /* WIIPROTO_KEY_MINUS */
BTN_1, /* WIIPROTO_KEY_ONE */
BTN_2, /* WIIPROTO_KEY_TWO */
BTN_A, /* WIIPROTO_KEY_A */
BTN_B, /* WIIPROTO_KEY_B */
BTN_MODE, /* WIIPROTO_KEY_HOME */
};
static enum power_supply_property wiimote_battery_props[] = {
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_SCOPE,
};
static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
size_t count)
{
__u8 *buf;
ssize_t ret;
if (!hdev->hid_output_raw_report)
return -ENODEV;
buf = kmemdup(buffer, count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = hdev->hid_output_raw_report(hdev, buf, count, HID_OUTPUT_REPORT);
kfree(buf);
return ret;
}
static void wiimote_worker(struct work_struct *work)
{
struct wiimote_data *wdata = container_of(work, struct wiimote_data,
worker);
unsigned long flags;
spin_lock_irqsave(&wdata->qlock, flags);
while (wdata->head != wdata->tail) {
spin_unlock_irqrestore(&wdata->qlock, flags);
wiimote_hid_send(wdata->hdev, wdata->outq[wdata->tail].data,
wdata->outq[wdata->tail].size);
spin_lock_irqsave(&wdata->qlock, flags);
wdata->tail = (wdata->tail + 1) % WIIMOTE_BUFSIZE;
}
spin_unlock_irqrestore(&wdata->qlock, flags);
}
static void wiimote_queue(struct wiimote_data *wdata, const __u8 *buffer,
size_t count)
{
unsigned long flags;
__u8 newhead;
if (count > HID_MAX_BUFFER_SIZE) {
hid_warn(wdata->hdev, "Sending too large output report\n");
return;
}
/*
* Copy new request into our output queue and check whether the
* queue is full. If it is full, discard this request.
* If it is empty we need to start a new worker that will
* send out the buffer to the hid device.
* If the queue is not empty, then there must be a worker
* that is currently sending out our buffer and this worker
* will reschedule itself until the queue is empty.
*/
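/*
* Sketch of the ring arithmetic (illustrative; WIIMOTE_BUFSIZE is
* assumed to be the ring size defined in hid-wiimote.h): with
* head == tail == 3 the queue is empty, so the entry is stored at
* index 3, head becomes 4 and the worker is scheduled.  With head == 2
* and tail == 3, newhead would equal tail, so the request is dropped
* with the "Output queue is full" warning.
*/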
spin_lock_irqsave(&wdata->qlock, flags);
memcpy(wdata->outq[wdata->head].data, buffer, count);
wdata->outq[wdata->head].size = count;
newhead = (wdata->head + 1) % WIIMOTE_BUFSIZE;
if (wdata->head == wdata->tail) {
wdata->head = newhead;
schedule_work(&wdata->worker);
} else if (newhead != wdata->tail) {
wdata->head = newhead;
} else {
hid_warn(wdata->hdev, "Output queue is full");
}
spin_unlock_irqrestore(&wdata->qlock, flags);
}
/*
* This sets the rumble bit on the given output report if rumble is
* currently enabled.
* \cmd1 must point to the second byte in the output report => &cmd[1]
* This must be called on nearly every output report before passing it
* into the output queue!
*/
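/*
* Example (illustrative, not from the original source): if rumble is
* currently active, a LED request built below as
* { WIIPROTO_REQ_LED, 0x10 } leaves wiiproto_keep_rumble() as
* { WIIPROTO_REQ_LED, 0x11 }, so toggling LEDs does not silently stop
* the rumble motor.
*/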
static inline void wiiproto_keep_rumble(struct wiimote_data *wdata, __u8 *cmd1)
{
if (wdata->state.flags & WIIPROTO_FLAG_RUMBLE)
*cmd1 |= 0x01;
}
static void wiiproto_req_rumble(struct wiimote_data *wdata, __u8 rumble)
{
__u8 cmd[2];
rumble = !!rumble;
if (rumble == !!(wdata->state.flags & WIIPROTO_FLAG_RUMBLE))
return;
if (rumble)
wdata->state.flags |= WIIPROTO_FLAG_RUMBLE;
else
wdata->state.flags &= ~WIIPROTO_FLAG_RUMBLE;
cmd[0] = WIIPROTO_REQ_RUMBLE;
cmd[1] = 0;
wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
{
__u8 cmd[2];
leds &= WIIPROTO_FLAGS_LEDS;
if ((wdata->state.flags & WIIPROTO_FLAGS_LEDS) == leds)
return;
wdata->state.flags = (wdata->state.flags & ~WIIPROTO_FLAGS_LEDS) | leds;
cmd[0] = WIIPROTO_REQ_LED;
cmd[1] = 0;
if (leds & WIIPROTO_FLAG_LED1)
cmd[1] |= 0x10;
if (leds & WIIPROTO_FLAG_LED2)
cmd[1] |= 0x20;
if (leds & WIIPROTO_FLAG_LED3)
cmd[1] |= 0x40;
if (leds & WIIPROTO_FLAG_LED4)
cmd[1] |= 0x80;
wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
/*
* Check what peripherals of the wiimote are currently
* active and select a proper DRM that supports all of
* the requested data inputs.
*/
static __u8 select_drm(struct wiimote_data *wdata)
{
__u8 ir = wdata->state.flags & WIIPROTO_FLAGS_IR;
bool ext = wiiext_active(wdata);
if (ir == WIIPROTO_FLAG_IR_BASIC) {
if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
return WIIPROTO_REQ_DRM_KAIE;
else
return WIIPROTO_REQ_DRM_KIE;
} else if (ir == WIIPROTO_FLAG_IR_EXT) {
return WIIPROTO_REQ_DRM_KAI;
} else if (ir == WIIPROTO_FLAG_IR_FULL) {
return WIIPROTO_REQ_DRM_SKAI1;
} else {
if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) {
if (ext)
return WIIPROTO_REQ_DRM_KAE;
else
return WIIPROTO_REQ_DRM_KA;
} else {
if (ext)
return WIIPROTO_REQ_DRM_KE;
else
return WIIPROTO_REQ_DRM_K;
}
}
}
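/*
* Example of the selection above (illustrative, not an exhaustive
* table): basic IR plus accelerometer yields WIIPROTO_REQ_DRM_KAIE,
* basic IR alone yields WIIPROTO_REQ_DRM_KIE, and with no IR, no
* accelerometer and no extension the driver falls back to
* WIIPROTO_REQ_DRM_K.
*/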
void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
{
__u8 cmd[3];
if (drm == WIIPROTO_REQ_NULL)
drm = select_drm(wdata);
cmd[0] = WIIPROTO_REQ_DRM;
cmd[1] = 0;
cmd[2] = drm;
wdata->state.drm = drm;
wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
static void wiiproto_req_status(struct wiimote_data *wdata)
{
__u8 cmd[2];
cmd[0] = WIIPROTO_REQ_SREQ;
cmd[1] = 0;
wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
static void wiiproto_req_accel(struct wiimote_data *wdata, __u8 accel)
{
accel = !!accel;
if (accel == !!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
return;
if (accel)
wdata->state.flags |= WIIPROTO_FLAG_ACCEL;
else
wdata->state.flags &= ~WIIPROTO_FLAG_ACCEL;
wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
}
static void wiiproto_req_ir1(struct wiimote_data *wdata, __u8 flags)
{
__u8 cmd[2];
cmd[0] = WIIPROTO_REQ_IR1;
cmd[1] = flags;
wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
static void wiiproto_req_ir2(struct wiimote_data *wdata, __u8 flags)
{
__u8 cmd[2];
cmd[0] = WIIPROTO_REQ_IR2;
cmd[1] = flags;
wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
#define wiiproto_req_wreg(wdata, os, buf, sz) \
wiiproto_req_wmem((wdata), false, (os), (buf), (sz))
#define wiiproto_req_weeprom(wdata, os, buf, sz) \
wiiproto_req_wmem((wdata), true, (os), (buf), (sz))
static void wiiproto_req_wmem(struct wiimote_data *wdata, bool eeprom,
__u32 offset, const __u8 *buf, __u8 size)
{
__u8 cmd[22];
if (size > 16 || size == 0) {
hid_warn(wdata->hdev, "Invalid length %d wmem request\n", size);
return;
}
memset(cmd, 0, sizeof(cmd));
cmd[0] = WIIPROTO_REQ_WMEM;
cmd[2] = (offset >> 16) & 0xff;
cmd[3] = (offset >> 8) & 0xff;
cmd[4] = offset & 0xff;
cmd[5] = size;
memcpy(&cmd[6], buf, size);
if (!eeprom)
cmd[1] |= 0x04;
wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
void wiiproto_req_rmem(struct wiimote_data *wdata, bool eeprom, __u32 offset,
__u16 size)
{
__u8 cmd[7];
if (size == 0) {
hid_warn(wdata->hdev, "Invalid length %d rmem request\n", size);
return;
}
cmd[0] = WIIPROTO_REQ_RMEM;
cmd[1] = 0;
cmd[2] = (offset >> 16) & 0xff;
cmd[3] = (offset >> 8) & 0xff;
cmd[4] = offset & 0xff;
cmd[5] = (size >> 8) & 0xff;
cmd[6] = size & 0xff;
if (!eeprom)
cmd[1] |= 0x04;
wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
/* requires the cmd-mutex to be held */
int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
const __u8 *wmem, __u8 size)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&wdata->state.lock, flags);
wiimote_cmd_set(wdata, WIIPROTO_REQ_WMEM, 0);
wiiproto_req_wreg(wdata, offset, wmem, size);
spin_unlock_irqrestore(&wdata->state.lock, flags);
ret = wiimote_cmd_wait(wdata);
if (!ret && wdata->state.cmd_err)
ret = -EIO;
return ret;
}
/* requires the cmd-mutex to be held */
ssize_t wiimote_cmd_read(struct wiimote_data *wdata, __u32 offset, __u8 *rmem,
__u8 size)
{
unsigned long flags;
ssize_t ret;
spin_lock_irqsave(&wdata->state.lock, flags);
wdata->state.cmd_read_size = size;
wdata->state.cmd_read_buf = rmem;
wiimote_cmd_set(wdata, WIIPROTO_REQ_RMEM, offset & 0xffff);
wiiproto_req_rreg(wdata, offset, size);
spin_unlock_irqrestore(&wdata->state.lock, flags);
ret = wiimote_cmd_wait(wdata);
spin_lock_irqsave(&wdata->state.lock, flags);
wdata->state.cmd_read_buf = NULL;
spin_unlock_irqrestore(&wdata->state.lock, flags);
if (!ret) {
if (wdata->state.cmd_read_size == 0)
ret = -EIO;
else
ret = wdata->state.cmd_read_size;
}
return ret;
}
static int wiimote_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct wiimote_data *wdata = container_of(psy,
struct wiimote_data, battery);
int ret = 0, state;
unsigned long flags;
if (psp == POWER_SUPPLY_PROP_SCOPE) {
val->intval = POWER_SUPPLY_SCOPE_DEVICE;
return 0;
}
ret = wiimote_cmd_acquire(wdata);
if (ret)
return ret;
spin_lock_irqsave(&wdata->state.lock, flags);
wiimote_cmd_set(wdata, WIIPROTO_REQ_SREQ, 0);
wiiproto_req_status(wdata);
spin_unlock_irqrestore(&wdata->state.lock, flags);
ret = wiimote_cmd_wait(wdata);
state = wdata->state.cmd_battery;
wiimote_cmd_release(wdata);
if (ret)
return ret;
switch (psp) {
case POWER_SUPPLY_PROP_CAPACITY:
val->intval = state * 100 / 255;
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int wiimote_init_ir(struct wiimote_data *wdata, __u16 mode)
{
int ret;
unsigned long flags;
__u8 format = 0;
static const __u8 data_enable[] = { 0x01 };
static const __u8 data_sens1[] = { 0x02, 0x00, 0x00, 0x71, 0x01,
0x00, 0xaa, 0x00, 0x64 };
static const __u8 data_sens2[] = { 0x63, 0x03 };
static const __u8 data_fin[] = { 0x08 };
spin_lock_irqsave(&wdata->state.lock, flags);
if (mode == (wdata->state.flags & WIIPROTO_FLAGS_IR)) {
spin_unlock_irqrestore(&wdata->state.lock, flags);
return 0;
}
if (mode == 0) {
wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
wiiproto_req_ir1(wdata, 0);
wiiproto_req_ir2(wdata, 0);
wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
spin_unlock_irqrestore(&wdata->state.lock, flags);
return 0;
}
spin_unlock_irqrestore(&wdata->state.lock, flags);
ret = wiimote_cmd_acquire(wdata);
if (ret)
return ret;
/* send PIXEL CLOCK ENABLE cmd first */
spin_lock_irqsave(&wdata->state.lock, flags);
wiimote_cmd_set(wdata, WIIPROTO_REQ_IR1, 0);
wiiproto_req_ir1(wdata, 0x06);
spin_unlock_irqrestore(&wdata->state.lock, flags);
ret = wiimote_cmd_wait(wdata);
if (ret)
goto unlock;
if (wdata->state.cmd_err) {
ret = -EIO;
goto unlock;
}
/* enable IR LOGIC */
spin_lock_irqsave(&wdata->state.lock, flags);
wiimote_cmd_set(wdata, WIIPROTO_REQ_IR2, 0);
wiiproto_req_ir2(wdata, 0x06);
spin_unlock_irqrestore(&wdata->state.lock, flags);
ret = wiimote_cmd_wait(wdata);
if (ret)
goto unlock;
if (wdata->state.cmd_err) {
ret = -EIO;
goto unlock;
}
/* enable IR cam but do not make it send data, yet */
ret = wiimote_cmd_write(wdata, 0xb00030, data_enable,
sizeof(data_enable));
if (ret)
goto unlock;
/* write first sensitivity block */
ret = wiimote_cmd_write(wdata, 0xb00000, data_sens1,
sizeof(data_sens1));
if (ret)
goto unlock;
/* write second sensitivity block */
ret = wiimote_cmd_write(wdata, 0xb0001a, data_sens2,
sizeof(data_sens2));
if (ret)
goto unlock;
/* put IR cam into desired state */
switch (mode) {
case WIIPROTO_FLAG_IR_FULL:
format = 5;
break;
case WIIPROTO_FLAG_IR_EXT:
format = 3;
break;
case WIIPROTO_FLAG_IR_BASIC:
format = 1;
break;
}
ret = wiimote_cmd_write(wdata, 0xb00033, &format, sizeof(format));
if (ret)
goto unlock;
/* make IR cam send data */
ret = wiimote_cmd_write(wdata, 0xb00030, data_fin, sizeof(data_fin));
if (ret)
goto unlock;
/* request a new DRM mode compatible with the IR mode */
spin_lock_irqsave(&wdata->state.lock, flags);
wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
wdata->state.flags |= mode & WIIPROTO_FLAGS_IR;
wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
spin_unlock_irqrestore(&wdata->state.lock, flags);
unlock:
wiimote_cmd_release(wdata);
return ret;
}
static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev)
{
struct wiimote_data *wdata;
struct device *dev = led_dev->dev->parent;
int i;
unsigned long flags;
bool value = false;
wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
for (i = 0; i < 4; ++i) {
if (wdata->leds[i] == led_dev) {
spin_lock_irqsave(&wdata->state.lock, flags);
value = wdata->state.flags & WIIPROTO_FLAG_LED(i + 1);
spin_unlock_irqrestore(&wdata->state.lock, flags);
break;
}
}
return value ? LED_FULL : LED_OFF;
}
static void wiimote_leds_set(struct led_classdev *led_dev,
enum led_brightness value)
{
struct wiimote_data *wdata;
struct device *dev = led_dev->dev->parent;
int i;
unsigned long flags;
__u8 state, flag;
wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
for (i = 0; i < 4; ++i) {
if (wdata->leds[i] == led_dev) {
flag = WIIPROTO_FLAG_LED(i + 1);
spin_lock_irqsave(&wdata->state.lock, flags);
state = wdata->state.flags;
if (value == LED_OFF)
wiiproto_req_leds(wdata, state & ~flag);
else
wiiproto_req_leds(wdata, state | flag);
spin_unlock_irqrestore(&wdata->state.lock, flags);
break;
}
}
}
static int wiimote_ff_play(struct input_dev *dev, void *data,
struct ff_effect *eff)
{
struct wiimote_data *wdata = input_get_drvdata(dev);
__u8 value;
unsigned long flags;
/*
* The wiimote supports only a single rumble motor so if any magnitude
* is set to non-zero then we start the rumble motor. If both are set to
* zero, we stop the rumble motor.
*/
if (eff->u.rumble.strong_magnitude || eff->u.rumble.weak_magnitude)
value = 1;
else
value = 0;
spin_lock_irqsave(&wdata->state.lock, flags);
wiiproto_req_rumble(wdata, value);
spin_unlock_irqrestore(&wdata->state.lock, flags);
return 0;
}
static int wiimote_input_open(struct input_dev *dev)
{
struct wiimote_data *wdata = input_get_drvdata(dev);
return hid_hw_open(wdata->hdev);
}
static void wiimote_input_close(struct input_dev *dev)
{
struct wiimote_data *wdata = input_get_drvdata(dev);
hid_hw_close(wdata->hdev);
}
static int wiimote_accel_open(struct input_dev *dev)
{
struct wiimote_data *wdata = input_get_drvdata(dev);
int ret;
unsigned long flags;
ret = hid_hw_open(wdata->hdev);
if (ret)
return ret;
spin_lock_irqsave(&wdata->state.lock, flags);
wiiproto_req_accel(wdata, true);
spin_unlock_irqrestore(&wdata->state.lock, flags);
return 0;
}
static void wiimote_accel_close(struct input_dev *dev)
{
struct wiimote_data *wdata = input_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&wdata->state.lock, flags);
wiiproto_req_accel(wdata, false);
spin_unlock_irqrestore(&wdata->state.lock, flags);
hid_hw_close(wdata->hdev);
}
static int wiimote_ir_open(struct input_dev *dev)
{
struct wiimote_data *wdata = input_get_drvdata(dev);
int ret;
ret = hid_hw_open(wdata->hdev);
if (ret)
return ret;
ret = wiimote_init_ir(wdata, WIIPROTO_FLAG_IR_BASIC);
if (ret) {
hid_hw_close(wdata->hdev);
return ret;
}
return 0;
}
static void wiimote_ir_close(struct input_dev *dev)
{
struct wiimote_data *wdata = input_get_drvdata(dev);
wiimote_init_ir(wdata, 0);
hid_hw_close(wdata->hdev);
}
static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
{
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_LEFT],
!!(payload[0] & 0x01));
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_RIGHT],
!!(payload[0] & 0x02));
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_DOWN],
!!(payload[0] & 0x04));
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_UP],
!!(payload[0] & 0x08));
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_PLUS],
!!(payload[0] & 0x10));
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_TWO],
!!(payload[1] & 0x01));
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_ONE],
!!(payload[1] & 0x02));
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_B],
!!(payload[1] & 0x04));
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_A],
!!(payload[1] & 0x08));
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_MINUS],
!!(payload[1] & 0x10));
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_HOME],
!!(payload[1] & 0x80));
input_sync(wdata->input);
}
static void handler_accel(struct wiimote_data *wdata, const __u8 *payload)
{
__u16 x, y, z;
if (!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
return;
/*
* payload is: BB BB XX YY ZZ
* Accelerometer data is encoded into 3 10bit values. XX, YY and ZZ
* contain the upper 8 bits of each value. The lower 2 bits are
* contained in the buttons data BB BB.
* Bits 6 and 7 of the first buttons byte BB are the lower 2 bits of the
* X accel value. Bit 5 of the second buttons byte is the 2nd bit of the
* Y accel value and bit 6 is the 2nd bit of the Z value.
* The first bit of the Y and Z values is not available and is always set to 0.
* 0x200 is returned on no movement.
*/
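/*
* Worked example (illustrative, values made up): for a payload of
* BB BB XX YY ZZ = 0x20 0x40 0x80 0x80 0x7f the code below yields
* x = (0x80 << 2) | ((0x20 >> 5) & 0x3) = 0x201  ->  ABS_RX =  1
* y = (0x80 << 2) | ((0x40 >> 4) & 0x2) = 0x200  ->  ABS_RY =  0
* z = (0x7f << 2) | ((0x40 >> 5) & 0x2) = 0x1fe  ->  ABS_RZ = -2
* after the 0x200 rest position is subtracted.
*/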
x = payload[2] << 2;
y = payload[3] << 2;
z = payload[4] << 2;
x |= (payload[0] >> 5) & 0x3;
y |= (payload[1] >> 4) & 0x2;
z |= (payload[1] >> 5) & 0x2;
input_report_abs(wdata->accel, ABS_RX, x - 0x200);
input_report_abs(wdata->accel, ABS_RY, y - 0x200);
input_report_abs(wdata->accel, ABS_RZ, z - 0x200);
input_sync(wdata->accel);
}
#define ir_to_input0(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
ABS_HAT0X, ABS_HAT0Y)
#define ir_to_input1(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
ABS_HAT1X, ABS_HAT1Y)
#define ir_to_input2(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
ABS_HAT2X, ABS_HAT2Y)
#define ir_to_input3(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
ABS_HAT3X, ABS_HAT3Y)
static void __ir_to_input(struct wiimote_data *wdata, const __u8 *ir,
bool packed, __u8 xid, __u8 yid)
{
__u16 x, y;
if (!(wdata->state.flags & WIIPROTO_FLAGS_IR))
return;
/*
* Basic IR data is encoded into 3 bytes. The first two bytes are the
* lower 8 bit of the X/Y data, the 3rd byte contains the upper 2 bits
* of both.
* If the data is packed, the 3rd byte is put first and slightly
* reordered. This allows packed and non-packed data to be interleaved
* so that two IR sets fit in 5 bytes instead of 6.
* The resulting 10bit X/Y values are passed to the ABS_HATXY input dev.
*/
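/*
* Worked example (illustrative, values made up): the non-packed bytes
* 0x34 0x56 0x70 and the packed bytes 0x07 0x34 0x56 both decode to
* x = 0x334 (820) and y = 0x156 (342) with the code below; only the
* placement of the two upper bits of each coordinate differs between
* the two layouts.
*/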
if (packed) {
x = ir[1] | ((ir[0] & 0x03) << 8);
y = ir[2] | ((ir[0] & 0x0c) << 6);
} else {
x = ir[0] | ((ir[2] & 0x30) << 4);
y = ir[1] | ((ir[2] & 0xc0) << 2);
}
input_report_abs(wdata->ir, xid, x);
input_report_abs(wdata->ir, yid, y);
}
static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
/* on status reports the drm is reset so we need to resend the drm */
wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
wiiext_event(wdata, payload[2] & 0x02);
if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_SREQ, 0)) {
wdata->state.cmd_battery = payload[5];
wiimote_cmd_complete(wdata);
}
}
static void handler_data(struct wiimote_data *wdata, const __u8 *payload)
{
__u16 offset = payload[3] << 8 | payload[4];
__u8 size = (payload[2] >> 4) + 1;
__u8 err = payload[2] & 0x0f;
handler_keys(wdata, payload);
if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_RMEM, offset)) {
if (err)
size = 0;
else if (size > wdata->state.cmd_read_size)
size = wdata->state.cmd_read_size;
wdata->state.cmd_read_size = size;
if (wdata->state.cmd_read_buf)
memcpy(wdata->state.cmd_read_buf, &payload[5], size);
wiimote_cmd_complete(wdata);
}
}
static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
{
__u8 err = payload[3];
__u8 cmd = payload[2];
handler_keys(wdata, payload);
if (wiimote_cmd_pending(wdata, cmd, 0)) {
wdata->state.cmd_err = err;
wiimote_cmd_complete(wdata);
} else if (err) {
hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
cmd);
}
}
static void handler_drm_KA(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
handler_accel(wdata, payload);
}
static void handler_drm_KE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
wiiext_handle(wdata, &payload[2]);
}
static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
handler_accel(wdata, payload);
ir_to_input0(wdata, &payload[5], false);
ir_to_input1(wdata, &payload[8], false);
ir_to_input2(wdata, &payload[11], false);
ir_to_input3(wdata, &payload[14], false);
input_sync(wdata->ir);
}
static void handler_drm_KEE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
wiiext_handle(wdata, &payload[2]);
}
static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
ir_to_input0(wdata, &payload[2], false);
ir_to_input1(wdata, &payload[4], true);
ir_to_input2(wdata, &payload[7], false);
ir_to_input3(wdata, &payload[9], true);
input_sync(wdata->ir);
wiiext_handle(wdata, &payload[12]);
}
static void handler_drm_KAE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
handler_accel(wdata, payload);
wiiext_handle(wdata, &payload[5]);
}
static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
handler_accel(wdata, payload);
ir_to_input0(wdata, &payload[5], false);
ir_to_input1(wdata, &payload[7], true);
ir_to_input2(wdata, &payload[10], false);
ir_to_input3(wdata, &payload[12], true);
input_sync(wdata->ir);
wiiext_handle(wdata, &payload[15]);
}
static void handler_drm_E(struct wiimote_data *wdata, const __u8 *payload)
{
wiiext_handle(wdata, payload);
}
static void handler_drm_SKAI1(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
wdata->state.accel_split[0] = payload[2];
wdata->state.accel_split[1] = (payload[0] >> 1) & (0x10 | 0x20);
wdata->state.accel_split[1] |= (payload[1] << 1) & (0x40 | 0x80);
ir_to_input0(wdata, &payload[3], false);
ir_to_input1(wdata, &payload[12], false);
input_sync(wdata->ir);
}
static void handler_drm_SKAI2(struct wiimote_data *wdata, const __u8 *payload)
{
__u8 buf[5];
handler_keys(wdata, payload);
wdata->state.accel_split[1] |= (payload[0] >> 5) & (0x01 | 0x02);
wdata->state.accel_split[1] |= (payload[1] >> 3) & (0x04 | 0x08);
buf[0] = 0;
buf[1] = 0;
buf[2] = wdata->state.accel_split[0];
buf[3] = payload[2];
buf[4] = wdata->state.accel_split[1];
handler_accel(wdata, buf);
ir_to_input2(wdata, &payload[3], false);
ir_to_input3(wdata, &payload[12], false);
input_sync(wdata->ir);
}
struct wiiproto_handler {
__u8 id;
size_t size;
void (*func)(struct wiimote_data *wdata, const __u8 *payload);
};
static struct wiiproto_handler handlers[] = {
{ .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status },
{ .id = WIIPROTO_REQ_DATA, .size = 21, .func = handler_data },
{ .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return },
{ .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
{ .id = WIIPROTO_REQ_DRM_KA, .size = 5, .func = handler_drm_KA },
{ .id = WIIPROTO_REQ_DRM_KE, .size = 10, .func = handler_drm_KE },
{ .id = WIIPROTO_REQ_DRM_KAI, .size = 17, .func = handler_drm_KAI },
{ .id = WIIPROTO_REQ_DRM_KEE, .size = 21, .func = handler_drm_KEE },
{ .id = WIIPROTO_REQ_DRM_KAE, .size = 21, .func = handler_drm_KAE },
{ .id = WIIPROTO_REQ_DRM_KIE, .size = 21, .func = handler_drm_KIE },
{ .id = WIIPROTO_REQ_DRM_KAIE, .size = 21, .func = handler_drm_KAIE },
{ .id = WIIPROTO_REQ_DRM_E, .size = 21, .func = handler_drm_E },
{ .id = WIIPROTO_REQ_DRM_SKAI1, .size = 21, .func = handler_drm_SKAI1 },
{ .id = WIIPROTO_REQ_DRM_SKAI2, .size = 21, .func = handler_drm_SKAI2 },
{ .id = 0 }
};
static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
u8 *raw_data, int size)
{
struct wiimote_data *wdata = hid_get_drvdata(hdev);
struct wiiproto_handler *h;
int i;
unsigned long flags;
bool handled = false;
if (size < 1)
return -EINVAL;
spin_lock_irqsave(&wdata->state.lock, flags);
for (i = 0; handlers[i].id; ++i) {
h = &handlers[i];
if (h->id == raw_data[0] && h->size < size) {
h->func(wdata, &raw_data[1]);
handled = true;
}
}
if (!handled)
hid_warn(hdev, "Unhandled report %hhu size %d\n", raw_data[0],
size);
spin_unlock_irqrestore(&wdata->state.lock, flags);
return 0;
}
static void wiimote_leds_destroy(struct wiimote_data *wdata)
{
int i;
struct led_classdev *led;
for (i = 0; i < 4; ++i) {
if (wdata->leds[i]) {
led = wdata->leds[i];
wdata->leds[i] = NULL;
led_classdev_unregister(led);
kfree(led);
}
}
}
static int wiimote_leds_create(struct wiimote_data *wdata)
{
int i, ret;
struct device *dev = &wdata->hdev->dev;
size_t namesz = strlen(dev_name(dev)) + 9;
struct led_classdev *led;
char *name;
for (i = 0; i < 4; ++i) {
led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL);
if (!led) {
ret = -ENOMEM;
goto err;
}
name = (void*)&led[1];
snprintf(name, namesz, "%s:blue:p%d", dev_name(dev), i);
led->name = name;
led->brightness = 0;
led->max_brightness = 1;
led->brightness_get = wiimote_leds_get;
led->brightness_set = wiimote_leds_set;
ret = led_classdev_register(dev, led);
if (ret) {
kfree(led);
goto err;
}
wdata->leds[i] = led;
}
return 0;
err:
wiimote_leds_destroy(wdata);
return ret;
}
static struct wiimote_data *wiimote_create(struct hid_device *hdev)
{
struct wiimote_data *wdata;
int i;
wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
if (!wdata)
return NULL;
wdata->input = input_allocate_device();
if (!wdata->input)
goto err;
wdata->hdev = hdev;
hid_set_drvdata(hdev, wdata);
input_set_drvdata(wdata->input, wdata);
wdata->input->open = wiimote_input_open;
wdata->input->close = wiimote_input_close;
wdata->input->dev.parent = &wdata->hdev->dev;
wdata->input->id.bustype = wdata->hdev->bus;
wdata->input->id.vendor = wdata->hdev->vendor;
wdata->input->id.product = wdata->hdev->product;
wdata->input->id.version = wdata->hdev->version;
wdata->input->name = WIIMOTE_NAME;
set_bit(EV_KEY, wdata->input->evbit);
for (i = 0; i < WIIPROTO_KEY_COUNT; ++i)
set_bit(wiiproto_keymap[i], wdata->input->keybit);
set_bit(FF_RUMBLE, wdata->input->ffbit);
if (input_ff_create_memless(wdata->input, NULL, wiimote_ff_play))
goto err_input;
wdata->accel = input_allocate_device();
if (!wdata->accel)
goto err_input;
input_set_drvdata(wdata->accel, wdata);
wdata->accel->open = wiimote_accel_open;
wdata->accel->close = wiimote_accel_close;
wdata->accel->dev.parent = &wdata->hdev->dev;
wdata->accel->id.bustype = wdata->hdev->bus;
wdata->accel->id.vendor = wdata->hdev->vendor;
wdata->accel->id.product = wdata->hdev->product;
wdata->accel->id.version = wdata->hdev->version;
wdata->accel->name = WIIMOTE_NAME " Accelerometer";
set_bit(EV_ABS, wdata->accel->evbit);
set_bit(ABS_RX, wdata->accel->absbit);
set_bit(ABS_RY, wdata->accel->absbit);
set_bit(ABS_RZ, wdata->accel->absbit);
input_set_abs_params(wdata->accel, ABS_RX, -500, 500, 2, 4);
input_set_abs_params(wdata->accel, ABS_RY, -500, 500, 2, 4);
input_set_abs_params(wdata->accel, ABS_RZ, -500, 500, 2, 4);
wdata->ir = input_allocate_device();
if (!wdata->ir)
goto err_ir;
input_set_drvdata(wdata->ir, wdata);
wdata->ir->open = wiimote_ir_open;
wdata->ir->close = wiimote_ir_close;
wdata->ir->dev.parent = &wdata->hdev->dev;
wdata->ir->id.bustype = wdata->hdev->bus;
wdata->ir->id.vendor = wdata->hdev->vendor;
wdata->ir->id.product = wdata->hdev->product;
wdata->ir->id.version = wdata->hdev->version;
wdata->ir->name = WIIMOTE_NAME " IR";
set_bit(EV_ABS, wdata->ir->evbit);
set_bit(ABS_HAT0X, wdata->ir->absbit);
set_bit(ABS_HAT0Y, wdata->ir->absbit);
set_bit(ABS_HAT1X, wdata->ir->absbit);
set_bit(ABS_HAT1Y, wdata->ir->absbit);
set_bit(ABS_HAT2X, wdata->ir->absbit);
set_bit(ABS_HAT2Y, wdata->ir->absbit);
set_bit(ABS_HAT3X, wdata->ir->absbit);
set_bit(ABS_HAT3Y, wdata->ir->absbit);
input_set_abs_params(wdata->ir, ABS_HAT0X, 0, 1023, 2, 4);
input_set_abs_params(wdata->ir, ABS_HAT0Y, 0, 767, 2, 4);
input_set_abs_params(wdata->ir, ABS_HAT1X, 0, 1023, 2, 4);
input_set_abs_params(wdata->ir, ABS_HAT1Y, 0, 767, 2, 4);
input_set_abs_params(wdata->ir, ABS_HAT2X, 0, 1023, 2, 4);
input_set_abs_params(wdata->ir, ABS_HAT2Y, 0, 767, 2, 4);
input_set_abs_params(wdata->ir, ABS_HAT3X, 0, 1023, 2, 4);
input_set_abs_params(wdata->ir, ABS_HAT3Y, 0, 767, 2, 4);
spin_lock_init(&wdata->qlock);
INIT_WORK(&wdata->worker, wiimote_worker);
spin_lock_init(&wdata->state.lock);
init_completion(&wdata->state.ready);
mutex_init(&wdata->state.sync);
wdata->state.drm = WIIPROTO_REQ_DRM_K;
return wdata;
err_ir:
input_free_device(wdata->accel);
err_input:
input_free_device(wdata->input);
err:
kfree(wdata);
return NULL;
}
static void wiimote_destroy(struct wiimote_data *wdata)
{
wiidebug_deinit(wdata);
wiiext_deinit(wdata);
wiimote_leds_destroy(wdata);
power_supply_unregister(&wdata->battery);
input_unregister_device(wdata->accel);
input_unregister_device(wdata->ir);
input_unregister_device(wdata->input);
cancel_work_sync(&wdata->worker);
hid_hw_stop(wdata->hdev);
kfree(wdata);
}
static int wiimote_hid_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
struct wiimote_data *wdata;
int ret;
hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
wdata = wiimote_create(hdev);
if (!wdata) {
hid_err(hdev, "Can't alloc device\n");
return -ENOMEM;
}
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "HID parse failed\n");
goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
if (ret) {
hid_err(hdev, "HW start failed\n");
goto err;
}
ret = input_register_device(wdata->accel);
if (ret) {
hid_err(hdev, "Cannot register input device\n");
goto err_stop;
}
ret = input_register_device(wdata->ir);
if (ret) {
hid_err(hdev, "Cannot register input device\n");
goto err_ir;
}
ret = input_register_device(wdata->input);
if (ret) {
hid_err(hdev, "Cannot register input device\n");
goto err_input;
}
wdata->battery.properties = wiimote_battery_props;
wdata->battery.num_properties = ARRAY_SIZE(wiimote_battery_props);
wdata->battery.get_property = wiimote_battery_get_property;
wdata->battery.name = "wiimote_battery";
wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
wdata->battery.use_for_apm = 0;
ret = power_supply_register(&wdata->hdev->dev, &wdata->battery);
if (ret) {
hid_err(hdev, "Cannot register battery device\n");
goto err_battery;
}
power_supply_powers(&wdata->battery, &hdev->dev);
ret = wiimote_leds_create(wdata);
if (ret)
goto err_free;
ret = wiiext_init(wdata);
if (ret)
goto err_free;
ret = wiidebug_init(wdata);
if (ret)
goto err_free;
hid_info(hdev, "New device registered\n");
/* by default set led1 after device initialization */
spin_lock_irq(&wdata->state.lock);
wiiproto_req_leds(wdata, WIIPROTO_FLAG_LED1);
spin_unlock_irq(&wdata->state.lock);
return 0;
err_free:
wiimote_destroy(wdata);
return ret;
err_battery:
input_unregister_device(wdata->input);
wdata->input = NULL;
err_input:
input_unregister_device(wdata->ir);
wdata->ir = NULL;
err_ir:
input_unregister_device(wdata->accel);
wdata->accel = NULL;
err_stop:
hid_hw_stop(hdev);
err:
input_free_device(wdata->ir);
input_free_device(wdata->accel);
input_free_device(wdata->input);
kfree(wdata);
return ret;
}
static void wiimote_hid_remove(struct hid_device *hdev)
{
struct wiimote_data *wdata = hid_get_drvdata(hdev);
hid_info(hdev, "Device removed\n");
wiimote_destroy(wdata);
}
static const struct hid_device_id wiimote_hid_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
USB_DEVICE_ID_NINTENDO_WIIMOTE) },
{ }
};
MODULE_DEVICE_TABLE(hid, wiimote_hid_devices);
static struct hid_driver wiimote_hid_driver = {
.name = "wiimote",
.id_table = wiimote_hid_devices,
.probe = wiimote_hid_probe,
.remove = wiimote_hid_remove,
.raw_event = wiimote_hid_event,
};
static int __init wiimote_init(void)
{
int ret;
ret = hid_register_driver(&wiimote_hid_driver);
if (ret)
pr_err("Can't register wiimote hid driver\n");
return ret;
}
static void __exit wiimote_exit(void)
{
hid_unregister_driver(&wiimote_hid_driver);
}
module_init(wiimote_init);
module_exit(wiimote_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION(WIIMOTE_NAME " Device Driver");
| gpl-2.0 |
neumeika/kernel_hws10101l | drivers/tty/vt/consolemap.c | 3313 | 22885 | /*
* consolemap.c
*
* Mapping from internal code (such as Latin-1 or Unicode or IBM PC code)
* to font positions.
*
* aeb, 950210
*
* Support for multiple unimaps by Jakub Jelinek <jj@ultra.linux.cz>, July 1998
*
* Fix bug in inverse translation. Stanislav Voronyi <stas@cnti.uanet.kharkov.ua>, Dec 1998
*/
#include <linux/module.h>
#include <linux/kd.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <asm/uaccess.h>
#include <linux/consolemap.h>
#include <linux/vt_kern.h>
static unsigned short translations[][256] = {
/* 8-bit Latin-1 mapped to Unicode -- trivial mapping */
{
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f,
0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f,
0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7,
0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x00b5, 0x00b6, 0x00b7,
0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7,
0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7,
0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
},
/* VT100 graphics mapped to Unicode */
{
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x2192, 0x2190, 0x2191, 0x2193, 0x002f,
0x2588, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x00a0,
0x25c6, 0x2592, 0x2409, 0x240c, 0x240d, 0x240a, 0x00b0, 0x00b1,
0x2591, 0x240b, 0x2518, 0x2510, 0x250c, 0x2514, 0x253c, 0x23ba,
0x23bb, 0x2500, 0x23bc, 0x23bd, 0x251c, 0x2524, 0x2534, 0x252c,
0x2502, 0x2264, 0x2265, 0x03c0, 0x2260, 0x00a3, 0x00b7, 0x007f,
0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f,
0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f,
0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7,
0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x00b5, 0x00b6, 0x00b7,
0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7,
0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7,
0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
},
/* IBM Codepage 437 mapped to Unicode */
{
0x0000, 0x263a, 0x263b, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,
0x25d8, 0x25cb, 0x25d9, 0x2642, 0x2640, 0x266a, 0x266b, 0x263c,
0x25b6, 0x25c0, 0x2195, 0x203c, 0x00b6, 0x00a7, 0x25ac, 0x21a8,
0x2191, 0x2193, 0x2192, 0x2190, 0x221f, 0x2194, 0x25b2, 0x25bc,
0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x2302,
0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x00e0, 0x00e5, 0x00e7,
0x00ea, 0x00eb, 0x00e8, 0x00ef, 0x00ee, 0x00ec, 0x00c4, 0x00c5,
0x00c9, 0x00e6, 0x00c6, 0x00f4, 0x00f6, 0x00f2, 0x00fb, 0x00f9,
0x00ff, 0x00d6, 0x00dc, 0x00a2, 0x00a3, 0x00a5, 0x20a7, 0x0192,
0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x00f1, 0x00d1, 0x00aa, 0x00ba,
0x00bf, 0x2310, 0x00ac, 0x00bd, 0x00bc, 0x00a1, 0x00ab, 0x00bb,
0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556,
0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510,
0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f,
0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567,
0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b,
0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580,
0x03b1, 0x00df, 0x0393, 0x03c0, 0x03a3, 0x03c3, 0x00b5, 0x03c4,
0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229,
0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248,
0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0
},
/* User mapping -- default to codes for direct font mapping */
{
0xf000, 0xf001, 0xf002, 0xf003, 0xf004, 0xf005, 0xf006, 0xf007,
0xf008, 0xf009, 0xf00a, 0xf00b, 0xf00c, 0xf00d, 0xf00e, 0xf00f,
0xf010, 0xf011, 0xf012, 0xf013, 0xf014, 0xf015, 0xf016, 0xf017,
0xf018, 0xf019, 0xf01a, 0xf01b, 0xf01c, 0xf01d, 0xf01e, 0xf01f,
0xf020, 0xf021, 0xf022, 0xf023, 0xf024, 0xf025, 0xf026, 0xf027,
0xf028, 0xf029, 0xf02a, 0xf02b, 0xf02c, 0xf02d, 0xf02e, 0xf02f,
0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
0xf038, 0xf039, 0xf03a, 0xf03b, 0xf03c, 0xf03d, 0xf03e, 0xf03f,
0xf040, 0xf041, 0xf042, 0xf043, 0xf044, 0xf045, 0xf046, 0xf047,
0xf048, 0xf049, 0xf04a, 0xf04b, 0xf04c, 0xf04d, 0xf04e, 0xf04f,
0xf050, 0xf051, 0xf052, 0xf053, 0xf054, 0xf055, 0xf056, 0xf057,
0xf058, 0xf059, 0xf05a, 0xf05b, 0xf05c, 0xf05d, 0xf05e, 0xf05f,
0xf060, 0xf061, 0xf062, 0xf063, 0xf064, 0xf065, 0xf066, 0xf067,
0xf068, 0xf069, 0xf06a, 0xf06b, 0xf06c, 0xf06d, 0xf06e, 0xf06f,
0xf070, 0xf071, 0xf072, 0xf073, 0xf074, 0xf075, 0xf076, 0xf077,
0xf078, 0xf079, 0xf07a, 0xf07b, 0xf07c, 0xf07d, 0xf07e, 0xf07f,
0xf080, 0xf081, 0xf082, 0xf083, 0xf084, 0xf085, 0xf086, 0xf087,
0xf088, 0xf089, 0xf08a, 0xf08b, 0xf08c, 0xf08d, 0xf08e, 0xf08f,
0xf090, 0xf091, 0xf092, 0xf093, 0xf094, 0xf095, 0xf096, 0xf097,
0xf098, 0xf099, 0xf09a, 0xf09b, 0xf09c, 0xf09d, 0xf09e, 0xf09f,
0xf0a0, 0xf0a1, 0xf0a2, 0xf0a3, 0xf0a4, 0xf0a5, 0xf0a6, 0xf0a7,
0xf0a8, 0xf0a9, 0xf0aa, 0xf0ab, 0xf0ac, 0xf0ad, 0xf0ae, 0xf0af,
0xf0b0, 0xf0b1, 0xf0b2, 0xf0b3, 0xf0b4, 0xf0b5, 0xf0b6, 0xf0b7,
0xf0b8, 0xf0b9, 0xf0ba, 0xf0bb, 0xf0bc, 0xf0bd, 0xf0be, 0xf0bf,
0xf0c0, 0xf0c1, 0xf0c2, 0xf0c3, 0xf0c4, 0xf0c5, 0xf0c6, 0xf0c7,
0xf0c8, 0xf0c9, 0xf0ca, 0xf0cb, 0xf0cc, 0xf0cd, 0xf0ce, 0xf0cf,
0xf0d0, 0xf0d1, 0xf0d2, 0xf0d3, 0xf0d4, 0xf0d5, 0xf0d6, 0xf0d7,
0xf0d8, 0xf0d9, 0xf0da, 0xf0db, 0xf0dc, 0xf0dd, 0xf0de, 0xf0df,
0xf0e0, 0xf0e1, 0xf0e2, 0xf0e3, 0xf0e4, 0xf0e5, 0xf0e6, 0xf0e7,
0xf0e8, 0xf0e9, 0xf0ea, 0xf0eb, 0xf0ec, 0xf0ed, 0xf0ee, 0xf0ef,
0xf0f0, 0xf0f1, 0xf0f2, 0xf0f3, 0xf0f4, 0xf0f5, 0xf0f6, 0xf0f7,
0xf0f8, 0xf0f9, 0xf0fa, 0xf0fb, 0xf0fc, 0xf0fd, 0xf0fe, 0xf0ff
}
};
/* The standard kernel character-to-font mappings are not invertible
-- this is just a best effort. */
#define MAX_GLYPH 512 /* Max possible glyph value */
static int inv_translate[MAX_NR_CONSOLES];
struct uni_pagedir {
u16 **uni_pgdir[32];
unsigned long refcount;
unsigned long sum;
unsigned char *inverse_translations[4];
u16 *inverse_trans_unicode;
int readonly;
};
static struct uni_pagedir *dflt;
static void set_inverse_transl(struct vc_data *conp, struct uni_pagedir *p, int i)
{
int j, glyph;
unsigned short *t = translations[i];
unsigned char *q;
if (!p) return;
q = p->inverse_translations[i];
if (!q) {
q = p->inverse_translations[i] = (unsigned char *)
kmalloc(MAX_GLYPH, GFP_KERNEL);
if (!q) return;
}
memset(q, 0, MAX_GLYPH);
for (j = 0; j < E_TABSZ; j++) {
glyph = conv_uni_to_pc(conp, t[j]);
if (glyph >= 0 && glyph < MAX_GLYPH && q[glyph] < 32) {
/* prefer '-' above SHY etc. */
q[glyph] = j;
}
}
}
static void set_inverse_trans_unicode(struct vc_data *conp,
struct uni_pagedir *p)
{
int i, j, k, glyph;
u16 **p1, *p2;
u16 *q;
if (!p) return;
q = p->inverse_trans_unicode;
if (!q) {
q = p->inverse_trans_unicode =
kmalloc(MAX_GLYPH * sizeof(u16), GFP_KERNEL);
if (!q)
return;
}
memset(q, 0, MAX_GLYPH * sizeof(u16));
for (i = 0; i < 32; i++) {
p1 = p->uni_pgdir[i];
if (!p1)
continue;
for (j = 0; j < 32; j++) {
p2 = p1[j];
if (!p2)
continue;
for (k = 0; k < 64; k++) {
glyph = p2[k];
if (glyph >= 0 && glyph < MAX_GLYPH
&& q[glyph] < 32)
q[glyph] = (i << 11) + (j << 6) + k;
}
}
}
}
unsigned short *set_translate(int m, struct vc_data *vc)
{
inv_translate[vc->vc_num] = m;
return translations[m];
}
/*
* Inverse translation is impossible for several reasons:
* 1. The font<->character maps are not 1-1.
* 2. The text may have been written while a different translation map
* was active.
* Still, to a certain extent it is now possible to cut and paste non-ASCII.
*/
u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode)
{
struct uni_pagedir *p;
int m;
if (glyph < 0 || glyph >= MAX_GLYPH)
return 0;
else if (!(p = (struct uni_pagedir *)*conp->vc_uni_pagedir_loc))
return glyph;
else if (use_unicode) {
if (!p->inverse_trans_unicode)
return glyph;
else
return p->inverse_trans_unicode[glyph];
} else {
m = inv_translate[conp->vc_num];
if (!p->inverse_translations[m])
return glyph;
else
return p->inverse_translations[m][glyph];
}
}
EXPORT_SYMBOL_GPL(inverse_translate);
static void update_user_maps(void)
{
int i;
struct uni_pagedir *p, *q = NULL;
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (!vc_cons_allocated(i))
continue;
p = (struct uni_pagedir *)*vc_cons[i].d->vc_uni_pagedir_loc;
if (p && p != q) {
set_inverse_transl(vc_cons[i].d, p, USER_MAP);
set_inverse_trans_unicode(vc_cons[i].d, p);
q = p;
}
}
}
/*
* Load customizable translation table
* arg points to a 256 byte translation table.
*
* The "old" variants are for translation directly to font (using the
* 0xf000-0xf0ff "transparent" Unicodes) whereas the "new" variants set
* Unicodes explicitly.
*/
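/*
* Example (illustrative): with the "old" interface a user byte of 0x41
* is stored as UNI_DIRECT_BASE | 0x41, i.e. the transparent code
* 0xf041 that selects font position 0x41 directly, while the "new"
* interface stores the full 16-bit Unicode value (e.g. 0x0041 for 'A')
* as given.
*/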
int con_set_trans_old(unsigned char __user * arg)
{
int i;
unsigned short *p = translations[USER_MAP];
if (!access_ok(VERIFY_READ, arg, E_TABSZ))
return -EFAULT;
for (i=0; i<E_TABSZ ; i++) {
unsigned char uc;
__get_user(uc, arg+i);
p[i] = UNI_DIRECT_BASE | uc;
}
update_user_maps();
return 0;
}
int con_get_trans_old(unsigned char __user * arg)
{
int i, ch;
unsigned short *p = translations[USER_MAP];
if (!access_ok(VERIFY_WRITE, arg, E_TABSZ))
return -EFAULT;
for (i=0; i<E_TABSZ ; i++)
{
ch = conv_uni_to_pc(vc_cons[fg_console].d, p[i]);
__put_user((ch & ~0xff) ? 0 : ch, arg+i);
}
return 0;
}
int con_set_trans_new(ushort __user * arg)
{
int i;
unsigned short *p = translations[USER_MAP];
if (!access_ok(VERIFY_READ, arg, E_TABSZ*sizeof(unsigned short)))
return -EFAULT;
for (i=0; i<E_TABSZ ; i++) {
unsigned short us;
__get_user(us, arg+i);
p[i] = us;
}
update_user_maps();
return 0;
}
int con_get_trans_new(ushort __user * arg)
{
int i;
unsigned short *p = translations[USER_MAP];
if (!access_ok(VERIFY_WRITE, arg, E_TABSZ*sizeof(unsigned short)))
return -EFAULT;
for (i=0; i<E_TABSZ ; i++)
__put_user(p[i], arg+i);
return 0;
}
/*
* Unicode -> current font conversion
*
* A font has at most 512 chars, usually 256.
* But one font position may represent several Unicode chars.
* A hashtable is somewhat of a pain to deal with, so use a
* "paged table" instead. Simulation has shown the memory cost of
* this 3-level paged table scheme to be comparable to a hash table.
*/
extern u8 dfont_unicount[]; /* Defined in console_defmap.c */
extern u16 dfont_unitable[];
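/*
 * Minimal lookup sketch, for illustration only (kept out of the build;
 * the helper name is invented here): a 16-bit code splits 5 + 5 + 6 bits
 * to index the 32/32/64 levels of the paged table.  Empty leaf slots
 * hold 0xffff, see con_insert_unipair() below.
 */
#if 0
static inline u16 uni_pagedir_lookup_sketch(struct uni_pagedir *p, u16 ucs)
{
u16 **dir = p->uni_pgdir[ucs >> 11];            /* top 5 bits */
u16 *row = dir ? dir[(ucs >> 6) & 0x1f] : NULL; /* middle 5 bits */
return row ? row[ucs & 0x3f] : 0xffff;          /* low 6 bits */
}
#endif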
static void con_release_unimap(struct uni_pagedir *p)
{
u16 **p1;
int i, j;
if (p == dflt) dflt = NULL;
for (i = 0; i < 32; i++) {
if ((p1 = p->uni_pgdir[i]) != NULL) {
for (j = 0; j < 32; j++)
kfree(p1[j]);
kfree(p1);
}
p->uni_pgdir[i] = NULL;
}
for (i = 0; i < 4; i++) {
kfree(p->inverse_translations[i]);
p->inverse_translations[i] = NULL;
}
if (p->inverse_trans_unicode) {
kfree(p->inverse_trans_unicode);
p->inverse_trans_unicode = NULL;
}
}
void con_free_unimap(struct vc_data *vc)
{
struct uni_pagedir *p;
p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
if (!p)
return;
*vc->vc_uni_pagedir_loc = 0;
if (--p->refcount)
return;
con_release_unimap(p);
kfree(p);
}
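/*
 * For reference: con_unify_unimap() below compares a freshly built table
 * against every other console's table (cheap reject via the "sum"
 * checksum, then a full compare) and, on a match, frees the new copy and
 * shares the existing one by bumping its refcount.
 */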
static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p)
{
int i, j, k;
struct uni_pagedir *q;
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (!vc_cons_allocated(i))
continue;
q = (struct uni_pagedir *)*vc_cons[i].d->vc_uni_pagedir_loc;
if (!q || q == p || q->sum != p->sum)
continue;
for (j = 0; j < 32; j++) {
u16 **p1, **q1;
p1 = p->uni_pgdir[j]; q1 = q->uni_pgdir[j];
if (!p1 && !q1)
continue;
if (!p1 || !q1)
break;
for (k = 0; k < 32; k++) {
if (!p1[k] && !q1[k])
continue;
if (!p1[k] || !q1[k])
break;
if (memcmp(p1[k], q1[k], 64*sizeof(u16)))
break;
}
if (k < 32)
break;
}
if (j == 32) {
q->refcount++;
*conp->vc_uni_pagedir_loc = (unsigned long)q;
con_release_unimap(p);
kfree(p);
return 1;
}
}
return 0;
}
static int
con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
{
int i, n;
u16 **p1, *p2;
if (!(p1 = p->uni_pgdir[n = unicode >> 11])) {
p1 = p->uni_pgdir[n] = kmalloc(32*sizeof(u16 *), GFP_KERNEL);
if (!p1) return -ENOMEM;
for (i = 0; i < 32; i++)
p1[i] = NULL;
}
if (!(p2 = p1[n = (unicode >> 6) & 0x1f])) {
p2 = p1[n] = kmalloc(64*sizeof(u16), GFP_KERNEL);
if (!p2) return -ENOMEM;
memset(p2, 0xff, 64*sizeof(u16)); /* No glyphs for the characters (yet) */
}
p2[unicode & 0x3f] = fontpos;
p->sum += (fontpos << 20) + unicode;
return 0;
}
/* ui is a leftover from using a hashtable, but might be used again */
int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
{
struct uni_pagedir *p, *q;
p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
if (p && p->readonly) return -EIO;
if (!p || --p->refcount) {
q = kzalloc(sizeof(*p), GFP_KERNEL);
if (!q) {
if (p) p->refcount++;
return -ENOMEM;
}
q->refcount=1;
*vc->vc_uni_pagedir_loc = (unsigned long)q;
} else {
if (p == dflt) dflt = NULL;
p->refcount++;
p->sum = 0;
con_release_unimap(p);
}
return 0;
}
int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
{
int err = 0, err1, i;
struct uni_pagedir *p, *q;
p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
if (p->readonly) return -EIO;
if (!ct) return 0;
if (p->refcount > 1) {
int j, k;
u16 **p1, *p2, l;
err1 = con_clear_unimap(vc, NULL);
if (err1) return err1;
q = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
for (i = 0, l = 0; i < 32; i++)
if ((p1 = p->uni_pgdir[i]))
for (j = 0; j < 32; j++)
if ((p2 = p1[j]))
for (k = 0; k < 64; k++, l++)
if (p2[k] != 0xffff) {
err1 = con_insert_unipair(q, l, p2[k]);
if (err1) {
p->refcount++;
*vc->vc_uni_pagedir_loc = (unsigned long)p;
con_release_unimap(q);
kfree(q);
return err1;
}
}
p = q;
} else if (p == dflt)
dflt = NULL;
while (ct--) {
unsigned short unicode, fontpos;
__get_user(unicode, &list->unicode);
__get_user(fontpos, &list->fontpos);
if ((err1 = con_insert_unipair(p, unicode,fontpos)) != 0)
err = err1;
list++;
}
if (con_unify_unimap(vc, p))
return err;
for (i = 0; i <= 3; i++)
set_inverse_transl(vc, p, i); /* Update all inverse translations */
set_inverse_trans_unicode(vc, p);
return err;
}
/* Loads the unimap for the hardware font, as defined in uni_hash.tbl.
The representation used was the most compact I could come up
with. This routine is executed at sys_setup time, and when the
PIO_FONTRESET ioctl is called. */
int con_set_default_unimap(struct vc_data *vc)
{
int i, j, err = 0, err1;
u16 *q;
struct uni_pagedir *p;
if (dflt) {
p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
if (p == dflt)
return 0;
dflt->refcount++;
*vc->vc_uni_pagedir_loc = (unsigned long)dflt;
if (p && !--p->refcount) {
con_release_unimap(p);
kfree(p);
}
return 0;
}
/* The default font is always 256 characters */
err = con_clear_unimap(vc, NULL);
if (err) return err;
p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
q = dfont_unitable;
for (i = 0; i < 256; i++)
for (j = dfont_unicount[i]; j; j--) {
err1 = con_insert_unipair(p, *(q++), i);
if (err1)
err = err1;
}
if (con_unify_unimap(vc, p)) {
dflt = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
return err;
}
for (i = 0; i <= 3; i++)
set_inverse_transl(vc, p, i); /* Update all inverse translations */
set_inverse_trans_unicode(vc, p);
dflt = p;
return err;
}
EXPORT_SYMBOL(con_set_default_unimap);
int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc)
{
struct uni_pagedir *q;
if (!*src_vc->vc_uni_pagedir_loc)
return -EINVAL;
if (*dst_vc->vc_uni_pagedir_loc == *src_vc->vc_uni_pagedir_loc)
return 0;
con_free_unimap(dst_vc);
q = (struct uni_pagedir *)*src_vc->vc_uni_pagedir_loc;
q->refcount++;
*dst_vc->vc_uni_pagedir_loc = (long)q;
return 0;
}
int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list)
{
int i, j, k, ect;
u16 **p1, *p2;
struct uni_pagedir *p;
ect = 0;
if (*vc->vc_uni_pagedir_loc) {
p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
for (i = 0; i < 32; i++)
if ((p1 = p->uni_pgdir[i]))
for (j = 0; j < 32; j++)
if ((p2 = *(p1++)))
for (k = 0; k < 64; k++) {
if (*p2 < MAX_GLYPH && ect++ < ct) {
__put_user((u_short)((i<<11)+(j<<6)+k),
&list->unicode);
__put_user((u_short) *p2,
&list->fontpos);
list++;
}
p2++;
}
}
__put_user(ect, uct);
return ((ect <= ct) ? 0 : -ENOMEM);
}
void con_protect_unimap(struct vc_data *vc, int rdonly)
{
struct uni_pagedir *p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
if (p)
p->readonly = rdonly;
}
/*
* Always use USER_MAP. These functions are used by the keyboard,
* which shouldn't be affected by G0/G1 switching, etc.
* If the user map still contains default values, i.e. the
* direct-to-font mapping, then assume user is using Latin1.
*/
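/*
 * For reference: conv_8bit_to_uni() below treats a USER_MAP entry that
 * still holds the default direct-to-font value (0xf000 | c) as plain
 * Latin-1 and returns the byte unchanged; anything else is returned as
 * the Unicode value the user loaded.
 */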
/* may be called during an interrupt */
u32 conv_8bit_to_uni(unsigned char c)
{
unsigned short uni = translations[USER_MAP][c];
return uni == (0xf000 | c) ? c : uni;
}
int conv_uni_to_8bit(u32 uni)
{
int c;
for (c = 0; c < 0x100; c++)
if (translations[USER_MAP][c] == uni ||
(translations[USER_MAP][c] == (c | 0xf000) && uni == c))
return c;
return -1;
}
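/*
 * For reference (summarising the checks below): conv_uni_to_pc() returns
 * a glyph number on success or a negative code: -1 not printable, -2
 * zero-width space, -3 no unimap loaded, -4 not found.
 */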
int
conv_uni_to_pc(struct vc_data *conp, long ucs)
{
int h;
u16 **p1, *p2;
struct uni_pagedir *p;
/* Only 16-bit codes supported at this time */
if (ucs > 0xffff)
return -4; /* Not found */
else if (ucs < 0x20)
return -1; /* Not a printable character */
else if (ucs == 0xfeff || (ucs >= 0x200b && ucs <= 0x200f))
return -2; /* Zero-width space */
/*
* UNI_DIRECT_BASE indicates the start of the region in the User Zone
* which always has a 1:1 mapping to the currently loaded font. The
* UNI_DIRECT_MASK indicates the bit span of the region.
*/
else if ((ucs & ~UNI_DIRECT_MASK) == UNI_DIRECT_BASE)
return ucs & UNI_DIRECT_MASK;
if (!*conp->vc_uni_pagedir_loc)
return -3;
p = (struct uni_pagedir *)*conp->vc_uni_pagedir_loc;
if ((p1 = p->uni_pgdir[ucs >> 11]) &&
(p2 = p1[(ucs >> 6) & 0x1f]) &&
(h = p2[ucs & 0x3f]) < MAX_GLYPH)
return h;
return -4; /* not found */
}
/*
* This is called at sys_setup time, after memory and the console are
* initialized. It must be possible to call kmalloc(..., GFP_KERNEL)
* from this function, hence the call from sys_setup.
*/
void __init
console_map_init(void)
{
int i;
for (i = 0; i < MAX_NR_CONSOLES; i++)
if (vc_cons_allocated(i) && !*vc_cons[i].d->vc_uni_pagedir_loc)
con_set_default_unimap(vc_cons[i].d);
}
EXPORT_SYMBOL(con_copy_unimap);
| gpl-2.0 |
akshay-shah/android_kernel_samsung_crater | arch/arm/plat-samsung/clock-clksrc.c | 4081 | 4963 | /* linux/arch/arm/plat-samsung/clock-clksrc.c
*
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <plat/clock.h>
#include <plat/clock-clksrc.h>
#include <plat/cpu-freq.h>
static inline struct clksrc_clk *to_clksrc(struct clk *clk)
{
return container_of(clk, struct clksrc_clk, clk);
}
static inline u32 bit_mask(u32 shift, u32 nr_bits)
{
u32 mask = 0xffffffff >> (32 - nr_bits);
return mask << shift;
}
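/*
 * For illustration: bit_mask(4, 4) yields 0x000000f0.  The divider
 * fields read below store (divider - 1), so s3c_getrate_clksrc()
 * computes rate = parent_rate / (field + 1).
 */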
static unsigned long s3c_getrate_clksrc(struct clk *clk)
{
struct clksrc_clk *sclk = to_clksrc(clk);
unsigned long rate = clk_get_rate(clk->parent);
u32 clkdiv = __raw_readl(sclk->reg_div.reg);
u32 mask = bit_mask(sclk->reg_div.shift, sclk->reg_div.size);
clkdiv &= mask;
clkdiv >>= sclk->reg_div.shift;
clkdiv++;
rate /= clkdiv;
return rate;
}
static int s3c_setrate_clksrc(struct clk *clk, unsigned long rate)
{
struct clksrc_clk *sclk = to_clksrc(clk);
void __iomem *reg = sclk->reg_div.reg;
unsigned int div;
u32 mask = bit_mask(sclk->reg_div.shift, sclk->reg_div.size);
u32 val;
rate = clk_round_rate(clk, rate);
div = clk_get_rate(clk->parent) / rate;
if (div > (1 << sclk->reg_div.size))
return -EINVAL;
val = __raw_readl(reg);
val &= ~mask;
val |= (div - 1) << sclk->reg_div.shift;
__raw_writel(val, reg);
return 0;
}
static int s3c_setparent_clksrc(struct clk *clk, struct clk *parent)
{
struct clksrc_clk *sclk = to_clksrc(clk);
struct clksrc_sources *srcs = sclk->sources;
u32 clksrc = __raw_readl(sclk->reg_src.reg);
u32 mask = bit_mask(sclk->reg_src.shift, sclk->reg_src.size);
int src_nr = -1;
int ptr;
for (ptr = 0; ptr < srcs->nr_sources; ptr++)
if (srcs->sources[ptr] == parent) {
src_nr = ptr;
break;
}
if (src_nr >= 0) {
clk->parent = parent;
clksrc &= ~mask;
clksrc |= src_nr << sclk->reg_src.shift;
__raw_writel(clksrc, sclk->reg_src.reg);
return 0;
}
return -EINVAL;
}
static unsigned long s3c_roundrate_clksrc(struct clk *clk,
unsigned long rate)
{
struct clksrc_clk *sclk = to_clksrc(clk);
unsigned long parent_rate = clk_get_rate(clk->parent);
int max_div = 1 << sclk->reg_div.size;
int div;
if (rate >= parent_rate)
rate = parent_rate;
else {
div = parent_rate / rate;
if (parent_rate % rate)
div++;
if (div == 0)
div = 1;
if (div > max_div)
div = max_div;
rate = parent_rate / div;
}
return rate;
}
/* Clock initialisation code */
void __init_or_cpufreq s3c_set_clksrc(struct clksrc_clk *clk, bool announce)
{
struct clksrc_sources *srcs = clk->sources;
u32 mask = bit_mask(clk->reg_src.shift, clk->reg_src.size);
u32 clksrc;
if (!clk->reg_src.reg) {
if (!clk->clk.parent)
printk(KERN_ERR "%s: no parent clock specified\n",
clk->clk.name);
return;
}
clksrc = __raw_readl(clk->reg_src.reg);
clksrc &= mask;
clksrc >>= clk->reg_src.shift;
if (clksrc > srcs->nr_sources || !srcs->sources[clksrc]) {
printk(KERN_ERR "%s: bad source %d\n",
clk->clk.name, clksrc);
return;
}
clk->clk.parent = srcs->sources[clksrc];
if (announce)
printk(KERN_INFO "%s: source is %s (%d), rate is %ld\n",
clk->clk.name, clk->clk.parent->name, clksrc,
clk_get_rate(&clk->clk));
}
static struct clk_ops clksrc_ops = {
.set_parent = s3c_setparent_clksrc,
.get_rate = s3c_getrate_clksrc,
.set_rate = s3c_setrate_clksrc,
.round_rate = s3c_roundrate_clksrc,
};
static struct clk_ops clksrc_ops_nodiv = {
.set_parent = s3c_setparent_clksrc,
};
static struct clk_ops clksrc_ops_nosrc = {
.get_rate = s3c_getrate_clksrc,
.set_rate = s3c_setrate_clksrc,
.round_rate = s3c_roundrate_clksrc,
};
void __init s3c_register_clksrc(struct clksrc_clk *clksrc, int size)
{
int ret;
for (; size > 0; size--, clksrc++) {
if (!clksrc->reg_div.reg && !clksrc->reg_src.reg)
printk(KERN_ERR "%s: clock %s has no registers set\n",
__func__, clksrc->clk.name);
/* fill in the default functions */
if (!clksrc->clk.ops) {
if (!clksrc->reg_div.reg)
clksrc->clk.ops = &clksrc_ops_nodiv;
else if (!clksrc->reg_src.reg)
clksrc->clk.ops = &clksrc_ops_nosrc;
else
clksrc->clk.ops = &clksrc_ops;
}
/* setup the clocksource, but do not announce it
* as it may be re-set by the setup routines
* called after the rest of the clocks have been
* registered
*/
s3c_set_clksrc(clksrc, false);
ret = s3c24xx_register_clock(&clksrc->clk);
if (ret < 0) {
printk(KERN_ERR "%s: failed to register %s (%d)\n",
__func__, clksrc->clk.name, ret);
}
}
}
| gpl-2.0 |
lexi6725/linux-3.17.1 | arch/m68k/hp300/time.c | 4337 | 1954 | /*
* linux/arch/m68k/hp300/time.c
*
* Copyright (C) 1998 Philip Blundell <philb@gnu.org>
*
* This file contains the HP300-specific time handling code.
*/
#include <asm/ptrace.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/traps.h>
#include <asm/blinken.h>
/* Clock hardware definitions */
#define CLOCKBASE 0xf05f8000
#define CLKCR1 0x1
#define CLKCR2 0x3
#define CLKCR3 CLKCR1
#define CLKSR CLKCR2
#define CLKMSB1 0x5
#define CLKMSB2 0x9
#define CLKMSB3 0xD
/* This is for machines which generate the exact clock. */
#define USECS_PER_JIFFY (1000000/HZ)
#define INTVAL ((10000 / 4) - 1)
static irqreturn_t hp300_tick(int irq, void *dev_id)
{
unsigned long tmp;
irq_handler_t vector = dev_id;
in_8(CLOCKBASE + CLKSR);
asm volatile ("movpw %1@(5),%0" : "=d" (tmp) : "a" (CLOCKBASE));
/* Turn off the network and SCSI leds */
blinken_leds(0, 0xe0);
return vector(irq, NULL);
}
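/*
 * For reference: the timer counts down from INTVAL once per jiffy, so
 * (INTVAL - current count) below is how far we are into the current
 * tick; the result is scaled to microseconds and then returned in
 * nanoseconds.
 */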
u32 hp300_gettimeoffset(void)
{
/* Read current timer 1 value */
unsigned char lsb, msb1, msb2;
unsigned short ticks;
msb1 = in_8(CLOCKBASE + 5);
lsb = in_8(CLOCKBASE + 7);
msb2 = in_8(CLOCKBASE + 5);
if (msb1 != msb2)
/* A carry happened while we were reading. Read it again */
lsb = in_8(CLOCKBASE + 7);
ticks = INTVAL - ((msb2 << 8) | lsb);
return ((USECS_PER_JIFFY * ticks) / INTVAL) * 1000;
}
void __init hp300_sched_init(irq_handler_t vector)
{
out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
out_8(CLOCKBASE + CLKCR1, 0x1); /* reset */
asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
if (request_irq(IRQ_AUTO_6, hp300_tick, 0, "timer tick", vector))
pr_err("Couldn't register timer interrupt\n");
out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
out_8(CLOCKBASE + CLKCR1, 0x40); /* enable irq */
}
| gpl-2.0 |
whyorean/android_kernel_xiaomi_msm8996 | arch/blackfin/kernel/cplb-nompu/cplbinit.c | 4337 | 6178 | /*
* Blackfin CPLB initialization
*
* Copyright 2007-2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/module.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mem_map.h>
struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
int first_switched_icplb PDT_ATTR;
int first_switched_dcplb PDT_ATTR;
struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
struct cplb_boundary icplb_bounds[9] PDT_ATTR;
int icplb_nr_bounds PDT_ATTR;
int dcplb_nr_bounds PDT_ATTR;
void __init generate_cplb_tables_cpu(unsigned int cpu)
{
int i_d, i_i;
unsigned long addr;
unsigned long cplb_pageflags, cplb_pagesize;
struct cplb_entry *d_tbl = dcplb_tbl[cpu];
struct cplb_entry *i_tbl = icplb_tbl[cpu];
printk(KERN_INFO "NOMPU: setting up cplb tables\n");
i_d = i_i = 0;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
/* Set up the zero page. */
d_tbl[i_d].addr = 0;
d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
i_tbl[i_i].addr = 0;
i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
#endif
/* Cover kernel memory with 4M pages. */
addr = 0;
#ifdef PAGE_SIZE_16MB
cplb_pageflags = PAGE_SIZE_16MB;
cplb_pagesize = SIZE_16M;
#else
cplb_pageflags = PAGE_SIZE_4MB;
cplb_pagesize = SIZE_4M;
#endif
for (; addr < memory_start; addr += cplb_pagesize) {
d_tbl[i_d].addr = addr;
d_tbl[i_d++].data = SDRAM_DGENERIC | cplb_pageflags;
i_tbl[i_i].addr = addr;
i_tbl[i_i++].data = SDRAM_IGENERIC | cplb_pageflags;
}
#ifdef CONFIG_ROMKERNEL
/* Cover kernel XIP flash area */
#ifdef CONFIG_BF60x
addr = CONFIG_ROM_BASE & ~(16 * 1024 * 1024 - 1);
d_tbl[i_d].addr = addr;
d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_16MB;
i_tbl[i_i].addr = addr;
i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_16MB;
#else
addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
d_tbl[i_d].addr = addr;
d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
i_tbl[i_i].addr = addr;
i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
#endif
#endif
/* Cover L1 memory. One 4M area for code and data each is enough. */
if (cpu == 0) {
if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
d_tbl[i_d].addr = L1_DATA_A_START;
d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
}
i_tbl[i_i].addr = L1_CODE_START;
i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
}
#ifdef CONFIG_SMP
else {
if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
d_tbl[i_d].addr = COREB_L1_DATA_A_START;
d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
}
i_tbl[i_i].addr = COREB_L1_CODE_START;
i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
}
#endif
first_switched_dcplb = i_d;
first_switched_icplb = i_i;
BUG_ON(first_switched_dcplb > MAX_CPLBS);
BUG_ON(first_switched_icplb > MAX_CPLBS);
while (i_d < MAX_CPLBS)
d_tbl[i_d++].data = 0;
while (i_i < MAX_CPLBS)
i_tbl[i_i++].data = 0;
}
void __init generate_cplb_tables_all(void)
{
unsigned long uncached_end;
int i_d, i_i;
i_d = 0;
/* Normal RAM, including MTD FS. */
#ifdef CONFIG_MTD_UCLINUX
uncached_end = memory_mtd_start + mtd_size;
#else
uncached_end = memory_end;
#endif
/*
* if DMA uncached is less than 1MB, mark the 1MB chunk as uncached
* so that we don't have to use 4kB pages and cause CPLB thrashing
*/
if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
((_ramend - uncached_end) >= 1 * 1024 * 1024))
dcplb_bounds[i_d].eaddr = uncached_end;
else
dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
/* DMA uncached region. */
if (DMA_UNCACHED_REGION) {
dcplb_bounds[i_d].eaddr = _ramend;
dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
}
if (_ramend != physical_mem_end) {
/* Reserved memory. */
dcplb_bounds[i_d].eaddr = physical_mem_end;
dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
SDRAM_DGENERIC : SDRAM_DNON_CHBL);
}
/* Addressing hole up to the async bank. */
dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
dcplb_bounds[i_d++].data = 0;
/* ASYNC banks. */
dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
dcplb_bounds[i_d++].data = SDRAM_EBIU;
/* Addressing hole up to BootROM. */
dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
dcplb_bounds[i_d++].data = 0;
/* BootROM -- largest one should be less than 1 meg. */
dcplb_bounds[i_d].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH;
dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
if (L2_LENGTH) {
/* Addressing hole up to L2 SRAM. */
dcplb_bounds[i_d].eaddr = L2_START;
dcplb_bounds[i_d++].data = 0;
/* L2 SRAM. */
dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
dcplb_bounds[i_d++].data = L2_DMEMORY;
}
dcplb_nr_bounds = i_d;
BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));
i_i = 0;
/* Normal RAM, including MTD FS. */
icplb_bounds[i_i].eaddr = uncached_end;
icplb_bounds[i_i++].data = SDRAM_IGENERIC;
if (_ramend != physical_mem_end) {
/* DMA uncached region. */
if (DMA_UNCACHED_REGION) {
/* Normally this hole is caught by the async below. */
icplb_bounds[i_i].eaddr = _ramend;
icplb_bounds[i_i++].data = 0;
}
/* Reserved memory. */
icplb_bounds[i_i].eaddr = physical_mem_end;
icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
SDRAM_IGENERIC : SDRAM_INON_CHBL);
}
/* Addressing hole up to the async bank. */
icplb_bounds[i_i].eaddr = ASYNC_BANK0_BASE;
icplb_bounds[i_i++].data = 0;
/* ASYNC banks. */
icplb_bounds[i_i].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
icplb_bounds[i_i++].data = SDRAM_EBIU;
/* Addressing hole up to BootROM. */
icplb_bounds[i_i].eaddr = BOOT_ROM_START;
icplb_bounds[i_i++].data = 0;
/* BootROM -- largest one should be less than 1 meg. */
icplb_bounds[i_i].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH;
icplb_bounds[i_i++].data = SDRAM_IGENERIC;
if (L2_LENGTH) {
/* Addressing hole up to L2 SRAM. */
icplb_bounds[i_i].eaddr = L2_START;
icplb_bounds[i_i++].data = 0;
/* L2 SRAM. */
icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
icplb_bounds[i_i++].data = L2_IMEMORY;
}
icplb_nr_bounds = i_i;
BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));
}
| gpl-2.0 |
iamroot9C-arm/linux | arch/powerpc/platforms/cell/spider-pic.c | 4593 | 11090 | /*
* External Interrupt Controller on Spider South Bridge
*
* (C) Copyright IBM Deutschland Entwicklung GmbH 2005
*
* Author: Arnd Bergmann <arndb@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/ioport.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/io.h>
#include "interrupt.h"
/* register layout taken from Spider spec, table 7.4-4 */
enum {
TIR_DEN = 0x004, /* Detection Enable Register */
TIR_MSK = 0x084, /* Mask Level Register */
TIR_EDC = 0x0c0, /* Edge Detection Clear Register */
TIR_PNDA = 0x100, /* Pending Register A */
TIR_PNDB = 0x104, /* Pending Register B */
TIR_CS = 0x144, /* Current Status Register */
TIR_LCSA = 0x150, /* Level Current Status Register A */
TIR_LCSB = 0x154, /* Level Current Status Register B */
TIR_LCSC = 0x158, /* Level Current Status Register C */
TIR_LCSD = 0x15c, /* Level Current Status Register D */
TIR_CFGA = 0x200, /* Setting Register A0 */
TIR_CFGB = 0x204, /* Setting Register B0 */
/* 0x208 ... 0x3ff Setting Register An/Bn */
TIR_PPNDA = 0x400, /* Packet Pending Register A */
TIR_PPNDB = 0x404, /* Packet Pending Register B */
TIR_PIERA = 0x408, /* Packet Output Error Register A */
TIR_PIERB = 0x40c, /* Packet Output Error Register B */
TIR_PIEN = 0x444, /* Packet Output Enable Register */
TIR_PIPND = 0x454, /* Packet Output Pending Register */
TIRDID = 0x484, /* Spider Device ID Register */
REISTIM = 0x500, /* Reissue Command Timeout Time Setting */
REISTIMEN = 0x504, /* Reissue Command Timeout Setting */
REISWAITEN = 0x508, /* Reissue Wait Control*/
};
#define SPIDER_CHIP_COUNT 4
#define SPIDER_SRC_COUNT 64
#define SPIDER_IRQ_INVALID 63
struct spider_pic {
struct irq_domain *host;
void __iomem *regs;
unsigned int node_id;
};
static struct spider_pic spider_pics[SPIDER_CHIP_COUNT];
static struct spider_pic *spider_irq_data_to_pic(struct irq_data *d)
{
return irq_data_get_irq_chip_data(d);
}
static void __iomem *spider_get_irq_config(struct spider_pic *pic,
unsigned int src)
{
return pic->regs + TIR_CFGA + 8 * src;
}
static void spider_unmask_irq(struct irq_data *d)
{
struct spider_pic *pic = spider_irq_data_to_pic(d);
void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d));
out_be32(cfg, in_be32(cfg) | 0x30000000u);
}
static void spider_mask_irq(struct irq_data *d)
{
struct spider_pic *pic = spider_irq_data_to_pic(d);
void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d));
out_be32(cfg, in_be32(cfg) & ~0x30000000u);
}
static void spider_ack_irq(struct irq_data *d)
{
struct spider_pic *pic = spider_irq_data_to_pic(d);
unsigned int src = irqd_to_hwirq(d);
/* Reset edge detection logic if necessary
*/
if (irqd_is_level_type(d))
return;
/* Only interrupts 47 to 50 can be set to edge */
if (src < 47 || src > 50)
return;
/* Perform the clear of the edge logic */
out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf));
}
static int spider_set_irq_type(struct irq_data *d, unsigned int type)
{
unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
struct spider_pic *pic = spider_irq_data_to_pic(d);
unsigned int hw = irqd_to_hwirq(d);
void __iomem *cfg = spider_get_irq_config(pic, hw);
u32 old_mask;
u32 ic;
/* Note that only level high is supported for most interrupts */
if (sense != IRQ_TYPE_NONE && sense != IRQ_TYPE_LEVEL_HIGH &&
(hw < 47 || hw > 50))
return -EINVAL;
/* Decode sense type */
switch(sense) {
case IRQ_TYPE_EDGE_RISING:
ic = 0x3;
break;
case IRQ_TYPE_EDGE_FALLING:
ic = 0x2;
break;
case IRQ_TYPE_LEVEL_LOW:
ic = 0x0;
break;
case IRQ_TYPE_LEVEL_HIGH:
case IRQ_TYPE_NONE:
ic = 0x1;
break;
default:
return -EINVAL;
}
/* Configure the source. One gross hack that was there before and
* that I've kept around is the priority to the BE which I set to
* be the same as the interrupt source number. I don't know wether
* that's supposed to make any kind of sense however, we'll have to
* decide that, but for now, I'm not changing the behaviour.
*/
old_mask = in_be32(cfg) & 0x30000000u;
out_be32(cfg, old_mask | (ic << 24) | (0x7 << 16) |
(pic->node_id << 4) | 0xe);
out_be32(cfg + 4, (0x2 << 16) | (hw & 0xff));
return 0;
}
static struct irq_chip spider_pic = {
.name = "SPIDER",
.irq_unmask = spider_unmask_irq,
.irq_mask = spider_mask_irq,
.irq_ack = spider_ack_irq,
.irq_set_type = spider_set_irq_type,
};
static int spider_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_chip_data(virq, h->host_data);
irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq);
/* Set default irq type */
irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
static int spider_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
/* Spider interrupts have 2 cells, first is the interrupt source,
* second, well, I don't know for sure yet ... We mask the top bits
* because old device-trees encode a node number in there
*/
*out_hwirq = intspec[0] & 0x3f;
*out_flags = IRQ_TYPE_LEVEL_HIGH;
return 0;
}
static const struct irq_domain_ops spider_host_ops = {
.map = spider_host_map,
.xlate = spider_host_xlate,
};
static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct spider_pic *pic = irq_desc_get_handler_data(desc);
unsigned int cs, virq;
cs = in_be32(pic->regs + TIR_CS) >> 24;
if (cs == SPIDER_IRQ_INVALID)
virq = NO_IRQ;
else
virq = irq_linear_revmap(pic->host, cs);
if (virq != NO_IRQ)
generic_handle_irq(virq);
chip->irq_eoi(&desc->irq_data);
}
/* For hooking up the cascade we have a problem. Our device-tree is
* crap and we don't know on which BE iic interrupt we are hooked, at
* least not the "standard" way. We can reconstitute it based on two
* pieces of information: which BE node we are connected to and whether
* we are connected to IOIF0 or IOIF1. Right now, we really only care
* about the IBM cell blade and we know that its firmware gives us an
* interrupt-map property which is pretty strange.
*/
static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
{
unsigned int virq;
const u32 *imap, *tmp;
int imaplen, intsize, unit;
struct device_node *iic;
/* First, we check whether we have a real "interrupts" in the device
* tree in case the device-tree is ever fixed
*/
struct of_irq oirq;
if (of_irq_map_one(pic->host->of_node, 0, &oirq) == 0) {
virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
return virq;
}
/* Now do the horrible hacks */
tmp = of_get_property(pic->host->of_node, "#interrupt-cells", NULL);
if (tmp == NULL)
return NO_IRQ;
intsize = *tmp;
imap = of_get_property(pic->host->of_node, "interrupt-map", &imaplen);
if (imap == NULL || imaplen < (intsize + 1))
return NO_IRQ;
iic = of_find_node_by_phandle(imap[intsize]);
if (iic == NULL)
return NO_IRQ;
imap += intsize + 1;
tmp = of_get_property(iic, "#interrupt-cells", NULL);
if (tmp == NULL) {
of_node_put(iic);
return NO_IRQ;
}
intsize = *tmp;
/* Assume unit is last entry of interrupt specifier */
unit = imap[intsize - 1];
/* Ok, we have a unit, now let's try to get the node */
tmp = of_get_property(iic, "ibm,interrupt-server-ranges", NULL);
if (tmp == NULL) {
of_node_put(iic);
return NO_IRQ;
}
/* ugly as hell but works for now */
pic->node_id = (*tmp) >> 1;
of_node_put(iic);
/* Ok, now let's get cracking. You may ask me why I just didn't match
* the iic host from the iic OF node, but that way I'm still compatible
* with really really old old firmwares for which we don't have a node
*/
/* Manufacture an IIC interrupt number of class 2 */
virq = irq_create_mapping(NULL,
(pic->node_id << IIC_IRQ_NODE_SHIFT) |
(2 << IIC_IRQ_CLASS_SHIFT) |
unit);
if (virq == NO_IRQ)
printk(KERN_ERR "spider_pic: failed to map cascade !");
return virq;
}
static void __init spider_init_one(struct device_node *of_node, int chip,
unsigned long addr)
{
struct spider_pic *pic = &spider_pics[chip];
int i, virq;
/* Map registers */
pic->regs = ioremap(addr, 0x1000);
if (pic->regs == NULL)
panic("spider_pic: can't map registers !");
/* Allocate a host */
pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT,
&spider_host_ops, pic);
if (pic->host == NULL)
panic("spider_pic: can't allocate irq host !");
/* Go through all sources and disable them */
for (i = 0; i < SPIDER_SRC_COUNT; i++) {
void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i;
out_be32(cfg, in_be32(cfg) & ~0x30000000u);
}
/* do not mask any interrupts because of level */
out_be32(pic->regs + TIR_MSK, 0x0);
/* enable interrupt packets to be output */
out_be32(pic->regs + TIR_PIEN, in_be32(pic->regs + TIR_PIEN) | 0x1);
/* Hook up the cascade interrupt to the iic and nodeid */
virq = spider_find_cascade_and_node(pic);
if (virq == NO_IRQ)
return;
irq_set_handler_data(virq, pic);
irq_set_chained_handler(virq, spider_irq_cascade);
printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n",
pic->node_id, addr, of_node->full_name);
/* Enable the interrupt detection enable bit. Do this last! */
out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1);
}
void __init spider_init_IRQ(void)
{
struct resource r;
struct device_node *dn;
int chip = 0;
/* XXX node numbers are totally bogus. We _hope_ we get the device
* nodes in the right order here but that's definitely not guaranteed,
* we need to get the node from the device tree instead.
* There is currently no proper property for it (but our whole
* device-tree is bogus anyway) so all we can do is pray or maybe test
* the address and deduce the node-id
*/
for (dn = NULL;
(dn = of_find_node_by_name(dn, "interrupt-controller"));) {
if (of_device_is_compatible(dn, "CBEA,platform-spider-pic")) {
if (of_address_to_resource(dn, 0, &r)) {
printk(KERN_WARNING "spider-pic: Failed\n");
continue;
}
} else if (of_device_is_compatible(dn, "sti,platform-spider-pic")
&& (chip < 2)) {
static long hard_coded_pics[] =
{ 0x24000008000ul, 0x34000008000ul};
r.start = hard_coded_pics[chip];
} else
continue;
spider_init_one(dn, chip++, r.start);
}
}
| gpl-2.0 |
RoyMcBaster/kernel_hammerhead | arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | 4593 | 4062 | /*
* PQ2 ADS-style PCI interrupt controller
*
* Copyright 2007 Freescale Semiconductor, Inc.
* Author: Scott Wood <scottwood@freescale.com>
*
* Loosely based on mpc82xx ADS support by Vitaly Bordug <vbordug@ru.mvista.com>
* Copyright (c) 2006 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/cpm2.h>
#include "pq2.h"
static DEFINE_RAW_SPINLOCK(pci_pic_lock);
struct pq2ads_pci_pic {
struct device_node *node;
struct irq_domain *host;
struct {
u32 stat;
u32 mask;
} __iomem *regs;
};
#define NUM_IRQS 32
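/*
 * For reference: hwirq 0 corresponds to the most significant bit of the
 * status/mask registers, hence the "NUM_IRQS - hwirq - 1" conversion in
 * the mask/unmask helpers and the MSB-first scan in the demux handler
 * below.
 */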
static void pq2ads_pci_mask_irq(struct irq_data *d)
{
struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
int irq = NUM_IRQS - irqd_to_hwirq(d) - 1;
if (irq != -1) {
unsigned long flags;
raw_spin_lock_irqsave(&pci_pic_lock, flags);
setbits32(&priv->regs->mask, 1 << irq);
mb();
raw_spin_unlock_irqrestore(&pci_pic_lock, flags);
}
}
static void pq2ads_pci_unmask_irq(struct irq_data *d)
{
struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
int irq = NUM_IRQS - irqd_to_hwirq(d) - 1;
if (irq != -1) {
unsigned long flags;
raw_spin_lock_irqsave(&pci_pic_lock, flags);
clrbits32(&priv->regs->mask, 1 << irq);
raw_spin_unlock_irqrestore(&pci_pic_lock, flags);
}
}
static struct irq_chip pq2ads_pci_ic = {
.name = "PQ2 ADS PCI",
.irq_mask = pq2ads_pci_mask_irq,
.irq_mask_ack = pq2ads_pci_mask_irq,
.irq_ack = pq2ads_pci_mask_irq,
.irq_unmask = pq2ads_pci_unmask_irq,
.irq_enable = pq2ads_pci_unmask_irq,
.irq_disable = pq2ads_pci_mask_irq
};
static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
{
struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc);
u32 stat, mask, pend;
int bit;
for (;;) {
stat = in_be32(&priv->regs->stat);
mask = in_be32(&priv->regs->mask);
pend = stat & ~mask;
if (!pend)
break;
for (bit = 0; pend != 0; ++bit, pend <<= 1) {
if (pend & 0x80000000) {
int virq = irq_linear_revmap(priv->host, bit);
generic_handle_irq(virq);
}
}
}
}
static int pci_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_data(virq, h->host_data);
irq_set_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq);
return 0;
}
static const struct irq_domain_ops pci_pic_host_ops = {
.map = pci_pic_host_map,
};
int __init pq2ads_pci_init_irq(void)
{
struct pq2ads_pci_pic *priv;
struct irq_domain *host;
struct device_node *np;
int ret = -ENODEV;
int irq;
np = of_find_compatible_node(NULL, NULL, "fsl,pq2ads-pci-pic");
if (!np) {
printk(KERN_ERR "No pci pic node in device tree.\n");
of_node_put(np);
goto out;
}
irq = irq_of_parse_and_map(np, 0);
if (irq == NO_IRQ) {
printk(KERN_ERR "No interrupt in pci pic node.\n");
of_node_put(np);
goto out;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
of_node_put(np);
ret = -ENOMEM;
goto out_unmap_irq;
}
/* PCI interrupt controller registers: status and mask */
priv->regs = of_iomap(np, 0);
if (!priv->regs) {
printk(KERN_ERR "Cannot map PCI PIC registers.\n");
goto out_free_bootmem;
}
/* mask all PCI interrupts */
out_be32(&priv->regs->mask, ~0);
mb();
host = irq_domain_add_linear(np, NUM_IRQS, &pci_pic_host_ops, priv);
if (!host) {
ret = -ENOMEM;
goto out_unmap_regs;
}
priv->host = host;
irq_set_handler_data(irq, priv);
irq_set_chained_handler(irq, pq2ads_pci_irq_demux);
of_node_put(np);
return 0;
out_unmap_regs:
iounmap(priv->regs);
out_free_bootmem:
free_bootmem((unsigned long)priv,
sizeof(struct pq2ads_pci_pic));
of_node_put(np);
out_unmap_irq:
irq_dispose_mapping(irq);
out:
return ret;
}
| gpl-2.0 |
crpalmer/android_kernel_samsung_msm8974 | arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | 4593 | 4062 | /*
* PQ2 ADS-style PCI interrupt controller
*
* Copyright 2007 Freescale Semiconductor, Inc.
* Author: Scott Wood <scottwood@freescale.com>
*
* Loosely based on mpc82xx ADS support by Vitaly Bordug <vbordug@ru.mvista.com>
* Copyright (c) 2006 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/cpm2.h>
#include "pq2.h"
static DEFINE_RAW_SPINLOCK(pci_pic_lock);
struct pq2ads_pci_pic {
struct device_node *node;
struct irq_domain *host;
struct {
u32 stat;
u32 mask;
} __iomem *regs;
};
#define NUM_IRQS 32
static void pq2ads_pci_mask_irq(struct irq_data *d)
{
struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
int irq = NUM_IRQS - irqd_to_hwirq(d) - 1;
if (irq != -1) {
unsigned long flags;
raw_spin_lock_irqsave(&pci_pic_lock, flags);
setbits32(&priv->regs->mask, 1 << irq);
mb();
raw_spin_unlock_irqrestore(&pci_pic_lock, flags);
}
}
static void pq2ads_pci_unmask_irq(struct irq_data *d)
{
struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
int irq = NUM_IRQS - irqd_to_hwirq(d) - 1;
if (irq != -1) {
unsigned long flags;
raw_spin_lock_irqsave(&pci_pic_lock, flags);
clrbits32(&priv->regs->mask, 1 << irq);
raw_spin_unlock_irqrestore(&pci_pic_lock, flags);
}
}
static struct irq_chip pq2ads_pci_ic = {
.name = "PQ2 ADS PCI",
.irq_mask = pq2ads_pci_mask_irq,
.irq_mask_ack = pq2ads_pci_mask_irq,
.irq_ack = pq2ads_pci_mask_irq,
.irq_unmask = pq2ads_pci_unmask_irq,
.irq_enable = pq2ads_pci_unmask_irq,
.irq_disable = pq2ads_pci_mask_irq
};
static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
{
struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc);
u32 stat, mask, pend;
int bit;
for (;;) {
stat = in_be32(&priv->regs->stat);
mask = in_be32(&priv->regs->mask);
pend = stat & ~mask;
if (!pend)
break;
for (bit = 0; pend != 0; ++bit, pend <<= 1) {
if (pend & 0x80000000) {
int virq = irq_linear_revmap(priv->host, bit);
generic_handle_irq(virq);
}
}
}
}
static int pci_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_data(virq, h->host_data);
irq_set_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq);
return 0;
}
static const struct irq_domain_ops pci_pic_host_ops = {
.map = pci_pic_host_map,
};
int __init pq2ads_pci_init_irq(void)
{
struct pq2ads_pci_pic *priv;
struct irq_domain *host;
struct device_node *np;
int ret = -ENODEV;
int irq;
np = of_find_compatible_node(NULL, NULL, "fsl,pq2ads-pci-pic");
if (!np) {
printk(KERN_ERR "No pci pic node in device tree.\n");
of_node_put(np);
goto out;
}
irq = irq_of_parse_and_map(np, 0);
if (irq == NO_IRQ) {
printk(KERN_ERR "No interrupt in pci pic node.\n");
of_node_put(np);
goto out;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
of_node_put(np);
ret = -ENOMEM;
goto out_unmap_irq;
}
/* PCI interrupt controller registers: status and mask */
priv->regs = of_iomap(np, 0);
if (!priv->regs) {
printk(KERN_ERR "Cannot map PCI PIC registers.\n");
goto out_free_bootmem;
}
/* mask all PCI interrupts */
out_be32(&priv->regs->mask, ~0);
mb();
host = irq_domain_add_linear(np, NUM_IRQS, &pci_pic_host_ops, priv);
if (!host) {
ret = -ENOMEM;
goto out_unmap_regs;
}
priv->host = host;
irq_set_handler_data(irq, priv);
irq_set_chained_handler(irq, pq2ads_pci_irq_demux);
of_node_put(np);
return 0;
out_unmap_regs:
iounmap(priv->regs);
out_free_bootmem:
free_bootmem((unsigned long)priv,
sizeof(struct pq2ads_pci_pic));
of_node_put(np);
out_unmap_irq:
irq_dispose_mapping(irq);
out:
return ret;
}
| gpl-2.0 |
AndroidDeveloperAlliance/kernel_samsung_d2 | drivers/staging/pohmelfs/mcache.c | 4849 | 3920 | /*
* 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include "netfs.h"
static struct kmem_cache *pohmelfs_mcache_cache;
static mempool_t *pohmelfs_mcache_pool;
static inline int pohmelfs_mcache_cmp(u64 gen, u64 new)
{
if (gen < new)
return 1;
if (gen > new)
return -1;
return 0;
}
struct pohmelfs_mcache *pohmelfs_mcache_search(struct pohmelfs_sb *psb, u64 gen)
{
struct rb_root *root = &psb->mcache_root;
struct rb_node *n = root->rb_node;
struct pohmelfs_mcache *tmp, *ret = NULL;
int cmp;
while (n) {
tmp = rb_entry(n, struct pohmelfs_mcache, mcache_entry);
cmp = pohmelfs_mcache_cmp(tmp->gen, gen);
if (cmp < 0)
n = n->rb_left;
else if (cmp > 0)
n = n->rb_right;
else {
ret = tmp;
pohmelfs_mcache_get(ret);
break;
}
}
return ret;
}
static int pohmelfs_mcache_insert(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
{
struct rb_root *root = &psb->mcache_root;
struct rb_node **n = &root->rb_node, *parent = NULL;
struct pohmelfs_mcache *ret = NULL, *tmp;
int cmp;
while (*n) {
parent = *n;
tmp = rb_entry(parent, struct pohmelfs_mcache, mcache_entry);
cmp = pohmelfs_mcache_cmp(tmp->gen, m->gen);
if (cmp < 0)
n = &parent->rb_left;
else if (cmp > 0)
n = &parent->rb_right;
else {
ret = tmp;
break;
}
}
if (ret)
return -EEXIST;
rb_link_node(&m->mcache_entry, parent, n);
rb_insert_color(&m->mcache_entry, root);
return 0;
}
static int pohmelfs_mcache_remove(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
{
if (m && m->mcache_entry.rb_parent_color) {
rb_erase(&m->mcache_entry, &psb->mcache_root);
m->mcache_entry.rb_parent_color = 0;
return 1;
}
return 0;
}
void pohmelfs_mcache_remove_locked(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
{
mutex_lock(&psb->mcache_lock);
pohmelfs_mcache_remove(psb, m);
mutex_unlock(&psb->mcache_lock);
}
struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start,
unsigned int size, void *data)
{
struct pohmelfs_mcache *m;
int err = -ENOMEM;
m = mempool_alloc(pohmelfs_mcache_pool, GFP_KERNEL);
if (!m)
goto err_out_exit;
init_completion(&m->complete);
m->err = 0;
atomic_set(&m->refcnt, 1);
m->data = data;
m->start = start;
m->size = size;
m->gen = atomic_long_inc_return(&psb->mcache_gen);
mutex_lock(&psb->mcache_lock);
err = pohmelfs_mcache_insert(psb, m);
mutex_unlock(&psb->mcache_lock);
if (err)
goto err_out_free;
return m;
err_out_free:
mempool_free(m, pohmelfs_mcache_pool);
err_out_exit:
return ERR_PTR(err);
}
void pohmelfs_mcache_free(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
{
pohmelfs_mcache_remove_locked(psb, m);
mempool_free(m, pohmelfs_mcache_pool);
}
int __init pohmelfs_mcache_init(void)
{
pohmelfs_mcache_cache = kmem_cache_create("pohmelfs_mcache_cache",
sizeof(struct pohmelfs_mcache),
0, (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD), NULL);
if (!pohmelfs_mcache_cache)
goto err_out_exit;
pohmelfs_mcache_pool = mempool_create_slab_pool(256, pohmelfs_mcache_cache);
if (!pohmelfs_mcache_pool)
goto err_out_free;
return 0;
err_out_free:
kmem_cache_destroy(pohmelfs_mcache_cache);
err_out_exit:
return -ENOMEM;
}
void pohmelfs_mcache_exit(void)
{
mempool_destroy(pohmelfs_mcache_pool);
kmem_cache_destroy(pohmelfs_mcache_cache);
}
| gpl-2.0 |
Alex-V2/One_M8_4.4.3_kernel | drivers/hwmon/max6639.c | 4849 | 19308 | /*
* max6639.c - Support for Maxim MAX6639
*
* 2-Channel Temperature Monitor with Dual PWM Fan-Speed Controller
*
* Copyright (C) 2010, 2011 Roland Stigge <stigge@antcom.de>
*
* based on the initial MAX6639 support from semptian.net
* by He Changqing <hechangqing@semptian.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/i2c/max6639.h>
/* Addresses to scan */
static unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
/* The MAX6639 registers, valid channel numbers: 0, 1 */
#define MAX6639_REG_TEMP(ch) (0x00 + (ch))
#define MAX6639_REG_STATUS 0x02
#define MAX6639_REG_OUTPUT_MASK 0x03
#define MAX6639_REG_GCONFIG 0x04
#define MAX6639_REG_TEMP_EXT(ch) (0x05 + (ch))
#define MAX6639_REG_ALERT_LIMIT(ch) (0x08 + (ch))
#define MAX6639_REG_OT_LIMIT(ch) (0x0A + (ch))
#define MAX6639_REG_THERM_LIMIT(ch) (0x0C + (ch))
#define MAX6639_REG_FAN_CONFIG1(ch) (0x10 + (ch) * 4)
#define MAX6639_REG_FAN_CONFIG2a(ch) (0x11 + (ch) * 4)
#define MAX6639_REG_FAN_CONFIG2b(ch) (0x12 + (ch) * 4)
#define MAX6639_REG_FAN_CONFIG3(ch) (0x13 + (ch) * 4)
#define MAX6639_REG_FAN_CNT(ch) (0x20 + (ch))
#define MAX6639_REG_TARGET_CNT(ch) (0x22 + (ch))
#define MAX6639_REG_FAN_PPR(ch) (0x24 + (ch))
#define MAX6639_REG_TARGTDUTY(ch) (0x26 + (ch))
#define MAX6639_REG_FAN_START_TEMP(ch) (0x28 + (ch))
#define MAX6639_REG_DEVID 0x3D
#define MAX6639_REG_MANUID 0x3E
#define MAX6639_REG_DEVREV 0x3F
/* Register bits */
#define MAX6639_GCONFIG_STANDBY 0x80
#define MAX6639_GCONFIG_POR 0x40
#define MAX6639_GCONFIG_DISABLE_TIMEOUT 0x20
#define MAX6639_GCONFIG_CH2_LOCAL 0x10
#define MAX6639_GCONFIG_PWM_FREQ_HI 0x08
#define MAX6639_FAN_CONFIG1_PWM 0x80
#define MAX6639_FAN_CONFIG3_THERM_FULL_SPEED 0x40
static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? \
0 : (rpm_ranges[rpm_range] * 30) / (val))
#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255)
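/*
 * For illustration: with the default 4000 RPM range a tach count of 60
 * reads back as 4000 * 30 / 60 = 2000 RPM; a count of 0 or 255 is
 * reported as 0 (fan stopped or not yet measured).
 */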
/*
* Client data (each client gets its own)
*/
struct max6639_data {
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* Register values sampled regularly */
u16 temp[2]; /* Temperature, in 1/8 C, 0..255 C */
bool temp_fault[2]; /* Detected temperature diode failure */
u8 fan[2]; /* Register value: TACH count for fans >=30 */
u8 status; /* Detected channel alarms and fan failures */
/* Register values only written to */
u8 pwm[2]; /* Register value: Duty cycle 0..120 */
u8 temp_therm[2]; /* THERM Temperature, 0..255 C (->_max) */
u8 temp_alert[2]; /* ALERT Temperature, 0..255 C (->_crit) */
u8 temp_ot[2]; /* OT Temperature, 0..255 C (->_emergency) */
/* Register values initialized only once */
u8 ppr; /* Pulses per rotation 0..3 for 1..4 ppr */
u8 rpm_range; /* Index in above rpm_ranges table */
};
static struct max6639_data *max6639_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct max6639_data *data = i2c_get_clientdata(client);
struct max6639_data *ret = data;
int i;
int status_reg;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
int res;
dev_dbg(&client->dev, "Starting max6639 update\n");
status_reg = i2c_smbus_read_byte_data(client,
MAX6639_REG_STATUS);
if (status_reg < 0) {
ret = ERR_PTR(status_reg);
goto abort;
}
data->status = status_reg;
for (i = 0; i < 2; i++) {
res = i2c_smbus_read_byte_data(client,
MAX6639_REG_FAN_CNT(i));
if (res < 0) {
ret = ERR_PTR(res);
goto abort;
}
data->fan[i] = res;
res = i2c_smbus_read_byte_data(client,
MAX6639_REG_TEMP_EXT(i));
if (res < 0) {
ret = ERR_PTR(res);
goto abort;
}
data->temp[i] = res >> 5;
data->temp_fault[i] = res & 0x01;
res = i2c_smbus_read_byte_data(client,
MAX6639_REG_TEMP(i));
if (res < 0) {
ret = ERR_PTR(res);
goto abort;
}
data->temp[i] |= res << 3;
}
data->last_updated = jiffies;
data->valid = 1;
}
abort:
mutex_unlock(&data->update_lock);
return ret;
}
static ssize_t show_temp_input(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
long temp;
struct max6639_data *data = max6639_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
if (IS_ERR(data))
return PTR_ERR(data);
temp = data->temp[attr->index] * 125;
return sprintf(buf, "%ld\n", temp);
}
static ssize_t show_temp_fault(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct max6639_data *data = max6639_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
if (IS_ERR(data))
return PTR_ERR(data);
return sprintf(buf, "%d\n", data->temp_fault[attr->index]);
}
static ssize_t show_temp_max(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
return sprintf(buf, "%d\n", (data->temp_therm[attr->index] * 1000));
}
static ssize_t set_temp_max(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
unsigned long val;
int res;
res = kstrtoul(buf, 10, &val);
if (res)
return res;
mutex_lock(&data->update_lock);
data->temp_therm[attr->index] = TEMP_LIMIT_TO_REG(val);
i2c_smbus_write_byte_data(client,
MAX6639_REG_THERM_LIMIT(attr->index),
data->temp_therm[attr->index]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_temp_crit(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
return sprintf(buf, "%d\n", (data->temp_alert[attr->index] * 1000));
}
static ssize_t set_temp_crit(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
unsigned long val;
int res;
res = kstrtoul(buf, 10, &val);
if (res)
return res;
mutex_lock(&data->update_lock);
data->temp_alert[attr->index] = TEMP_LIMIT_TO_REG(val);
i2c_smbus_write_byte_data(client,
MAX6639_REG_ALERT_LIMIT(attr->index),
data->temp_alert[attr->index]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_temp_emergency(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
return sprintf(buf, "%d\n", (data->temp_ot[attr->index] * 1000));
}
static ssize_t set_temp_emergency(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
unsigned long val;
int res;
res = kstrtoul(buf, 10, &val);
if (res)
return res;
mutex_lock(&data->update_lock);
data->temp_ot[attr->index] = TEMP_LIMIT_TO_REG(val);
i2c_smbus_write_byte_data(client,
MAX6639_REG_OT_LIMIT(attr->index),
data->temp_ot[attr->index]);
mutex_unlock(&data->update_lock);
return count;
}
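/*
 * For reference: the chip's duty-cycle registers use a 0..120 scale, so
 * the pwm attributes below rescale to and from the usual 0..255 sysfs
 * range.
 */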
static ssize_t show_pwm(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
return sprintf(buf, "%d\n", data->pwm[attr->index] * 255 / 120);
}
static ssize_t set_pwm(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
unsigned long val;
int res;
res = kstrtoul(buf, 10, &val);
if (res)
return res;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm[attr->index] = (u8)(val * 120 / 255);
i2c_smbus_write_byte_data(client,
MAX6639_REG_TARGTDUTY(attr->index),
data->pwm[attr->index]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_fan_input(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct max6639_data *data = max6639_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
if (IS_ERR(data))
return PTR_ERR(data);
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index],
data->rpm_range));
}
static ssize_t show_alarm(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct max6639_data *data = max6639_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
if (IS_ERR(data))
return PTR_ERR(data);
return sprintf(buf, "%d\n", !!(data->status & (1 << attr->index)));
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 0);
static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 1);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit,
set_temp_crit, 0);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit,
set_temp_crit, 1);
static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO,
show_temp_emergency, set_temp_emergency, 0);
static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO,
show_temp_emergency, set_temp_emergency, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 7);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp2_emergency_alarm, S_IRUGO, show_alarm, NULL, 4);
static struct attribute *max6639_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp1_fault.dev_attr.attr,
&sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp2_crit.dev_attr.attr,
&sensor_dev_attr_temp1_emergency.dev_attr.attr,
&sensor_dev_attr_temp2_emergency.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_pwm2.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan1_fault.dev_attr.attr,
&sensor_dev_attr_fan2_fault.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_emergency_alarm.dev_attr.attr,
NULL
};
static const struct attribute_group max6639_group = {
.attrs = max6639_attributes,
};
/*
 * Returns the respective index in the rpm_ranges table,
 * or 1 (4000 RPM) by default for an invalid range.
*/
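/*
 * For example, assuming the rpm_ranges[] table defined earlier in this
 * driver is { 2000, 4000, 8000, 16000 }, rpm_range_to_reg(8000) returns 2,
 * while an unsupported value such as 5000 falls back to index 1 (4000 RPM).
 */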
static int rpm_range_to_reg(int range)
{
int i;
for (i = 0; i < ARRAY_SIZE(rpm_ranges); i++) {
if (rpm_ranges[i] == range)
return i;
}
return 1; /* default: 4000 RPM */
}
static int max6639_init_client(struct i2c_client *client)
{
struct max6639_data *data = i2c_get_clientdata(client);
struct max6639_platform_data *max6639_info =
client->dev.platform_data;
int i;
int rpm_range = 1; /* default: 4000 RPM */
int err;
/* Reset chip to default values, see below for GCONFIG setup */
err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
MAX6639_GCONFIG_POR);
if (err)
goto exit;
	/* Fan pulses per revolution default to 2 */
if (max6639_info && max6639_info->ppr > 0 &&
max6639_info->ppr < 5)
data->ppr = max6639_info->ppr;
else
data->ppr = 2;
data->ppr -= 1;
if (max6639_info)
rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
data->rpm_range = rpm_range;
for (i = 0; i < 2; i++) {
/* Set Fan pulse per revolution */
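		/* data->ppr was stored above as (pulses per rev - 1) and lives
		 * in bits 7:6 of the register, e.g. 2 pulses/rev -> 1 << 6 = 0x40.
		 */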
err = i2c_smbus_write_byte_data(client,
MAX6639_REG_FAN_PPR(i),
data->ppr << 6);
if (err)
goto exit;
		/* Configure fan for PWM mode and the selected RPM range */
err = i2c_smbus_write_byte_data(client,
MAX6639_REG_FAN_CONFIG1(i),
MAX6639_FAN_CONFIG1_PWM | rpm_range);
if (err)
goto exit;
		/* Fan PWM polarity is active-high by default */
if (max6639_info && max6639_info->pwm_polarity == 0)
err = i2c_smbus_write_byte_data(client,
MAX6639_REG_FAN_CONFIG2a(i), 0x00);
else
err = i2c_smbus_write_byte_data(client,
MAX6639_REG_FAN_CONFIG2a(i), 0x02);
if (err)
goto exit;
/*
* /THERM full speed enable,
* PWM frequency 25kHz, see also GCONFIG below
*/
err = i2c_smbus_write_byte_data(client,
MAX6639_REG_FAN_CONFIG3(i),
MAX6639_FAN_CONFIG3_THERM_FULL_SPEED | 0x03);
if (err)
goto exit;
/* Max. temp. 80C/90C/100C */
data->temp_therm[i] = 80;
data->temp_alert[i] = 90;
data->temp_ot[i] = 100;
err = i2c_smbus_write_byte_data(client,
MAX6639_REG_THERM_LIMIT(i),
data->temp_therm[i]);
if (err)
goto exit;
err = i2c_smbus_write_byte_data(client,
MAX6639_REG_ALERT_LIMIT(i),
data->temp_alert[i]);
if (err)
goto exit;
err = i2c_smbus_write_byte_data(client,
MAX6639_REG_OT_LIMIT(i), data->temp_ot[i]);
if (err)
goto exit;
/* PWM 120/120 (i.e. 100%) */
data->pwm[i] = 120;
err = i2c_smbus_write_byte_data(client,
MAX6639_REG_TARGTDUTY(i), data->pwm[i]);
if (err)
goto exit;
}
/* Start monitoring */
err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
MAX6639_GCONFIG_DISABLE_TIMEOUT | MAX6639_GCONFIG_CH2_LOCAL |
MAX6639_GCONFIG_PWM_FREQ_HI);
exit:
return err;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
static int max6639_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int dev_id, manu_id;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
/* Actual detection via device and manufacturer ID */
dev_id = i2c_smbus_read_byte_data(client, MAX6639_REG_DEVID);
manu_id = i2c_smbus_read_byte_data(client, MAX6639_REG_MANUID);
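	/* The values checked below are the MAX6639's fixed IDs: device ID
	 * 0x58 and manufacturer ID 0x4D (presumably ASCII 'M' for Maxim).
	 */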
if (dev_id != 0x58 || manu_id != 0x4D)
return -ENODEV;
strlcpy(info->type, "max6639", I2C_NAME_SIZE);
return 0;
}
static int max6639_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max6639_data *data;
int err;
data = kzalloc(sizeof(struct max6639_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
/* Initialize the max6639 chip */
err = max6639_init_client(client);
if (err < 0)
goto error_free;
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &max6639_group);
if (err)
goto error_free;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto error_remove;
}
dev_info(&client->dev, "temperature sensor and fan control found\n");
return 0;
error_remove:
sysfs_remove_group(&client->dev.kobj, &max6639_group);
error_free:
kfree(data);
exit:
return err;
}
static int max6639_remove(struct i2c_client *client)
{
struct max6639_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &max6639_group);
kfree(data);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int max6639_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
if (data < 0)
return data;
return i2c_smbus_write_byte_data(client,
MAX6639_REG_GCONFIG, data | MAX6639_GCONFIG_STANDBY);
}
static int max6639_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
if (data < 0)
return data;
return i2c_smbus_write_byte_data(client,
MAX6639_REG_GCONFIG, data & ~MAX6639_GCONFIG_STANDBY);
}
#endif /* CONFIG_PM_SLEEP */
static const struct i2c_device_id max6639_id[] = {
{"max6639", 0},
{ }
};
MODULE_DEVICE_TABLE(i2c, max6639_id);
static const struct dev_pm_ops max6639_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(max6639_suspend, max6639_resume)
};
static struct i2c_driver max6639_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "max6639",
.pm = &max6639_pm_ops,
},
.probe = max6639_probe,
.remove = max6639_remove,
.id_table = max6639_id,
.detect = max6639_detect,
.address_list = normal_i2c,
};
module_i2c_driver(max6639_driver);
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("max6639 driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
DesolationStaging/kernel_motorola_msm8226 | arch/sparc/kernel/pci_schizo.c | 4849 | 48989 | /* pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
*
* Copyright (C) 2001, 2002, 2003, 2007, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/pstate.h>
#include <asm/prom.h>
#include <asm/upa.h>
#include "pci_impl.h"
#include "iommu_common.h"
#define DRIVER_NAME "schizo"
#define PFX DRIVER_NAME ": "
/* This is a convention that at least Excalibur and Merlin
* follow. I suppose the SCHIZO used in Starcat and friends
* will do similar.
*
* The only way I could see this changing is if the newlink
* block requires more space in Schizo's address space than
* they predicted, thus requiring an address space reorg when
* the newer Schizo is taped out.
*/
/* Streaming buffer control register. */
#define SCHIZO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */
#define SCHIZO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */
#define SCHIZO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */
#define SCHIZO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
#define SCHIZO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */
/* IOMMU control register. */
#define SCHIZO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */
#define SCHIZO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */
#define SCHIZO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */
#define SCHIZO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */
#define SCHIZO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */
#define SCHIZO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
#define SCHIZO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */
#define SCHIZO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */
#define SCHIZO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
#define SCHIZO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */
#define SCHIZO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
/* Schizo config space address format is nearly identical to
* that of PSYCHO:
*
* 32 24 23 16 15 11 10 8 7 2 1 0
* ---------------------------------------------------------
* |0 0 0 0 0 0 0 0 0| bus | device | function | reg | 0 0 |
* ---------------------------------------------------------
*/
#define SCHIZO_CONFIG_BASE(PBM) ((PBM)->config_space)
#define SCHIZO_CONFIG_ENCODE(BUS, DEVFN, REG) \
(((unsigned long)(BUS) << 16) | \
((unsigned long)(DEVFN) << 8) | \
((unsigned long)(REG)))
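/* Worked example: bus 0x01, devfn 0x08 (device 1, function 0), reg 0x04
 * encodes to (0x01 << 16) | (0x08 << 8) | 0x04 = 0x00010804, which is then
 * ORed into the PBM's config_space base below.
 */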
static void *schizo_pci_config_mkaddr(struct pci_pbm_info *pbm,
unsigned char bus,
unsigned int devfn,
int where)
{
if (!pbm)
return NULL;
bus -= pbm->pci_first_busno;
return (void *)
(SCHIZO_CONFIG_BASE(pbm) |
SCHIZO_CONFIG_ENCODE(bus, devfn, where));
}
/* SCHIZO error handling support. */
enum schizo_error_type {
UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR
};
static DEFINE_SPINLOCK(stc_buf_lock);
static unsigned long stc_error_buf[128];
static unsigned long stc_tag_buf[16];
static unsigned long stc_line_buf[16];
#define SCHIZO_UE_INO 0x30 /* Uncorrectable ECC error */
#define SCHIZO_CE_INO 0x31 /* Correctable ECC error */
#define SCHIZO_PCIERR_A_INO 0x32 /* PBM A PCI bus error */
#define SCHIZO_PCIERR_B_INO 0x33 /* PBM B PCI bus error */
#define SCHIZO_SERR_INO 0x34 /* Safari interface error */
#define SCHIZO_STC_ERR 0xb800UL /* --> 0xba00 */
#define SCHIZO_STC_TAG 0xba00UL /* --> 0xba80 */
#define SCHIZO_STC_LINE 0xbb00UL /* --> 0xbb80 */
#define SCHIZO_STCERR_WRITE 0x2UL
#define SCHIZO_STCERR_READ 0x1UL
#define SCHIZO_STCTAG_PPN 0x3fffffff00000000UL
#define SCHIZO_STCTAG_VPN 0x00000000ffffe000UL
#define SCHIZO_STCTAG_VALID 0x8000000000000000UL
#define SCHIZO_STCTAG_READ 0x4000000000000000UL
#define SCHIZO_STCLINE_LINDX 0x0000000007800000UL
#define SCHIZO_STCLINE_SPTR 0x000000000007e000UL
#define SCHIZO_STCLINE_LADDR 0x0000000000001fc0UL
#define SCHIZO_STCLINE_EPTR 0x000000000000003fUL
#define SCHIZO_STCLINE_VALID 0x0000000000600000UL
#define SCHIZO_STCLINE_FOFN 0x0000000000180000UL
static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
enum schizo_error_type type)
{
struct strbuf *strbuf = &pbm->stc;
unsigned long regbase = pbm->pbm_regs;
unsigned long err_base, tag_base, line_base;
u64 control;
int i;
err_base = regbase + SCHIZO_STC_ERR;
tag_base = regbase + SCHIZO_STC_TAG;
line_base = regbase + SCHIZO_STC_LINE;
spin_lock(&stc_buf_lock);
/* This is __REALLY__ dangerous. When we put the
* streaming buffer into diagnostic mode to probe
 * its tags and error status, we _must_ clear all
* of the line tag valid bits before re-enabling
* the streaming buffer. If any dirty data lives
* in the STC when we do this, we will end up
* invalidating it before it has a chance to reach
* main memory.
*/
control = upa_readq(strbuf->strbuf_control);
upa_writeq((control | SCHIZO_STRBUF_CTRL_DENAB),
strbuf->strbuf_control);
for (i = 0; i < 128; i++) {
unsigned long val;
val = upa_readq(err_base + (i * 8UL));
upa_writeq(0UL, err_base + (i * 8UL));
stc_error_buf[i] = val;
}
for (i = 0; i < 16; i++) {
stc_tag_buf[i] = upa_readq(tag_base + (i * 8UL));
stc_line_buf[i] = upa_readq(line_base + (i * 8UL));
upa_writeq(0UL, tag_base + (i * 8UL));
upa_writeq(0UL, line_base + (i * 8UL));
}
/* OK, state is logged, exit diagnostic mode. */
upa_writeq(control, strbuf->strbuf_control);
for (i = 0; i < 16; i++) {
int j, saw_error, first, last;
saw_error = 0;
first = i * 8;
last = first + 8;
for (j = first; j < last; j++) {
unsigned long errval = stc_error_buf[j];
if (errval != 0) {
saw_error++;
printk("%s: STC_ERR(%d)[wr(%d)rd(%d)]\n",
pbm->name,
j,
(errval & SCHIZO_STCERR_WRITE) ? 1 : 0,
(errval & SCHIZO_STCERR_READ) ? 1 : 0);
}
}
if (saw_error != 0) {
unsigned long tagval = stc_tag_buf[i];
unsigned long lineval = stc_line_buf[i];
printk("%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)R(%d)]\n",
pbm->name,
i,
((tagval & SCHIZO_STCTAG_PPN) >> 19UL),
(tagval & SCHIZO_STCTAG_VPN),
((tagval & SCHIZO_STCTAG_VALID) ? 1 : 0),
((tagval & SCHIZO_STCTAG_READ) ? 1 : 0));
/* XXX Should spit out per-bank error information... -DaveM */
printk("%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
"V(%d)FOFN(%d)]\n",
pbm->name,
i,
((lineval & SCHIZO_STCLINE_LINDX) >> 23UL),
((lineval & SCHIZO_STCLINE_SPTR) >> 13UL),
((lineval & SCHIZO_STCLINE_LADDR) >> 6UL),
((lineval & SCHIZO_STCLINE_EPTR) >> 0UL),
((lineval & SCHIZO_STCLINE_VALID) ? 1 : 0),
((lineval & SCHIZO_STCLINE_FOFN) ? 1 : 0));
}
}
spin_unlock(&stc_buf_lock);
}
/* IOMMU is per-PBM in Schizo, so interrogate both for anonymous
* controller level errors.
*/
#define SCHIZO_IOMMU_TAG 0xa580UL
#define SCHIZO_IOMMU_DATA 0xa600UL
#define SCHIZO_IOMMU_TAG_CTXT 0x0000001ffe000000UL
#define SCHIZO_IOMMU_TAG_ERRSTS 0x0000000001800000UL
#define SCHIZO_IOMMU_TAG_ERR 0x0000000000400000UL
#define SCHIZO_IOMMU_TAG_WRITE 0x0000000000200000UL
#define SCHIZO_IOMMU_TAG_STREAM 0x0000000000100000UL
#define SCHIZO_IOMMU_TAG_SIZE 0x0000000000080000UL
#define SCHIZO_IOMMU_TAG_VPAGE 0x000000000007ffffUL
#define SCHIZO_IOMMU_DATA_VALID 0x0000000100000000UL
#define SCHIZO_IOMMU_DATA_CACHE 0x0000000040000000UL
#define SCHIZO_IOMMU_DATA_PPAGE 0x000000003fffffffUL
static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
enum schizo_error_type type)
{
struct iommu *iommu = pbm->iommu;
unsigned long iommu_tag[16];
unsigned long iommu_data[16];
unsigned long flags;
u64 control;
int i;
spin_lock_irqsave(&iommu->lock, flags);
control = upa_readq(iommu->iommu_control);
if (control & SCHIZO_IOMMU_CTRL_XLTEERR) {
unsigned long base;
char *type_string;
/* Clear the error encountered bit. */
control &= ~SCHIZO_IOMMU_CTRL_XLTEERR;
upa_writeq(control, iommu->iommu_control);
switch((control & SCHIZO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
case 0:
type_string = "Protection Error";
break;
case 1:
type_string = "Invalid Error";
break;
case 2:
type_string = "TimeOut Error";
break;
case 3:
default:
type_string = "ECC Error";
break;
}
printk("%s: IOMMU Error, type[%s]\n",
pbm->name, type_string);
/* Put the IOMMU into diagnostic mode and probe
	 * its TLB for entries with error status.
*
* It is very possible for another DVMA to occur
* while we do this probe, and corrupt the system
* further. But we are so screwed at this point
* that we are likely to crash hard anyways, so
* get as much diagnostic information to the
* console as we can.
*/
upa_writeq(control | SCHIZO_IOMMU_CTRL_DENAB,
iommu->iommu_control);
base = pbm->pbm_regs;
for (i = 0; i < 16; i++) {
iommu_tag[i] =
upa_readq(base + SCHIZO_IOMMU_TAG + (i * 8UL));
iommu_data[i] =
upa_readq(base + SCHIZO_IOMMU_DATA + (i * 8UL));
/* Now clear out the entry. */
upa_writeq(0, base + SCHIZO_IOMMU_TAG + (i * 8UL));
upa_writeq(0, base + SCHIZO_IOMMU_DATA + (i * 8UL));
}
/* Leave diagnostic mode. */
upa_writeq(control, iommu->iommu_control);
for (i = 0; i < 16; i++) {
unsigned long tag, data;
tag = iommu_tag[i];
if (!(tag & SCHIZO_IOMMU_TAG_ERR))
continue;
data = iommu_data[i];
switch((tag & SCHIZO_IOMMU_TAG_ERRSTS) >> 23UL) {
case 0:
type_string = "Protection Error";
break;
case 1:
type_string = "Invalid Error";
break;
case 2:
type_string = "TimeOut Error";
break;
case 3:
default:
type_string = "ECC Error";
break;
}
printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
"sz(%dK) vpg(%08lx)]\n",
pbm->name, i, type_string,
(int)((tag & SCHIZO_IOMMU_TAG_CTXT) >> 25UL),
((tag & SCHIZO_IOMMU_TAG_WRITE) ? 1 : 0),
((tag & SCHIZO_IOMMU_TAG_STREAM) ? 1 : 0),
((tag & SCHIZO_IOMMU_TAG_SIZE) ? 64 : 8),
(tag & SCHIZO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
printk("%s: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
pbm->name, i,
((data & SCHIZO_IOMMU_DATA_VALID) ? 1 : 0),
((data & SCHIZO_IOMMU_DATA_CACHE) ? 1 : 0),
(data & SCHIZO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
}
}
if (pbm->stc.strbuf_enabled)
__schizo_check_stc_error_pbm(pbm, type);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static void schizo_check_iommu_error(struct pci_pbm_info *pbm,
enum schizo_error_type type)
{
schizo_check_iommu_error_pbm(pbm, type);
if (pbm->sibling)
schizo_check_iommu_error_pbm(pbm->sibling, type);
}
/* Uncorrectable ECC error status gathering. */
#define SCHIZO_UE_AFSR 0x10030UL
#define SCHIZO_UE_AFAR 0x10038UL
#define SCHIZO_UEAFSR_PPIO 0x8000000000000000UL /* Safari */
#define SCHIZO_UEAFSR_PDRD 0x4000000000000000UL /* Safari/Tomatillo */
#define SCHIZO_UEAFSR_PDWR 0x2000000000000000UL /* Safari */
#define SCHIZO_UEAFSR_SPIO 0x1000000000000000UL /* Safari */
#define SCHIZO_UEAFSR_SDMA 0x0800000000000000UL /* Safari/Tomatillo */
#define SCHIZO_UEAFSR_ERRPNDG 0x0300000000000000UL /* Safari */
#define SCHIZO_UEAFSR_BMSK 0x000003ff00000000UL /* Safari */
#define SCHIZO_UEAFSR_QOFF 0x00000000c0000000UL /* Safari/Tomatillo */
#define SCHIZO_UEAFSR_AID 0x000000001f000000UL /* Safari/Tomatillo */
#define SCHIZO_UEAFSR_PARTIAL 0x0000000000800000UL /* Safari */
#define SCHIZO_UEAFSR_OWNEDIN 0x0000000000400000UL /* Safari */
#define SCHIZO_UEAFSR_MTAGSYND 0x00000000000f0000UL /* Safari */
#define SCHIZO_UEAFSR_MTAG 0x000000000000e000UL /* Safari */
#define SCHIZO_UEAFSR_ECCSYND 0x00000000000001ffUL /* Safari */
static irqreturn_t schizo_ue_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg = pbm->controller_regs + SCHIZO_UE_AFSR;
unsigned long afar_reg = pbm->controller_regs + SCHIZO_UE_AFAR;
unsigned long afsr, afar, error_bits;
int reported, limit;
/* Latch uncorrectable error status. */
afar = upa_readq(afar_reg);
	/* If either of the error pending bits is set in the
* AFSR, the error status is being actively updated by
* the hardware and we must re-read to get a clean value.
*/
limit = 1000;
do {
afsr = upa_readq(afsr_reg);
} while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);
/* Clear the primary/secondary error status bits. */
error_bits = afsr &
(SCHIZO_UEAFSR_PPIO | SCHIZO_UEAFSR_PDRD | SCHIZO_UEAFSR_PDWR |
SCHIZO_UEAFSR_SPIO | SCHIZO_UEAFSR_SDMA);
if (!error_bits)
return IRQ_NONE;
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: Uncorrectable Error, primary error type[%s]\n",
pbm->name,
(((error_bits & SCHIZO_UEAFSR_PPIO) ?
"PIO" :
((error_bits & SCHIZO_UEAFSR_PDRD) ?
"DMA Read" :
((error_bits & SCHIZO_UEAFSR_PDWR) ?
"DMA Write" : "???")))));
printk("%s: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
pbm->name,
(afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
(afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
(afsr & SCHIZO_UEAFSR_AID) >> 24UL);
printk("%s: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
pbm->name,
(afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
(afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
(afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
(afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
(afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
printk("%s: UE AFAR [%016lx]\n", pbm->name, afar);
printk("%s: UE Secondary errors [", pbm->name);
reported = 0;
if (afsr & SCHIZO_UEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & SCHIZO_UEAFSR_SDMA) {
reported++;
printk("(DMA)");
}
if (!reported)
printk("(none)");
printk("]\n");
/* Interrogate IOMMU for error status. */
schizo_check_iommu_error(pbm, UE_ERR);
return IRQ_HANDLED;
}
#define SCHIZO_CE_AFSR 0x10040UL
#define SCHIZO_CE_AFAR 0x10048UL
#define SCHIZO_CEAFSR_PPIO 0x8000000000000000UL
#define SCHIZO_CEAFSR_PDRD 0x4000000000000000UL
#define SCHIZO_CEAFSR_PDWR 0x2000000000000000UL
#define SCHIZO_CEAFSR_SPIO 0x1000000000000000UL
#define SCHIZO_CEAFSR_SDMA 0x0800000000000000UL
#define SCHIZO_CEAFSR_ERRPNDG 0x0300000000000000UL
#define SCHIZO_CEAFSR_BMSK 0x000003ff00000000UL
#define SCHIZO_CEAFSR_QOFF 0x00000000c0000000UL
#define SCHIZO_CEAFSR_AID 0x000000001f000000UL
#define SCHIZO_CEAFSR_PARTIAL 0x0000000000800000UL
#define SCHIZO_CEAFSR_OWNEDIN 0x0000000000400000UL
#define SCHIZO_CEAFSR_MTAGSYND 0x00000000000f0000UL
#define SCHIZO_CEAFSR_MTAG 0x000000000000e000UL
#define SCHIZO_CEAFSR_ECCSYND 0x00000000000001ffUL
static irqreturn_t schizo_ce_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg = pbm->controller_regs + SCHIZO_CE_AFSR;
unsigned long afar_reg = pbm->controller_regs + SCHIZO_CE_AFAR;
unsigned long afsr, afar, error_bits;
int reported, limit;
/* Latch error status. */
afar = upa_readq(afar_reg);
	/* If either of the error pending bits is set in the
* AFSR, the error status is being actively updated by
* the hardware and we must re-read to get a clean value.
*/
limit = 1000;
do {
afsr = upa_readq(afsr_reg);
} while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SCHIZO_CEAFSR_PPIO | SCHIZO_CEAFSR_PDRD | SCHIZO_CEAFSR_PDWR |
SCHIZO_CEAFSR_SPIO | SCHIZO_CEAFSR_SDMA);
if (!error_bits)
return IRQ_NONE;
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: Correctable Error, primary error type[%s]\n",
pbm->name,
(((error_bits & SCHIZO_CEAFSR_PPIO) ?
"PIO" :
((error_bits & SCHIZO_CEAFSR_PDRD) ?
"DMA Read" :
((error_bits & SCHIZO_CEAFSR_PDWR) ?
"DMA Write" : "???")))));
/* XXX Use syndrome and afar to print out module string just like
* XXX UDB CE trap handler does... -DaveM
*/
printk("%s: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
pbm->name,
(afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
(afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
(afsr & SCHIZO_UEAFSR_AID) >> 24UL);
printk("%s: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
pbm->name,
(afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
(afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
(afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
(afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
(afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
printk("%s: CE AFAR [%016lx]\n", pbm->name, afar);
printk("%s: CE Secondary errors [", pbm->name);
reported = 0;
if (afsr & SCHIZO_CEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & SCHIZO_CEAFSR_SDMA) {
reported++;
printk("(DMA)");
}
if (!reported)
printk("(none)");
printk("]\n");
return IRQ_HANDLED;
}
#define SCHIZO_PCI_AFSR 0x2010UL
#define SCHIZO_PCI_AFAR 0x2018UL
#define SCHIZO_PCIAFSR_PMA 0x8000000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PTA 0x4000000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PRTRY 0x2000000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PPERR 0x1000000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PTTO 0x0800000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PUNUS 0x0400000000000000UL /* Schizo */
#define SCHIZO_PCIAFSR_SMA 0x0200000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_STA 0x0100000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_SRTRY 0x0080000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_SPERR 0x0040000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_STTO 0x0020000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_SUNUS 0x0010000000000000UL /* Schizo */
#define SCHIZO_PCIAFSR_BMSK 0x000003ff00000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_BLK 0x0000000080000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_CFG 0x0000000040000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_MEM 0x0000000020000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_IO 0x0000000010000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCI_CTRL (0x2000UL)
#define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */
#define SCHIZO_PCICTRL_DTO_INT (1UL << 61UL) /* Tomatillo */
#define SCHIZO_PCICTRL_ARB_PRIO	(0x1ffUL << 52UL) /* Tomatillo */
#define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */
#define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */
#define SCHIZO_PCICTRL_TTO_ERR (1UL << 38UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_RTRY_ERR (1UL << 37UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_DTO_ERR (1UL << 36UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_SBH_ERR (1UL << 35UL) /* Safari */
#define SCHIZO_PCICTRL_SERR (1UL << 34UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_PCISPD (1UL << 33UL) /* Safari */
#define SCHIZO_PCICTRL_MRM_PREF (1UL << 30UL) /* Tomatillo */
#define SCHIZO_PCICTRL_RDO_PREF (1UL << 29UL) /* Tomatillo */
#define SCHIZO_PCICTRL_RDL_PREF (1UL << 28UL) /* Tomatillo */
#define SCHIZO_PCICTRL_PTO (3UL << 24UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_PTO_SHIFT 24UL
#define SCHIZO_PCICTRL_TRWSW (7UL << 21UL) /* Tomatillo */
#define SCHIZO_PCICTRL_F_TGT_A (1UL << 20UL) /* Tomatillo */
#define SCHIZO_PCICTRL_S_DTO_INT (1UL << 19UL) /* Safari */
#define SCHIZO_PCICTRL_F_TGT_RT (1UL << 19UL) /* Tomatillo */
#define SCHIZO_PCICTRL_SBH_INT (1UL << 18UL) /* Safari */
#define SCHIZO_PCICTRL_T_DTO_INT (1UL << 18UL) /* Tomatillo */
#define SCHIZO_PCICTRL_EEN (1UL << 17UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_PARK (1UL << 16UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_PCIRST (1UL << 8UL) /* Safari */
#define SCHIZO_PCICTRL_ARB_S (0x3fUL << 0UL) /* Safari */
#define SCHIZO_PCICTRL_ARB_T (0xffUL << 0UL) /* Tomatillo */
static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
{
unsigned long csr_reg, csr, csr_error_bits;
irqreturn_t ret = IRQ_NONE;
u16 stat;
csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
csr = upa_readq(csr_reg);
csr_error_bits =
csr & (SCHIZO_PCICTRL_BUS_UNUS |
SCHIZO_PCICTRL_TTO_ERR |
SCHIZO_PCICTRL_RTRY_ERR |
SCHIZO_PCICTRL_DTO_ERR |
SCHIZO_PCICTRL_SBH_ERR |
SCHIZO_PCICTRL_SERR);
if (csr_error_bits) {
/* Clear the errors. */
upa_writeq(csr, csr_reg);
/* Log 'em. */
if (csr_error_bits & SCHIZO_PCICTRL_BUS_UNUS)
printk("%s: Bus unusable error asserted.\n",
pbm->name);
if (csr_error_bits & SCHIZO_PCICTRL_TTO_ERR)
printk("%s: PCI TRDY# timeout error asserted.\n",
pbm->name);
if (csr_error_bits & SCHIZO_PCICTRL_RTRY_ERR)
printk("%s: PCI excessive retry error asserted.\n",
pbm->name);
if (csr_error_bits & SCHIZO_PCICTRL_DTO_ERR)
printk("%s: PCI discard timeout error asserted.\n",
pbm->name);
if (csr_error_bits & SCHIZO_PCICTRL_SBH_ERR)
printk("%s: PCI streaming byte hole error asserted.\n",
pbm->name);
if (csr_error_bits & SCHIZO_PCICTRL_SERR)
printk("%s: PCI SERR signal asserted.\n",
pbm->name);
ret = IRQ_HANDLED;
}
pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
if (stat & (PCI_STATUS_PARITY |
PCI_STATUS_SIG_TARGET_ABORT |
PCI_STATUS_REC_TARGET_ABORT |
PCI_STATUS_REC_MASTER_ABORT |
PCI_STATUS_SIG_SYSTEM_ERROR)) {
printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
pbm->name, stat);
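		/* PCI_STATUS error bits are write-one-to-clear, so writing
		 * 0xffff below clears whatever error bits were latched.
		 */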
pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
ret = IRQ_HANDLED;
}
return ret;
}
static irqreturn_t schizo_pcierr_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg, afar_reg, base;
unsigned long afsr, afar, error_bits;
int reported;
base = pbm->pbm_regs;
afsr_reg = base + SCHIZO_PCI_AFSR;
afar_reg = base + SCHIZO_PCI_AFAR;
/* Latch error status. */
afar = upa_readq(afar_reg);
afsr = upa_readq(afsr_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS);
if (!error_bits)
return schizo_pcierr_intr_other(pbm);
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: PCI Error, primary error type[%s]\n",
pbm->name,
(((error_bits & SCHIZO_PCIAFSR_PMA) ?
"Master Abort" :
((error_bits & SCHIZO_PCIAFSR_PTA) ?
"Target Abort" :
((error_bits & SCHIZO_PCIAFSR_PRTRY) ?
"Excessive Retries" :
((error_bits & SCHIZO_PCIAFSR_PPERR) ?
"Parity Error" :
((error_bits & SCHIZO_PCIAFSR_PTTO) ?
"Timeout" :
((error_bits & SCHIZO_PCIAFSR_PUNUS) ?
"Bus Unusable" : "???"))))))));
printk("%s: bytemask[%04lx] was_block(%d) space(%s)\n",
pbm->name,
(afsr & SCHIZO_PCIAFSR_BMSK) >> 32UL,
(afsr & SCHIZO_PCIAFSR_BLK) ? 1 : 0,
((afsr & SCHIZO_PCIAFSR_CFG) ?
"Config" :
((afsr & SCHIZO_PCIAFSR_MEM) ?
"Memory" :
((afsr & SCHIZO_PCIAFSR_IO) ?
"I/O" : "???"))));
printk("%s: PCI AFAR [%016lx]\n",
pbm->name, afar);
printk("%s: PCI Secondary errors [",
pbm->name);
reported = 0;
if (afsr & SCHIZO_PCIAFSR_SMA) {
reported++;
printk("(Master Abort)");
}
if (afsr & SCHIZO_PCIAFSR_STA) {
reported++;
printk("(Target Abort)");
}
if (afsr & SCHIZO_PCIAFSR_SRTRY) {
reported++;
printk("(Excessive Retries)");
}
if (afsr & SCHIZO_PCIAFSR_SPERR) {
reported++;
printk("(Parity Error)");
}
if (afsr & SCHIZO_PCIAFSR_STTO) {
reported++;
printk("(Timeout)");
}
if (afsr & SCHIZO_PCIAFSR_SUNUS) {
reported++;
printk("(Bus Unusable)");
}
if (!reported)
printk("(none)");
printk("]\n");
/* For the error types shown, scan PBM's PCI bus for devices
* which have logged that error type.
*/
/* If we see a Target Abort, this could be the result of an
* IOMMU translation error of some sort. It is extremely
* useful to log this information as usually it indicates
* a bug in the IOMMU support code or a PCI device driver.
*/
if (error_bits & (SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_STA)) {
schizo_check_iommu_error(pbm, PCI_ERR);
pci_scan_for_target_abort(pbm, pbm->pci_bus);
}
if (error_bits & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_SMA))
pci_scan_for_master_abort(pbm, pbm->pci_bus);
/* For excessive retries, PSYCHO/PBM will abort the device
* and there is no way to specifically check for excessive
* retries in the config space status registers. So what
* we hope is that we'll catch it via the master/target
* abort events.
*/
if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR))
pci_scan_for_parity_error(pbm, pbm->pci_bus);
return IRQ_HANDLED;
}
#define SCHIZO_SAFARI_ERRLOG 0x10018UL
#define SAFARI_ERRLOG_ERROUT 0x8000000000000000UL
#define BUS_ERROR_BADCMD 0x4000000000000000UL /* Schizo/Tomatillo */
#define BUS_ERROR_SSMDIS 0x2000000000000000UL /* Safari */
#define BUS_ERROR_BADMA 0x1000000000000000UL /* Safari */
#define BUS_ERROR_BADMB 0x0800000000000000UL /* Safari */
#define BUS_ERROR_BADMC 0x0400000000000000UL /* Safari */
#define BUS_ERROR_SNOOP_GR 0x0000000000200000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_PCI 0x0000000000100000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RD 0x0000000000080000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RDS 0x0000000000020000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RDSA 0x0000000000010000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_OWN 0x0000000000008000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RDO 0x0000000000004000UL /* Tomatillo */
#define BUS_ERROR_CPU1PS 0x0000000000002000UL /* Safari */
#define BUS_ERROR_WDATA_PERR 0x0000000000002000UL /* Tomatillo */
#define BUS_ERROR_CPU1PB 0x0000000000001000UL /* Safari */
#define BUS_ERROR_CTRL_PERR 0x0000000000001000UL /* Tomatillo */
#define BUS_ERROR_CPU0PS 0x0000000000000800UL /* Safari */
#define BUS_ERROR_SNOOP_ERR 0x0000000000000800UL /* Tomatillo */
#define BUS_ERROR_CPU0PB 0x0000000000000400UL /* Safari */
#define BUS_ERROR_JBUS_ILL_B 0x0000000000000400UL /* Tomatillo */
#define BUS_ERROR_CIQTO 0x0000000000000200UL /* Safari */
#define BUS_ERROR_LPQTO 0x0000000000000100UL /* Safari */
#define BUS_ERROR_JBUS_ILL_C 0x0000000000000100UL /* Tomatillo */
#define BUS_ERROR_SFPQTO 0x0000000000000080UL /* Safari */
#define BUS_ERROR_UFPQTO 0x0000000000000040UL /* Safari */
#define BUS_ERROR_RD_PERR 0x0000000000000040UL /* Tomatillo */
#define BUS_ERROR_APERR 0x0000000000000020UL /* Safari/Tomatillo */
#define BUS_ERROR_UNMAP 0x0000000000000010UL /* Safari/Tomatillo */
#define BUS_ERROR_BUSERR 0x0000000000000004UL /* Safari/Tomatillo */
#define BUS_ERROR_TIMEOUT 0x0000000000000002UL /* Safari/Tomatillo */
#define BUS_ERROR_ILL 0x0000000000000001UL /* Safari */
/* We only expect UNMAP errors here. The rest of the Safari errors
* are marked fatal and thus cause a system reset.
*/
static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
u64 errlog;
errlog = upa_readq(pbm->controller_regs + SCHIZO_SAFARI_ERRLOG);
upa_writeq(errlog & ~(SAFARI_ERRLOG_ERROUT),
pbm->controller_regs + SCHIZO_SAFARI_ERRLOG);
if (!(errlog & BUS_ERROR_UNMAP)) {
printk("%s: Unexpected Safari/JBUS error interrupt, errlog[%016llx]\n",
pbm->name, errlog);
return IRQ_HANDLED;
}
printk("%s: Safari/JBUS interrupt, UNMAPPED error, interrogating IOMMUs.\n",
pbm->name);
schizo_check_iommu_error(pbm, SAFARI_ERR);
return IRQ_HANDLED;
}
/* Nearly identical to PSYCHO equivalents... */
#define SCHIZO_ECC_CTRL 0x10020UL
#define SCHIZO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */
#define SCHIZO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */
#define SCHIZO_ECCCTRL_CE	0x2000000000000000UL /* Enable CE Interrupts */
#define SCHIZO_SAFARI_ERRCTRL 0x10008UL
#define SCHIZO_SAFERRCTRL_EN 0x8000000000000000UL
#define SCHIZO_SAFARI_IRQCTRL 0x10010UL
#define SCHIZO_SAFIRQCTRL_EN 0x8000000000000000UL
static int pbm_routes_this_ino(struct pci_pbm_info *pbm, u32 ino)
{
ino &= IMAP_INO;
if (pbm->ino_bitmap & (1UL << ino))
return 1;
return 0;
}
/* How the Tomatillo IRQs are routed around is pure guesswork here.
*
 * All the Tomatillo devices I see in prtconf dumps seem to have only
 * a single PCI bus unit attached to them.  It would seem they are separate
 * devices because their PortID (ie. JBUS ID) values are all different
 * and thus the registers are mapped to totally different locations.
 *
 * However, two Tomatillos look "similar" in that the only difference
 * in their PortID is the lowest bit.
*
* So if we were to ignore this lower bit, it certainly looks like two
* PCI bus units of the same Tomatillo. I still have not really
* figured this out...
*/
static void tomatillo_register_error_handlers(struct pci_pbm_info *pbm)
{
struct platform_device *op = of_find_device_by_node(pbm->op->dev.of_node);
u64 tmp, err_mask, err_no_mask;
int err;
/* Tomatillo IRQ property layout is:
* 0: PCIERR
* 1: UE ERR
* 2: CE ERR
* 3: SERR
* 4: POWER FAIL?
*/
if (pbm_routes_this_ino(pbm, SCHIZO_UE_INO)) {
err = request_irq(op->archdata.irqs[1], schizo_ue_intr, 0,
"TOMATILLO_UE", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register UE, "
"err=%d\n", pbm->name, err);
}
if (pbm_routes_this_ino(pbm, SCHIZO_CE_INO)) {
err = request_irq(op->archdata.irqs[2], schizo_ce_intr, 0,
"TOMATILLO_CE", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register CE, "
"err=%d\n", pbm->name, err);
}
err = 0;
if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_A_INO)) {
err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0,
"TOMATILLO_PCIERR", pbm);
} else if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_B_INO)) {
err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0,
"TOMATILLO_PCIERR", pbm);
}
if (err)
printk(KERN_WARNING "%s: Could not register PCIERR, "
"err=%d\n", pbm->name, err);
if (pbm_routes_this_ino(pbm, SCHIZO_SERR_INO)) {
err = request_irq(op->archdata.irqs[3], schizo_safarierr_intr, 0,
"TOMATILLO_SERR", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register SERR, "
"err=%d\n", pbm->name, err);
}
/* Enable UE and CE interrupts for controller. */
upa_writeq((SCHIZO_ECCCTRL_EE |
SCHIZO_ECCCTRL_UE |
SCHIZO_ECCCTRL_CE), pbm->controller_regs + SCHIZO_ECC_CTRL);
/* Enable PCI Error interrupts and clear error
* bits.
*/
err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
SCHIZO_PCICTRL_TTO_ERR |
SCHIZO_PCICTRL_RTRY_ERR |
SCHIZO_PCICTRL_SERR |
SCHIZO_PCICTRL_EEN);
err_no_mask = SCHIZO_PCICTRL_DTO_ERR;
tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL);
tmp |= err_mask;
tmp &= ~err_no_mask;
upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL);
err_mask = (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
SCHIZO_PCIAFSR_PTTO |
SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
SCHIZO_PCIAFSR_STTO);
upa_writeq(err_mask, pbm->pbm_regs + SCHIZO_PCI_AFSR);
err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SNOOP_GR |
BUS_ERROR_SNOOP_PCI | BUS_ERROR_SNOOP_RD |
BUS_ERROR_SNOOP_RDS | BUS_ERROR_SNOOP_RDSA |
BUS_ERROR_SNOOP_OWN | BUS_ERROR_SNOOP_RDO |
BUS_ERROR_WDATA_PERR | BUS_ERROR_CTRL_PERR |
BUS_ERROR_SNOOP_ERR | BUS_ERROR_JBUS_ILL_B |
BUS_ERROR_JBUS_ILL_C | BUS_ERROR_RD_PERR |
BUS_ERROR_APERR | BUS_ERROR_UNMAP |
BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT);
upa_writeq((SCHIZO_SAFERRCTRL_EN | err_mask),
pbm->controller_regs + SCHIZO_SAFARI_ERRCTRL);
upa_writeq((SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)),
pbm->controller_regs + SCHIZO_SAFARI_IRQCTRL);
}
static void schizo_register_error_handlers(struct pci_pbm_info *pbm)
{
struct platform_device *op = of_find_device_by_node(pbm->op->dev.of_node);
u64 tmp, err_mask, err_no_mask;
int err;
/* Schizo IRQ property layout is:
* 0: PCIERR
* 1: UE ERR
* 2: CE ERR
* 3: SERR
* 4: POWER FAIL?
*/
if (pbm_routes_this_ino(pbm, SCHIZO_UE_INO)) {
err = request_irq(op->archdata.irqs[1], schizo_ue_intr, 0,
"SCHIZO_UE", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register UE, "
"err=%d\n", pbm->name, err);
}
if (pbm_routes_this_ino(pbm, SCHIZO_CE_INO)) {
err = request_irq(op->archdata.irqs[2], schizo_ce_intr, 0,
"SCHIZO_CE", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register CE, "
"err=%d\n", pbm->name, err);
}
err = 0;
if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_A_INO)) {
err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0,
"SCHIZO_PCIERR", pbm);
} else if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_B_INO)) {
err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0,
"SCHIZO_PCIERR", pbm);
}
if (err)
printk(KERN_WARNING "%s: Could not register PCIERR, "
"err=%d\n", pbm->name, err);
if (pbm_routes_this_ino(pbm, SCHIZO_SERR_INO)) {
err = request_irq(op->archdata.irqs[3], schizo_safarierr_intr, 0,
"SCHIZO_SERR", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register SERR, "
"err=%d\n", pbm->name, err);
}
/* Enable UE and CE interrupts for controller. */
upa_writeq((SCHIZO_ECCCTRL_EE |
SCHIZO_ECCCTRL_UE |
SCHIZO_ECCCTRL_CE), pbm->controller_regs + SCHIZO_ECC_CTRL);
err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
SCHIZO_PCICTRL_ESLCK |
SCHIZO_PCICTRL_TTO_ERR |
SCHIZO_PCICTRL_RTRY_ERR |
SCHIZO_PCICTRL_SBH_ERR |
SCHIZO_PCICTRL_SERR |
SCHIZO_PCICTRL_EEN);
err_no_mask = (SCHIZO_PCICTRL_DTO_ERR |
SCHIZO_PCICTRL_SBH_INT);
/* Enable PCI Error interrupts and clear error
* bits for each PBM.
*/
tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL);
tmp |= err_mask;
tmp &= ~err_no_mask;
upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL);
upa_writeq((SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS),
pbm->pbm_regs + SCHIZO_PCI_AFSR);
/* Make all Safari error conditions fatal except unmapped
	 * errors, which instead generate interrupts.
*/
err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SSMDIS |
BUS_ERROR_BADMA | BUS_ERROR_BADMB |
BUS_ERROR_BADMC |
BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB |
BUS_ERROR_CIQTO |
BUS_ERROR_LPQTO | BUS_ERROR_SFPQTO |
BUS_ERROR_UFPQTO | BUS_ERROR_APERR |
BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT |
BUS_ERROR_ILL);
#if 1
/* XXX Something wrong with some Excalibur systems
* XXX Sun is shipping. The behavior on a 2-cpu
* XXX machine is that both CPU1 parity error bits
* XXX are set and are immediately set again when
* XXX their error status bits are cleared. Just
* XXX ignore them for now. -DaveM
*/
err_mask &= ~(BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB);
#endif
upa_writeq((SCHIZO_SAFERRCTRL_EN | err_mask),
pbm->controller_regs + SCHIZO_SAFARI_ERRCTRL);
}
static void pbm_config_busmastering(struct pci_pbm_info *pbm)
{
u8 *addr;
	/* Set cache-line size to 64 bytes; this is actually
	 * a nop, but I do it for completeness.
*/
addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
0, PCI_CACHE_LINE_SIZE);
pci_config_write8(addr, 64 / sizeof(u32));
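	/* PCI_CACHE_LINE_SIZE is programmed in units of 32-bit words, so the
	 * write above stores 64 / sizeof(u32) == 16 for a 64-byte line.
	 */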
/* Set PBM latency timer to 64 PCI clocks. */
addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
0, PCI_LATENCY_TIMER);
pci_config_write8(addr, 64);
}
static void __devinit schizo_scan_bus(struct pci_pbm_info *pbm,
struct device *parent)
{
pbm_config_busmastering(pbm);
pbm->is_66mhz_capable =
(of_find_property(pbm->op->dev.of_node, "66mhz-capable", NULL)
!= NULL);
pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO)
tomatillo_register_error_handlers(pbm);
else
schizo_register_error_handlers(pbm);
}
#define SCHIZO_STRBUF_CONTROL (0x02800UL)
#define SCHIZO_STRBUF_FLUSH (0x02808UL)
#define SCHIZO_STRBUF_FSYNC (0x02810UL)
#define SCHIZO_STRBUF_CTXFLUSH (0x02818UL)
#define SCHIZO_STRBUF_CTXMATCH (0x10000UL)
static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
{
unsigned long base = pbm->pbm_regs;
u64 control;
if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
/* TOMATILLO lacks streaming cache. */
return;
}
/* SCHIZO has context flushing. */
pbm->stc.strbuf_control = base + SCHIZO_STRBUF_CONTROL;
pbm->stc.strbuf_pflush = base + SCHIZO_STRBUF_FLUSH;
pbm->stc.strbuf_fsync = base + SCHIZO_STRBUF_FSYNC;
pbm->stc.strbuf_ctxflush = base + SCHIZO_STRBUF_CTXFLUSH;
pbm->stc.strbuf_ctxmatch_base = base + SCHIZO_STRBUF_CTXMATCH;
pbm->stc.strbuf_flushflag = (volatile unsigned long *)
((((unsigned long)&pbm->stc.__flushflag_buf[0])
+ 63UL)
& ~63UL);
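	/* The (addr + 63) & ~63 arithmetic above rounds the flush-flag pointer
	 * up to the next 64-byte boundary, e.g. 0x...41 -> 0x...80.
	 */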
pbm->stc.strbuf_flushflag_pa = (unsigned long)
__pa(pbm->stc.strbuf_flushflag);
/* Turn off LRU locking and diag mode, enable the
* streaming buffer and leave the rerun-disable
* setting however OBP set it.
*/
control = upa_readq(pbm->stc.strbuf_control);
control &= ~(SCHIZO_STRBUF_CTRL_LPTR |
SCHIZO_STRBUF_CTRL_LENAB |
SCHIZO_STRBUF_CTRL_DENAB);
control |= SCHIZO_STRBUF_CTRL_ENAB;
upa_writeq(control, pbm->stc.strbuf_control);
pbm->stc.strbuf_enabled = 1;
}
#define SCHIZO_IOMMU_CONTROL (0x00200UL)
#define SCHIZO_IOMMU_TSBBASE (0x00208UL)
#define SCHIZO_IOMMU_FLUSH (0x00210UL)
#define SCHIZO_IOMMU_CTXFLUSH (0x00218UL)
static int schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
{
static const u32 vdma_default[] = { 0xc0000000, 0x40000000 };
unsigned long i, tagbase, database;
struct iommu *iommu = pbm->iommu;
int tsbsize, err;
const u32 *vdma;
u32 dma_mask;
u64 control;
vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
if (!vdma)
vdma = vdma_default;
dma_mask = vdma[0];
switch (vdma[1]) {
case 0x20000000:
dma_mask |= 0x1fffffff;
tsbsize = 64;
break;
case 0x40000000:
dma_mask |= 0x3fffffff;
tsbsize = 128;
break;
case 0x80000000:
dma_mask |= 0x7fffffff;
tsbsize = 128;
break;
default:
printk(KERN_ERR PFX "Strange virtual-dma size.\n");
return -EINVAL;
}
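	/* Example: with the vdma_default of { 0xc0000000, 0x40000000 } above,
	 * this switch yields dma_mask = 0xffffffff and tsbsize = 128.
	 */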
/* Register addresses, SCHIZO has iommu ctx flushing. */
iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
iommu->iommu_flush = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH;
iommu->iommu_tags = iommu->iommu_flush + (0xa580UL - 0x0210UL);
iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH;
/* We use the main control/status register of SCHIZO as the write
* completion register.
*/
iommu->write_complete_reg = pbm->controller_regs + 0x10000UL;
/*
* Invalidate TLB Entries.
*/
control = upa_readq(iommu->iommu_control);
control |= SCHIZO_IOMMU_CTRL_DENAB;
upa_writeq(control, iommu->iommu_control);
tagbase = SCHIZO_IOMMU_TAG, database = SCHIZO_IOMMU_DATA;
for (i = 0; i < 16; i++) {
upa_writeq(0, pbm->pbm_regs + tagbase + (i * 8UL));
upa_writeq(0, pbm->pbm_regs + database + (i * 8UL));
}
/* Leave diag mode enabled for full-flushing done
* in pci_iommu.c
*/
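	/* tsbsize here counts 1K-entry units, so the table allocated below is
	 * tsbsize * 1024 TTEs of 8 bytes each (e.g. tsbsize == 128 gives a
	 * 1 MB TSB, matching SCHIZO_IOMMU_TSBSZ_128K programmed further down).
	 */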
err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
pbm->numa_node);
if (err) {
printk(KERN_ERR PFX "iommu_table_init() fails with %d\n", err);
return err;
}
upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);
control = upa_readq(iommu->iommu_control);
control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
switch (tsbsize) {
case 64:
control |= SCHIZO_IOMMU_TSBSZ_64K;
break;
case 128:
control |= SCHIZO_IOMMU_TSBSZ_128K;
break;
}
control |= SCHIZO_IOMMU_CTRL_ENAB;
upa_writeq(control, iommu->iommu_control);
return 0;
}
#define SCHIZO_PCI_IRQ_RETRY (0x1a00UL)
#define SCHIZO_IRQ_RETRY_INF 0xffUL
#define SCHIZO_PCI_DIAG (0x2020UL)
#define SCHIZO_PCIDIAG_D_BADECC (1UL << 10UL) /* Disable BAD ECC errors (Schizo) */
#define SCHIZO_PCIDIAG_D_BYPASS (1UL << 9UL) /* Disable MMU bypass mode (Schizo/Tomatillo) */
#define SCHIZO_PCIDIAG_D_TTO (1UL << 8UL) /* Disable TTO errors (Schizo/Tomatillo) */
#define SCHIZO_PCIDIAG_D_RTRYARB (1UL << 7UL) /* Disable retry arbitration (Schizo) */
#define SCHIZO_PCIDIAG_D_RETRY (1UL << 6UL) /* Disable retry limit (Schizo/Tomatillo) */
#define SCHIZO_PCIDIAG_D_INTSYNC (1UL << 5UL) /* Disable interrupt/DMA synch (Schizo/Tomatillo) */
#define SCHIZO_PCIDIAG_I_DMA_PARITY (1UL << 3UL) /* Invert DMA parity (Schizo/Tomatillo) */
#define SCHIZO_PCIDIAG_I_PIOD_PARITY (1UL << 2UL) /* Invert PIO data parity (Schizo/Tomatillo) */
#define SCHIZO_PCIDIAG_I_PIOA_PARITY (1UL << 1UL) /* Invert PIO address parity (Schizo/Tomatillo) */
#define TOMATILLO_PCI_IOC_CSR (0x2248UL)
#define TOMATILLO_IOC_PART_WPENAB 0x0000000000080000UL
#define TOMATILLO_IOC_RDMULT_PENAB 0x0000000000040000UL
#define TOMATILLO_IOC_RDONE_PENAB 0x0000000000020000UL
#define TOMATILLO_IOC_RDLINE_PENAB 0x0000000000010000UL
#define TOMATILLO_IOC_RDMULT_PLEN 0x000000000000c000UL
#define TOMATILLO_IOC_RDMULT_PLEN_SHIFT 14UL
#define TOMATILLO_IOC_RDONE_PLEN 0x0000000000003000UL
#define TOMATILLO_IOC_RDONE_PLEN_SHIFT 12UL
#define TOMATILLO_IOC_RDLINE_PLEN 0x0000000000000c00UL
#define TOMATILLO_IOC_RDLINE_PLEN_SHIFT 10UL
#define TOMATILLO_IOC_PREF_OFF 0x00000000000003f8UL
#define TOMATILLO_IOC_PREF_OFF_SHIFT 3UL
#define TOMATILLO_IOC_RDMULT_CPENAB 0x0000000000000004UL
#define TOMATILLO_IOC_RDONE_CPENAB 0x0000000000000002UL
#define TOMATILLO_IOC_RDLINE_CPENAB 0x0000000000000001UL
#define TOMATILLO_PCI_IOC_TDIAG (0x2250UL)
#define TOMATILLO_PCI_IOC_DDIAG (0x2290UL)
static void schizo_pbm_hw_init(struct pci_pbm_info *pbm)
{
u64 tmp;
upa_writeq(5, pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY);
tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL);
/* Enable arbiter for all PCI slots. */
tmp |= 0xff;
if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
pbm->chip_version >= 0x2)
tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT;
if (!of_find_property(pbm->op->dev.of_node, "no-bus-parking", NULL))
tmp |= SCHIZO_PCICTRL_PARK;
else
tmp &= ~SCHIZO_PCICTRL_PARK;
if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
pbm->chip_version <= 0x1)
tmp |= SCHIZO_PCICTRL_DTO_INT;
else
tmp &= ~SCHIZO_PCICTRL_DTO_INT;
if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO)
tmp |= (SCHIZO_PCICTRL_MRM_PREF |
SCHIZO_PCICTRL_RDO_PREF |
SCHIZO_PCICTRL_RDL_PREF);
upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL);
tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_DIAG);
tmp &= ~(SCHIZO_PCIDIAG_D_RTRYARB |
SCHIZO_PCIDIAG_D_RETRY |
SCHIZO_PCIDIAG_D_INTSYNC);
upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_DIAG);
if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
		/* Clear prefetch lengths to work around a bug in
* Jalapeno...
*/
tmp = (TOMATILLO_IOC_PART_WPENAB |
(1 << TOMATILLO_IOC_PREF_OFF_SHIFT) |
TOMATILLO_IOC_RDMULT_CPENAB |
TOMATILLO_IOC_RDONE_CPENAB |
TOMATILLO_IOC_RDLINE_CPENAB);
upa_writeq(tmp, pbm->pbm_regs + TOMATILLO_PCI_IOC_CSR);
}
}
static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
struct platform_device *op, u32 portid,
int chip_type)
{
const struct linux_prom64_registers *regs;
struct device_node *dp = op->dev.of_node;
const char *chipset_name;
int err;
switch (chip_type) {
case PBM_CHIP_TYPE_TOMATILLO:
chipset_name = "TOMATILLO";
break;
case PBM_CHIP_TYPE_SCHIZO_PLUS:
chipset_name = "SCHIZO+";
break;
case PBM_CHIP_TYPE_SCHIZO:
default:
chipset_name = "SCHIZO";
break;
}
/* For SCHIZO, three OBP regs:
* 1) PBM controller regs
* 2) Schizo front-end controller regs (same for both PBMs)
* 3) PBM PCI config space
*
* For TOMATILLO, four OBP regs:
* 1) PBM controller regs
* 2) Tomatillo front-end controller regs
* 3) PBM PCI config space
* 4) Ichip regs
*/
regs = of_get_property(dp, "reg", NULL);
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
pbm->numa_node = -1;
pbm->pci_ops = &sun4u_pci_ops;
pbm->config_space_reg_bits = 8;
pbm->index = pci_num_pbms++;
pbm->portid = portid;
pbm->op = op;
pbm->chip_type = chip_type;
pbm->chip_version = of_getintprop_default(dp, "version#", 0);
pbm->chip_revision = of_getintprop_default(dp, "module-version#", 0);
pbm->pbm_regs = regs[0].phys_addr;
pbm->controller_regs = regs[1].phys_addr - 0x10000UL;
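	/* regs[1] points at the front-end controller registers; subtract
	 * 0x10000 so that offsets such as SCHIZO_UE_AFSR (0x10030UL), which
	 * are defined relative to a base 0x10000 below that block, resolve
	 * to the right registers.
	 */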
if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
pbm->sync_reg = regs[3].phys_addr + 0x1a18UL;
pbm->name = dp->full_name;
printk("%s: %s PCI Bus Module ver[%x:%x]\n",
pbm->name, chipset_name,
pbm->chip_version, pbm->chip_revision);
schizo_pbm_hw_init(pbm);
pci_determine_mem_io_space(pbm);
pci_get_pbm_props(pbm);
err = schizo_pbm_iommu_init(pbm);
if (err)
return err;
schizo_pbm_strbuf_init(pbm);
schizo_scan_bus(pbm, &op->dev);
return 0;
}
static inline int portid_compare(u32 x, u32 y, int chip_type)
{
if (chip_type == PBM_CHIP_TYPE_TOMATILLO) {
if (x == (y ^ 1))
return 1;
return 0;
}
return (x == y);
}
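/* Illustration of the comparison above: on Tomatillo, PortIDs that differ
 * only in the lowest bit (e.g. 6 and 7) are treated as sibling PBMs, while
 * on Schizo the PortIDs must match exactly.
 */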
static struct pci_pbm_info * __devinit schizo_find_sibling(u32 portid,
int chip_type)
{
struct pci_pbm_info *pbm;
for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
if (portid_compare(pbm->portid, portid, chip_type))
return pbm;
}
return NULL;
}
static int __devinit __schizo_init(struct platform_device *op, unsigned long chip_type)
{
struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
struct iommu *iommu;
u32 portid;
int err;
portid = of_getintprop_default(dp, "portid", 0xff);
err = -ENOMEM;
pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
if (!pbm) {
printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n");
goto out_err;
}
pbm->sibling = schizo_find_sibling(portid, chip_type);
iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
if (!iommu) {
printk(KERN_ERR PFX "Cannot allocate PBM A iommu.\n");
goto out_free_pbm;
}
pbm->iommu = iommu;
if (schizo_pbm_init(pbm, op, portid, chip_type))
goto out_free_iommu;
if (pbm->sibling)
pbm->sibling->sibling = pbm;
dev_set_drvdata(&op->dev, pbm);
return 0;
out_free_iommu:
kfree(pbm->iommu);
out_free_pbm:
kfree(pbm);
out_err:
return err;
}
static const struct of_device_id schizo_match[];
static int __devinit schizo_probe(struct platform_device *op)
{
const struct of_device_id *match;
match = of_match_device(schizo_match, &op->dev);
if (!match)
return -EINVAL;
return __schizo_init(op, (unsigned long)match->data);
}
/* The ordering of this table is very important. Some Tomatillo
* nodes announce that they are compatible with both pci108e,a801
* and pci108e,8001. So list the chips in reverse chronological
* order.
*/
static const struct of_device_id schizo_match[] = {
{
.name = "pci",
.compatible = "pci108e,a801",
.data = (void *) PBM_CHIP_TYPE_TOMATILLO,
},
{
.name = "pci",
.compatible = "pci108e,8002",
.data = (void *) PBM_CHIP_TYPE_SCHIZO_PLUS,
},
{
.name = "pci",
.compatible = "pci108e,8001",
.data = (void *) PBM_CHIP_TYPE_SCHIZO,
},
{},
};
static struct platform_driver schizo_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = schizo_match,
},
.probe = schizo_probe,
};
static int __init schizo_init(void)
{
return platform_driver_register(&schizo_driver);
}
subsys_initcall(schizo_init);
| gpl-2.0 |
mlongob/Linux-Kernel-Hack | sound/pci/trident/trident_main.c | 5105 | 124744 | /*
* Maintained by Jaroslav Kysela <perex@perex.cz>
* Originated by audio@tridentmicro.com
* Fri Feb 19 15:55:28 MST 1999
* Routines for control of Trident 4DWave (DX and NX) chip
*
* BUGS:
*
* TODO:
* ---
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* SiS7018 S/PDIF support by Thomas Winischhofer <thomas@winischhofer.net>
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/gameport.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/trident.h>
#include <sound/asoundef.h>
#include <asm/io.h>
static int snd_trident_pcm_mixer_build(struct snd_trident *trident,
struct snd_trident_voice * voice,
struct snd_pcm_substream *substream);
static int snd_trident_pcm_mixer_free(struct snd_trident *trident,
struct snd_trident_voice * voice,
struct snd_pcm_substream *substream);
static irqreturn_t snd_trident_interrupt(int irq, void *dev_id);
static int snd_trident_sis_reset(struct snd_trident *trident);
static void snd_trident_clear_voices(struct snd_trident * trident,
unsigned short v_min, unsigned short v_max);
static int snd_trident_free(struct snd_trident *trident);
/*
* common I/O routines
*/
#if 0
static void snd_trident_print_voice_regs(struct snd_trident *trident, int voice)
{
unsigned int val, tmp;
printk(KERN_DEBUG "Trident voice %i:\n", voice);
outb(voice, TRID_REG(trident, T4D_LFO_GC_CIR));
val = inl(TRID_REG(trident, CH_LBA));
printk(KERN_DEBUG "LBA: 0x%x\n", val);
val = inl(TRID_REG(trident, CH_GVSEL_PAN_VOL_CTRL_EC));
printk(KERN_DEBUG "GVSel: %i\n", val >> 31);
printk(KERN_DEBUG "Pan: 0x%x\n", (val >> 24) & 0x7f);
printk(KERN_DEBUG "Vol: 0x%x\n", (val >> 16) & 0xff);
printk(KERN_DEBUG "CTRL: 0x%x\n", (val >> 12) & 0x0f);
printk(KERN_DEBUG "EC: 0x%x\n", val & 0x0fff);
if (trident->device != TRIDENT_DEVICE_ID_NX) {
val = inl(TRID_REG(trident, CH_DX_CSO_ALPHA_FMS));
printk(KERN_DEBUG "CSO: 0x%x\n", val >> 16);
printk("Alpha: 0x%x\n", (val >> 4) & 0x0fff);
printk(KERN_DEBUG "FMS: 0x%x\n", val & 0x0f);
val = inl(TRID_REG(trident, CH_DX_ESO_DELTA));
printk(KERN_DEBUG "ESO: 0x%x\n", val >> 16);
printk(KERN_DEBUG "Delta: 0x%x\n", val & 0xffff);
val = inl(TRID_REG(trident, CH_DX_FMC_RVOL_CVOL));
} else { // TRIDENT_DEVICE_ID_NX
val = inl(TRID_REG(trident, CH_NX_DELTA_CSO));
tmp = (val >> 24) & 0xff;
printk(KERN_DEBUG "CSO: 0x%x\n", val & 0x00ffffff);
val = inl(TRID_REG(trident, CH_NX_DELTA_ESO));
tmp |= (val >> 16) & 0xff00;
printk(KERN_DEBUG "Delta: 0x%x\n", tmp);
printk(KERN_DEBUG "ESO: 0x%x\n", val & 0x00ffffff);
val = inl(TRID_REG(trident, CH_NX_ALPHA_FMS_FMC_RVOL_CVOL));
printk(KERN_DEBUG "Alpha: 0x%x\n", val >> 20);
printk(KERN_DEBUG "FMS: 0x%x\n", (val >> 16) & 0x0f);
}
printk(KERN_DEBUG "FMC: 0x%x\n", (val >> 14) & 3);
printk(KERN_DEBUG "RVol: 0x%x\n", (val >> 7) & 0x7f);
printk(KERN_DEBUG "CVol: 0x%x\n", val & 0x7f);
}
#endif
/*---------------------------------------------------------------------------
unsigned short snd_trident_codec_read(struct snd_ac97 *ac97, unsigned short reg)
Description: This routine will do all of the reading from the external
CODEC (AC97).
Parameters: ac97 - ac97 codec structure
reg - CODEC register index, from AC97 Hal.
returns: 16 bit value read from the AC97.
---------------------------------------------------------------------------*/
static unsigned short snd_trident_codec_read(struct snd_ac97 *ac97, unsigned short reg)
{
unsigned int data = 0, treg;
unsigned short count = 0xffff;
unsigned long flags;
struct snd_trident *trident = ac97->private_data;
spin_lock_irqsave(&trident->reg_lock, flags);
if (trident->device == TRIDENT_DEVICE_ID_DX) {
data = (DX_AC97_BUSY_READ | (reg & 0x000000ff));
outl(data, TRID_REG(trident, DX_ACR1_AC97_R));
do {
data = inl(TRID_REG(trident, DX_ACR1_AC97_R));
if ((data & DX_AC97_BUSY_READ) == 0)
break;
} while (--count);
} else if (trident->device == TRIDENT_DEVICE_ID_NX) {
data = (NX_AC97_BUSY_READ | (reg & 0x000000ff));
treg = ac97->num == 0 ? NX_ACR2_AC97_R_PRIMARY : NX_ACR3_AC97_R_SECONDARY;
outl(data, TRID_REG(trident, treg));
do {
data = inl(TRID_REG(trident, treg));
if ((data & 0x00000C00) == 0)
break;
} while (--count);
} else if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
data = SI_AC97_BUSY_READ | SI_AC97_AUDIO_BUSY | (reg & 0x000000ff);
if (ac97->num == 1)
data |= SI_AC97_SECONDARY;
outl(data, TRID_REG(trident, SI_AC97_READ));
do {
data = inl(TRID_REG(trident, SI_AC97_READ));
if ((data & (SI_AC97_BUSY_READ)) == 0)
break;
} while (--count);
}
if (count == 0 && !trident->ac97_detect) {
snd_printk(KERN_ERR "ac97 codec read TIMEOUT [0x%x/0x%x]!!!\n",
reg, data);
data = 0;
}
spin_unlock_irqrestore(&trident->reg_lock, flags);
return ((unsigned short) (data >> 16));
}
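/*
 * Worked example (illustrative only): on all three chip variants the loops
 * above poll the BUSY bit for at most 0xffff iterations; the completed
 * 16-bit register value arrives in the upper half of the 32-bit port, hence
 * the final "data >> 16".  A hypothetical caller fetching the codec vendor
 * ID would simply do:
 *
 *   u16 id = snd_trident_codec_read(ac97, AC97_VENDOR_ID1);
 */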
/*---------------------------------------------------------------------------
void snd_trident_codec_write(struct snd_ac97 *ac97, unsigned short reg,
unsigned short wdata)
Description: This routine will do all of the writing to the external
CODEC (AC97).
Parameters: ac97 - ac97 codec structure
reg - CODEC register index, from AC97 Hal.
data - Lower 16 bits are the data to write to CODEC.
returns: TRUE if everything went ok, else FALSE.
---------------------------------------------------------------------------*/
static void snd_trident_codec_write(struct snd_ac97 *ac97, unsigned short reg,
unsigned short wdata)
{
unsigned int address, data;
unsigned short count = 0xffff;
unsigned long flags;
struct snd_trident *trident = ac97->private_data;
data = ((unsigned long) wdata) << 16;
spin_lock_irqsave(&trident->reg_lock, flags);
if (trident->device == TRIDENT_DEVICE_ID_DX) {
address = DX_ACR0_AC97_W;
/* read AC-97 write register status */
do {
if ((inw(TRID_REG(trident, address)) & DX_AC97_BUSY_WRITE) == 0)
break;
} while (--count);
data |= (DX_AC97_BUSY_WRITE | (reg & 0x000000ff));
} else if (trident->device == TRIDENT_DEVICE_ID_NX) {
address = NX_ACR1_AC97_W;
/* read AC-97 write register status */
do {
if ((inw(TRID_REG(trident, address)) & NX_AC97_BUSY_WRITE) == 0)
break;
} while (--count);
data |= (NX_AC97_BUSY_WRITE | (ac97->num << 8) | (reg & 0x000000ff));
} else if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
address = SI_AC97_WRITE;
/* read AC-97 write register status */
do {
if ((inw(TRID_REG(trident, address)) & (SI_AC97_BUSY_WRITE)) == 0)
break;
} while (--count);
data |= SI_AC97_BUSY_WRITE | SI_AC97_AUDIO_BUSY | (reg & 0x000000ff);
if (ac97->num == 1)
data |= SI_AC97_SECONDARY;
} else {
address = 0; /* keep GCC happy */
count = 0; /* return */
}
if (count == 0) {
spin_unlock_irqrestore(&trident->reg_lock, flags);
return;
}
outl(data, TRID_REG(trident, address));
spin_unlock_irqrestore(&trident->reg_lock, flags);
}
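/*
 * Worked example (illustrative only): the 32-bit word written to the port
 * carries the 16-bit payload in its upper half and the command in its lower
 * half.  On a DX, writing the value 0x0808 to codec register 0x02 builds
 *
 *   data = (0x0808 << 16) | DX_AC97_BUSY_WRITE | 0x02
 *
 * and the outl() is only issued once the previous write has cleared its busy
 * bit; if the 0xffff retry budget is exhausted the write is skipped entirely.
 */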
/*---------------------------------------------------------------------------
void snd_trident_enable_eso(struct snd_trident *trident)
Description: This routine will enable end of loop interrupts.
End of loop interrupts will occur when a running
channel reaches ESO.
Also enables middle of loop interrupts.
Parameters: trident - pointer to target device class for 4DWave.
---------------------------------------------------------------------------*/
static void snd_trident_enable_eso(struct snd_trident * trident)
{
unsigned int val;
val = inl(TRID_REG(trident, T4D_LFO_GC_CIR));
val |= ENDLP_IE;
val |= MIDLP_IE;
if (trident->device == TRIDENT_DEVICE_ID_SI7018)
val |= BANK_B_EN;
outl(val, TRID_REG(trident, T4D_LFO_GC_CIR));
}
/*---------------------------------------------------------------------------
void snd_trident_disable_eso(struct snd_trident *trident)
Description: This routine will disable end of loop interrupts.
End of loop interrupts will occur when a running
channel reaches ESO.
Also disables middle of loop interrupts.
Parameters:
trident - pointer to target device class for 4DWave.
returns: TRUE if everything went ok, else FALSE.
---------------------------------------------------------------------------*/
static void snd_trident_disable_eso(struct snd_trident * trident)
{
unsigned int tmp;
tmp = inl(TRID_REG(trident, T4D_LFO_GC_CIR));
tmp &= ~ENDLP_IE;
tmp &= ~MIDLP_IE;
outl(tmp, TRID_REG(trident, T4D_LFO_GC_CIR));
}
/*---------------------------------------------------------------------------
void snd_trident_start_voice(struct snd_trident * trident, unsigned int voice)
Description: Start a voice, any channel 0 thru 63.
This routine automatically handles the fact that there are
more than 32 channels available.
Parameters : voice - Voice number 0 thru n.
trident - pointer to target device class for 4DWave.
Return Value: None.
---------------------------------------------------------------------------*/
void snd_trident_start_voice(struct snd_trident * trident, unsigned int voice)
{
unsigned int mask = 1 << (voice & 0x1f);
unsigned int reg = (voice & 0x20) ? T4D_START_B : T4D_START_A;
outl(mask, TRID_REG(trident, reg));
}
EXPORT_SYMBOL(snd_trident_start_voice);
/*---------------------------------------------------------------------------
void snd_trident_stop_voice(struct snd_trident * trident, unsigned int voice)
Description: Stop a voice, any channel 0 thru 63.
This routine automatically handles the fact that there are
more than 32 channels available.
Parameters : voice - Voice number 0 thru n.
trident - pointer to target device class for 4DWave.
Return Value: None.
---------------------------------------------------------------------------*/
void snd_trident_stop_voice(struct snd_trident * trident, unsigned int voice)
{
unsigned int mask = 1 << (voice & 0x1f);
unsigned int reg = (voice & 0x20) ? T4D_STOP_B : T4D_STOP_A;
outl(mask, TRID_REG(trident, reg));
}
EXPORT_SYMBOL(snd_trident_stop_voice);
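/*
 * Worked example (illustrative only): the 64 hardware voices are split across
 * two banks of 32.  Voice 35 has bit 5 (0x20) set, so it lives in bank B and
 * maps to mask 1 << (35 & 0x1f) = 0x08 written to T4D_START_B/T4D_STOP_B;
 * voice 5 maps to mask 0x20 written to T4D_START_A/T4D_STOP_A.
 */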
/*---------------------------------------------------------------------------
int snd_trident_allocate_pcm_channel(struct snd_trident *trident)
Description: Allocate hardware channel in Bank B (32-63).
Parameters : trident - pointer to target device class for 4DWave.
Return Value: hardware channel - 32-63 or -1 when no channel is available
---------------------------------------------------------------------------*/
static int snd_trident_allocate_pcm_channel(struct snd_trident * trident)
{
int idx;
if (trident->ChanPCMcnt >= trident->ChanPCM)
return -1;
for (idx = 31; idx >= 0; idx--) {
if (!(trident->ChanMap[T4D_BANK_B] & (1 << idx))) {
trident->ChanMap[T4D_BANK_B] |= 1 << idx;
trident->ChanPCMcnt++;
return idx + 32;
}
}
return -1;
}
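/*
 * Worked example (illustrative only): with an empty Bank B map the scan above
 * starts at bit 31, so the first allocation sets bit 31 and returns hardware
 * channel 31 + 32 = 63, the next returns 62, and so on; once ChanPCMcnt
 * reaches ChanPCM the routine refuses further allocations and returns -1.
 */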
/*---------------------------------------------------------------------------
void snd_trident_free_pcm_channel(struct snd_trident *trident, int channel)
Description: Free hardware channel in Bank B (32-63).
Parameters : trident - pointer to target device class for 4DWave.
channel - hardware channel number 32-63
Return Value: none
---------------------------------------------------------------------------*/
static void snd_trident_free_pcm_channel(struct snd_trident *trident, int channel)
{
if (channel < 32 || channel > 63)
return;
channel &= 0x1f;
if (trident->ChanMap[T4D_BANK_B] & (1 << channel)) {
trident->ChanMap[T4D_BANK_B] &= ~(1 << channel);
trident->ChanPCMcnt--;
}
}
/*---------------------------------------------------------------------------
int snd_trident_allocate_synth_channel(struct snd_trident *trident)
Description: Allocate hardware channel in Bank A (0-31).
Parameters : trident - pointer to target device class for 4DWave.
Return Value: hardware channel - 0-31 or -1 when no channel is available
---------------------------------------------------------------------------*/
static int snd_trident_allocate_synth_channel(struct snd_trident * trident)
{
int idx;
for (idx = 31; idx >= 0; idx--) {
if (!(trident->ChanMap[T4D_BANK_A] & (1 << idx))) {
trident->ChanMap[T4D_BANK_A] |= 1 << idx;
trident->synth.ChanSynthCount++;
return idx;
}
}
return -1;
}
/*---------------------------------------------------------------------------
void snd_trident_free_synth_channel(struct snd_trident *trident, int channel)
Description: Free hardware channel in Bank A (0-31).
Parameters : trident - pointer to target device class for 4DWave.
channel - hardware channel number 0-31
Return Value: none
---------------------------------------------------------------------------*/
static void snd_trident_free_synth_channel(struct snd_trident *trident, int channel)
{
if (channel < 0 || channel > 31)
return;
channel &= 0x1f;
if (trident->ChanMap[T4D_BANK_A] & (1 << channel)) {
trident->ChanMap[T4D_BANK_A] &= ~(1 << channel);
trident->synth.ChanSynthCount--;
}
}
/*---------------------------------------------------------------------------
snd_trident_write_voice_regs
Description: This routine will complete and write the 5 hardware channel
registers to hardware.
Parameters: trident - pointer to target device class for 4DWave.
voice - synthesizer voice structure supplying each register field.
---------------------------------------------------------------------------*/
void snd_trident_write_voice_regs(struct snd_trident * trident,
struct snd_trident_voice * voice)
{
unsigned int FmcRvolCvol;
unsigned int regs[5];
regs[1] = voice->LBA;
regs[4] = (voice->GVSel << 31) |
((voice->Pan & 0x0000007f) << 24) |
((voice->CTRL & 0x0000000f) << 12);
FmcRvolCvol = ((voice->FMC & 3) << 14) |
((voice->RVol & 0x7f) << 7) |
(voice->CVol & 0x7f);
switch (trident->device) {
case TRIDENT_DEVICE_ID_SI7018:
regs[4] |= voice->number > 31 ?
(voice->Vol & 0x000003ff) :
((voice->Vol & 0x00003fc) << (16-2)) |
(voice->EC & 0x00000fff);
regs[0] = (voice->CSO << 16) | ((voice->Alpha & 0x00000fff) << 4) |
(voice->FMS & 0x0000000f);
regs[2] = (voice->ESO << 16) | (voice->Delta & 0x0ffff);
regs[3] = (voice->Attribute << 16) | FmcRvolCvol;
break;
case TRIDENT_DEVICE_ID_DX:
regs[4] |= ((voice->Vol & 0x000003fc) << (16-2)) |
(voice->EC & 0x00000fff);
regs[0] = (voice->CSO << 16) | ((voice->Alpha & 0x00000fff) << 4) |
(voice->FMS & 0x0000000f);
regs[2] = (voice->ESO << 16) | (voice->Delta & 0x0ffff);
regs[3] = FmcRvolCvol;
break;
case TRIDENT_DEVICE_ID_NX:
regs[4] |= ((voice->Vol & 0x000003fc) << (16-2)) |
(voice->EC & 0x00000fff);
regs[0] = (voice->Delta << 24) | (voice->CSO & 0x00ffffff);
regs[2] = ((voice->Delta << 16) & 0xff000000) |
(voice->ESO & 0x00ffffff);
regs[3] = (voice->Alpha << 20) |
((voice->FMS & 0x0000000f) << 16) | FmcRvolCvol;
break;
default:
snd_BUG();
return;
}
outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR));
outl(regs[0], TRID_REG(trident, CH_START + 0));
outl(regs[1], TRID_REG(trident, CH_START + 4));
outl(regs[2], TRID_REG(trident, CH_START + 8));
outl(regs[3], TRID_REG(trident, CH_START + 12));
outl(regs[4], TRID_REG(trident, CH_START + 16));
#if 0
printk(KERN_DEBUG "written %i channel:\n", voice->number);
printk(KERN_DEBUG " regs[0] = 0x%x/0x%x\n",
regs[0], inl(TRID_REG(trident, CH_START + 0)));
printk(KERN_DEBUG " regs[1] = 0x%x/0x%x\n",
regs[1], inl(TRID_REG(trident, CH_START + 4)));
printk(KERN_DEBUG " regs[2] = 0x%x/0x%x\n",
regs[2], inl(TRID_REG(trident, CH_START + 8)));
printk(KERN_DEBUG " regs[3] = 0x%x/0x%x\n",
regs[3], inl(TRID_REG(trident, CH_START + 12)));
printk(KERN_DEBUG " regs[4] = 0x%x/0x%x\n",
regs[4], inl(TRID_REG(trident, CH_START + 16)));
#endif
}
EXPORT_SYMBOL(snd_trident_write_voice_regs);
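/*
 * Worked example (illustrative only): for the regs[4] layout above, a voice
 * with GVSel = 1, Pan = 0x07, Vol = 0x3ff, CTRL = 0x0f and EC = 0 on a DX/NX
 * packs as
 *
 *   (1 << 31) | (0x07 << 24) | ((0x3ff & 0x3fc) << 14) | (0x0f << 12)
 *     = 0x80000000 | 0x07000000 | 0x00ff0000 | 0x0000f000 = 0x87fff000
 *
 * i.e. the GVSel flag sits in bit 31, pan in bits 24-30, the top 8 bits of
 * the 10-bit volume in bits 16-23, control in bits 12-15 and the effects
 * channel in bits 0-11.
 */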
/*---------------------------------------------------------------------------
snd_trident_write_cso_reg
Description: This routine will write the new CSO offset
register to hardware.
Parameters: trident - pointer to target device class for 4DWave.
voice - synthesizer voice structure
CSO - new CSO value
---------------------------------------------------------------------------*/
static void snd_trident_write_cso_reg(struct snd_trident * trident,
struct snd_trident_voice * voice,
unsigned int CSO)
{
voice->CSO = CSO;
outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR));
if (trident->device != TRIDENT_DEVICE_ID_NX) {
outw(voice->CSO, TRID_REG(trident, CH_DX_CSO_ALPHA_FMS) + 2);
} else {
outl((voice->Delta << 24) |
(voice->CSO & 0x00ffffff), TRID_REG(trident, CH_NX_DELTA_CSO));
}
}
/*---------------------------------------------------------------------------
snd_trident_write_eso_reg
Description: This routine will write the new ESO offset
register to hardware.
Parameters: trident - pointer to target device class for 4DWave.
voice - synthesizer voice structure
ESO - new ESO value
---------------------------------------------------------------------------*/
static void snd_trident_write_eso_reg(struct snd_trident * trident,
struct snd_trident_voice * voice,
unsigned int ESO)
{
voice->ESO = ESO;
outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR));
if (trident->device != TRIDENT_DEVICE_ID_NX) {
outw(voice->ESO, TRID_REG(trident, CH_DX_ESO_DELTA) + 2);
} else {
outl(((voice->Delta << 16) & 0xff000000) | (voice->ESO & 0x00ffffff),
TRID_REG(trident, CH_NX_DELTA_ESO));
}
}
/*---------------------------------------------------------------------------
snd_trident_write_vol_reg
Description: This routine will write the new voice volume
register to hardware.
Parameters: trident - pointer to target device class for 4DWave.
voice - synthesizer voice structure
Vol - new voice volume
---------------------------------------------------------------------------*/
static void snd_trident_write_vol_reg(struct snd_trident * trident,
struct snd_trident_voice * voice,
unsigned int Vol)
{
voice->Vol = Vol;
outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR));
switch (trident->device) {
case TRIDENT_DEVICE_ID_DX:
case TRIDENT_DEVICE_ID_NX:
outb(voice->Vol >> 2, TRID_REG(trident, CH_GVSEL_PAN_VOL_CTRL_EC + 2));
break;
case TRIDENT_DEVICE_ID_SI7018:
/* printk(KERN_DEBUG "voice->Vol = 0x%x\n", voice->Vol); */
outw((voice->CTRL << 12) | voice->Vol,
TRID_REG(trident, CH_GVSEL_PAN_VOL_CTRL_EC));
break;
}
}
/*---------------------------------------------------------------------------
snd_trident_write_pan_reg
Description: This routine will write the new voice pan
register to hardware.
Parameters: trident - pointer to target device class for 4DWave.
voice - synthesizer voice structure
Pan - new pan value
---------------------------------------------------------------------------*/
static void snd_trident_write_pan_reg(struct snd_trident * trident,
struct snd_trident_voice * voice,
unsigned int Pan)
{
voice->Pan = Pan;
outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR));
outb(((voice->GVSel & 0x01) << 7) | (voice->Pan & 0x7f),
TRID_REG(trident, CH_GVSEL_PAN_VOL_CTRL_EC + 3));
}
/*---------------------------------------------------------------------------
snd_trident_write_rvol_reg
Description: This routine will write the new reverb volume
register to hardware.
Parameters: trident - pointer to target device class for 4DWave.
voice - synthesizer voice structure
RVol - new reverb volume
---------------------------------------------------------------------------*/
static void snd_trident_write_rvol_reg(struct snd_trident * trident,
struct snd_trident_voice * voice,
unsigned int RVol)
{
voice->RVol = RVol;
outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR));
outw(((voice->FMC & 0x0003) << 14) | ((voice->RVol & 0x007f) << 7) |
(voice->CVol & 0x007f),
TRID_REG(trident, trident->device == TRIDENT_DEVICE_ID_NX ?
CH_NX_ALPHA_FMS_FMC_RVOL_CVOL : CH_DX_FMC_RVOL_CVOL));
}
/*---------------------------------------------------------------------------
snd_trident_write_cvol_reg
Description: This routine will write the new chorus volume
register to hardware.
Parameters: trident - pointer to target device class for 4DWave.
voice - synthesizer voice structure
CVol - new chorus volume
---------------------------------------------------------------------------*/
static void snd_trident_write_cvol_reg(struct snd_trident * trident,
struct snd_trident_voice * voice,
unsigned int CVol)
{
voice->CVol = CVol;
outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR));
outw(((voice->FMC & 0x0003) << 14) | ((voice->RVol & 0x007f) << 7) |
(voice->CVol & 0x007f),
TRID_REG(trident, trident->device == TRIDENT_DEVICE_ID_NX ?
CH_NX_ALPHA_FMS_FMC_RVOL_CVOL : CH_DX_FMC_RVOL_CVOL));
}
/*---------------------------------------------------------------------------
snd_trident_convert_rate
Description: This routine converts rate in HZ to hardware delta value.
Parameters: rate - sampling rate in Hz.
Returns: Delta value.
---------------------------------------------------------------------------*/
static unsigned int snd_trident_convert_rate(unsigned int rate)
{
unsigned int delta;
// We special case 44100 and 8000 since rounding with the equation
// does not give us an accurate enough value. For 11025 and 22050
// the equation gives us the best answer. All other frequencies will
// also use the equation. JDW
if (rate == 44100)
delta = 0xeb3;
else if (rate == 8000)
delta = 0x2ab;
else if (rate == 48000)
delta = 0x1000;
else
delta = (((rate << 12) + 24000) / 48000) & 0x0000ffff;
return delta;
}
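/*
 * Worked example (illustrative only): for 22050 Hz the generic equation above
 * gives delta = ((22050 << 12) + 24000) / 48000 = 90340800 / 48000 = 1882
 * = 0x75a, i.e. the 4.12 fixed-point ratio of the stream rate to 48 kHz.
 */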
/*---------------------------------------------------------------------------
snd_trident_convert_adc_rate
Description: This routine converts rate in HZ to hardware delta value.
Parameters: rate - sampling rate in Hz.
Returns: Delta value.
---------------------------------------------------------------------------*/
static unsigned int snd_trident_convert_adc_rate(unsigned int rate)
{
unsigned int delta;
// We special case 44100 and 8000 since rounding with the equation
// does not give us an accurate enough value. For 11025 and 22050
// the equation gives us the best answer. All other frequencies will
// also use the equation. JDW
if (rate == 44100)
delta = 0x116a;
else if (rate == 8000)
delta = 0x6000;
else if (rate == 48000)
delta = 0x1000;
else
delta = ((48000 << 12) / rate) & 0x0000ffff;
return delta;
}
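/*
 * Worked example (illustrative only): the ADC delta is the inverse ratio, so
 * capturing at 24000 Hz gives delta = (48000 << 12) / 24000 = 0x2000, exactly
 * twice the 48 kHz value of 0x1000.
 */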
/*---------------------------------------------------------------------------
snd_trident_spurious_threshold
Description: This routine converts rate in Hz to a spurious threshold.
Parameters: rate - sampling rate in Hz.
period_size - period size in frames.
Returns: Spurious threshold value.
---------------------------------------------------------------------------*/
static unsigned int snd_trident_spurious_threshold(unsigned int rate,
unsigned int period_size)
{
unsigned int res = (rate * period_size) / 48000;
if (res < 64)
res = res / 2;
else
res -= 32;
return res;
}
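/*
 * Worked examples (illustrative only): a 48000 Hz stream with a 256-frame
 * period gives res = 256, which is >= 64, so the threshold becomes
 * 256 - 32 = 224; an 8000 Hz stream with a 64-frame period gives res = 10,
 * which is < 64, so the threshold becomes 10 / 2 = 5.
 */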
/*---------------------------------------------------------------------------
snd_trident_control_mode
Description: This routine returns a control mode for a PCM channel.
Parameters: trident - pointer to target device class for 4DWave.
substream - PCM substream
Returns: Control value.
---------------------------------------------------------------------------*/
static unsigned int snd_trident_control_mode(struct snd_pcm_substream *substream)
{
unsigned int CTRL;
struct snd_pcm_runtime *runtime = substream->runtime;
/* set ctrl mode
CTRL default: 8-bit (unsigned) mono, loop mode enabled
*/
CTRL = 0x00000001;
if (snd_pcm_format_width(runtime->format) == 16)
CTRL |= 0x00000008; // 16-bit data
if (snd_pcm_format_signed(runtime->format))
CTRL |= 0x00000002; // signed data
if (runtime->channels > 1)
CTRL |= 0x00000004; // stereo data
return CTRL;
}
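/*
 * Worked example (illustrative only): a 16-bit signed stereo stream
 * (SNDRV_PCM_FORMAT_S16_LE, 2 channels) yields CTRL = 0x01 | 0x08 | 0x02 |
 * 0x04 = 0x0f, while the default 8-bit unsigned mono stream keeps CTRL at
 * the looping-only value 0x01.
 */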
/*
* PCM part
*/
/*---------------------------------------------------------------------------
snd_trident_ioctl
Description: Device I/O control handler for playback/capture parameters.
Parameters: substream - PCM substream class
cmd - what ioctl message to process
arg - additional message info
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd,
void *arg)
{
/* FIXME: it seems that with small periods the behaviour of
trident hardware is unpredictable and interrupt generator
is broken */
return snd_pcm_lib_ioctl(substream, cmd, arg);
}
/*---------------------------------------------------------------------------
snd_trident_allocate_pcm_mem
Description: Allocate PCM ring buffer for given substream
Parameters: substream - PCM substream class
hw_params - hardware parameters
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_allocate_pcm_mem(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice = runtime->private_data;
int err;
if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0)
return err;
if (trident->tlb.entries) {
if (err > 0) { /* change */
if (voice->memblk)
snd_trident_free_pages(trident, voice->memblk);
voice->memblk = snd_trident_alloc_pages(trident, substream);
if (voice->memblk == NULL)
return -ENOMEM;
}
}
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_allocate_evoice
Description: Allocate extra voice as interrupt generator
Parameters: substream - PCM substream class
hw_params - hardware parameters
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_allocate_evoice(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice = runtime->private_data;
struct snd_trident_voice *evoice = voice->extra;
/* voice management */
if (params_buffer_size(hw_params) / 2 != params_period_size(hw_params)) {
if (evoice == NULL) {
evoice = snd_trident_alloc_voice(trident, SNDRV_TRIDENT_VOICE_TYPE_PCM, 0, 0);
if (evoice == NULL)
return -ENOMEM;
voice->extra = evoice;
evoice->substream = substream;
}
} else {
if (evoice != NULL) {
snd_trident_free_voice(trident, evoice);
voice->extra = evoice = NULL;
}
}
return 0;
}
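/*
 * Worked example (illustrative only): a 16384-frame buffer cut into
 * 4096-frame periods fails the "buffer == 2 periods" test above, so an extra
 * voice is allocated purely to raise the period interrupts; with an
 * 8192-frame buffer and 4096-frame periods the loop interrupt of the real
 * voice is sufficient and any previously allocated extra voice is released.
 */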
/*---------------------------------------------------------------------------
snd_trident_hw_params
Description: Set the hardware parameters for the playback device.
Parameters: substream - PCM substream class
hw_params - hardware parameters
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
int err;
err = snd_trident_allocate_pcm_mem(substream, hw_params);
if (err >= 0)
err = snd_trident_allocate_evoice(substream, hw_params);
return err;
}
/*---------------------------------------------------------------------------
snd_trident_hw_free
Description: Release the hardware resources for the substream.
Parameters: substream - PCM substream class
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_hw_free(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice = runtime->private_data;
struct snd_trident_voice *evoice = voice ? voice->extra : NULL;
if (trident->tlb.entries) {
if (voice && voice->memblk) {
snd_trident_free_pages(trident, voice->memblk);
voice->memblk = NULL;
}
}
snd_pcm_lib_free_pages(substream);
if (evoice != NULL) {
snd_trident_free_voice(trident, evoice);
voice->extra = NULL;
}
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_playback_prepare
Description: Prepare playback device for playback.
Parameters: substream - PCM substream class
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_playback_prepare(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice = runtime->private_data;
struct snd_trident_voice *evoice = voice->extra;
struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[substream->number];
spin_lock_irq(&trident->reg_lock);
/* set delta (rate) value */
voice->Delta = snd_trident_convert_rate(runtime->rate);
voice->spurious_threshold = snd_trident_spurious_threshold(runtime->rate, runtime->period_size);
/* set Loop Begin Address */
if (voice->memblk)
voice->LBA = voice->memblk->offset;
else
voice->LBA = runtime->dma_addr;
voice->CSO = 0;
voice->ESO = runtime->buffer_size - 1; /* in samples */
voice->CTRL = snd_trident_control_mode(substream);
voice->FMC = 3;
voice->GVSel = 1;
voice->EC = 0;
voice->Alpha = 0;
voice->FMS = 0;
voice->Vol = mix->vol;
voice->RVol = mix->rvol;
voice->CVol = mix->cvol;
voice->Pan = mix->pan;
voice->Attribute = 0;
#if 0
voice->Attribute = (1<<(30-16))|(2<<(26-16))|
(0<<(24-16))|(0x1f<<(19-16));
#else
voice->Attribute = 0;
#endif
snd_trident_write_voice_regs(trident, voice);
if (evoice != NULL) {
evoice->Delta = voice->Delta;
evoice->spurious_threshold = voice->spurious_threshold;
evoice->LBA = voice->LBA;
evoice->CSO = 0;
evoice->ESO = (runtime->period_size * 2) + 4 - 1; /* in samples */
evoice->CTRL = voice->CTRL;
evoice->FMC = 3;
evoice->GVSel = trident->device == TRIDENT_DEVICE_ID_SI7018 ? 0 : 1;
evoice->EC = 0;
evoice->Alpha = 0;
evoice->FMS = 0;
evoice->Vol = 0x3ff; /* mute */
evoice->RVol = evoice->CVol = 0x7f; /* mute */
evoice->Pan = 0x7f; /* mute */
#if 0
evoice->Attribute = (1<<(30-16))|(2<<(26-16))|
(0<<(24-16))|(0x1f<<(19-16));
#else
evoice->Attribute = 0;
#endif
snd_trident_write_voice_regs(trident, evoice);
evoice->isync2 = 1;
evoice->isync_mark = runtime->period_size;
evoice->ESO = (runtime->period_size * 2) - 1;
}
spin_unlock_irq(&trident->reg_lock);
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_capture_hw_params
Description: Set the hardware parameters for the capture device.
Parameters: substream - PCM substream class
hw_params - hardware parameters
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_capture_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
return snd_trident_allocate_pcm_mem(substream, hw_params);
}
/*---------------------------------------------------------------------------
snd_trident_capture_prepare
Description: Prepare capture device for recording.
Parameters: substream - PCM substream class
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_capture_prepare(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice = runtime->private_data;
unsigned int val, ESO_bytes;
spin_lock_irq(&trident->reg_lock);
// Initialize the channel and set channel Mode
outb(0, TRID_REG(trident, LEGACY_DMAR15));
// Set DMA channel operation mode register
outb(0x54, TRID_REG(trident, LEGACY_DMAR11));
// Set channel buffer Address, DMAR0 expects contiguous PCI memory area
voice->LBA = runtime->dma_addr;
outl(voice->LBA, TRID_REG(trident, LEGACY_DMAR0));
if (voice->memblk)
voice->LBA = voice->memblk->offset;
// set ESO
ESO_bytes = snd_pcm_lib_buffer_bytes(substream) - 1;
outb((ESO_bytes & 0x00ff0000) >> 16, TRID_REG(trident, LEGACY_DMAR6));
outw((ESO_bytes & 0x0000ffff), TRID_REG(trident, LEGACY_DMAR4));
ESO_bytes++;
// Set channel sample rate, 4.12 format
val = (((unsigned int) 48000L << 12) + (runtime->rate/2)) / runtime->rate;
outw(val, TRID_REG(trident, T4D_SBDELTA_DELTA_R));
// Set channel interrupt blk length
if (snd_pcm_format_width(runtime->format) == 16) {
val = (unsigned short) ((ESO_bytes >> 1) - 1);
} else {
val = (unsigned short) (ESO_bytes - 1);
}
outl((val << 16) | val, TRID_REG(trident, T4D_SBBL_SBCL));
// Right now, set the format and start capturing,
// with continuous run loop enabled.
trident->bDMAStart = 0x19; // 0001 1001b
if (snd_pcm_format_width(runtime->format) == 16)
trident->bDMAStart |= 0x80;
if (snd_pcm_format_signed(runtime->format))
trident->bDMAStart |= 0x20;
if (runtime->channels > 1)
trident->bDMAStart |= 0x40;
// Prepare capture intr channel
voice->Delta = snd_trident_convert_rate(runtime->rate);
voice->spurious_threshold = snd_trident_spurious_threshold(runtime->rate, runtime->period_size);
voice->isync = 1;
voice->isync_mark = runtime->period_size;
voice->isync_max = runtime->buffer_size;
// Set voice parameters
voice->CSO = 0;
voice->ESO = voice->isync_ESO = (runtime->period_size * 2) + 6 - 1;
voice->CTRL = snd_trident_control_mode(substream);
voice->FMC = 3;
voice->RVol = 0x7f;
voice->CVol = 0x7f;
voice->GVSel = 1;
voice->Pan = 0x7f; /* mute */
voice->Vol = 0x3ff; /* mute */
voice->EC = 0;
voice->Alpha = 0;
voice->FMS = 0;
voice->Attribute = 0;
snd_trident_write_voice_regs(trident, voice);
spin_unlock_irq(&trident->reg_lock);
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_si7018_capture_hw_params
Description: Set the hardware parameters for the capture device.
Parameters: substream - PCM substream class
hw_params - hardware parameters
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_si7018_capture_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
int err;
if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0)
return err;
return snd_trident_allocate_evoice(substream, hw_params);
}
/*---------------------------------------------------------------------------
snd_trident_si7018_capture_hw_free
Description: Release the hardware resources for the capture device.
Parameters: substream - PCM substream class
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_si7018_capture_hw_free(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice = runtime->private_data;
struct snd_trident_voice *evoice = voice ? voice->extra : NULL;
snd_pcm_lib_free_pages(substream);
if (evoice != NULL) {
snd_trident_free_voice(trident, evoice);
voice->extra = NULL;
}
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_si7018_capture_prepare
Description: Prepare capture device for recording.
Parameters: substream - PCM substream class
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_si7018_capture_prepare(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice = runtime->private_data;
struct snd_trident_voice *evoice = voice->extra;
spin_lock_irq(&trident->reg_lock);
voice->LBA = runtime->dma_addr;
voice->Delta = snd_trident_convert_adc_rate(runtime->rate);
voice->spurious_threshold = snd_trident_spurious_threshold(runtime->rate, runtime->period_size);
// Set voice parameters
voice->CSO = 0;
voice->ESO = runtime->buffer_size - 1; /* in samples */
voice->CTRL = snd_trident_control_mode(substream);
voice->FMC = 0;
voice->RVol = 0;
voice->CVol = 0;
voice->GVSel = 1;
voice->Pan = T4D_DEFAULT_PCM_PAN;
voice->Vol = 0;
voice->EC = 0;
voice->Alpha = 0;
voice->FMS = 0;
voice->Attribute = (2 << (30-16)) |
(2 << (26-16)) |
(2 << (24-16)) |
(1 << (23-16));
snd_trident_write_voice_regs(trident, voice);
if (evoice != NULL) {
evoice->Delta = snd_trident_convert_rate(runtime->rate);
evoice->spurious_threshold = voice->spurious_threshold;
evoice->LBA = voice->LBA;
evoice->CSO = 0;
evoice->ESO = (runtime->period_size * 2) + 20 - 1; /* in samples, 20 means correction */
evoice->CTRL = voice->CTRL;
evoice->FMC = 3;
evoice->GVSel = 0;
evoice->EC = 0;
evoice->Alpha = 0;
evoice->FMS = 0;
evoice->Vol = 0x3ff; /* mute */
evoice->RVol = evoice->CVol = 0x7f; /* mute */
evoice->Pan = 0x7f; /* mute */
evoice->Attribute = 0;
snd_trident_write_voice_regs(trident, evoice);
evoice->isync2 = 1;
evoice->isync_mark = runtime->period_size;
evoice->ESO = (runtime->period_size * 2) - 1;
}
spin_unlock_irq(&trident->reg_lock);
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_foldback_prepare
Description: Prepare foldback capture device for recording the playback stream.
Parameters: substream - PCM substream class
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_foldback_prepare(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice = runtime->private_data;
struct snd_trident_voice *evoice = voice->extra;
spin_lock_irq(&trident->reg_lock);
/* Set channel buffer Address */
if (voice->memblk)
voice->LBA = voice->memblk->offset;
else
voice->LBA = runtime->dma_addr;
/* set target ESO for channel */
voice->ESO = runtime->buffer_size - 1; /* in samples */
/* set sample rate */
voice->Delta = 0x1000;
voice->spurious_threshold = snd_trident_spurious_threshold(48000, runtime->period_size);
voice->CSO = 0;
voice->CTRL = snd_trident_control_mode(substream);
voice->FMC = 3;
voice->RVol = 0x7f;
voice->CVol = 0x7f;
voice->GVSel = 1;
voice->Pan = 0x7f; /* mute */
voice->Vol = 0x3ff; /* mute */
voice->EC = 0;
voice->Alpha = 0;
voice->FMS = 0;
voice->Attribute = 0;
/* set up capture channel */
outb(((voice->number & 0x3f) | 0x80), TRID_REG(trident, T4D_RCI + voice->foldback_chan));
snd_trident_write_voice_regs(trident, voice);
if (evoice != NULL) {
evoice->Delta = voice->Delta;
evoice->spurious_threshold = voice->spurious_threshold;
evoice->LBA = voice->LBA;
evoice->CSO = 0;
evoice->ESO = (runtime->period_size * 2) + 4 - 1; /* in samples */
evoice->CTRL = voice->CTRL;
evoice->FMC = 3;
evoice->GVSel = trident->device == TRIDENT_DEVICE_ID_SI7018 ? 0 : 1;
evoice->EC = 0;
evoice->Alpha = 0;
evoice->FMS = 0;
evoice->Vol = 0x3ff; /* mute */
evoice->RVol = evoice->CVol = 0x7f; /* mute */
evoice->Pan = 0x7f; /* mute */
evoice->Attribute = 0;
snd_trident_write_voice_regs(trident, evoice);
evoice->isync2 = 1;
evoice->isync_mark = runtime->period_size;
evoice->ESO = (runtime->period_size * 2) - 1;
}
spin_unlock_irq(&trident->reg_lock);
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_spdif_hw_params
Description: Set the hardware parameters for the spdif device.
Parameters: substream - PCM substream class
hw_params - hardware parameters
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_spdif_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
unsigned int old_bits = 0, change = 0;
int err;
err = snd_trident_allocate_pcm_mem(substream, hw_params);
if (err < 0)
return err;
if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
err = snd_trident_allocate_evoice(substream, hw_params);
if (err < 0)
return err;
}
/* prepare SPDIF channel */
spin_lock_irq(&trident->reg_lock);
old_bits = trident->spdif_pcm_bits;
if (old_bits & IEC958_AES0_PROFESSIONAL)
trident->spdif_pcm_bits &= ~IEC958_AES0_PRO_FS;
else
trident->spdif_pcm_bits &= ~(IEC958_AES3_CON_FS << 24);
if (params_rate(hw_params) >= 48000) {
trident->spdif_pcm_ctrl = 0x3c; // 48000 Hz
trident->spdif_pcm_bits |=
trident->spdif_bits & IEC958_AES0_PROFESSIONAL ?
IEC958_AES0_PRO_FS_48000 :
(IEC958_AES3_CON_FS_48000 << 24);
}
else if (params_rate(hw_params) >= 44100) {
trident->spdif_pcm_ctrl = 0x3e; // 44100 Hz
trident->spdif_pcm_bits |=
trident->spdif_bits & IEC958_AES0_PROFESSIONAL ?
IEC958_AES0_PRO_FS_44100 :
(IEC958_AES3_CON_FS_44100 << 24);
}
else {
trident->spdif_pcm_ctrl = 0x3d; // 32000 Hz
trident->spdif_pcm_bits |=
trident->spdif_bits & IEC958_AES0_PROFESSIONAL ?
IEC958_AES0_PRO_FS_32000 :
(IEC958_AES3_CON_FS_32000 << 24);
}
change = old_bits != trident->spdif_pcm_bits;
spin_unlock_irq(&trident->reg_lock);
if (change)
snd_ctl_notify(trident->card, SNDRV_CTL_EVENT_MASK_VALUE, &trident->spdif_pcm_ctl->id);
return 0;
}
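/*
 * Worked example (illustrative only): asking for 44100 Hz selects the 0x3e
 * rate code for spdif_pcm_ctrl and, in consumer mode, replaces the sampling
 * frequency field in byte 3 of the channel status (the value shifted left by
 * 24 above) with the 44.1 kHz code; in professional mode the AES0 FS bits
 * are rewritten instead.
 */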
/*---------------------------------------------------------------------------
snd_trident_spdif_prepare
Description: Prepare SPDIF device for playback.
Parameters: substream - PCM substream class
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_spdif_prepare(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice = runtime->private_data;
struct snd_trident_voice *evoice = voice->extra;
struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[substream->number];
unsigned int RESO, LBAO;
unsigned int temp;
spin_lock_irq(&trident->reg_lock);
if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
/* set delta (rate) value */
voice->Delta = snd_trident_convert_rate(runtime->rate);
voice->spurious_threshold = snd_trident_spurious_threshold(runtime->rate, runtime->period_size);
/* set Loop Begin Address */
LBAO = runtime->dma_addr;
if (voice->memblk)
voice->LBA = voice->memblk->offset;
else
voice->LBA = LBAO;
voice->isync = 1;
voice->isync3 = 1;
voice->isync_mark = runtime->period_size;
voice->isync_max = runtime->buffer_size;
/* set target ESO for channel */
RESO = runtime->buffer_size - 1;
voice->ESO = voice->isync_ESO = (runtime->period_size * 2) + 6 - 1;
/* set ctrl mode */
voice->CTRL = snd_trident_control_mode(substream);
voice->FMC = 3;
voice->RVol = 0x7f;
voice->CVol = 0x7f;
voice->GVSel = 1;
voice->Pan = 0x7f;
voice->Vol = 0x3ff;
voice->EC = 0;
voice->CSO = 0;
voice->Alpha = 0;
voice->FMS = 0;
voice->Attribute = 0;
/* prepare surrogate IRQ channel */
snd_trident_write_voice_regs(trident, voice);
outw((RESO & 0xffff), TRID_REG(trident, NX_SPESO));
outb((RESO >> 16), TRID_REG(trident, NX_SPESO + 2));
outl((LBAO & 0xfffffffc), TRID_REG(trident, NX_SPLBA));
outw((voice->CSO & 0xffff), TRID_REG(trident, NX_SPCTRL_SPCSO));
outb((voice->CSO >> 16), TRID_REG(trident, NX_SPCTRL_SPCSO + 2));
/* set SPDIF setting */
outb(trident->spdif_pcm_ctrl, TRID_REG(trident, NX_SPCTRL_SPCSO + 3));
outl(trident->spdif_pcm_bits, TRID_REG(trident, NX_SPCSTATUS));
} else { /* SiS */
/* set delta (rate) value */
voice->Delta = 0x800;
voice->spurious_threshold = snd_trident_spurious_threshold(48000, runtime->period_size);
/* set Loop Begin Address */
if (voice->memblk)
voice->LBA = voice->memblk->offset;
else
voice->LBA = runtime->dma_addr;
voice->CSO = 0;
voice->ESO = runtime->buffer_size - 1; /* in samples */
voice->CTRL = snd_trident_control_mode(substream);
voice->FMC = 3;
voice->GVSel = 1;
voice->EC = 0;
voice->Alpha = 0;
voice->FMS = 0;
voice->Vol = mix->vol;
voice->RVol = mix->rvol;
voice->CVol = mix->cvol;
voice->Pan = mix->pan;
voice->Attribute = (1<<(30-16))|(7<<(26-16))|
(0<<(24-16))|(0<<(19-16));
snd_trident_write_voice_regs(trident, voice);
if (evoice != NULL) {
evoice->Delta = voice->Delta;
evoice->spurious_threshold = voice->spurious_threshold;
evoice->LBA = voice->LBA;
evoice->CSO = 0;
evoice->ESO = (runtime->period_size * 2) + 4 - 1; /* in samples */
evoice->CTRL = voice->CTRL;
evoice->FMC = 3;
evoice->GVSel = trident->device == TRIDENT_DEVICE_ID_SI7018 ? 0 : 1;
evoice->EC = 0;
evoice->Alpha = 0;
evoice->FMS = 0;
evoice->Vol = 0x3ff; /* mute */
evoice->RVol = evoice->CVol = 0x7f; /* mute */
evoice->Pan = 0x7f; /* mute */
evoice->Attribute = 0;
snd_trident_write_voice_regs(trident, evoice);
evoice->isync2 = 1;
evoice->isync_mark = runtime->period_size;
evoice->ESO = (runtime->period_size * 2) - 1;
}
outl(trident->spdif_pcm_bits, TRID_REG(trident, SI_SPDIF_CS));
temp = inl(TRID_REG(trident, T4D_LFO_GC_CIR));
temp &= ~(1<<19);
outl(temp, TRID_REG(trident, T4D_LFO_GC_CIR));
temp = inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL));
temp |= SPDIF_EN;
outl(temp, TRID_REG(trident, SI_SERIAL_INTF_CTRL));
}
spin_unlock_irq(&trident->reg_lock);
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_trigger
Description: Start/stop devices
Parameters: substream - PCM substream class
cmd - trigger command (STOP, GO)
Returns: Error status
---------------------------------------------------------------------------*/
static int snd_trident_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_substream *s;
unsigned int what, whati, capture_flag, spdif_flag;
struct snd_trident_voice *voice, *evoice;
unsigned int val, go;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
go = 1;
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
go = 0;
break;
default:
return -EINVAL;
}
what = whati = capture_flag = spdif_flag = 0;
spin_lock(&trident->reg_lock);
val = inl(TRID_REG(trident, T4D_STIMER)) & 0x00ffffff;
snd_pcm_group_for_each_entry(s, substream) {
if ((struct snd_trident *) snd_pcm_substream_chip(s) == trident) {
voice = s->runtime->private_data;
evoice = voice->extra;
what |= 1 << (voice->number & 0x1f);
if (evoice == NULL) {
whati |= 1 << (voice->number & 0x1f);
} else {
what |= 1 << (evoice->number & 0x1f);
whati |= 1 << (evoice->number & 0x1f);
if (go)
evoice->stimer = val;
}
if (go) {
voice->running = 1;
voice->stimer = val;
} else {
voice->running = 0;
}
snd_pcm_trigger_done(s, substream);
if (voice->capture)
capture_flag = 1;
if (voice->spdif)
spdif_flag = 1;
}
}
if (spdif_flag) {
if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
outl(trident->spdif_pcm_bits, TRID_REG(trident, NX_SPCSTATUS));
val = trident->spdif_pcm_ctrl;
if (!go)
val &= ~(0x28);
outb(val, TRID_REG(trident, NX_SPCTRL_SPCSO + 3));
} else {
outl(trident->spdif_pcm_bits, TRID_REG(trident, SI_SPDIF_CS));
val = inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL)) | SPDIF_EN;
outl(val, TRID_REG(trident, SI_SERIAL_INTF_CTRL));
}
}
if (!go)
outl(what, TRID_REG(trident, T4D_STOP_B));
val = inl(TRID_REG(trident, T4D_AINTEN_B));
if (go) {
val |= whati;
} else {
val &= ~whati;
}
outl(val, TRID_REG(trident, T4D_AINTEN_B));
if (go) {
outl(what, TRID_REG(trident, T4D_START_B));
if (capture_flag && trident->device != TRIDENT_DEVICE_ID_SI7018)
outb(trident->bDMAStart, TRID_REG(trident, T4D_SBCTRL_SBE2R_SBDD));
} else {
if (capture_flag && trident->device != TRIDENT_DEVICE_ID_SI7018)
outb(0x00, TRID_REG(trident, T4D_SBCTRL_SBE2R_SBDD));
}
spin_unlock(&trident->reg_lock);
return 0;
}
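/*
 * Worked example (illustrative only): triggering a substream whose voice is
 * number 34 with an extra interrupt voice number 40 sets bit 2 and bit 8 in
 * "what" (both voices are started or stopped) but only bit 8 in "whati"
 * (only the extra voice has its interrupt enabled in T4D_AINTEN_B).
 */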
/*---------------------------------------------------------------------------
snd_trident_playback_pointer
Description: This routine returns the playback position
Parameters: substream - PCM substream class
Returns: position of buffer
---------------------------------------------------------------------------*/
static snd_pcm_uframes_t snd_trident_playback_pointer(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice = runtime->private_data;
unsigned int cso;
if (!voice->running)
return 0;
spin_lock(&trident->reg_lock);
outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR));
if (trident->device != TRIDENT_DEVICE_ID_NX) {
cso = inw(TRID_REG(trident, CH_DX_CSO_ALPHA_FMS + 2));
} else { // ID_4DWAVE_NX
cso = (unsigned int) inl(TRID_REG(trident, CH_NX_DELTA_CSO)) & 0x00ffffff;
}
spin_unlock(&trident->reg_lock);
if (cso >= runtime->buffer_size)
cso = 0;
return cso;
}
/*---------------------------------------------------------------------------
snd_trident_capture_pointer
Description: This routine returns the capture position
Parameters: substream - PCM substream class
Returns: position of buffer
---------------------------------------------------------------------------*/
static snd_pcm_uframes_t snd_trident_capture_pointer(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice = runtime->private_data;
unsigned int result;
if (!voice->running)
return 0;
result = inw(TRID_REG(trident, T4D_SBBL_SBCL));
if (runtime->channels > 1)
result >>= 1;
if (result > 0)
result = runtime->buffer_size - result;
return result;
}
/*---------------------------------------------------------------------------
snd_trident_spdif_pointer
Description: This routine returns the SPDIF playback position
Parameters: substream - PCM substream class
Returns: position of buffer
---------------------------------------------------------------------------*/
static snd_pcm_uframes_t snd_trident_spdif_pointer(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice = runtime->private_data;
unsigned int result;
if (!voice->running)
return 0;
result = inl(TRID_REG(trident, NX_SPCTRL_SPCSO)) & 0x00ffffff;
return result;
}
/*
* Playback support device description
*/
static struct snd_pcm_hardware snd_trident_playback =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
.formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE),
.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
.rate_min = 4000,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = (256*1024),
.period_bytes_min = 64,
.period_bytes_max = (256*1024),
.periods_min = 1,
.periods_max = 1024,
.fifo_size = 0,
};
/*
* Capture support device description
*/
static struct snd_pcm_hardware snd_trident_capture =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
.formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE),
.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
.rate_min = 4000,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = (128*1024),
.period_bytes_min = 64,
.period_bytes_max = (128*1024),
.periods_min = 1,
.periods_max = 1024,
.fifo_size = 0,
};
/*
* Foldback capture support device description
*/
static struct snd_pcm_hardware snd_trident_foldback =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = (128*1024),
.period_bytes_min = 64,
.period_bytes_max = (128*1024),
.periods_min = 1,
.periods_max = 1024,
.fifo_size = 0,
};
/*
* SPDIF playback support device description
*/
static struct snd_pcm_hardware snd_trident_spdif =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000),
.rate_min = 32000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = (128*1024),
.period_bytes_min = 64,
.period_bytes_max = (128*1024),
.periods_min = 1,
.periods_max = 1024,
.fifo_size = 0,
};
static struct snd_pcm_hardware snd_trident_spdif_7018 =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = (128*1024),
.period_bytes_min = 64,
.period_bytes_max = (128*1024),
.periods_min = 1,
.periods_max = 1024,
.fifo_size = 0,
};
static void snd_trident_pcm_free_substream(struct snd_pcm_runtime *runtime)
{
struct snd_trident_voice *voice = runtime->private_data;
struct snd_trident *trident;
if (voice) {
trident = voice->trident;
snd_trident_free_voice(trident, voice);
}
}
static int snd_trident_playback_open(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice;
voice = snd_trident_alloc_voice(trident, SNDRV_TRIDENT_VOICE_TYPE_PCM, 0, 0);
if (voice == NULL)
return -EAGAIN;
snd_trident_pcm_mixer_build(trident, voice, substream);
voice->substream = substream;
runtime->private_data = voice;
runtime->private_free = snd_trident_pcm_free_substream;
runtime->hw = snd_trident_playback;
snd_pcm_set_sync(substream);
snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 64*1024);
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_playback_close
Description: This routine will close the 4DWave playback device. For now
we will simply free the dma transfer buffer.
Parameters: substream - PCM substream class
---------------------------------------------------------------------------*/
static int snd_trident_playback_close(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_trident_voice *voice = runtime->private_data;
snd_trident_pcm_mixer_free(trident, voice, substream);
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_spdif_open
Description: This routine will open the 4DWave SPDIF device.
Parameters: substream - PCM substream class
Returns: status - success or failure flag
---------------------------------------------------------------------------*/
static int snd_trident_spdif_open(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_trident_voice *voice;
struct snd_pcm_runtime *runtime = substream->runtime;
voice = snd_trident_alloc_voice(trident, SNDRV_TRIDENT_VOICE_TYPE_PCM, 0, 0);
if (voice == NULL)
return -EAGAIN;
voice->spdif = 1;
voice->substream = substream;
spin_lock_irq(&trident->reg_lock);
trident->spdif_pcm_bits = trident->spdif_bits;
spin_unlock_irq(&trident->reg_lock);
runtime->private_data = voice;
runtime->private_free = snd_trident_pcm_free_substream;
if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
runtime->hw = snd_trident_spdif_7018;
} else {
runtime->hw = snd_trident_spdif;
}
trident->spdif_pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
snd_ctl_notify(trident->card, SNDRV_CTL_EVENT_MASK_VALUE |
SNDRV_CTL_EVENT_MASK_INFO, &trident->spdif_pcm_ctl->id);
snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 64*1024);
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_spdif_close
Description: This routine will close the 4DWave SPDIF device.
Parameters: substream - PCM substream class
---------------------------------------------------------------------------*/
static int snd_trident_spdif_close(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
unsigned int temp;
spin_lock_irq(&trident->reg_lock);
// restore default SPDIF setting
if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
outb(trident->spdif_ctrl, TRID_REG(trident, NX_SPCTRL_SPCSO + 3));
outl(trident->spdif_bits, TRID_REG(trident, NX_SPCSTATUS));
} else {
outl(trident->spdif_bits, TRID_REG(trident, SI_SPDIF_CS));
temp = inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL));
if (trident->spdif_ctrl) {
temp |= SPDIF_EN;
} else {
temp &= ~SPDIF_EN;
}
outl(temp, TRID_REG(trident, SI_SERIAL_INTF_CTRL));
}
spin_unlock_irq(&trident->reg_lock);
trident->spdif_pcm_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
snd_ctl_notify(trident->card, SNDRV_CTL_EVENT_MASK_VALUE |
SNDRV_CTL_EVENT_MASK_INFO, &trident->spdif_pcm_ctl->id);
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_capture_open
Description: This routine will open the 4DWave capture device.
Parameters: substream - PCM substream class
Returns: status - success or failure flag
---------------------------------------------------------------------------*/
static int snd_trident_capture_open(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_trident_voice *voice;
struct snd_pcm_runtime *runtime = substream->runtime;
voice = snd_trident_alloc_voice(trident, SNDRV_TRIDENT_VOICE_TYPE_PCM, 0, 0);
if (voice == NULL)
return -EAGAIN;
voice->capture = 1;
voice->substream = substream;
runtime->private_data = voice;
runtime->private_free = snd_trident_pcm_free_substream;
runtime->hw = snd_trident_capture;
snd_pcm_set_sync(substream);
snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 64*1024);
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_capture_close
Description: This routine will close the 4DWave capture device. For now
we will simply free the dma transfer buffer.
Parameters: substream - PCM substream class
---------------------------------------------------------------------------*/
static int snd_trident_capture_close(struct snd_pcm_substream *substream)
{
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_foldback_open
Description: This routine will open the 4DWave foldback capture device.
Parameters: substream - PCM substream class
Returns: status - success or failure flag
---------------------------------------------------------------------------*/
static int snd_trident_foldback_open(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_trident_voice *voice;
struct snd_pcm_runtime *runtime = substream->runtime;
voice = snd_trident_alloc_voice(trident, SNDRV_TRIDENT_VOICE_TYPE_PCM, 0, 0);
if (voice == NULL)
return -EAGAIN;
voice->foldback_chan = substream->number;
voice->substream = substream;
runtime->private_data = voice;
runtime->private_free = snd_trident_pcm_free_substream;
runtime->hw = snd_trident_foldback;
snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 64*1024);
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_foldback_close
Description: This routine will close the 4DWave foldback capture device.
For now we will simply free the dma transfer buffer.
Parameters: substream - PCM substream class
---------------------------------------------------------------------------*/
static int snd_trident_foldback_close(struct snd_pcm_substream *substream)
{
struct snd_trident *trident = snd_pcm_substream_chip(substream);
struct snd_trident_voice *voice;
struct snd_pcm_runtime *runtime = substream->runtime;
voice = runtime->private_data;
/* stop capture channel */
spin_lock_irq(&trident->reg_lock);
outb(0x00, TRID_REG(trident, T4D_RCI + voice->foldback_chan));
spin_unlock_irq(&trident->reg_lock);
return 0;
}
/*---------------------------------------------------------------------------
PCM operations
---------------------------------------------------------------------------*/
static struct snd_pcm_ops snd_trident_playback_ops = {
.open = snd_trident_playback_open,
.close = snd_trident_playback_close,
.ioctl = snd_trident_ioctl,
.hw_params = snd_trident_hw_params,
.hw_free = snd_trident_hw_free,
.prepare = snd_trident_playback_prepare,
.trigger = snd_trident_trigger,
.pointer = snd_trident_playback_pointer,
};
static struct snd_pcm_ops snd_trident_nx_playback_ops = {
.open = snd_trident_playback_open,
.close = snd_trident_playback_close,
.ioctl = snd_trident_ioctl,
.hw_params = snd_trident_hw_params,
.hw_free = snd_trident_hw_free,
.prepare = snd_trident_playback_prepare,
.trigger = snd_trident_trigger,
.pointer = snd_trident_playback_pointer,
.page = snd_pcm_sgbuf_ops_page,
};
static struct snd_pcm_ops snd_trident_capture_ops = {
.open = snd_trident_capture_open,
.close = snd_trident_capture_close,
.ioctl = snd_trident_ioctl,
.hw_params = snd_trident_capture_hw_params,
.hw_free = snd_trident_hw_free,
.prepare = snd_trident_capture_prepare,
.trigger = snd_trident_trigger,
.pointer = snd_trident_capture_pointer,
};
static struct snd_pcm_ops snd_trident_si7018_capture_ops = {
.open = snd_trident_capture_open,
.close = snd_trident_capture_close,
.ioctl = snd_trident_ioctl,
.hw_params = snd_trident_si7018_capture_hw_params,
.hw_free = snd_trident_si7018_capture_hw_free,
.prepare = snd_trident_si7018_capture_prepare,
.trigger = snd_trident_trigger,
.pointer = snd_trident_playback_pointer,
};
static struct snd_pcm_ops snd_trident_foldback_ops = {
.open = snd_trident_foldback_open,
.close = snd_trident_foldback_close,
.ioctl = snd_trident_ioctl,
.hw_params = snd_trident_hw_params,
.hw_free = snd_trident_hw_free,
.prepare = snd_trident_foldback_prepare,
.trigger = snd_trident_trigger,
.pointer = snd_trident_playback_pointer,
};
static struct snd_pcm_ops snd_trident_nx_foldback_ops = {
.open = snd_trident_foldback_open,
.close = snd_trident_foldback_close,
.ioctl = snd_trident_ioctl,
.hw_params = snd_trident_hw_params,
.hw_free = snd_trident_hw_free,
.prepare = snd_trident_foldback_prepare,
.trigger = snd_trident_trigger,
.pointer = snd_trident_playback_pointer,
.page = snd_pcm_sgbuf_ops_page,
};
static struct snd_pcm_ops snd_trident_spdif_ops = {
.open = snd_trident_spdif_open,
.close = snd_trident_spdif_close,
.ioctl = snd_trident_ioctl,
.hw_params = snd_trident_spdif_hw_params,
.hw_free = snd_trident_hw_free,
.prepare = snd_trident_spdif_prepare,
.trigger = snd_trident_trigger,
.pointer = snd_trident_spdif_pointer,
};
static struct snd_pcm_ops snd_trident_spdif_7018_ops = {
.open = snd_trident_spdif_open,
.close = snd_trident_spdif_close,
.ioctl = snd_trident_ioctl,
.hw_params = snd_trident_spdif_hw_params,
.hw_free = snd_trident_hw_free,
.prepare = snd_trident_spdif_prepare,
.trigger = snd_trident_trigger,
.pointer = snd_trident_playback_pointer,
};
/*---------------------------------------------------------------------------
snd_trident_pcm
Description: This routine registers the 4DWave device for PCM support.
Parameters: trident - pointer to target device class for 4DWave.
Returns: 0 on success or a negative error code
---------------------------------------------------------------------------*/
int __devinit snd_trident_pcm(struct snd_trident * trident,
int device, struct snd_pcm ** rpcm)
{
struct snd_pcm *pcm;
int err;
if (rpcm)
*rpcm = NULL;
if ((err = snd_pcm_new(trident->card, "trident_dx_nx", device, trident->ChanPCM, 1, &pcm)) < 0)
return err;
pcm->private_data = trident;
if (trident->tlb.entries) {
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_trident_nx_playback_ops);
} else {
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_trident_playback_ops);
}
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
trident->device != TRIDENT_DEVICE_ID_SI7018 ?
&snd_trident_capture_ops :
&snd_trident_si7018_capture_ops);
pcm->info_flags = 0;
pcm->dev_subclass = SNDRV_PCM_SUBCLASS_GENERIC_MIX;
strcpy(pcm->name, "Trident 4DWave");
trident->pcm = pcm;
if (trident->tlb.entries) {
struct snd_pcm_substream *substream;
for (substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; substream; substream = substream->next)
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV_SG,
snd_dma_pci_data(trident->pci),
64*1024, 128*1024);
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci),
64*1024, 128*1024);
} else {
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(trident->pci), 64*1024, 128*1024);
}
if (rpcm)
*rpcm = pcm;
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_foldback_pcm
Description: This routine registers the 4DWave device for foldback PCM support.
Parameters: trident - pointer to target device class for 4DWave.
Returns: 0 on success or a negative error code
---------------------------------------------------------------------------*/
int __devinit snd_trident_foldback_pcm(struct snd_trident * trident,
int device, struct snd_pcm ** rpcm)
{
struct snd_pcm *foldback;
int err;
int num_chan = 3;
struct snd_pcm_substream *substream;
if (rpcm)
*rpcm = NULL;
if (trident->device == TRIDENT_DEVICE_ID_NX)
num_chan = 4;
if ((err = snd_pcm_new(trident->card, "trident_dx_nx", device, 0, num_chan, &foldback)) < 0)
return err;
foldback->private_data = trident;
if (trident->tlb.entries)
snd_pcm_set_ops(foldback, SNDRV_PCM_STREAM_CAPTURE, &snd_trident_nx_foldback_ops);
else
snd_pcm_set_ops(foldback, SNDRV_PCM_STREAM_CAPTURE, &snd_trident_foldback_ops);
foldback->info_flags = 0;
strcpy(foldback->name, "Trident 4DWave");
substream = foldback->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
strcpy(substream->name, "Front Mixer");
substream = substream->next;
strcpy(substream->name, "Reverb Mixer");
substream = substream->next;
strcpy(substream->name, "Chorus Mixer");
if (num_chan == 4) {
substream = substream->next;
strcpy(substream->name, "Second AC'97 ADC");
}
trident->foldback = foldback;
if (trident->tlb.entries)
snd_pcm_lib_preallocate_pages_for_all(foldback, SNDRV_DMA_TYPE_DEV_SG,
snd_dma_pci_data(trident->pci), 0, 128*1024);
else
snd_pcm_lib_preallocate_pages_for_all(foldback, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(trident->pci), 64*1024, 128*1024);
if (rpcm)
*rpcm = foldback;
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_spdif_pcm
Description: This routine registers the 4DWave device for S/PDIF PCM support.
Parameters: trident - pointer to target device class for 4DWave.
Returns: 0 on success or a negative error code
---------------------------------------------------------------------------*/
int __devinit snd_trident_spdif_pcm(struct snd_trident * trident,
int device, struct snd_pcm ** rpcm)
{
struct snd_pcm *spdif;
int err;
if (rpcm)
*rpcm = NULL;
if ((err = snd_pcm_new(trident->card, "trident_dx_nx IEC958", device, 1, 0, &spdif)) < 0)
return err;
spdif->private_data = trident;
if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
snd_pcm_set_ops(spdif, SNDRV_PCM_STREAM_PLAYBACK, &snd_trident_spdif_ops);
} else {
snd_pcm_set_ops(spdif, SNDRV_PCM_STREAM_PLAYBACK, &snd_trident_spdif_7018_ops);
}
spdif->info_flags = 0;
strcpy(spdif->name, "Trident 4DWave IEC958");
trident->spdif = spdif;
snd_pcm_lib_preallocate_pages_for_all(spdif, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci), 64*1024, 128*1024);
if (rpcm)
*rpcm = spdif;
return 0;
}
/*
* Mixer part
*/
/*---------------------------------------------------------------------------
snd_trident_spdif_control
Description: enable/disable S/PDIF out from ac97 mixer
---------------------------------------------------------------------------*/
#define snd_trident_spdif_control_info snd_ctl_boolean_mono_info
static int snd_trident_spdif_control_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
unsigned char val;
spin_lock_irq(&trident->reg_lock);
val = trident->spdif_ctrl;
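/* report the switch as 'on' when the stored control byte equals the enable pattern kept in private_value */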
ucontrol->value.integer.value[0] = val == kcontrol->private_value;
spin_unlock_irq(&trident->reg_lock);
return 0;
}
static int snd_trident_spdif_control_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
unsigned char val;
int change;
val = ucontrol->value.integer.value[0] ? (unsigned char) kcontrol->private_value : 0x00;
spin_lock_irq(&trident->reg_lock);
/* S/PDIF C Channel bits 0-31 : 48khz, SCMS disabled */
change = trident->spdif_ctrl != val;
trident->spdif_ctrl = val;
if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
if ((inb(TRID_REG(trident, NX_SPCTRL_SPCSO + 3)) & 0x10) == 0) {
outl(trident->spdif_bits, TRID_REG(trident, NX_SPCSTATUS));
outb(trident->spdif_ctrl, TRID_REG(trident, NX_SPCTRL_SPCSO + 3));
}
} else {
if (trident->spdif == NULL) {
unsigned int temp;
outl(trident->spdif_bits, TRID_REG(trident, SI_SPDIF_CS));
temp = inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL)) & ~SPDIF_EN;
if (val)
temp |= SPDIF_EN;
outl(temp, TRID_REG(trident, SI_SERIAL_INTF_CTRL));
}
}
spin_unlock_irq(&trident->reg_lock);
return change;
}
static struct snd_kcontrol_new snd_trident_spdif_control __devinitdata =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH),
.info = snd_trident_spdif_control_info,
.get = snd_trident_spdif_control_get,
.put = snd_trident_spdif_control_put,
.private_value = 0x28,
};
/*---------------------------------------------------------------------------
snd_trident_spdif_default
Description: put/get the S/PDIF default settings
---------------------------------------------------------------------------*/
static int snd_trident_spdif_default_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
uinfo->count = 1;
return 0;
}
static int snd_trident_spdif_default_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
spin_lock_irq(&trident->reg_lock);
ucontrol->value.iec958.status[0] = (trident->spdif_bits >> 0) & 0xff;
ucontrol->value.iec958.status[1] = (trident->spdif_bits >> 8) & 0xff;
ucontrol->value.iec958.status[2] = (trident->spdif_bits >> 16) & 0xff;
ucontrol->value.iec958.status[3] = (trident->spdif_bits >> 24) & 0xff;
spin_unlock_irq(&trident->reg_lock);
return 0;
}
static int snd_trident_spdif_default_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
unsigned int val;
int change;
val = (ucontrol->value.iec958.status[0] << 0) |
(ucontrol->value.iec958.status[1] << 8) |
(ucontrol->value.iec958.status[2] << 16) |
(ucontrol->value.iec958.status[3] << 24);
spin_lock_irq(&trident->reg_lock);
change = trident->spdif_bits != val;
trident->spdif_bits = val;
if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
if ((inb(TRID_REG(trident, NX_SPCTRL_SPCSO + 3)) & 0x10) == 0)
outl(trident->spdif_bits, TRID_REG(trident, NX_SPCSTATUS));
} else {
if (trident->spdif == NULL)
outl(trident->spdif_bits, TRID_REG(trident, SI_SPDIF_CS));
}
spin_unlock_irq(&trident->reg_lock);
return change;
}
static struct snd_kcontrol_new snd_trident_spdif_default __devinitdata =
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
.info = snd_trident_spdif_default_info,
.get = snd_trident_spdif_default_get,
.put = snd_trident_spdif_default_put
};
/*---------------------------------------------------------------------------
snd_trident_spdif_mask
Description: put/get the S/PDIF mask
---------------------------------------------------------------------------*/
static int snd_trident_spdif_mask_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
uinfo->count = 1;
return 0;
}
static int snd_trident_spdif_mask_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.iec958.status[0] = 0xff;
ucontrol->value.iec958.status[1] = 0xff;
ucontrol->value.iec958.status[2] = 0xff;
ucontrol->value.iec958.status[3] = 0xff;
return 0;
}
static struct snd_kcontrol_new snd_trident_spdif_mask __devinitdata =
{
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
.info = snd_trident_spdif_mask_info,
.get = snd_trident_spdif_mask_get,
};
/*---------------------------------------------------------------------------
snd_trident_spdif_stream
Description: put/get the S/PDIF stream settings
---------------------------------------------------------------------------*/
static int snd_trident_spdif_stream_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
uinfo->count = 1;
return 0;
}
static int snd_trident_spdif_stream_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
spin_lock_irq(&trident->reg_lock);
ucontrol->value.iec958.status[0] = (trident->spdif_pcm_bits >> 0) & 0xff;
ucontrol->value.iec958.status[1] = (trident->spdif_pcm_bits >> 8) & 0xff;
ucontrol->value.iec958.status[2] = (trident->spdif_pcm_bits >> 16) & 0xff;
ucontrol->value.iec958.status[3] = (trident->spdif_pcm_bits >> 24) & 0xff;
spin_unlock_irq(&trident->reg_lock);
return 0;
}
static int snd_trident_spdif_stream_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
unsigned int val;
int change;
val = (ucontrol->value.iec958.status[0] << 0) |
(ucontrol->value.iec958.status[1] << 8) |
(ucontrol->value.iec958.status[2] << 16) |
(ucontrol->value.iec958.status[3] << 24);
spin_lock_irq(&trident->reg_lock);
change = trident->spdif_pcm_bits != val;
trident->spdif_pcm_bits = val;
if (trident->spdif != NULL) {
if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
outl(trident->spdif_pcm_bits, TRID_REG(trident, NX_SPCSTATUS));
} else {
outl(trident->spdif_bits, TRID_REG(trident, SI_SPDIF_CS));
}
}
spin_unlock_irq(&trident->reg_lock);
return change;
}
static struct snd_kcontrol_new snd_trident_spdif_stream __devinitdata =
{
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM),
.info = snd_trident_spdif_stream_info,
.get = snd_trident_spdif_stream_get,
.put = snd_trident_spdif_stream_put
};
/*---------------------------------------------------------------------------
snd_trident_ac97_control
Description: enable/disable rear path for ac97
---------------------------------------------------------------------------*/
#define snd_trident_ac97_control_info snd_ctl_boolean_mono_info
static int snd_trident_ac97_control_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
unsigned char val;
spin_lock_irq(&trident->reg_lock);
val = trident->ac97_ctrl = inl(TRID_REG(trident, NX_ACR0_AC97_COM_STAT));
ucontrol->value.integer.value[0] = (val & (1 << kcontrol->private_value)) ? 1 : 0;
spin_unlock_irq(&trident->reg_lock);
return 0;
}
static int snd_trident_ac97_control_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
unsigned char val;
int change = 0;
spin_lock_irq(&trident->reg_lock);
val = trident->ac97_ctrl = inl(TRID_REG(trident, NX_ACR0_AC97_COM_STAT));
val &= ~(1 << kcontrol->private_value);
if (ucontrol->value.integer.value[0])
val |= 1 << kcontrol->private_value;
change = val != trident->ac97_ctrl;
trident->ac97_ctrl = val;
outl(val, TRID_REG(trident, NX_ACR0_AC97_COM_STAT));
spin_unlock_irq(&trident->reg_lock);
return change;
}
static struct snd_kcontrol_new snd_trident_ac97_rear_control __devinitdata =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Rear Path",
.info = snd_trident_ac97_control_info,
.get = snd_trident_ac97_control_get,
.put = snd_trident_ac97_control_put,
.private_value = 4,
};
/*---------------------------------------------------------------------------
snd_trident_vol_control
Description: wave & music volume control
---------------------------------------------------------------------------*/
static int snd_trident_vol_control_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 2;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 255;
return 0;
}
static int snd_trident_vol_control_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
unsigned int val;
val = trident->musicvol_wavevol;
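/* the register stores attenuation (0 = loudest), so invert to present 255 as full volume */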
ucontrol->value.integer.value[0] = 255 - ((val >> kcontrol->private_value) & 0xff);
ucontrol->value.integer.value[1] = 255 - ((val >> (kcontrol->private_value + 8)) & 0xff);
return 0;
}
static const DECLARE_TLV_DB_SCALE(db_scale_gvol, -6375, 25, 0);
static int snd_trident_vol_control_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
unsigned int val;
int change = 0;
spin_lock_irq(&trident->reg_lock);
val = trident->musicvol_wavevol;
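/* rebuild the left/right attenuation pair in this control's 16-bit field (wave = bits 0-15, music = bits 16-31) */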
val &= ~(0xffff << kcontrol->private_value);
val |= ((255 - (ucontrol->value.integer.value[0] & 0xff)) |
((255 - (ucontrol->value.integer.value[1] & 0xff)) << 8)) << kcontrol->private_value;
change = val != trident->musicvol_wavevol;
outl(trident->musicvol_wavevol = val, TRID_REG(trident, T4D_MUSICVOL_WAVEVOL));
spin_unlock_irq(&trident->reg_lock);
return change;
}
static struct snd_kcontrol_new snd_trident_vol_music_control __devinitdata =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Music Playback Volume",
.info = snd_trident_vol_control_info,
.get = snd_trident_vol_control_get,
.put = snd_trident_vol_control_put,
.private_value = 16,
.tlv = { .p = db_scale_gvol },
};
static struct snd_kcontrol_new snd_trident_vol_wave_control __devinitdata =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Wave Playback Volume",
.info = snd_trident_vol_control_info,
.get = snd_trident_vol_control_get,
.put = snd_trident_vol_control_put,
.private_value = 0,
.tlv = { .p = db_scale_gvol },
};
/*---------------------------------------------------------------------------
snd_trident_pcm_vol_control
Description: PCM front volume control
---------------------------------------------------------------------------*/
static int snd_trident_pcm_vol_control_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 255;
if (trident->device == TRIDENT_DEVICE_ID_SI7018)
uinfo->value.integer.max = 1023;
return 0;
}
static int snd_trident_pcm_vol_control_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)];
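/* mix->vol is kept in the 10-bit SiS 7018 scale; on DX/NX only the upper 8 bits are significant */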
if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
ucontrol->value.integer.value[0] = 1023 - mix->vol;
} else {
ucontrol->value.integer.value[0] = 255 - (mix->vol>>2);
}
return 0;
}
static int snd_trident_pcm_vol_control_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)];
unsigned int val;
int change = 0;
if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
val = 1023 - (ucontrol->value.integer.value[0] & 1023);
} else {
val = (255 - (ucontrol->value.integer.value[0] & 255)) << 2;
}
spin_lock_irq(&trident->reg_lock);
change = val != mix->vol;
mix->vol = val;
if (mix->voice != NULL)
snd_trident_write_vol_reg(trident, mix->voice, val);
spin_unlock_irq(&trident->reg_lock);
return change;
}
static struct snd_kcontrol_new snd_trident_pcm_vol_control __devinitdata =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Front Playback Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
.count = 32,
.info = snd_trident_pcm_vol_control_info,
.get = snd_trident_pcm_vol_control_get,
.put = snd_trident_pcm_vol_control_put,
/* FIXME: no tlv yet */
};
/*---------------------------------------------------------------------------
snd_trident_pcm_pan_control
Description: PCM front pan control
---------------------------------------------------------------------------*/
static int snd_trident_pcm_pan_control_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 127;
return 0;
}
static int snd_trident_pcm_pan_control_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)];
ucontrol->value.integer.value[0] = mix->pan;
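/* unfold the stored direction-flag (0x40) + 6-bit level format into the linear 0-127 control range */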
if (ucontrol->value.integer.value[0] & 0x40) {
ucontrol->value.integer.value[0] = (0x3f - (ucontrol->value.integer.value[0] & 0x3f));
} else {
ucontrol->value.integer.value[0] |= 0x40;
}
return 0;
}
static int snd_trident_pcm_pan_control_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)];
unsigned char val;
int change = 0;
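/* fold the linear 0-127 control value back into the direction-flag (0x40) + 6-bit level register format */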
if (ucontrol->value.integer.value[0] & 0x40)
val = ucontrol->value.integer.value[0] & 0x3f;
else
val = (0x3f - (ucontrol->value.integer.value[0] & 0x3f)) | 0x40;
spin_lock_irq(&trident->reg_lock);
change = val != mix->pan;
mix->pan = val;
if (mix->voice != NULL)
snd_trident_write_pan_reg(trident, mix->voice, val);
spin_unlock_irq(&trident->reg_lock);
return change;
}
static struct snd_kcontrol_new snd_trident_pcm_pan_control __devinitdata =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Pan Playback Control",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
.count = 32,
.info = snd_trident_pcm_pan_control_info,
.get = snd_trident_pcm_pan_control_get,
.put = snd_trident_pcm_pan_control_put,
};
/*---------------------------------------------------------------------------
snd_trident_pcm_rvol_control
Description: PCM reverb volume control
---------------------------------------------------------------------------*/
static int snd_trident_pcm_rvol_control_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 127;
return 0;
}
static int snd_trident_pcm_rvol_control_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)];
ucontrol->value.integer.value[0] = 127 - mix->rvol;
return 0;
}
static int snd_trident_pcm_rvol_control_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)];
unsigned short val;
int change = 0;
val = 0x7f - (ucontrol->value.integer.value[0] & 0x7f);
spin_lock_irq(&trident->reg_lock);
change = val != mix->rvol;
mix->rvol = val;
if (mix->voice != NULL)
snd_trident_write_rvol_reg(trident, mix->voice, val);
spin_unlock_irq(&trident->reg_lock);
return change;
}
static const DECLARE_TLV_DB_SCALE(db_scale_crvol, -3175, 25, 1);
static struct snd_kcontrol_new snd_trident_pcm_rvol_control __devinitdata =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Reverb Playback Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
.count = 32,
.info = snd_trident_pcm_rvol_control_info,
.get = snd_trident_pcm_rvol_control_get,
.put = snd_trident_pcm_rvol_control_put,
.tlv = { .p = db_scale_crvol },
};
/*---------------------------------------------------------------------------
snd_trident_pcm_cvol_control
Description: PCM chorus volume control
---------------------------------------------------------------------------*/
static int snd_trident_pcm_cvol_control_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 127;
return 0;
}
static int snd_trident_pcm_cvol_control_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)];
ucontrol->value.integer.value[0] = 127 - mix->cvol;
return 0;
}
static int snd_trident_pcm_cvol_control_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)];
unsigned short val;
int change = 0;
val = 0x7f - (ucontrol->value.integer.value[0] & 0x7f);
spin_lock_irq(&trident->reg_lock);
change = val != mix->cvol;
mix->cvol = val;
if (mix->voice != NULL)
snd_trident_write_cvol_reg(trident, mix->voice, val);
spin_unlock_irq(&trident->reg_lock);
return change;
}
static struct snd_kcontrol_new snd_trident_pcm_cvol_control __devinitdata =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Chorus Playback Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
.count = 32,
.info = snd_trident_pcm_cvol_control_info,
.get = snd_trident_pcm_cvol_control_get,
.put = snd_trident_pcm_cvol_control_put,
.tlv = { .p = db_scale_crvol },
};
static void snd_trident_notify_pcm_change1(struct snd_card *card,
struct snd_kcontrol *kctl,
int num, int activate)
{
struct snd_ctl_elem_id id;
if (! kctl)
return;
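/* toggle the INACTIVE access flag for this member of the control set and notify user space of both value and info changes */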
if (activate)
kctl->vd[num].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
else
kctl->vd[num].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE |
SNDRV_CTL_EVENT_MASK_INFO,
snd_ctl_build_ioff(&id, kctl, num));
}
static void snd_trident_notify_pcm_change(struct snd_trident *trident,
struct snd_trident_pcm_mixer *tmix,
int num, int activate)
{
snd_trident_notify_pcm_change1(trident->card, trident->ctl_vol, num, activate);
snd_trident_notify_pcm_change1(trident->card, trident->ctl_pan, num, activate);
snd_trident_notify_pcm_change1(trident->card, trident->ctl_rvol, num, activate);
snd_trident_notify_pcm_change1(trident->card, trident->ctl_cvol, num, activate);
}
static int snd_trident_pcm_mixer_build(struct snd_trident *trident,
struct snd_trident_voice *voice,
struct snd_pcm_substream *substream)
{
struct snd_trident_pcm_mixer *tmix;
if (snd_BUG_ON(!trident || !voice || !substream))
return -EINVAL;
tmix = &trident->pcm_mixer[substream->number];
tmix->voice = voice;
tmix->vol = T4D_DEFAULT_PCM_VOL;
tmix->pan = T4D_DEFAULT_PCM_PAN;
tmix->rvol = T4D_DEFAULT_PCM_RVOL;
tmix->cvol = T4D_DEFAULT_PCM_CVOL;
snd_trident_notify_pcm_change(trident, tmix, substream->number, 1);
return 0;
}
static int snd_trident_pcm_mixer_free(struct snd_trident *trident, struct snd_trident_voice *voice, struct snd_pcm_substream *substream)
{
struct snd_trident_pcm_mixer *tmix;
if (snd_BUG_ON(!trident || !substream))
return -EINVAL;
tmix = &trident->pcm_mixer[substream->number];
tmix->voice = NULL;
snd_trident_notify_pcm_change(trident, tmix, substream->number, 0);
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_mixer
Description: This routine registers the 4DWave device for mixer support.
Parameters: trident - pointer to target device class for 4DWave.
Returns: 0 on success or a negative error code
---------------------------------------------------------------------------*/
static int __devinit snd_trident_mixer(struct snd_trident * trident, int pcm_spdif_device)
{
struct snd_ac97_template _ac97;
struct snd_card *card = trident->card;
struct snd_kcontrol *kctl;
struct snd_ctl_elem_value *uctl;
int idx, err, retries = 2;
static struct snd_ac97_bus_ops ops = {
.write = snd_trident_codec_write,
.read = snd_trident_codec_read,
};
uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
if (!uctl)
return -ENOMEM;
if ((err = snd_ac97_bus(trident->card, 0, &ops, NULL, &trident->ac97_bus)) < 0)
goto __out;
memset(&_ac97, 0, sizeof(_ac97));
_ac97.private_data = trident;
trident->ac97_detect = 1;
__again:
if ((err = snd_ac97_mixer(trident->ac97_bus, &_ac97, &trident->ac97)) < 0) {
if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
if ((err = snd_trident_sis_reset(trident)) < 0)
goto __out;
if (retries-- > 0)
goto __again;
err = -EIO;
}
goto __out;
}
/* secondary codec? */
if (trident->device == TRIDENT_DEVICE_ID_SI7018 &&
(inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL)) & SI_AC97_PRIMARY_READY) != 0) {
_ac97.num = 1;
err = snd_ac97_mixer(trident->ac97_bus, &_ac97, &trident->ac97_sec);
if (err < 0)
snd_printk(KERN_ERR "SI7018: the secondary codec - invalid access\n");
#if 0 // only for my testing purpose --jk
{
struct snd_ac97 *mc97;
err = snd_ac97_modem(trident->card, &_ac97, &mc97);
if (err < 0)
snd_printk(KERN_ERR "snd_ac97_modem returned error %i\n", err);
}
#endif
}
trident->ac97_detect = 0;
if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_trident_vol_wave_control, trident))) < 0)
goto __out;
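/* apply the zero-initialised control value so the hardware register matches the control state */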
kctl->put(kctl, uctl);
if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_trident_vol_music_control, trident))) < 0)
goto __out;
kctl->put(kctl, uctl);
outl(trident->musicvol_wavevol = 0x00000000, TRID_REG(trident, T4D_MUSICVOL_WAVEVOL));
} else {
outl(trident->musicvol_wavevol = 0xffff0000, TRID_REG(trident, T4D_MUSICVOL_WAVEVOL));
}
for (idx = 0; idx < 32; idx++) {
struct snd_trident_pcm_mixer *tmix;
tmix = &trident->pcm_mixer[idx];
tmix->voice = NULL;
}
if ((trident->ctl_vol = snd_ctl_new1(&snd_trident_pcm_vol_control, trident)) == NULL)
goto __nomem;
if ((err = snd_ctl_add(card, trident->ctl_vol)))
goto __out;
if ((trident->ctl_pan = snd_ctl_new1(&snd_trident_pcm_pan_control, trident)) == NULL)
goto __nomem;
if ((err = snd_ctl_add(card, trident->ctl_pan)))
goto __out;
if ((trident->ctl_rvol = snd_ctl_new1(&snd_trident_pcm_rvol_control, trident)) == NULL)
goto __nomem;
if ((err = snd_ctl_add(card, trident->ctl_rvol)))
goto __out;
if ((trident->ctl_cvol = snd_ctl_new1(&snd_trident_pcm_cvol_control, trident)) == NULL)
goto __nomem;
if ((err = snd_ctl_add(card, trident->ctl_cvol)))
goto __out;
if (trident->device == TRIDENT_DEVICE_ID_NX) {
if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_trident_ac97_rear_control, trident))) < 0)
goto __out;
kctl->put(kctl, uctl);
}
if (trident->device == TRIDENT_DEVICE_ID_NX || trident->device == TRIDENT_DEVICE_ID_SI7018) {
kctl = snd_ctl_new1(&snd_trident_spdif_control, trident);
if (kctl == NULL) {
err = -ENOMEM;
goto __out;
}
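/* push the IEC958 switch index past any codec-provided IEC958 controls to avoid an id collision */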
if (trident->ac97->ext_id & AC97_EI_SPDIF)
kctl->id.index++;
if (trident->ac97_sec && (trident->ac97_sec->ext_id & AC97_EI_SPDIF))
kctl->id.index++;
idx = kctl->id.index;
if ((err = snd_ctl_add(card, kctl)) < 0)
goto __out;
kctl->put(kctl, uctl);
kctl = snd_ctl_new1(&snd_trident_spdif_default, trident);
if (kctl == NULL) {
err = -ENOMEM;
goto __out;
}
kctl->id.index = idx;
kctl->id.device = pcm_spdif_device;
if ((err = snd_ctl_add(card, kctl)) < 0)
goto __out;
kctl = snd_ctl_new1(&snd_trident_spdif_mask, trident);
if (kctl == NULL) {
err = -ENOMEM;
goto __out;
}
kctl->id.index = idx;
kctl->id.device = pcm_spdif_device;
if ((err = snd_ctl_add(card, kctl)) < 0)
goto __out;
kctl = snd_ctl_new1(&snd_trident_spdif_stream, trident);
if (kctl == NULL) {
err = -ENOMEM;
goto __out;
}
kctl->id.index = idx;
kctl->id.device = pcm_spdif_device;
if ((err = snd_ctl_add(card, kctl)) < 0)
goto __out;
trident->spdif_pcm_ctl = kctl;
}
err = 0;
goto __out;
__nomem:
err = -ENOMEM;
__out:
kfree(uctl);
return err;
}
/*
* gameport interface
*/
#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
static unsigned char snd_trident_gameport_read(struct gameport *gameport)
{
struct snd_trident *chip = gameport_get_port_data(gameport);
if (snd_BUG_ON(!chip))
return 0;
return inb(TRID_REG(chip, GAMEPORT_LEGACY));
}
static void snd_trident_gameport_trigger(struct gameport *gameport)
{
struct snd_trident *chip = gameport_get_port_data(gameport);
if (snd_BUG_ON(!chip))
return;
outb(0xff, TRID_REG(chip, GAMEPORT_LEGACY));
}
static int snd_trident_gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons)
{
struct snd_trident *chip = gameport_get_port_data(gameport);
int i;
if (snd_BUG_ON(!chip))
return 0;
*buttons = (~inb(TRID_REG(chip, GAMEPORT_LEGACY)) >> 4) & 0xf;
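/* read the four axis counters; an all-ones reading is treated as 'axis not connected' */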
for (i = 0; i < 4; i++) {
axes[i] = inw(TRID_REG(chip, GAMEPORT_AXES + i * 2));
if (axes[i] == 0xffff) axes[i] = -1;
}
return 0;
}
static int snd_trident_gameport_open(struct gameport *gameport, int mode)
{
struct snd_trident *chip = gameport_get_port_data(gameport);
if (snd_BUG_ON(!chip))
return 0;
switch (mode) {
case GAMEPORT_MODE_COOKED:
outb(GAMEPORT_MODE_ADC, TRID_REG(chip, GAMEPORT_GCR));
msleep(20);
return 0;
case GAMEPORT_MODE_RAW:
outb(0, TRID_REG(chip, GAMEPORT_GCR));
return 0;
default:
return -1;
}
}
int __devinit snd_trident_create_gameport(struct snd_trident *chip)
{
struct gameport *gp;
chip->gameport = gp = gameport_allocate_port();
if (!gp) {
printk(KERN_ERR "trident: cannot allocate memory for gameport\n");
return -ENOMEM;
}
gameport_set_name(gp, "Trident 4DWave");
gameport_set_phys(gp, "pci%s/gameport0", pci_name(chip->pci));
gameport_set_dev_parent(gp, &chip->pci->dev);
gameport_set_port_data(gp, chip);
gp->fuzz = 64;
gp->read = snd_trident_gameport_read;
gp->trigger = snd_trident_gameport_trigger;
gp->cooked_read = snd_trident_gameport_cooked_read;
gp->open = snd_trident_gameport_open;
gameport_register_port(gp);
return 0;
}
static inline void snd_trident_free_gameport(struct snd_trident *chip)
{
if (chip->gameport) {
gameport_unregister_port(chip->gameport);
chip->gameport = NULL;
}
}
#else
int __devinit snd_trident_create_gameport(struct snd_trident *chip) { return -ENOSYS; }
static inline void snd_trident_free_gameport(struct snd_trident *chip) { }
#endif /* CONFIG_GAMEPORT */
/*
* delay for 1 tick
*/
static inline void do_delay(struct snd_trident *chip)
{
schedule_timeout_uninterruptible(1);
}
/*
* SiS reset routine
*/
static int snd_trident_sis_reset(struct snd_trident *trident)
{
unsigned long end_time;
unsigned int i;
int r;
r = trident->in_suspend ? 0 : 2; /* count of retries */
__si7018_retry:
pci_write_config_byte(trident->pci, 0x46, 0x04); /* SOFTWARE RESET */
udelay(100);
pci_write_config_byte(trident->pci, 0x46, 0x00);
udelay(100);
/* disable AC97 GPIO interrupt */
outb(0x00, TRID_REG(trident, SI_AC97_GPIO));
/* initialize serial interface, force cold reset */
i = PCMOUT|SURROUT|CENTEROUT|LFEOUT|SECONDARY_ID|COLD_RESET;
outl(i, TRID_REG(trident, SI_SERIAL_INTF_CTRL));
udelay(1000);
/* remove cold reset */
i &= ~COLD_RESET;
outl(i, TRID_REG(trident, SI_SERIAL_INTF_CTRL));
udelay(2000);
/* wait, until the codec is ready */
end_time = (jiffies + (HZ * 3) / 4) + 1;
do {
if ((inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL)) & SI_AC97_PRIMARY_READY) != 0)
goto __si7018_ok;
do_delay(trident);
} while (time_after_eq(end_time, jiffies));
snd_printk(KERN_ERR "AC'97 codec ready error [0x%x]\n", inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL)));
if (r-- > 0) {
end_time = jiffies + HZ;
do {
do_delay(trident);
} while (time_after_eq(end_time, jiffies));
goto __si7018_retry;
}
__si7018_ok:
/* wait for the second codec */
do {
if ((inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL)) & SI_AC97_SECONDARY_READY) != 0)
break;
do_delay(trident);
} while (time_after_eq(end_time, jiffies));
/* enable 64 channel mode */
outl(BANK_B_EN, TRID_REG(trident, T4D_LFO_GC_CIR));
return 0;
}
/*
* /proc interface
*/
static void snd_trident_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_trident *trident = entry->private_data;
char *s;
switch (trident->device) {
case TRIDENT_DEVICE_ID_SI7018:
s = "SiS 7018 Audio";
break;
case TRIDENT_DEVICE_ID_DX:
s = "Trident 4DWave PCI DX";
break;
case TRIDENT_DEVICE_ID_NX:
s = "Trident 4DWave PCI NX";
break;
default:
s = "???";
}
snd_iprintf(buffer, "%s\n\n", s);
snd_iprintf(buffer, "Spurious IRQs : %d\n", trident->spurious_irq_count);
snd_iprintf(buffer, "Spurious IRQ dlta: %d\n", trident->spurious_irq_max_delta);
if (trident->device == TRIDENT_DEVICE_ID_NX || trident->device == TRIDENT_DEVICE_ID_SI7018)
snd_iprintf(buffer, "IEC958 Mixer Out : %s\n", trident->spdif_ctrl == 0x28 ? "on" : "off");
if (trident->device == TRIDENT_DEVICE_ID_NX) {
snd_iprintf(buffer, "Rear Speakers : %s\n", trident->ac97_ctrl & 0x00000010 ? "on" : "off");
if (trident->tlb.entries) {
snd_iprintf(buffer,"\nVirtual Memory\n");
snd_iprintf(buffer, "Memory Maximum : %d\n", trident->tlb.memhdr->size);
snd_iprintf(buffer, "Memory Used : %d\n", trident->tlb.memhdr->used);
snd_iprintf(buffer, "Memory Free : %d\n", snd_util_mem_avail(trident->tlb.memhdr));
}
}
}
static void __devinit snd_trident_proc_init(struct snd_trident * trident)
{
struct snd_info_entry *entry;
const char *s = "trident";
if (trident->device == TRIDENT_DEVICE_ID_SI7018)
s = "sis7018";
if (! snd_card_proc_new(trident->card, s, &entry))
snd_info_set_text_ops(entry, trident, snd_trident_proc_read);
}
static int snd_trident_dev_free(struct snd_device *device)
{
struct snd_trident *trident = device->device_data;
return snd_trident_free(trident);
}
/*---------------------------------------------------------------------------
snd_trident_tlb_alloc
Description: Allocate and set up the TLB page table on 4D NX.
Each entry has 4 bytes (physical PCI address).
Parameters: trident - pointer to target device class for 4DWave.
Returns: 0 or negative error code
---------------------------------------------------------------------------*/
static int __devinit snd_trident_tlb_alloc(struct snd_trident *trident)
{
int i;
/* TLB array must be aligned to 16kB !!! so we allocate
32kB region and correct offset when necessary */
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci),
2 * SNDRV_TRIDENT_MAX_PAGES * 4, &trident->tlb.buffer) < 0) {
snd_printk(KERN_ERR "trident: unable to allocate TLB buffer\n");
return -ENOMEM;
}
trident->tlb.entries = (unsigned int*)ALIGN((unsigned long)trident->tlb.buffer.area, SNDRV_TRIDENT_MAX_PAGES * 4);
trident->tlb.entries_dmaaddr = ALIGN(trident->tlb.buffer.addr, SNDRV_TRIDENT_MAX_PAGES * 4);
/* allocate shadow TLB page table (virtual addresses) */
trident->tlb.shadow_entries = vmalloc(SNDRV_TRIDENT_MAX_PAGES*sizeof(unsigned long));
if (trident->tlb.shadow_entries == NULL) {
snd_printk(KERN_ERR "trident: unable to allocate shadow TLB entries\n");
return -ENOMEM;
}
/* allocate and setup silent page and initialise TLB entries */
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci),
SNDRV_TRIDENT_PAGE_SIZE, &trident->tlb.silent_page) < 0) {
snd_printk(KERN_ERR "trident: unable to allocate silent page\n");
return -ENOMEM;
}
memset(trident->tlb.silent_page.area, 0, SNDRV_TRIDENT_PAGE_SIZE);
for (i = 0; i < SNDRV_TRIDENT_MAX_PAGES; i++) {
trident->tlb.entries[i] = cpu_to_le32(trident->tlb.silent_page.addr & ~(SNDRV_TRIDENT_PAGE_SIZE-1));
trident->tlb.shadow_entries[i] = (unsigned long)trident->tlb.silent_page.area;
}
/* use emu memory block manager code to manage tlb page allocation */
trident->tlb.memhdr = snd_util_memhdr_new(SNDRV_TRIDENT_PAGE_SIZE * SNDRV_TRIDENT_MAX_PAGES);
if (trident->tlb.memhdr == NULL)
return -ENOMEM;
trident->tlb.memhdr->block_extra_size = sizeof(struct snd_trident_memblk_arg);
return 0;
}
/*
* initialize 4D DX chip
*/
static void snd_trident_stop_all_voices(struct snd_trident *trident)
{
outl(0xffffffff, TRID_REG(trident, T4D_STOP_A));
outl(0xffffffff, TRID_REG(trident, T4D_STOP_B));
outl(0, TRID_REG(trident, T4D_AINTEN_A));
outl(0, TRID_REG(trident, T4D_AINTEN_B));
}
static int snd_trident_4d_dx_init(struct snd_trident *trident)
{
struct pci_dev *pci = trident->pci;
unsigned long end_time;
/* reset the legacy configuration and whole audio/wavetable block */
pci_write_config_dword(pci, 0x40, 0); /* DDMA */
pci_write_config_byte(pci, 0x44, 0); /* ports */
pci_write_config_byte(pci, 0x45, 0); /* Legacy DMA */
pci_write_config_byte(pci, 0x46, 4); /* reset */
udelay(100);
pci_write_config_byte(pci, 0x46, 0); /* release reset */
udelay(100);
/* warm reset of the AC'97 codec */
outl(0x00000001, TRID_REG(trident, DX_ACR2_AC97_COM_STAT));
udelay(100);
outl(0x00000000, TRID_REG(trident, DX_ACR2_AC97_COM_STAT));
/* DAC on, disable SB IRQ and try to force ADC valid signal */
trident->ac97_ctrl = 0x0000004a;
outl(trident->ac97_ctrl, TRID_REG(trident, DX_ACR2_AC97_COM_STAT));
/* wait, until the codec is ready */
end_time = (jiffies + (HZ * 3) / 4) + 1;
do {
if ((inl(TRID_REG(trident, DX_ACR2_AC97_COM_STAT)) & 0x0010) != 0)
goto __dx_ok;
do_delay(trident);
} while (time_after_eq(end_time, jiffies));
snd_printk(KERN_ERR "AC'97 codec ready error\n");
return -EIO;
__dx_ok:
snd_trident_stop_all_voices(trident);
return 0;
}
/*
* initialize 4D NX chip
*/
static int snd_trident_4d_nx_init(struct snd_trident *trident)
{
struct pci_dev *pci = trident->pci;
unsigned long end_time;
/* reset the legacy configuration and whole audio/wavetable block */
pci_write_config_dword(pci, 0x40, 0); /* DDMA */
pci_write_config_byte(pci, 0x44, 0); /* ports */
pci_write_config_byte(pci, 0x45, 0); /* Legacy DMA */
pci_write_config_byte(pci, 0x46, 1); /* reset */
udelay(100);
pci_write_config_byte(pci, 0x46, 0); /* release reset */
udelay(100);
/* warm reset of the AC'97 codec */
outl(0x00000001, TRID_REG(trident, NX_ACR0_AC97_COM_STAT));
udelay(100);
outl(0x00000000, TRID_REG(trident, NX_ACR0_AC97_COM_STAT));
/* wait, until the codec is ready */
end_time = (jiffies + (HZ * 3) / 4) + 1;
do {
if ((inl(TRID_REG(trident, NX_ACR0_AC97_COM_STAT)) & 0x0008) != 0)
goto __nx_ok;
do_delay(trident);
} while (time_after_eq(end_time, jiffies));
snd_printk(KERN_ERR "AC'97 codec ready error [0x%x]\n", inl(TRID_REG(trident, NX_ACR0_AC97_COM_STAT)));
return -EIO;
__nx_ok:
/* DAC on */
trident->ac97_ctrl = 0x00000002;
outl(trident->ac97_ctrl, TRID_REG(trident, NX_ACR0_AC97_COM_STAT));
/* disable SB IRQ */
outl(NX_SB_IRQ_DISABLE, TRID_REG(trident, T4D_MISCINT));
snd_trident_stop_all_voices(trident);
if (trident->tlb.entries != NULL) {
unsigned int i;
/* enable virtual addressing via TLB */
i = trident->tlb.entries_dmaaddr;
i |= 0x00000001;
outl(i, TRID_REG(trident, NX_TLBC));
} else {
outl(0, TRID_REG(trident, NX_TLBC));
}
/* initialize S/PDIF */
outl(trident->spdif_bits, TRID_REG(trident, NX_SPCSTATUS));
outb(trident->spdif_ctrl, TRID_REG(trident, NX_SPCTRL_SPCSO + 3));
return 0;
}
/*
* initialize sis7018 chip
*/
static int snd_trident_sis_init(struct snd_trident *trident)
{
int err;
if ((err = snd_trident_sis_reset(trident)) < 0)
return err;
snd_trident_stop_all_voices(trident);
/* initialize S/PDIF */
outl(trident->spdif_bits, TRID_REG(trident, SI_SPDIF_CS));
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_create
Description: This routine will create the device specific class for
the 4DWave card. It will also perform basic initialization.
Parameters: card - which card to create
pci - interface to PCI bus resource info
pcm_streams - number of PCM playback substreams to create
pcm_spdif_device - PCM device number used for the IEC958 controls
max_wavetable_size - maximum wavetable memory size in kB
rtrident - returned pointer to the 4DWave device class private data
Returns: 0 on success or a negative error code
---------------------------------------------------------------------------*/
int __devinit snd_trident_create(struct snd_card *card,
struct pci_dev *pci,
int pcm_streams,
int pcm_spdif_device,
int max_wavetable_size,
struct snd_trident ** rtrident)
{
struct snd_trident *trident;
int i, err;
struct snd_trident_voice *voice;
struct snd_trident_pcm_mixer *tmix;
static struct snd_device_ops ops = {
.dev_free = snd_trident_dev_free,
};
*rtrident = NULL;
/* enable PCI device */
if ((err = pci_enable_device(pci)) < 0)
return err;
/* check, if we can restrict PCI DMA transfers to 30 bits */
if (pci_set_dma_mask(pci, DMA_BIT_MASK(30)) < 0 ||
pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(30)) < 0) {
snd_printk(KERN_ERR "architecture does not support 30bit PCI busmaster DMA\n");
pci_disable_device(pci);
return -ENXIO;
}
trident = kzalloc(sizeof(*trident), GFP_KERNEL);
if (trident == NULL) {
pci_disable_device(pci);
return -ENOMEM;
}
trident->device = (pci->vendor << 16) | pci->device;
trident->card = card;
trident->pci = pci;
spin_lock_init(&trident->reg_lock);
spin_lock_init(&trident->event_lock);
spin_lock_init(&trident->voice_alloc);
if (pcm_streams < 1)
pcm_streams = 1;
if (pcm_streams > 32)
pcm_streams = 32;
trident->ChanPCM = pcm_streams;
if (max_wavetable_size < 0 )
max_wavetable_size = 0;
trident->synth.max_size = max_wavetable_size * 1024;
trident->irq = -1;
trident->midi_port = TRID_REG(trident, T4D_MPU401_BASE);
pci_set_master(pci);
if ((err = pci_request_regions(pci, "Trident Audio")) < 0) {
kfree(trident);
pci_disable_device(pci);
return err;
}
trident->port = pci_resource_start(pci, 0);
if (request_irq(pci->irq, snd_trident_interrupt, IRQF_SHARED,
KBUILD_MODNAME, trident)) {
snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
snd_trident_free(trident);
return -EBUSY;
}
trident->irq = pci->irq;
/* allocate 16k-aligned TLB for NX cards */
trident->tlb.entries = NULL;
trident->tlb.buffer.area = NULL;
if (trident->device == TRIDENT_DEVICE_ID_NX) {
if ((err = snd_trident_tlb_alloc(trident)) < 0) {
snd_trident_free(trident);
return err;
}
}
trident->spdif_bits = trident->spdif_pcm_bits = SNDRV_PCM_DEFAULT_CON_SPDIF;
/* initialize chip */
switch (trident->device) {
case TRIDENT_DEVICE_ID_DX:
err = snd_trident_4d_dx_init(trident);
break;
case TRIDENT_DEVICE_ID_NX:
err = snd_trident_4d_nx_init(trident);
break;
case TRIDENT_DEVICE_ID_SI7018:
err = snd_trident_sis_init(trident);
break;
default:
snd_BUG();
break;
}
if (err < 0) {
snd_trident_free(trident);
return err;
}
if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, trident, &ops)) < 0) {
snd_trident_free(trident);
return err;
}
if ((err = snd_trident_mixer(trident, pcm_spdif_device)) < 0)
return err;
/* initialise synth voices */
for (i = 0; i < 64; i++) {
voice = &trident->synth.voices[i];
voice->number = i;
voice->trident = trident;
}
/* initialize pcm mixer entries */
for (i = 0; i < 32; i++) {
tmix = &trident->pcm_mixer[i];
tmix->vol = T4D_DEFAULT_PCM_VOL;
tmix->pan = T4D_DEFAULT_PCM_PAN;
tmix->rvol = T4D_DEFAULT_PCM_RVOL;
tmix->cvol = T4D_DEFAULT_PCM_CVOL;
}
snd_trident_enable_eso(trident);
snd_trident_proc_init(trident);
snd_card_set_dev(card, &pci->dev);
*rtrident = trident;
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_free
Description: This routine will free the device specific class for
the 4DWave card.
Parameters: trident - device specific private data for 4DWave card
Returns: 0
---------------------------------------------------------------------------*/
static int snd_trident_free(struct snd_trident *trident)
{
snd_trident_free_gameport(trident);
snd_trident_disable_eso(trident);
// Disable S/PDIF out
if (trident->device == TRIDENT_DEVICE_ID_NX)
outb(0x00, TRID_REG(trident, NX_SPCTRL_SPCSO + 3));
else if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
outl(0, TRID_REG(trident, SI_SERIAL_INTF_CTRL));
}
if (trident->irq >= 0)
free_irq(trident->irq, trident);
if (trident->tlb.buffer.area) {
outl(0, TRID_REG(trident, NX_TLBC));
if (trident->tlb.memhdr)
snd_util_memhdr_free(trident->tlb.memhdr);
if (trident->tlb.silent_page.area)
snd_dma_free_pages(&trident->tlb.silent_page);
vfree(trident->tlb.shadow_entries);
snd_dma_free_pages(&trident->tlb.buffer);
}
pci_release_regions(trident->pci);
pci_disable_device(trident->pci);
kfree(trident);
return 0;
}
/*---------------------------------------------------------------------------
snd_trident_interrupt
Description: ISR for Trident 4DWave device
Parameters: trident - device specific private data for 4DWave card
Problems: It seems that Trident chips can generate the same interrupt
more than once in special cases. Spurious interrupts are
detected by reading the sample timer (T4D_STIMER) and
computing the delta since the last serviced interrupt.
The thresholds were found by trial and error, so this
may not work on all computers. [jaroslav]
Returns: None.
---------------------------------------------------------------------------*/
static irqreturn_t snd_trident_interrupt(int irq, void *dev_id)
{
struct snd_trident *trident = dev_id;
unsigned int audio_int, chn_int, stimer, channel, mask, tmp;
int delta;
struct snd_trident_voice *voice;
audio_int = inl(TRID_REG(trident, T4D_MISCINT));
if ((audio_int & (ADDRESS_IRQ|MPU401_IRQ)) == 0)
return IRQ_NONE;
if (audio_int & ADDRESS_IRQ) {
// get interrupt status for all channels
spin_lock(&trident->reg_lock);
stimer = inl(TRID_REG(trident, T4D_STIMER)) & 0x00ffffff;
chn_int = inl(TRID_REG(trident, T4D_AINT_A));
if (chn_int == 0)
goto __skip1;
outl(chn_int, TRID_REG(trident, T4D_AINT_A)); /* ack */
__skip1:
chn_int = inl(TRID_REG(trident, T4D_AINT_B));
if (chn_int == 0)
goto __skip2;
for (channel = 63; channel >= 32; channel--) {
mask = 1 << (channel&0x1f);
if ((chn_int & mask) == 0)
continue;
voice = &trident->synth.voices[channel];
if (!voice->pcm || voice->substream == NULL) {
outl(mask, TRID_REG(trident, T4D_STOP_B));
continue;
}
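/* ignore interrupts that arrive sooner than this voice's spurious-IRQ threshold since the last one serviced */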
delta = (int)stimer - (int)voice->stimer;
if (delta < 0)
delta = -delta;
if ((unsigned int)delta < voice->spurious_threshold) {
/* do some statistics here */
trident->spurious_irq_count++;
if (trident->spurious_irq_max_delta < (unsigned int)delta)
trident->spurious_irq_max_delta = delta;
continue;
}
voice->stimer = stimer;
if (voice->isync) {
if (!voice->isync3) {
tmp = inw(TRID_REG(trident, T4D_SBBL_SBCL));
if (trident->bDMAStart & 0x40)
tmp >>= 1;
if (tmp > 0)
tmp = voice->isync_max - tmp;
} else {
tmp = inl(TRID_REG(trident, NX_SPCTRL_SPCSO)) & 0x00ffffff;
}
if (tmp < voice->isync_mark) {
if (tmp > 0x10)
tmp = voice->isync_ESO - 7;
else
tmp = voice->isync_ESO + 2;
/* update ESO for IRQ voice to preserve sync */
snd_trident_stop_voice(trident, voice->number);
snd_trident_write_eso_reg(trident, voice, tmp);
snd_trident_start_voice(trident, voice->number);
}
} else if (voice->isync2) {
voice->isync2 = 0;
/* write original ESO and update CSO for IRQ voice to preserve sync */
snd_trident_stop_voice(trident, voice->number);
snd_trident_write_cso_reg(trident, voice, voice->isync_mark);
snd_trident_write_eso_reg(trident, voice, voice->ESO);
snd_trident_start_voice(trident, voice->number);
}
#if 0
if (voice->extra) {
/* update CSO for extra voice to preserve sync */
snd_trident_stop_voice(trident, voice->extra->number);
snd_trident_write_cso_reg(trident, voice->extra, 0);
snd_trident_start_voice(trident, voice->extra->number);
}
#endif
spin_unlock(&trident->reg_lock);
snd_pcm_period_elapsed(voice->substream);
spin_lock(&trident->reg_lock);
}
outl(chn_int, TRID_REG(trident, T4D_AINT_B)); /* ack */
__skip2:
spin_unlock(&trident->reg_lock);
}
if (audio_int & MPU401_IRQ) {
if (trident->rmidi) {
snd_mpu401_uart_interrupt(irq, trident->rmidi->private_data);
} else {
inb(TRID_REG(trident, T4D_MPUR0));
}
}
// outl((ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW), TRID_REG(trident, T4D_MISCINT));
return IRQ_HANDLED;
}
struct snd_trident_voice *snd_trident_alloc_voice(struct snd_trident * trident, int type, int client, int port)
{
struct snd_trident_voice *pvoice;
unsigned long flags;
int idx;
spin_lock_irqsave(&trident->voice_alloc, flags);
if (type == SNDRV_TRIDENT_VOICE_TYPE_PCM) {
idx = snd_trident_allocate_pcm_channel(trident);
if(idx < 0) {
spin_unlock_irqrestore(&trident->voice_alloc, flags);
return NULL;
}
pvoice = &trident->synth.voices[idx];
pvoice->use = 1;
pvoice->pcm = 1;
pvoice->capture = 0;
pvoice->spdif = 0;
pvoice->memblk = NULL;
pvoice->substream = NULL;
spin_unlock_irqrestore(&trident->voice_alloc, flags);
return pvoice;
}
if (type == SNDRV_TRIDENT_VOICE_TYPE_SYNTH) {
idx = snd_trident_allocate_synth_channel(trident);
if(idx < 0) {
spin_unlock_irqrestore(&trident->voice_alloc, flags);
return NULL;
}
pvoice = &trident->synth.voices[idx];
pvoice->use = 1;
pvoice->synth = 1;
pvoice->client = client;
pvoice->port = port;
pvoice->memblk = NULL;
spin_unlock_irqrestore(&trident->voice_alloc, flags);
return pvoice;
}
if (type == SNDRV_TRIDENT_VOICE_TYPE_MIDI) {
}
spin_unlock_irqrestore(&trident->voice_alloc, flags);
return NULL;
}
EXPORT_SYMBOL(snd_trident_alloc_voice);
void snd_trident_free_voice(struct snd_trident * trident, struct snd_trident_voice *voice)
{
unsigned long flags;
void (*private_free)(struct snd_trident_voice *);
void *private_data;
if (voice == NULL || !voice->use)
return;
snd_trident_clear_voices(trident, voice->number, voice->number);
spin_lock_irqsave(&trident->voice_alloc, flags);
private_free = voice->private_free;
private_data = voice->private_data;
voice->private_free = NULL;
voice->private_data = NULL;
if (voice->pcm)
snd_trident_free_pcm_channel(trident, voice->number);
if (voice->synth)
snd_trident_free_synth_channel(trident, voice->number);
voice->use = voice->pcm = voice->synth = voice->midi = 0;
voice->capture = voice->spdif = 0;
voice->sample_ops = NULL;
voice->substream = NULL;
voice->extra = NULL;
spin_unlock_irqrestore(&trident->voice_alloc, flags);
if (private_free)
private_free(voice);
}
EXPORT_SYMBOL(snd_trident_free_voice);
static void snd_trident_clear_voices(struct snd_trident * trident, unsigned short v_min, unsigned short v_max)
{
unsigned int i, val, mask[2] = { 0, 0 };
if (snd_BUG_ON(v_min > 63 || v_max > 63))
return;
for (i = v_min; i <= v_max; i++)
mask[i >> 5] |= 1 << (i & 0x1f);
if (mask[0]) {
outl(mask[0], TRID_REG(trident, T4D_STOP_A));
val = inl(TRID_REG(trident, T4D_AINTEN_A));
outl(val & ~mask[0], TRID_REG(trident, T4D_AINTEN_A));
}
if (mask[1]) {
outl(mask[1], TRID_REG(trident, T4D_STOP_B));
val = inl(TRID_REG(trident, T4D_AINTEN_B));
outl(val & ~mask[1], TRID_REG(trident, T4D_AINTEN_B));
}
}
#ifdef CONFIG_PM
int snd_trident_suspend(struct pci_dev *pci, pm_message_t state)
{
struct snd_card *card = pci_get_drvdata(pci);
struct snd_trident *trident = card->private_data;
trident->in_suspend = 1;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
snd_pcm_suspend_all(trident->pcm);
snd_pcm_suspend_all(trident->foldback);
snd_pcm_suspend_all(trident->spdif);
snd_ac97_suspend(trident->ac97);
snd_ac97_suspend(trident->ac97_sec);
pci_disable_device(pci);
pci_save_state(pci);
pci_set_power_state(pci, pci_choose_state(pci, state));
return 0;
}
int snd_trident_resume(struct pci_dev *pci)
{
struct snd_card *card = pci_get_drvdata(pci);
struct snd_trident *trident = card->private_data;
pci_set_power_state(pci, PCI_D0);
pci_restore_state(pci);
if (pci_enable_device(pci) < 0) {
printk(KERN_ERR "trident: pci_enable_device failed, "
"disabling device\n");
snd_card_disconnect(card);
return -EIO;
}
pci_set_master(pci);
switch (trident->device) {
case TRIDENT_DEVICE_ID_DX:
snd_trident_4d_dx_init(trident);
break;
case TRIDENT_DEVICE_ID_NX:
snd_trident_4d_nx_init(trident);
break;
case TRIDENT_DEVICE_ID_SI7018:
snd_trident_sis_init(trident);
break;
}
snd_ac97_resume(trident->ac97);
snd_ac97_resume(trident->ac97_sec);
/* restore some registers */
outl(trident->musicvol_wavevol, TRID_REG(trident, T4D_MUSICVOL_WAVEVOL));
snd_trident_enable_eso(trident);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
trident->in_suspend = 0;
return 0;
}
#endif /* CONFIG_PM */
| gpl-2.0 |
xXminiWHOOPERxX/xXminiWHOOPERxX-Kernel-m4 | arch/arm/mach-dove/mpp.c | 7665 | 3908 | /*
* arch/arm/mach-dove/mpp.c
*
* MPP functions for Marvell Dove SoCs
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <plat/mpp.h>
#include <mach/dove.h>
#include "mpp.h"
struct dove_mpp_grp {
int start;
int end;
};
/* Map a group to a range of GPIO pins in that group */
static const struct dove_mpp_grp dove_mpp_grp[] = {
[MPP_24_39] = {
.start = 24,
.end = 39,
},
[MPP_40_45] = {
.start = 40,
.end = 45,
},
[MPP_46_51] = {
.start = 46,
.end = 51,
},
[MPP_58_61] = {
.start = 58,
.end = 61,
},
[MPP_62_63] = {
.start = 62,
.end = 63,
},
};
/* Enable gpio for a range of pins. mode should be a combination of
GPIO_OUTPUT_OK | GPIO_INPUT_OK */
static void dove_mpp_gpio_mode(int start, int end, int gpio_mode)
{
int i;
for (i = start; i <= end; i++)
orion_gpio_set_valid(i, gpio_mode);
}
/* Dump all the extra MPP registers. The platform code will dump the
registers for pins 0-23. */
static void dove_mpp_dump_regs(void)
{
pr_debug("PMU_CTRL4_CTRL: %08x\n",
readl(DOVE_MPP_CTRL4_VIRT_BASE));
pr_debug("PMU_MPP_GENERAL_CTRL: %08x\n",
readl(DOVE_PMU_MPP_GENERAL_CTRL));
pr_debug("MPP_GENERAL: %08x\n", readl(DOVE_MPP_GENERAL_VIRT_BASE));
}
static void dove_mpp_cfg_nfc(int sel)
{
u32 mpp_gen_cfg = readl(DOVE_MPP_GENERAL_VIRT_BASE);
mpp_gen_cfg &= ~0x1;
mpp_gen_cfg |= sel;
writel(mpp_gen_cfg, DOVE_MPP_GENERAL_VIRT_BASE);
dove_mpp_gpio_mode(64, 71, GPIO_OUTPUT_OK);
}
static void dove_mpp_cfg_au1(int sel)
{
u32 mpp_ctrl4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
u32 ssp_ctrl1 = readl(DOVE_SSP_CTRL_STATUS_1);
u32 mpp_gen_ctrl = readl(DOVE_MPP_GENERAL_VIRT_BASE);
u32 global_cfg_2 = readl(DOVE_GLOBAL_CONFIG_2);
mpp_ctrl4 &= ~(DOVE_AU1_GPIO_SEL);
ssp_ctrl1 &= ~(DOVE_SSP_ON_AU1);
mpp_gen_ctrl &= ~(DOVE_AU1_SPDIFO_GPIO_EN);
global_cfg_2 &= ~(DOVE_TWSI_OPTION3_GPIO);
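/* start from the non-GPIO (audio/SSP/TWSI) configuration and apply the requested overrides below */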
if (!sel || sel == 0x2)
dove_mpp_gpio_mode(52, 57, 0);
else
dove_mpp_gpio_mode(52, 57, GPIO_OUTPUT_OK | GPIO_INPUT_OK);
if (sel & 0x1) {
global_cfg_2 |= DOVE_TWSI_OPTION3_GPIO;
dove_mpp_gpio_mode(56, 57, 0);
}
if (sel & 0x2) {
mpp_gen_ctrl |= DOVE_AU1_SPDIFO_GPIO_EN;
dove_mpp_gpio_mode(57, 57, GPIO_OUTPUT_OK | GPIO_INPUT_OK);
}
if (sel & 0x4) {
ssp_ctrl1 |= DOVE_SSP_ON_AU1;
dove_mpp_gpio_mode(52, 55, 0);
}
if (sel & 0x8)
mpp_ctrl4 |= DOVE_AU1_GPIO_SEL;
writel(mpp_ctrl4, DOVE_MPP_CTRL4_VIRT_BASE);
writel(ssp_ctrl1, DOVE_SSP_CTRL_STATUS_1);
writel(mpp_gen_ctrl, DOVE_MPP_GENERAL_VIRT_BASE);
writel(global_cfg_2, DOVE_GLOBAL_CONFIG_2);
}
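/*
 * Summary of the "sel" bit handling above (derived from dove_mpp_cfg_au1()
 * itself; added as descriptive documentation only):
 *   bit 0 - route TWSI option 3 onto the group; pins 56-57 are no longer
 *           usable as GPIOs
 *   bit 1 - enable SPDIFO on pin 57, which stays GPIO-capable
 *   bit 2 - hand pins 52-55 to the SSP block (not GPIOs)
 *   bit 3 - set DOVE_AU1_GPIO_SEL in MPP_CTRL4
 * sel == 0 or sel == 0x2 starts from "no GPIOs in the group"; any other
 * value starts from "all of 52-57 GPIO-capable" before the bits above
 * carve pins out again.
 */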
/* Configure the group registers, enabling GPIO if sel indicates the
pin is to be used for GPIO */
static void dove_mpp_conf_grp(unsigned int *mpp_grp_list)
{
u32 mpp_ctrl4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
int gpio_mode;
for ( ; *mpp_grp_list; mpp_grp_list++) {
unsigned int num = MPP_NUM(*mpp_grp_list);
unsigned int sel = MPP_SEL(*mpp_grp_list);
if (num > MPP_GRP_MAX) {
pr_err("dove: invalid MPP GRP number (%u)\n", num);
continue;
}
mpp_ctrl4 &= ~(0x1 << num);
mpp_ctrl4 |= sel << num;
gpio_mode = sel ? GPIO_OUTPUT_OK | GPIO_INPUT_OK : 0;
dove_mpp_gpio_mode(dove_mpp_grp[num].start,
dove_mpp_grp[num].end, gpio_mode);
}
writel(mpp_ctrl4, DOVE_MPP_CTRL4_VIRT_BASE);
}
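/*
 * Worked example for the loop above (illustrative; the concrete group
 * numbering is an assumption): for an entry whose MPP_NUM() is n and whose
 * MPP_SEL() is s, bit n of MPP_CTRL4 is cleared and then set to s. If,
 * say, MPP_24_39 decodes to n = 0 and the board asks for s = 1, bit 0 of
 * MPP_CTRL4 ends up set and pins 24-39 are additionally registered as
 * valid GPIOs via dove_mpp_gpio_mode(24, 39, GPIO_OUTPUT_OK | GPIO_INPUT_OK).
 */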
/* Configure the various MPP pins on Dove */
void __init dove_mpp_conf(unsigned int *mpp_list,
unsigned int *mpp_grp_list,
unsigned int grp_au1_52_57,
unsigned int grp_nfc_64_71)
{
dove_mpp_dump_regs();
/* Use platform code for pins 0-23 */
orion_mpp_conf(mpp_list, 0, MPP_MAX, DOVE_MPP_VIRT_BASE);
dove_mpp_conf_grp(mpp_grp_list);
dove_mpp_cfg_au1(grp_au1_52_57);
dove_mpp_cfg_nfc(grp_nfc_64_71);
dove_mpp_dump_regs();
}
| gpl-2.0 |
nemomobile/kernel-adaptation-n950-n9 | fs/cifs/cache.c | 7921 | 8134 | /*
* fs/cifs/cache.c - CIFS filesystem cache index structure definitions
*
* Copyright (c) 2010 Novell, Inc.
* Author(s): Suresh Jayaraman <sjayaraman@suse.de>
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "fscache.h"
#include "cifs_debug.h"
/*
* CIFS filesystem definition for FS-Cache
*/
struct fscache_netfs cifs_fscache_netfs = {
.name = "cifs",
.version = 0,
};
/*
* Register CIFS for caching with FS-Cache
*/
int cifs_fscache_register(void)
{
return fscache_register_netfs(&cifs_fscache_netfs);
}
/*
* Unregister CIFS for caching
*/
void cifs_fscache_unregister(void)
{
fscache_unregister_netfs(&cifs_fscache_netfs);
}
/*
* Key layout of CIFS server cache index object
*/
struct cifs_server_key {
uint16_t family; /* address family */
__be16 port; /* IP port */
union {
struct in_addr ipv4_addr;
struct in6_addr ipv6_addr;
} addr[0];
};
/*
* Server object keyed by {IPaddress,port,family} tuple
*/
static uint16_t cifs_server_get_key(const void *cookie_netfs_data,
void *buffer, uint16_t maxbuf)
{
const struct TCP_Server_Info *server = cookie_netfs_data;
const struct sockaddr *sa = (struct sockaddr *) &server->dstaddr;
const struct sockaddr_in *addr = (struct sockaddr_in *) sa;
const struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) sa;
struct cifs_server_key *key = buffer;
uint16_t key_len = sizeof(struct cifs_server_key);
memset(key, 0, key_len);
/*
* Should not be a problem as sin_family/sin6_family overlays
* sa_family field
*/
switch (sa->sa_family) {
case AF_INET:
key->family = sa->sa_family;
key->port = addr->sin_port;
key->addr[0].ipv4_addr = addr->sin_addr;
key_len += sizeof(key->addr[0].ipv4_addr);
break;
case AF_INET6:
key->family = sa->sa_family;
key->port = addr6->sin6_port;
key->addr[0].ipv6_addr = addr6->sin6_addr;
key_len += sizeof(key->addr[0].ipv6_addr);
break;
default:
cERROR(1, "Unknown network family '%d'", sa->sa_family);
key_len = 0;
break;
}
return key_len;
}
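/*
 * Illustrative key sizes for the routine above (assuming no padding is
 * inserted in struct cifs_server_key, which holds two 16-bit fields):
 *   AF_INET:  4 bytes of header + 4 bytes of IPv4 address  -> key_len 8
 *   AF_INET6: 4 bytes of header + 16 bytes of IPv6 address -> key_len 20
 * An unknown address family logs an error and returns a zero-length key.
 */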
/*
* Server object for FS-Cache
*/
const struct fscache_cookie_def cifs_fscache_server_index_def = {
.name = "CIFS.server",
.type = FSCACHE_COOKIE_TYPE_INDEX,
.get_key = cifs_server_get_key,
};
/*
* Auxiliary data attached to CIFS superblock within the cache
*/
struct cifs_fscache_super_auxdata {
u64 resource_id; /* unique server resource id */
};
static char *extract_sharename(const char *treename)
{
const char *src;
char *delim, *dst;
int len;
/* skip double chars at the beginning */
src = treename + 2;
/* share name is always preceded by '\\' now */
delim = strchr(src, '\\');
if (!delim)
return ERR_PTR(-EINVAL);
delim++;
len = strlen(delim);
/* caller has to free the memory */
dst = kstrndup(delim, len, GFP_KERNEL);
if (!dst)
return ERR_PTR(-ENOMEM);
return dst;
}
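/*
 * Usage example (illustrative): for a tree name of the form
 * "\\server\music", extract_sharename() skips the leading "\\", finds the
 * '\' separating server and share, and returns a freshly allocated copy of
 * "music". The caller owns the returned buffer and must kfree() it.
 */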
/*
* Superblock object currently keyed by share name
*/
static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer,
uint16_t maxbuf)
{
const struct cifs_tcon *tcon = cookie_netfs_data;
char *sharename;
uint16_t len;
sharename = extract_sharename(tcon->treeName);
if (IS_ERR(sharename)) {
cFYI(1, "%s: couldn't extract sharename\n", __func__);
sharename = NULL;
return 0;
}
len = strlen(sharename);
if (len > maxbuf)
return 0;
memcpy(buffer, sharename, len);
kfree(sharename);
return len;
}
static uint16_t
cifs_fscache_super_get_aux(const void *cookie_netfs_data, void *buffer,
uint16_t maxbuf)
{
struct cifs_fscache_super_auxdata auxdata;
const struct cifs_tcon *tcon = cookie_netfs_data;
memset(&auxdata, 0, sizeof(auxdata));
auxdata.resource_id = tcon->resource_id;
if (maxbuf > sizeof(auxdata))
maxbuf = sizeof(auxdata);
memcpy(buffer, &auxdata, maxbuf);
return maxbuf;
}
static enum
fscache_checkaux cifs_fscache_super_check_aux(void *cookie_netfs_data,
const void *data,
uint16_t datalen)
{
struct cifs_fscache_super_auxdata auxdata;
const struct cifs_tcon *tcon = cookie_netfs_data;
if (datalen != sizeof(auxdata))
return FSCACHE_CHECKAUX_OBSOLETE;
memset(&auxdata, 0, sizeof(auxdata));
auxdata.resource_id = tcon->resource_id;
if (memcmp(data, &auxdata, datalen) != 0)
return FSCACHE_CHECKAUX_OBSOLETE;
return FSCACHE_CHECKAUX_OKAY;
}
/*
* Superblock object for FS-Cache
*/
const struct fscache_cookie_def cifs_fscache_super_index_def = {
.name = "CIFS.super",
.type = FSCACHE_COOKIE_TYPE_INDEX,
.get_key = cifs_super_get_key,
.get_aux = cifs_fscache_super_get_aux,
.check_aux = cifs_fscache_super_check_aux,
};
/*
* Auxiliary data attached to CIFS inode within the cache
*/
struct cifs_fscache_inode_auxdata {
struct timespec last_write_time;
struct timespec last_change_time;
u64 eof;
};
static uint16_t cifs_fscache_inode_get_key(const void *cookie_netfs_data,
void *buffer, uint16_t maxbuf)
{
const struct cifsInodeInfo *cifsi = cookie_netfs_data;
uint16_t keylen;
/* use the UniqueId as the key */
keylen = sizeof(cifsi->uniqueid);
if (keylen > maxbuf)
keylen = 0;
else
memcpy(buffer, &cifsi->uniqueid, keylen);
return keylen;
}
static void
cifs_fscache_inode_get_attr(const void *cookie_netfs_data, uint64_t *size)
{
const struct cifsInodeInfo *cifsi = cookie_netfs_data;
*size = cifsi->vfs_inode.i_size;
}
static uint16_t
cifs_fscache_inode_get_aux(const void *cookie_netfs_data, void *buffer,
uint16_t maxbuf)
{
struct cifs_fscache_inode_auxdata auxdata;
const struct cifsInodeInfo *cifsi = cookie_netfs_data;
memset(&auxdata, 0, sizeof(auxdata));
auxdata.eof = cifsi->server_eof;
auxdata.last_write_time = cifsi->vfs_inode.i_mtime;
auxdata.last_change_time = cifsi->vfs_inode.i_ctime;
if (maxbuf > sizeof(auxdata))
maxbuf = sizeof(auxdata);
memcpy(buffer, &auxdata, maxbuf);
return maxbuf;
}
static enum
fscache_checkaux cifs_fscache_inode_check_aux(void *cookie_netfs_data,
const void *data,
uint16_t datalen)
{
struct cifs_fscache_inode_auxdata auxdata;
struct cifsInodeInfo *cifsi = cookie_netfs_data;
if (datalen != sizeof(auxdata))
return FSCACHE_CHECKAUX_OBSOLETE;
memset(&auxdata, 0, sizeof(auxdata));
auxdata.eof = cifsi->server_eof;
auxdata.last_write_time = cifsi->vfs_inode.i_mtime;
auxdata.last_change_time = cifsi->vfs_inode.i_ctime;
if (memcmp(data, &auxdata, datalen) != 0)
return FSCACHE_CHECKAUX_OBSOLETE;
return FSCACHE_CHECKAUX_OKAY;
}
static void cifs_fscache_inode_now_uncached(void *cookie_netfs_data)
{
struct cifsInodeInfo *cifsi = cookie_netfs_data;
struct pagevec pvec;
pgoff_t first;
int loop, nr_pages;
pagevec_init(&pvec, 0);
first = 0;
cFYI(1, "%s: cifs inode 0x%p now uncached", __func__, cifsi);
for (;;) {
nr_pages = pagevec_lookup(&pvec,
cifsi->vfs_inode.i_mapping, first,
PAGEVEC_SIZE - pagevec_count(&pvec));
if (!nr_pages)
break;
for (loop = 0; loop < nr_pages; loop++)
ClearPageFsCache(pvec.pages[loop]);
first = pvec.pages[nr_pages - 1]->index + 1;
pvec.nr = nr_pages;
pagevec_release(&pvec);
cond_resched();
}
}
const struct fscache_cookie_def cifs_fscache_inode_object_def = {
.name = "CIFS.uniqueid",
.type = FSCACHE_COOKIE_TYPE_DATAFILE,
.get_key = cifs_fscache_inode_get_key,
.get_attr = cifs_fscache_inode_get_attr,
.get_aux = cifs_fscache_inode_get_aux,
.check_aux = cifs_fscache_inode_check_aux,
.now_uncached = cifs_fscache_inode_now_uncached,
};
| gpl-2.0 |
alexax66/LP-Kernel-SM-E500H | net/ipv4/tcp_vegas.c | 8945 | 9825 | /*
* TCP Vegas congestion control
*
* This is based on the congestion detection/avoidance scheme described in
* Lawrence S. Brakmo and Larry L. Peterson.
* "TCP Vegas: End to end congestion avoidance on a global internet."
* IEEE Journal on Selected Areas in Communication, 13(8):1465--1480,
* October 1995. Available from:
* ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps
*
* See http://www.cs.arizona.edu/xkernel/ for their implementation.
* The main aspects that distinguish this implementation from the
* Arizona Vegas implementation are:
* o We do not change the loss detection or recovery mechanisms of
* Linux in any way. Linux already recovers from losses quite well,
* using fine-grained timers, NewReno, and FACK.
* o To avoid the performance penalty imposed by increasing cwnd
* only every-other RTT during slow start, we increase during
* every RTT during slow start, just like Reno.
* o Largely to allow continuous cwnd growth during slow start,
* we use the rate at which ACKs come back as the "actual"
* rate, rather than the rate at which data is sent.
* o To speed convergence to the right rate, we set the cwnd
* to achieve the right ("actual") rate when we exit slow start.
* o To filter out the noise caused by delayed ACKs, we use the
* minimum RTT sample observed during the last RTT to calculate
* the actual rate.
* o When the sender re-starts from idle, it waits until it has
* received ACKs for an entire flight of new data before making
* a cwnd adjustment decision. The original Vegas implementation
* assumed senders never went idle.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>
#include "tcp_vegas.h"
static int alpha = 2;
static int beta = 4;
static int gamma = 1;
module_param(alpha, int, 0644);
MODULE_PARM_DESC(alpha, "lower bound of packets in network");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "upper bound of packets in network");
module_param(gamma, int, 0644);
MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
/* There are several situations when we must "re-start" Vegas:
*
* o when a connection is established
* o after an RTO
* o after fast recovery
* o when we send a packet and there is no outstanding
* unacknowledged data (restarting an idle connection)
*
* In these circumstances we cannot do a Vegas calculation at the
* end of the first RTT, because any calculation we do is using
* stale info -- both the saved cwnd and congestion feedback are
* stale.
*
* Instead we must wait until the completion of an RTT during
* which we actually receive ACKs.
*/
static void vegas_enable(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct vegas *vegas = inet_csk_ca(sk);
/* Begin taking Vegas samples next time we send something. */
vegas->doing_vegas_now = 1;
/* Set the beginning of the next send window. */
vegas->beg_snd_nxt = tp->snd_nxt;
vegas->cntRTT = 0;
vegas->minRTT = 0x7fffffff;
}
/* Stop taking Vegas samples for now. */
static inline void vegas_disable(struct sock *sk)
{
struct vegas *vegas = inet_csk_ca(sk);
vegas->doing_vegas_now = 0;
}
void tcp_vegas_init(struct sock *sk)
{
struct vegas *vegas = inet_csk_ca(sk);
vegas->baseRTT = 0x7fffffff;
vegas_enable(sk);
}
EXPORT_SYMBOL_GPL(tcp_vegas_init);
/* Do RTT sampling needed for Vegas.
* Basically we:
* o min-filter RTT samples from within an RTT to get the current
* propagation delay + queuing delay (we are min-filtering to try to
* avoid the effects of delayed ACKs)
* o min-filter RTT samples from a much longer window (forever for now)
* to find the propagation delay (baseRTT)
*/
void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
struct vegas *vegas = inet_csk_ca(sk);
u32 vrtt;
if (rtt_us < 0)
return;
/* Never allow zero rtt or baseRTT */
vrtt = rtt_us + 1;
/* Filter to find propagation delay: */
if (vrtt < vegas->baseRTT)
vegas->baseRTT = vrtt;
/* Find the min RTT during the last RTT to find
* the current prop. delay + queuing delay:
*/
vegas->minRTT = min(vegas->minRTT, vrtt);
vegas->cntRTT++;
}
EXPORT_SYMBOL_GPL(tcp_vegas_pkts_acked);
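/*
 * Worked example for the sampling above (numbers are illustrative only):
 * suppose one RTT's worth of ACKs yields rtt_us samples of 48000, 53000
 * and 50000. minRTT ends up 48001 (the +1 guards against a zero value)
 * and cntRTT is 3. baseRTT only ever decreases, so it keeps tracking the
 * smallest sample seen over the whole connection - an estimate of the
 * propagation delay with no queueing.
 */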
void tcp_vegas_state(struct sock *sk, u8 ca_state)
{
if (ca_state == TCP_CA_Open)
vegas_enable(sk);
else
vegas_disable(sk);
}
EXPORT_SYMBOL_GPL(tcp_vegas_state);
/*
* If the connection is idle and we are restarting,
* then we don't want to do any Vegas calculations
* until we get fresh RTT samples. So when we
* restart, we reset our Vegas state to a clean
* slate. After we get acks for this flight of
* packets, _then_ we can make Vegas calculations
* again.
*/
void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
if (event == CA_EVENT_CWND_RESTART ||
event == CA_EVENT_TX_START)
tcp_vegas_init(sk);
}
EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
{
return min(tp->snd_ssthresh, tp->snd_cwnd-1);
}
static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
struct tcp_sock *tp = tcp_sk(sk);
struct vegas *vegas = inet_csk_ca(sk);
if (!vegas->doing_vegas_now) {
tcp_reno_cong_avoid(sk, ack, in_flight);
return;
}
if (after(ack, vegas->beg_snd_nxt)) {
/* Do the Vegas once-per-RTT cwnd adjustment. */
/* Save the extent of the current window so we can use this
* at the end of the next RTT.
*/
vegas->beg_snd_nxt = tp->snd_nxt;
/* We do the Vegas calculations only if we got enough RTT
* samples that we can be reasonably sure that we got
* at least one RTT sample that wasn't from a delayed ACK.
* If we only had 2 samples total,
* then that means we're getting only 1 ACK per RTT, which
* means they're almost certainly delayed ACKs.
* If we have 3 samples, we should be OK.
*/
if (vegas->cntRTT <= 2) {
/* We don't have enough RTT samples to do the Vegas
* calculation, so we'll behave like Reno.
*/
tcp_reno_cong_avoid(sk, ack, in_flight);
} else {
u32 rtt, diff;
u64 target_cwnd;
/* We have enough RTT samples, so, using the Vegas
* algorithm, we determine if we should increase or
* decrease cwnd, and by how much.
*/
/* Pluck out the RTT we are using for the Vegas
* calculations. This is the min RTT seen during the
* last RTT. Taking the min filters out the effects
* of delayed ACKs, at the cost of noticing congestion
* a bit later.
*/
rtt = vegas->minRTT;
/* Calculate the cwnd we should have, if we weren't
* going too fast.
*
* This is:
* (actual rate in segments) * baseRTT
*/
target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
/* Calculate the difference between the window we had,
* and the window we would like to have. This quantity
* is the "Diff" from the Arizona Vegas papers.
*/
diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
/* Going too fast. Time to slow down
* and switch to congestion avoidance.
*/
/* Set cwnd to match the actual rate
* exactly:
* cwnd = (actual rate) * baseRTT
* Then we add 1 because the integer
* truncation robs us of full link
* utilization.
*/
tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
/* Slow start. */
tcp_slow_start(tp);
} else {
/* Congestion avoidance. */
/* Figure out where we would like cwnd
* to be.
*/
if (diff > beta) {
/* The old window was too fast, so
* we slow down.
*/
tp->snd_cwnd--;
tp->snd_ssthresh
= tcp_vegas_ssthresh(tp);
} else if (diff < alpha) {
/* We don't have enough extra packets
* in the network, so speed up.
*/
tp->snd_cwnd++;
} else {
/* Sending just as fast as we
* should be.
*/
}
}
if (tp->snd_cwnd < 2)
tp->snd_cwnd = 2;
else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
tp->snd_cwnd = tp->snd_cwnd_clamp;
tp->snd_ssthresh = tcp_current_ssthresh(sk);
}
/* Wipe the slate clean for the next RTT. */
vegas->cntRTT = 0;
vegas->minRTT = 0x7fffffff;
}
/* Use normal slow start */
else if (tp->snd_cwnd <= tp->snd_ssthresh)
tcp_slow_start(tp);
}
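/*
 * Worked example for the once-per-RTT adjustment above (illustrative
 * numbers): with baseRTT = 100000 us, minRTT for the last RTT = 125000 us
 * and snd_cwnd = 20 segments,
 *
 *	target_cwnd = 20 * 100000 / 125000 = 16
 *	diff        = 20 * (125000 - 100000) / 100000 = 5
 *
 * i.e. roughly 5 segments are sitting in queues. In congestion avoidance
 * with the default beta = 4, diff > beta, so cwnd is reduced by one; had
 * diff been below alpha = 2, cwnd would have been increased instead.
 */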
/* Extract info for Tcp socket info provided via netlink. */
void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
{
const struct vegas *ca = inet_csk_ca(sk);
if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
struct tcpvegas_info info = {
.tcpv_enabled = ca->doing_vegas_now,
.tcpv_rttcnt = ca->cntRTT,
.tcpv_rtt = ca->baseRTT,
.tcpv_minrtt = ca->minRTT,
};
nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
}
}
EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
static struct tcp_congestion_ops tcp_vegas __read_mostly = {
.flags = TCP_CONG_RTT_STAMP,
.init = tcp_vegas_init,
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_vegas_cong_avoid,
.min_cwnd = tcp_reno_min_cwnd,
.pkts_acked = tcp_vegas_pkts_acked,
.set_state = tcp_vegas_state,
.cwnd_event = tcp_vegas_cwnd_event,
.get_info = tcp_vegas_get_info,
.owner = THIS_MODULE,
.name = "vegas",
};
static int __init tcp_vegas_register(void)
{
BUILD_BUG_ON(sizeof(struct vegas) > ICSK_CA_PRIV_SIZE);
tcp_register_congestion_control(&tcp_vegas);
return 0;
}
static void __exit tcp_vegas_unregister(void)
{
tcp_unregister_congestion_control(&tcp_vegas);
}
module_init(tcp_vegas_register);
module_exit(tcp_vegas_unregister);
MODULE_AUTHOR("Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Vegas");
| gpl-2.0 |
Oebbler/elite-boeffla-kernel-cm12.1-i9300 | drivers/net/irda/litelink-sir.c | 12529 | 5436 | /*********************************************************************
*
* Filename: litelink.c
* Version: 1.1
* Description: Driver for the Parallax LiteLink dongle
* Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Fri May 7 12:50:33 1999
* Modified at: Fri Dec 17 09:14:23 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1999 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*
********************************************************************/
/*
* Modified at: Thu Jan 15 2003
* Modified by: Eugene Crosser <crosser@average.org>
*
* Convert to "new" IRDA infrastructure for kernel 2.6
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <net/irda/irda.h>
#include "sir-dev.h"
#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
#define MAX_DELAY 10000 /* 1 ms */
static int litelink_open(struct sir_dev *dev);
static int litelink_close(struct sir_dev *dev);
static int litelink_change_speed(struct sir_dev *dev, unsigned speed);
static int litelink_reset(struct sir_dev *dev);
/* These are the baudrates supported - 9600 must be last one! */
static unsigned baud_rates[] = { 115200, 57600, 38400, 19200, 9600 };
static struct dongle_driver litelink = {
.owner = THIS_MODULE,
.driver_name = "Parallax LiteLink",
.type = IRDA_LITELINK_DONGLE,
.open = litelink_open,
.close = litelink_close,
.reset = litelink_reset,
.set_speed = litelink_change_speed,
};
static int __init litelink_sir_init(void)
{
return irda_register_dongle(&litelink);
}
static void __exit litelink_sir_cleanup(void)
{
irda_unregister_dongle(&litelink);
}
static int litelink_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
IRDA_DEBUG(2, "%s()\n", __func__);
/* Power up dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
/* Set the speeds we can accept */
qos->baud_rate.bits &= IR_115200|IR_57600|IR_38400|IR_19200|IR_9600;
qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
irda_qos_bits_to_value(qos);
/* irda thread waits 50 msec for power settling */
return 0;
}
static int litelink_close(struct sir_dev *dev)
{
IRDA_DEBUG(2, "%s()\n", __func__);
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
return 0;
}
/*
* Function litelink_change_speed (task)
*
* Change speed of the Litelink dongle. To cycle through the available
* baud rates, pulse RTS low for a few ms.
*/
static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
{
int i;
IRDA_DEBUG(2, "%s()\n", __func__);
/* dongle already reset by irda-thread - current speed (dongle and
* port) is the default speed (115200 for litelink!)
*/
/* Cycle through available baudrates until we reach the correct one */
for (i = 0; baud_rates[i] != speed; i++) {
/* end-of-list reached due to invalid speed request */
if (baud_rates[i] == 9600)
break;
/* Set DTR, clear RTS */
sirdev_set_dtr_rts(dev, FALSE, TRUE);
/* Sleep a minimum of 15 us */
udelay(MIN_DELAY);
/* Set DTR, Set RTS */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
/* Sleep a minimum of 15 us */
udelay(MIN_DELAY);
}
dev->speed = baud_rates[i];
/* invalid baudrate should not happen - but if it does, we return -EINVAL
* and leave the dongle configured for 9600 so the stack has a chance to recover
*/
return (dev->speed == speed) ? 0 : -EINVAL;
}
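/*
 * Worked example for the loop above (illustrative): the dongle powers up
 * at 115200 baud, the first entry of baud_rates[]. A request for 38400
 * pulses RTS low twice (115200 -> 57600 -> 38400) before the table entry
 * matches, and dev->speed becomes 38400. A speed that is not in the table
 * walks the whole list, leaves both the dongle and dev->speed at the 9600
 * fallback, and the function returns -EINVAL.
 */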
/*
* Function litelink_reset (task)
*
* Reset the Litelink type dongle.
*
*/
static int litelink_reset(struct sir_dev *dev)
{
IRDA_DEBUG(2, "%s()\n", __func__);
/* probably the power-up can be dropped here, but with only
* 15 usec delay it's not worth the risk unless somebody with
* the hardware confirms it doesn't break anything...
*/
/* Power on dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
/* Sleep a minimum of 15 us */
udelay(MIN_DELAY);
/* Clear RTS to reset dongle */
sirdev_set_dtr_rts(dev, TRUE, FALSE);
/* Sleep a minimum of 15 us */
udelay(MIN_DELAY);
/* Go back to normal mode */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
/* Sleep a minimum of 15 us */
udelay(MIN_DELAY);
/* This dongle's speed defaults to 115200 bps */
dev->speed = 115200;
return 0;
}
MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Parallax Litelink dongle driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("irda-dongle-5"); /* IRDA_LITELINK_DONGLE */
/*
* Function init_module (void)
*
* Initialize Litelink module
*
*/
module_init(litelink_sir_init);
/*
* Function cleanup_module (void)
*
* Cleanup Litelink module
*
*/
module_exit(litelink_sir_cleanup);
| gpl-2.0 |
popdog123/popdog123-kernel | drivers/net/irda/mcp2120-sir.c | 13297 | 5867 | /*********************************************************************
*
*
* Filename: mcp2120.c
* Version: 1.0
* Description: Implementation for the MCP2120 (Microchip)
* Status: Experimental.
* Author: Felix Tang (tangf@eyetap.org)
* Created at: Sun Mar 31 19:32:12 EST 2002
* Based on code by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 2002 Felix Tang, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
********************************************************************/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <net/irda/irda.h>
#include "sir-dev.h"
static int mcp2120_reset(struct sir_dev *dev);
static int mcp2120_open(struct sir_dev *dev);
static int mcp2120_close(struct sir_dev *dev);
static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed);
#define MCP2120_9600 0x87
#define MCP2120_19200 0x8B
#define MCP2120_38400 0x85
#define MCP2120_57600 0x83
#define MCP2120_115200 0x81
#define MCP2120_COMMIT 0x11
static struct dongle_driver mcp2120 = {
.owner = THIS_MODULE,
.driver_name = "Microchip MCP2120",
.type = IRDA_MCP2120_DONGLE,
.open = mcp2120_open,
.close = mcp2120_close,
.reset = mcp2120_reset,
.set_speed = mcp2120_change_speed,
};
static int __init mcp2120_sir_init(void)
{
return irda_register_dongle(&mcp2120);
}
static void __exit mcp2120_sir_cleanup(void)
{
irda_unregister_dongle(&mcp2120);
}
static int mcp2120_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
IRDA_DEBUG(2, "%s()\n", __func__);
/* No explicit power-on seems to be required here; the reset sequence switches the dongle on anyway */
qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
qos->min_turn_time.bits = 0x01;
irda_qos_bits_to_value(qos);
return 0;
}
static int mcp2120_close(struct sir_dev *dev)
{
IRDA_DEBUG(2, "%s()\n", __func__);
/* Power off dongle */
/* reset and inhibit mcp2120 */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
// sirdev_set_dtr_rts(dev, FALSE, FALSE);
return 0;
}
/*
* Function mcp2120_change_speed (dev, speed)
*
* Set the speed for the MCP2120.
*
*/
#define MCP2120_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED+1)
static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
{
unsigned state = dev->fsm.substate;
unsigned delay = 0;
u8 control[2];
static int ret = 0;
IRDA_DEBUG(2, "%s()\n", __func__);
switch (state) {
case SIRDEV_STATE_DONGLE_SPEED:
/* Set DTR to enter command mode */
sirdev_set_dtr_rts(dev, TRUE, FALSE);
udelay(500);
ret = 0;
switch (speed) {
default:
speed = 9600;
ret = -EINVAL;
/* fall through */
case 9600:
control[0] = MCP2120_9600;
//printk("mcp2120 9600\n");
break;
case 19200:
control[0] = MCP2120_19200;
//printk("mcp2120 19200\n");
break;
case 38400:
control[0] = MCP2120_38400;
//printk("mcp2120 38400\n");
break;
case 57600:
control[0] = MCP2120_57600;
//printk("mcp2120 57600\n");
break;
case 115200:
control[0] = MCP2120_115200;
//printk("mcp2120 115200\n");
break;
}
control[1] = MCP2120_COMMIT;
/* Write control bytes */
sirdev_raw_write(dev, control, 2);
dev->speed = speed;
state = MCP2120_STATE_WAIT_SPEED;
delay = 100;
//printk("mcp2120_change_speed: dongle_speed\n");
break;
case MCP2120_STATE_WAIT_SPEED:
/* Go back to normal mode */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
//printk("mcp2120_change_speed: mcp_wait\n");
break;
default:
IRDA_ERROR("%s(), undefine state %d\n", __func__, state);
ret = -EINVAL;
break;
}
dev->fsm.substate = state;
return (delay > 0) ? delay : ret;
}
/*
* Function mcp2120_reset (driver)
*
* This function resets the mcp2120 dongle.
*
* Info: -set RTS to reset mcp2120
* -set DTR to set mcp2120 software command mode
* -mcp2120 defaults to 9600 baud after reset
*
* Algorithm:
* 0. Set RTS to reset mcp2120.
* 1. Clear RTS and wait for device reset timer of 30 ms (max).
*
*/
#define MCP2120_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET+1)
#define MCP2120_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET+2)
static int mcp2120_reset(struct sir_dev *dev)
{
unsigned state = dev->fsm.substate;
unsigned delay = 0;
int ret = 0;
IRDA_DEBUG(2, "%s()\n", __func__);
switch (state) {
case SIRDEV_STATE_DONGLE_RESET:
//printk("mcp2120_reset: dongle_reset\n");
/* Reset dongle by setting RTS*/
sirdev_set_dtr_rts(dev, TRUE, TRUE);
state = MCP2120_STATE_WAIT1_RESET;
delay = 50;
break;
case MCP2120_STATE_WAIT1_RESET:
//printk("mcp2120_reset: mcp2120_wait1\n");
/* clear RTS and wait for at least 30 ms. */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
state = MCP2120_STATE_WAIT2_RESET;
delay = 50;
break;
case MCP2120_STATE_WAIT2_RESET:
//printk("mcp2120_reset mcp2120_wait2\n");
/* Go back to normal mode */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
break;
default:
IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
ret = -EINVAL;
break;
}
dev->fsm.substate = state;
return (delay > 0) ? delay : ret;
}
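/*
 * Illustrative note on the state machine above (based on the sir-dev
 * framework's conventions; treat the timing details as an assumption):
 * returning a positive value asks the framework to wait that many
 * milliseconds and then call the handler again with dev->fsm.substate as
 * left here. A full reset therefore runs as three invocations:
 * DONGLE_RESET (assert RTS, wait ~50 ms), WAIT1 (drop RTS, wait ~50 ms),
 * WAIT2 (back to normal mode, done).
 */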
MODULE_AUTHOR("Felix Tang <tangf@eyetap.org>");
MODULE_DESCRIPTION("Microchip MCP2120");
MODULE_LICENSE("GPL");
MODULE_ALIAS("irda-dongle-9"); /* IRDA_MCP2120_DONGLE */
module_init(mcp2120_sir_init);
module_exit(mcp2120_sir_cleanup);
| gpl-2.0 |
hallovveen31/M8_JUST_ONE_KERNEL | drivers/net/irda/mcp2120-sir.c | 13297 | 5867 | /*********************************************************************
*
*
* Filename: mcp2120.c
* Version: 1.0
* Description: Implementation for the MCP2120 (Microchip)
* Status: Experimental.
* Author: Felix Tang (tangf@eyetap.org)
* Created at: Sun Mar 31 19:32:12 EST 2002
* Based on code by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 2002 Felix Tang, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
********************************************************************/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <net/irda/irda.h>
#include "sir-dev.h"
static int mcp2120_reset(struct sir_dev *dev);
static int mcp2120_open(struct sir_dev *dev);
static int mcp2120_close(struct sir_dev *dev);
static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed);
#define MCP2120_9600 0x87
#define MCP2120_19200 0x8B
#define MCP2120_38400 0x85
#define MCP2120_57600 0x83
#define MCP2120_115200 0x81
#define MCP2120_COMMIT 0x11
static struct dongle_driver mcp2120 = {
.owner = THIS_MODULE,
.driver_name = "Microchip MCP2120",
.type = IRDA_MCP2120_DONGLE,
.open = mcp2120_open,
.close = mcp2120_close,
.reset = mcp2120_reset,
.set_speed = mcp2120_change_speed,
};
static int __init mcp2120_sir_init(void)
{
return irda_register_dongle(&mcp2120);
}
static void __exit mcp2120_sir_cleanup(void)
{
irda_unregister_dongle(&mcp2120);
}
static int mcp2120_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
IRDA_DEBUG(2, "%s()\n", __func__);
/* No explicit power-on seems to be required here; the reset sequence switches the dongle on anyway */
qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
qos->min_turn_time.bits = 0x01;
irda_qos_bits_to_value(qos);
return 0;
}
static int mcp2120_close(struct sir_dev *dev)
{
IRDA_DEBUG(2, "%s()\n", __func__);
/* Power off dongle */
/* reset and inhibit mcp2120 */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
// sirdev_set_dtr_rts(dev, FALSE, FALSE);
return 0;
}
/*
* Function mcp2120_change_speed (dev, speed)
*
* Set the speed for the MCP2120.
*
*/
#define MCP2120_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED+1)
static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
{
unsigned state = dev->fsm.substate;
unsigned delay = 0;
u8 control[2];
static int ret = 0;
IRDA_DEBUG(2, "%s()\n", __func__);
switch (state) {
case SIRDEV_STATE_DONGLE_SPEED:
/* Set DTR to enter command mode */
sirdev_set_dtr_rts(dev, TRUE, FALSE);
udelay(500);
ret = 0;
switch (speed) {
default:
speed = 9600;
ret = -EINVAL;
/* fall through */
case 9600:
control[0] = MCP2120_9600;
//printk("mcp2120 9600\n");
break;
case 19200:
control[0] = MCP2120_19200;
//printk("mcp2120 19200\n");
break;
case 38400:
control[0] = MCP2120_38400;
//printk("mcp2120 38400\n");
break;
case 57600:
control[0] = MCP2120_57600;
//printk("mcp2120 57600\n");
break;
case 115200:
control[0] = MCP2120_115200;
//printk("mcp2120 115200\n");
break;
}
control[1] = MCP2120_COMMIT;
/* Write control bytes */
sirdev_raw_write(dev, control, 2);
dev->speed = speed;
state = MCP2120_STATE_WAIT_SPEED;
delay = 100;
//printk("mcp2120_change_speed: dongle_speed\n");
break;
case MCP2120_STATE_WAIT_SPEED:
/* Go back to normal mode */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
//printk("mcp2120_change_speed: mcp_wait\n");
break;
default:
IRDA_ERROR("%s(), undefine state %d\n", __func__, state);
ret = -EINVAL;
break;
}
dev->fsm.substate = state;
return (delay > 0) ? delay : ret;
}
/*
* Function mcp2120_reset (driver)
*
* This function resets the mcp2120 dongle.
*
* Info: -set RTS to reset mcp2120
* -set DTR to set mcp2120 software command mode
* -mcp2120 defaults to 9600 baud after reset
*
* Algorithm:
* 0. Set RTS to reset mcp2120.
* 1. Clear RTS and wait for device reset timer of 30 ms (max).
*
*/
#define MCP2120_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET+1)
#define MCP2120_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET+2)
static int mcp2120_reset(struct sir_dev *dev)
{
unsigned state = dev->fsm.substate;
unsigned delay = 0;
int ret = 0;
IRDA_DEBUG(2, "%s()\n", __func__);
switch (state) {
case SIRDEV_STATE_DONGLE_RESET:
//printk("mcp2120_reset: dongle_reset\n");
/* Reset dongle by setting RTS*/
sirdev_set_dtr_rts(dev, TRUE, TRUE);
state = MCP2120_STATE_WAIT1_RESET;
delay = 50;
break;
case MCP2120_STATE_WAIT1_RESET:
//printk("mcp2120_reset: mcp2120_wait1\n");
/* clear RTS and wait for at least 30 ms. */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
state = MCP2120_STATE_WAIT2_RESET;
delay = 50;
break;
case MCP2120_STATE_WAIT2_RESET:
//printk("mcp2120_reset mcp2120_wait2\n");
/* Go back to normal mode */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
break;
default:
IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
ret = -EINVAL;
break;
}
dev->fsm.substate = state;
return (delay > 0) ? delay : ret;
}
MODULE_AUTHOR("Felix Tang <tangf@eyetap.org>");
MODULE_DESCRIPTION("Microchip MCP2120");
MODULE_LICENSE("GPL");
MODULE_ALIAS("irda-dongle-9"); /* IRDA_MCP2120_DONGLE */
module_init(mcp2120_sir_init);
module_exit(mcp2120_sir_cleanup);
| gpl-2.0 |
carlocaione/linux-next | arch/ia64/mm/extable.c | 13297 | 3019 | /*
* Kernel exception handling table support. Derived from arch/alpha/mm/extable.c.
*
* Copyright (C) 1998, 1999, 2001-2002, 2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/sort.h>
#include <asm/uaccess.h>
#include <linux/module.h>
static int cmp_ex(const void *a, const void *b)
{
const struct exception_table_entry *l = a, *r = b;
u64 lip = (u64) &l->addr + l->addr;
u64 rip = (u64) &r->addr + r->addr;
/* avoid overflow */
if (lip > rip)
return 1;
if (lip < rip)
return -1;
return 0;
}
static void swap_ex(void *a, void *b, int size)
{
struct exception_table_entry *l = a, *r = b, tmp;
u64 delta = (u64) r - (u64) l;
tmp = *l;
l->addr = r->addr + delta;
l->cont = r->cont + delta;
r->addr = tmp.addr - delta;
r->cont = tmp.cont - delta;
}
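/*
 * Worked example for the swap above (illustrative addresses): entries hold
 * self-relative offsets, so an entry sitting at 0x3000 with addr == 0x50
 * describes the instruction at 0x3050. If that entry is swapped into the
 * slot at 0x1000 (delta == 0x2000), the stored offset must become
 * 0x50 + delta == 0x2050 so that it still resolves to 0x3050; the entry
 * travelling the other way shrinks by delta for the same reason. That is
 * exactly the +/- delta fix-up applied to both addr and cont above.
 */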
/*
* Sort the exception table. It's usually already sorted, but there
* may be unordered entries due to multiple text sections (such as the
* .init text section). Note that the exception-table-entries contain
* location-relative addresses, which requires a bit of care during
* sorting to avoid overflows in the offset members (e.g., it would
* not be safe to make a temporary copy of an exception-table entry on
* the stack, because the stack may be more than 2GB away from the
* exception-table).
*/
void sort_extable (struct exception_table_entry *start,
struct exception_table_entry *finish)
{
sort(start, finish - start, sizeof(struct exception_table_entry),
cmp_ex, swap_ex);
}
static inline unsigned long ex_to_addr(const struct exception_table_entry *x)
{
return (unsigned long)&x->addr + x->addr;
}
#ifdef CONFIG_MODULES
/*
* Any entry referring to the module init will be at the beginning or
* the end.
*/
void trim_init_extable(struct module *m)
{
/*trim the beginning*/
while (m->num_exentries &&
within_module_init(ex_to_addr(&m->extable[0]), m)) {
m->extable++;
m->num_exentries--;
}
/*trim the end*/
while (m->num_exentries &&
within_module_init(ex_to_addr(&m->extable[m->num_exentries-1]),
m))
m->num_exentries--;
}
#endif /* CONFIG_MODULES */
const struct exception_table_entry *
search_extable (const struct exception_table_entry *first,
const struct exception_table_entry *last,
unsigned long ip)
{
const struct exception_table_entry *mid;
unsigned long mid_ip;
long diff;
while (first <= last) {
mid = &first[(last - first)/2];
mid_ip = (u64) &mid->addr + mid->addr;
diff = mid_ip - ip;
if (diff == 0)
return mid;
else if (diff < 0)
first = mid + 1;
else
last = mid - 1;
}
return NULL;
}
void
ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e)
{
long fix = (u64) &e->cont + e->cont;
regs->r8 = -EFAULT;
if (fix & 4)
regs->r9 = 0;
regs->cr_iip = fix & ~0xf;
ia64_psr(regs)->ri = fix & 0x3; /* set continuation slot number */
}
| gpl-2.0 |
kgp700/Neok-GNexroid-Kernel-JB | drivers/net/irda/mcp2120-sir.c | 13297 | 5867 | /*********************************************************************
*
*
* Filename: mcp2120.c
* Version: 1.0
* Description: Implementation for the MCP2120 (Microchip)
* Status: Experimental.
* Author: Felix Tang (tangf@eyetap.org)
* Created at: Sun Mar 31 19:32:12 EST 2002
* Based on code by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 2002 Felix Tang, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
********************************************************************/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <net/irda/irda.h>
#include "sir-dev.h"
static int mcp2120_reset(struct sir_dev *dev);
static int mcp2120_open(struct sir_dev *dev);
static int mcp2120_close(struct sir_dev *dev);
static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed);
#define MCP2120_9600 0x87
#define MCP2120_19200 0x8B
#define MCP2120_38400 0x85
#define MCP2120_57600 0x83
#define MCP2120_115200 0x81
#define MCP2120_COMMIT 0x11
static struct dongle_driver mcp2120 = {
.owner = THIS_MODULE,
.driver_name = "Microchip MCP2120",
.type = IRDA_MCP2120_DONGLE,
.open = mcp2120_open,
.close = mcp2120_close,
.reset = mcp2120_reset,
.set_speed = mcp2120_change_speed,
};
static int __init mcp2120_sir_init(void)
{
return irda_register_dongle(&mcp2120);
}
static void __exit mcp2120_sir_cleanup(void)
{
irda_unregister_dongle(&mcp2120);
}
static int mcp2120_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
IRDA_DEBUG(2, "%s()\n", __func__);
/* No explicit power-on seems to be required here; the reset sequence switches the dongle on anyway */
qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
qos->min_turn_time.bits = 0x01;
irda_qos_bits_to_value(qos);
return 0;
}
static int mcp2120_close(struct sir_dev *dev)
{
IRDA_DEBUG(2, "%s()\n", __func__);
/* Power off dongle */
/* reset and inhibit mcp2120 */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
// sirdev_set_dtr_rts(dev, FALSE, FALSE);
return 0;
}
/*
* Function mcp2120_change_speed (dev, speed)
*
* Set the speed for the MCP2120.
*
*/
#define MCP2120_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED+1)
static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
{
unsigned state = dev->fsm.substate;
unsigned delay = 0;
u8 control[2];
static int ret = 0;
IRDA_DEBUG(2, "%s()\n", __func__);
switch (state) {
case SIRDEV_STATE_DONGLE_SPEED:
/* Set DTR to enter command mode */
sirdev_set_dtr_rts(dev, TRUE, FALSE);
udelay(500);
ret = 0;
switch (speed) {
default:
speed = 9600;
ret = -EINVAL;
/* fall through */
case 9600:
control[0] = MCP2120_9600;
//printk("mcp2120 9600\n");
break;
case 19200:
control[0] = MCP2120_19200;
//printk("mcp2120 19200\n");
break;
case 38400:
control[0] = MCP2120_38400;
//printk("mcp2120 38400\n");
break;
case 57600:
control[0] = MCP2120_57600;
//printk("mcp2120 57600\n");
break;
case 115200:
control[0] = MCP2120_115200;
//printk("mcp2120 115200\n");
break;
}
control[1] = MCP2120_COMMIT;
/* Write control bytes */
sirdev_raw_write(dev, control, 2);
dev->speed = speed;
state = MCP2120_STATE_WAIT_SPEED;
delay = 100;
//printk("mcp2120_change_speed: dongle_speed\n");
break;
case MCP2120_STATE_WAIT_SPEED:
/* Go back to normal mode */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
//printk("mcp2120_change_speed: mcp_wait\n");
break;
default:
IRDA_ERROR("%s(), undefine state %d\n", __func__, state);
ret = -EINVAL;
break;
}
dev->fsm.substate = state;
return (delay > 0) ? delay : ret;
}
/*
* Function mcp2120_reset (driver)
*
* This function resets the mcp2120 dongle.
*
* Info: -set RTS to reset mcp2120
* -set DTR to set mcp2120 software command mode
* -mcp2120 defaults to 9600 baud after reset
*
* Algorithm:
* 0. Set RTS to reset mcp2120.
* 1. Clear RTS and wait for device reset timer of 30 ms (max).
*
*/
#define MCP2120_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET+1)
#define MCP2120_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET+2)
static int mcp2120_reset(struct sir_dev *dev)
{
unsigned state = dev->fsm.substate;
unsigned delay = 0;
int ret = 0;
IRDA_DEBUG(2, "%s()\n", __func__);
switch (state) {
case SIRDEV_STATE_DONGLE_RESET:
//printk("mcp2120_reset: dongle_reset\n");
/* Reset dongle by setting RTS*/
sirdev_set_dtr_rts(dev, TRUE, TRUE);
state = MCP2120_STATE_WAIT1_RESET;
delay = 50;
break;
case MCP2120_STATE_WAIT1_RESET:
//printk("mcp2120_reset: mcp2120_wait1\n");
/* clear RTS and wait for at least 30 ms. */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
state = MCP2120_STATE_WAIT2_RESET;
delay = 50;
break;
case MCP2120_STATE_WAIT2_RESET:
//printk("mcp2120_reset mcp2120_wait2\n");
/* Go back to normal mode */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
break;
default:
IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
ret = -EINVAL;
break;
}
dev->fsm.substate = state;
return (delay > 0) ? delay : ret;
}
MODULE_AUTHOR("Felix Tang <tangf@eyetap.org>");
MODULE_DESCRIPTION("Microchip MCP2120");
MODULE_LICENSE("GPL");
MODULE_ALIAS("irda-dongle-9"); /* IRDA_MCP2120_DONGLE */
module_init(mcp2120_sir_init);
module_exit(mcp2120_sir_cleanup);
| gpl-2.0 |
allenbh/ntrdma | sound/pci/emu10k1/emu10k1_main.c | 242 | 70801 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Creative Labs, Inc.
* Routines for control of EMU10K1 chips
*
* Copyright (c) by James Courtier-Dutton <James@superbug.co.uk>
* Added support for Audigy 2 Value.
* Added EMU 1010 support.
* General bug fixes and enhancements.
*
*
* BUGS:
* --
*
* TODO:
* --
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/emu10k1.h>
#include <linux/firmware.h>
#include "p16v.h"
#include "tina2.h"
#include "p17v.h"
#define HANA_FILENAME "emu/hana.fw"
#define DOCK_FILENAME "emu/audio_dock.fw"
#define EMU1010B_FILENAME "emu/emu1010b.fw"
#define MICRO_DOCK_FILENAME "emu/micro_dock.fw"
#define EMU0404_FILENAME "emu/emu0404.fw"
#define EMU1010_NOTEBOOK_FILENAME "emu/emu1010_notebook.fw"
MODULE_FIRMWARE(HANA_FILENAME);
MODULE_FIRMWARE(DOCK_FILENAME);
MODULE_FIRMWARE(EMU1010B_FILENAME);
MODULE_FIRMWARE(MICRO_DOCK_FILENAME);
MODULE_FIRMWARE(EMU0404_FILENAME);
MODULE_FIRMWARE(EMU1010_NOTEBOOK_FILENAME);
/*************************************************************************
* EMU10K1 init / done
*************************************************************************/
void snd_emu10k1_voice_init(struct snd_emu10k1 *emu, int ch)
{
snd_emu10k1_ptr_write(emu, DCYSUSV, ch, 0);
snd_emu10k1_ptr_write(emu, IP, ch, 0);
snd_emu10k1_ptr_write(emu, VTFT, ch, 0xffff);
snd_emu10k1_ptr_write(emu, CVCF, ch, 0xffff);
snd_emu10k1_ptr_write(emu, PTRX, ch, 0);
snd_emu10k1_ptr_write(emu, CPF, ch, 0);
snd_emu10k1_ptr_write(emu, CCR, ch, 0);
snd_emu10k1_ptr_write(emu, PSST, ch, 0);
snd_emu10k1_ptr_write(emu, DSL, ch, 0x10);
snd_emu10k1_ptr_write(emu, CCCA, ch, 0);
snd_emu10k1_ptr_write(emu, Z1, ch, 0);
snd_emu10k1_ptr_write(emu, Z2, ch, 0);
snd_emu10k1_ptr_write(emu, FXRT, ch, 0x32100000);
snd_emu10k1_ptr_write(emu, ATKHLDM, ch, 0);
snd_emu10k1_ptr_write(emu, DCYSUSM, ch, 0);
snd_emu10k1_ptr_write(emu, IFATN, ch, 0xffff);
snd_emu10k1_ptr_write(emu, PEFE, ch, 0);
snd_emu10k1_ptr_write(emu, FMMOD, ch, 0);
snd_emu10k1_ptr_write(emu, TREMFRQ, ch, 24); /* 1 Hz */
snd_emu10k1_ptr_write(emu, FM2FRQ2, ch, 24); /* 1 Hz */
snd_emu10k1_ptr_write(emu, TEMPENV, ch, 0);
/*** these are last so OFF prevents writing ***/
snd_emu10k1_ptr_write(emu, LFOVAL2, ch, 0);
snd_emu10k1_ptr_write(emu, LFOVAL1, ch, 0);
snd_emu10k1_ptr_write(emu, ATKHLDV, ch, 0);
snd_emu10k1_ptr_write(emu, ENVVOL, ch, 0);
snd_emu10k1_ptr_write(emu, ENVVAL, ch, 0);
/* Audigy extra stuffs */
if (emu->audigy) {
snd_emu10k1_ptr_write(emu, 0x4c, ch, 0); /* ?? */
snd_emu10k1_ptr_write(emu, 0x4d, ch, 0); /* ?? */
snd_emu10k1_ptr_write(emu, 0x4e, ch, 0); /* ?? */
snd_emu10k1_ptr_write(emu, 0x4f, ch, 0); /* ?? */
snd_emu10k1_ptr_write(emu, A_FXRT1, ch, 0x03020100);
snd_emu10k1_ptr_write(emu, A_FXRT2, ch, 0x3f3f3f3f);
snd_emu10k1_ptr_write(emu, A_SENDAMOUNTS, ch, 0);
}
}
static unsigned int spi_dac_init[] = {
0x00ff,
0x02ff,
0x0400,
0x0520,
0x0600,
0x08ff,
0x0aff,
0x0cff,
0x0eff,
0x10ff,
0x1200,
0x1400,
0x1480,
0x1800,
0x1aff,
0x1cff,
0x1e00,
0x0530,
0x0602,
0x0622,
0x1400,
};
static unsigned int i2c_adc_init[][2] = {
{ 0x17, 0x00 }, /* Reset */
{ 0x07, 0x00 }, /* Timeout */
{ 0x0b, 0x22 }, /* Interface control */
{ 0x0c, 0x22 }, /* Master mode control */
{ 0x0d, 0x08 }, /* Powerdown control */
{ 0x0e, 0xcf }, /* Attenuation Left 0x01 = -103dB, 0xff = 24dB */
{ 0x0f, 0xcf }, /* Attenuation Right 0.5dB steps */
{ 0x10, 0x7b }, /* ALC Control 1 */
{ 0x11, 0x00 }, /* ALC Control 2 */
{ 0x12, 0x32 }, /* ALC Control 3 */
{ 0x13, 0x00 }, /* Noise gate control */
{ 0x14, 0xa6 }, /* Limiter control */
{ 0x15, ADC_MUX_2 }, /* ADC Mixer control. Mic for A2ZS Notebook */
};
static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
{
unsigned int silent_page;
int ch;
u32 tmp;
/* disable audio and lock cache */
outl(HCFG_LOCKSOUNDCACHE | HCFG_LOCKTANKCACHE_MASK |
HCFG_MUTEBUTTONENABLE, emu->port + HCFG);
/* reset recording buffers */
snd_emu10k1_ptr_write(emu, MICBS, 0, ADCBS_BUFSIZE_NONE);
snd_emu10k1_ptr_write(emu, MICBA, 0, 0);
snd_emu10k1_ptr_write(emu, FXBS, 0, ADCBS_BUFSIZE_NONE);
snd_emu10k1_ptr_write(emu, FXBA, 0, 0);
snd_emu10k1_ptr_write(emu, ADCBS, 0, ADCBS_BUFSIZE_NONE);
snd_emu10k1_ptr_write(emu, ADCBA, 0, 0);
/* disable channel interrupt */
outl(0, emu->port + INTE);
snd_emu10k1_ptr_write(emu, CLIEL, 0, 0);
snd_emu10k1_ptr_write(emu, CLIEH, 0, 0);
snd_emu10k1_ptr_write(emu, SOLEL, 0, 0);
snd_emu10k1_ptr_write(emu, SOLEH, 0, 0);
if (emu->audigy) {
/* set SPDIF bypass mode */
snd_emu10k1_ptr_write(emu, SPBYPASS, 0, SPBYPASS_FORMAT);
/* enable rear left + rear right AC97 slots */
snd_emu10k1_ptr_write(emu, AC97SLOT, 0, AC97SLOT_REAR_RIGHT |
AC97SLOT_REAR_LEFT);
}
/* init envelope engine */
for (ch = 0; ch < NUM_G; ch++)
snd_emu10k1_voice_init(emu, ch);
snd_emu10k1_ptr_write(emu, SPCS0, 0, emu->spdif_bits[0]);
snd_emu10k1_ptr_write(emu, SPCS1, 0, emu->spdif_bits[1]);
snd_emu10k1_ptr_write(emu, SPCS2, 0, emu->spdif_bits[2]);
if (emu->card_capabilities->ca0151_chip) { /* audigy2 */
/* Hacks for Alice3 to work independent of haP16V driver */
/* Setup SRCMulti_I2S SamplingRate */
tmp = snd_emu10k1_ptr_read(emu, A_SPDIF_SAMPLERATE, 0);
tmp &= 0xfffff1ff;
tmp |= (0x2<<9);
snd_emu10k1_ptr_write(emu, A_SPDIF_SAMPLERATE, 0, tmp);
/* Setup SRCSel (Enable Spdif,I2S SRCMulti) */
snd_emu10k1_ptr20_write(emu, SRCSel, 0, 0x14);
/* Setup SRCMulti Input Audio Enable */
/* Use 0xFFFFFFFF to enable P16V sounds. */
snd_emu10k1_ptr20_write(emu, SRCMULTI_ENABLE, 0, 0xFFFFFFFF);
/* Enabled Phased (8-channel) P16V playback */
outl(0x0201, emu->port + HCFG2);
/* Set playback routing. */
snd_emu10k1_ptr20_write(emu, CAPTURE_P16V_SOURCE, 0, 0x78e4);
}
if (emu->card_capabilities->ca0108_chip) { /* audigy2 Value */
/* Hacks for Alice3 to work independent of haP16V driver */
dev_info(emu->card->dev, "Audigy2 value: Special config.\n");
/* Setup SRCMulti_I2S SamplingRate */
tmp = snd_emu10k1_ptr_read(emu, A_SPDIF_SAMPLERATE, 0);
tmp &= 0xfffff1ff;
tmp |= (0x2<<9);
snd_emu10k1_ptr_write(emu, A_SPDIF_SAMPLERATE, 0, tmp);
/* Setup SRCSel (Enable Spdif,I2S SRCMulti) */
outl(0x600000, emu->port + 0x20);
outl(0x14, emu->port + 0x24);
/* Setup SRCMulti Input Audio Enable */
outl(0x7b0000, emu->port + 0x20);
outl(0xFF000000, emu->port + 0x24);
/* Setup SPDIF Out Audio Enable */
/* The Audigy 2 Value has a separate SPDIF out,
* so no need for a mixer switch
*/
outl(0x7a0000, emu->port + 0x20);
outl(0xFF000000, emu->port + 0x24);
tmp = inl(emu->port + A_IOCFG) & ~0x8; /* Clear bit 3 */
outl(tmp, emu->port + A_IOCFG);
}
if (emu->card_capabilities->spi_dac) { /* Audigy 2 ZS Notebook with DAC Wolfson WM8768/WM8568 */
int size, n;
size = ARRAY_SIZE(spi_dac_init);
for (n = 0; n < size; n++)
snd_emu10k1_spi_write(emu, spi_dac_init[n]);
snd_emu10k1_ptr20_write(emu, 0x60, 0, 0x10);
/* Enable GPIOs
* GPIO0: Unknown
* GPIO1: Speakers-enabled.
* GPIO2: Unknown
* GPIO3: Unknown
* GPIO4: IEC958 Output on.
* GPIO5: Unknown
* GPIO6: Unknown
* GPIO7: Unknown
*/
outl(0x76, emu->port + A_IOCFG); /* Windows uses 0x3f76 */
}
if (emu->card_capabilities->i2c_adc) { /* Audigy 2 ZS Notebook with ADC Wolfson WM8775 */
int size, n;
snd_emu10k1_ptr20_write(emu, P17V_I2S_SRC_SEL, 0, 0x2020205f);
tmp = inl(emu->port + A_IOCFG);
outl(tmp | 0x4, emu->port + A_IOCFG); /* Set bit 2 for mic input */
tmp = inl(emu->port + A_IOCFG);
size = ARRAY_SIZE(i2c_adc_init);
for (n = 0; n < size; n++)
snd_emu10k1_i2c_write(emu, i2c_adc_init[n][0], i2c_adc_init[n][1]);
for (n = 0; n < 4; n++) {
emu->i2c_capture_volume[n][0] = 0xcf;
emu->i2c_capture_volume[n][1] = 0xcf;
}
}
snd_emu10k1_ptr_write(emu, PTB, 0, emu->ptb_pages.addr);
snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */
snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */
silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
for (ch = 0; ch < NUM_G; ch++) {
snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page);
snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page);
}
if (emu->card_capabilities->emu_model) {
outl(HCFG_AUTOMUTE_ASYNC |
HCFG_EMU32_SLAVE |
HCFG_AUDIOENABLE, emu->port + HCFG);
/*
* Hokay, setup HCFG
* Mute Disable Audio = 0
* Lock Tank Memory = 1
* Lock Sound Memory = 0
* Auto Mute = 1
*/
} else if (emu->audigy) {
if (emu->revision == 4) /* audigy2 */
outl(HCFG_AUDIOENABLE |
HCFG_AC3ENABLE_CDSPDIF |
HCFG_AC3ENABLE_GPSPDIF |
HCFG_AUTOMUTE | HCFG_JOYENABLE, emu->port + HCFG);
else
outl(HCFG_AUTOMUTE | HCFG_JOYENABLE, emu->port + HCFG);
/* FIXME: Remove all these emu->model checks and replace them with a card recognition parameter,
* e.g. card_capabilities->joystick */
} else if (emu->model == 0x20 ||
emu->model == 0xc400 ||
(emu->model == 0x21 && emu->revision < 6))
outl(HCFG_LOCKTANKCACHE_MASK | HCFG_AUTOMUTE, emu->port + HCFG);
else
/* With on-chip joystick */
outl(HCFG_LOCKTANKCACHE_MASK | HCFG_AUTOMUTE | HCFG_JOYENABLE, emu->port + HCFG);
if (enable_ir) { /* enable IR for SB Live */
if (emu->card_capabilities->emu_model) {
; /* Disable all access to A_IOCFG for the emu1010 */
} else if (emu->card_capabilities->i2c_adc) {
; /* Disable A_IOCFG for Audigy 2 ZS Notebook */
} else if (emu->audigy) {
unsigned int reg = inl(emu->port + A_IOCFG);
outl(reg | A_IOCFG_GPOUT2, emu->port + A_IOCFG);
udelay(500);
outl(reg | A_IOCFG_GPOUT1 | A_IOCFG_GPOUT2, emu->port + A_IOCFG);
udelay(100);
outl(reg, emu->port + A_IOCFG);
} else {
unsigned int reg = inl(emu->port + HCFG);
outl(reg | HCFG_GPOUT2, emu->port + HCFG);
udelay(500);
outl(reg | HCFG_GPOUT1 | HCFG_GPOUT2, emu->port + HCFG);
udelay(100);
outl(reg, emu->port + HCFG);
}
}
if (emu->card_capabilities->emu_model) {
; /* Disable all access to A_IOCFG for the emu1010 */
} else if (emu->card_capabilities->i2c_adc) {
; /* Disable A_IOCFG for Audigy 2 ZS Notebook */
} else if (emu->audigy) { /* enable analog output */
unsigned int reg = inl(emu->port + A_IOCFG);
outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG);
}
if (emu->address_mode == 0) {
/* use 16M in 4G */
outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG);
}
return 0;
}
static void snd_emu10k1_audio_enable(struct snd_emu10k1 *emu)
{
/*
* Enable the audio bit
*/
outl(inl(emu->port + HCFG) | HCFG_AUDIOENABLE, emu->port + HCFG);
/* Enable analog/digital outs on audigy */
if (emu->card_capabilities->emu_model) {
; /* Disable all access to A_IOCFG for the emu1010 */
} else if (emu->card_capabilities->i2c_adc) {
; /* Disable A_IOCFG for Audigy 2 ZS Notebook */
} else if (emu->audigy) {
outl(inl(emu->port + A_IOCFG) & ~0x44, emu->port + A_IOCFG);
if (emu->card_capabilities->ca0151_chip) { /* audigy2 */
/* Unmute Analog now. Set GPO6 to 1 for Apollo.
* This has to be done after init ALice3 I2SOut beyond 48KHz.
* So, sequence is important. */
outl(inl(emu->port + A_IOCFG) | 0x0040, emu->port + A_IOCFG);
} else if (emu->card_capabilities->ca0108_chip) { /* audigy2 value */
/* Unmute Analog now. */
outl(inl(emu->port + A_IOCFG) | 0x0060, emu->port + A_IOCFG);
} else {
/* Disable routing from AC97 line out to Front speakers */
outl(inl(emu->port + A_IOCFG) | 0x0080, emu->port + A_IOCFG);
}
}
#if 0
{
unsigned int tmp;
/* FIXME: the following routine disables LiveDrive-II !! */
/* TOSLink detection */
emu->tos_link = 0;
tmp = inl(emu->port + HCFG);
if (tmp & (HCFG_GPINPUT0 | HCFG_GPINPUT1)) {
outl(tmp|0x800, emu->port + HCFG);
udelay(50);
if (tmp != (inl(emu->port + HCFG) & ~0x800)) {
emu->tos_link = 1;
outl(tmp, emu->port + HCFG);
}
}
}
#endif
snd_emu10k1_intr_enable(emu, INTE_PCIERRORENABLE);
}
int snd_emu10k1_done(struct snd_emu10k1 *emu)
{
int ch;
outl(0, emu->port + INTE);
/*
* Shutdown the chip
*/
for (ch = 0; ch < NUM_G; ch++)
snd_emu10k1_ptr_write(emu, DCYSUSV, ch, 0);
for (ch = 0; ch < NUM_G; ch++) {
snd_emu10k1_ptr_write(emu, VTFT, ch, 0);
snd_emu10k1_ptr_write(emu, CVCF, ch, 0);
snd_emu10k1_ptr_write(emu, PTRX, ch, 0);
snd_emu10k1_ptr_write(emu, CPF, ch, 0);
}
/* reset recording buffers */
snd_emu10k1_ptr_write(emu, MICBS, 0, 0);
snd_emu10k1_ptr_write(emu, MICBA, 0, 0);
snd_emu10k1_ptr_write(emu, FXBS, 0, 0);
snd_emu10k1_ptr_write(emu, FXBA, 0, 0);
snd_emu10k1_ptr_write(emu, FXWC, 0, 0);
snd_emu10k1_ptr_write(emu, ADCBS, 0, ADCBS_BUFSIZE_NONE);
snd_emu10k1_ptr_write(emu, ADCBA, 0, 0);
snd_emu10k1_ptr_write(emu, TCBS, 0, TCBS_BUFFSIZE_16K);
snd_emu10k1_ptr_write(emu, TCB, 0, 0);
if (emu->audigy)
snd_emu10k1_ptr_write(emu, A_DBG, 0, A_DBG_SINGLE_STEP);
else
snd_emu10k1_ptr_write(emu, DBG, 0, EMU10K1_DBG_SINGLE_STEP);
/* disable channel interrupt */
snd_emu10k1_ptr_write(emu, CLIEL, 0, 0);
snd_emu10k1_ptr_write(emu, CLIEH, 0, 0);
snd_emu10k1_ptr_write(emu, SOLEL, 0, 0);
snd_emu10k1_ptr_write(emu, SOLEH, 0, 0);
/* disable audio and lock cache */
outl(HCFG_LOCKSOUNDCACHE | HCFG_LOCKTANKCACHE_MASK | HCFG_MUTEBUTTONENABLE, emu->port + HCFG);
snd_emu10k1_ptr_write(emu, PTB, 0, 0);
return 0;
}
/*************************************************************************
* ECARD functional implementation
*************************************************************************/
/* In A1 Silicon, these bits are in the HC register */
#define HOOKN_BIT (1L << 12)
#define HANDN_BIT (1L << 11)
#define PULSEN_BIT (1L << 10)
#define EC_GDI1 (1 << 13)
#define EC_GDI0 (1 << 14)
#define EC_NUM_CONTROL_BITS 20
#define EC_AC3_DATA_SELN 0x0001L
#define EC_EE_DATA_SEL 0x0002L
#define EC_EE_CNTRL_SELN 0x0004L
#define EC_EECLK 0x0008L
#define EC_EECS 0x0010L
#define EC_EESDO 0x0020L
#define EC_TRIM_CSN 0x0040L
#define EC_TRIM_SCLK 0x0080L
#define EC_TRIM_SDATA 0x0100L
#define EC_TRIM_MUTEN 0x0200L
#define EC_ADCCAL 0x0400L
#define EC_ADCRSTN 0x0800L
#define EC_DACCAL 0x1000L
#define EC_DACMUTEN 0x2000L
#define EC_LEDN 0x4000L
#define EC_SPDIF0_SEL_SHIFT 15
#define EC_SPDIF1_SEL_SHIFT 17
#define EC_SPDIF0_SEL_MASK (0x3L << EC_SPDIF0_SEL_SHIFT)
#define EC_SPDIF1_SEL_MASK (0x7L << EC_SPDIF1_SEL_SHIFT)
#define EC_SPDIF0_SELECT(_x) (((_x) << EC_SPDIF0_SEL_SHIFT) & EC_SPDIF0_SEL_MASK)
#define EC_SPDIF1_SELECT(_x) (((_x) << EC_SPDIF1_SEL_SHIFT) & EC_SPDIF1_SEL_MASK)
#define EC_CURRENT_PROM_VERSION 0x01 /* Self-explanatory. This should
* be incremented any time the EEPROM's
* format is changed. */
#define EC_EEPROM_SIZE 0x40 /* ECARD EEPROM has 64 16-bit words */
/* Addresses for special values stored in to EEPROM */
#define EC_PROM_VERSION_ADDR 0x20 /* Address of the current prom version */
#define EC_BOARDREV0_ADDR 0x21 /* LSW of board rev */
#define EC_BOARDREV1_ADDR 0x22 /* MSW of board rev */
#define EC_LAST_PROMFILE_ADDR 0x2f
#define EC_SERIALNUM_ADDR 0x30 /* First word of serial number. The
* serial number can be up to 30 characters
* in length and is stored as a NULL-terminated
* ASCII string. Any unused bytes must be
* filled with zeros */
#define EC_CHECKSUM_ADDR 0x3f /* Location at which checksum is stored */
/* Most of this stuff is pretty self-evident. According to the hardware
* dudes, we need to leave the ADCCAL bit low in order to avoid a DC
* offset problem. Weird.
*/
#define EC_RAW_RUN_MODE (EC_DACMUTEN | EC_ADCRSTN | EC_TRIM_MUTEN | \
EC_TRIM_CSN)
#define EC_DEFAULT_ADC_GAIN 0xC4C4
#define EC_DEFAULT_SPDIF0_SEL 0x0
#define EC_DEFAULT_SPDIF1_SEL 0x4
/**************************************************************************
* @func Clock bits into the Ecard's control latch. The Ecard uses a
* control latch which is loaded bit-serially by toggling the Modem control
* lines from function 2 on the E8010. This function hides these details
* and presents the illusion that we are actually writing to a distinct
* register.
*/
static void snd_emu10k1_ecard_write(struct snd_emu10k1 *emu, unsigned int value)
{
unsigned short count;
unsigned int data;
unsigned long hc_port;
unsigned int hc_value;
hc_port = emu->port + HCFG;
hc_value = inl(hc_port) & ~(HOOKN_BIT | HANDN_BIT | PULSEN_BIT);
outl(hc_value, hc_port);
for (count = 0; count < EC_NUM_CONTROL_BITS; count++) {
/* Set up the value */
data = ((value & 0x1) ? PULSEN_BIT : 0);
value >>= 1;
outl(hc_value | data, hc_port);
/* Clock the shift register */
outl(hc_value | data | HANDN_BIT, hc_port);
outl(hc_value | data, hc_port);
}
/* Latch the bits */
outl(hc_value | HOOKN_BIT, hc_port);
outl(hc_value, hc_port);
}
/**************************************************************************
* @func Set the gain of the ECARD's CS3310 Trim/gain controller. The
* trim value consists of a 16bit value which is composed of two
* 8 bit gain/trim values, one for the left channel and one for the
* right channel. The following table maps from the Gain/Attenuation
* value in decibels into the corresponding bit pattern for a single
* channel.
*/
static void snd_emu10k1_ecard_setadcgain(struct snd_emu10k1 *emu,
unsigned short gain)
{
unsigned int bit;
/* Enable writing to the TRIM registers */
snd_emu10k1_ecard_write(emu, emu->ecard_ctrl & ~EC_TRIM_CSN);
/* Do it again to ensure that we meet hold time requirements */
snd_emu10k1_ecard_write(emu, emu->ecard_ctrl & ~EC_TRIM_CSN);
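/* Shift the 16-bit trim word out MSB-first on TRIM_SDATA, pulsing TRIM_SCLK
* once per bit while TRIM_CSN stays asserted (low). */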
for (bit = (1 << 15); bit; bit >>= 1) {
unsigned int value;
value = emu->ecard_ctrl & ~(EC_TRIM_CSN | EC_TRIM_SDATA);
if (gain & bit)
value |= EC_TRIM_SDATA;
/* Clock the bit */
snd_emu10k1_ecard_write(emu, value);
snd_emu10k1_ecard_write(emu, value | EC_TRIM_SCLK);
snd_emu10k1_ecard_write(emu, value);
}
snd_emu10k1_ecard_write(emu, emu->ecard_ctrl);
}
static int snd_emu10k1_ecard_init(struct snd_emu10k1 *emu)
{
unsigned int hc_value;
/* Set up the initial settings */
emu->ecard_ctrl = EC_RAW_RUN_MODE |
EC_SPDIF0_SELECT(EC_DEFAULT_SPDIF0_SEL) |
EC_SPDIF1_SELECT(EC_DEFAULT_SPDIF1_SEL);
/* Step 0: Set the codec type in the hardware control register
* and enable audio output */
hc_value = inl(emu->port + HCFG);
outl(hc_value | HCFG_AUDIOENABLE | HCFG_CODECFORMAT_I2S, emu->port + HCFG);
inl(emu->port + HCFG);
/* Step 1: Turn off the led and deassert TRIM_CS */
snd_emu10k1_ecard_write(emu, EC_ADCCAL | EC_LEDN | EC_TRIM_CSN);
/* Step 2: Calibrate the ADC and DAC */
snd_emu10k1_ecard_write(emu, EC_DACCAL | EC_LEDN | EC_TRIM_CSN);
/* Step 3: Wait for a while; XXX We can't get away with this
* under a real operating system; we'll need to block and wait that
* way. */
snd_emu10k1_wait(emu, 48000);
/* Step 4: Switch off the DAC and ADC calibration. Note
* that ADCCAL is actually an inverted signal, so we assert
* it here to stop calibration. */
snd_emu10k1_ecard_write(emu, EC_ADCCAL | EC_LEDN | EC_TRIM_CSN);
/* Step 5: Switch into run mode */
snd_emu10k1_ecard_write(emu, emu->ecard_ctrl);
/* Step 6: Set the analog input gain */
snd_emu10k1_ecard_setadcgain(emu, EC_DEFAULT_ADC_GAIN);
return 0;
}
static int snd_emu10k1_cardbus_init(struct snd_emu10k1 *emu)
{
unsigned long special_port;
unsigned int value;
/* Special initialisation routine
* before the rest of the IO-Ports become active.
*/
special_port = emu->port + 0x38;
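/* Magic init sequence; each write below is followed by a dummy read of the same register. */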
value = inl(special_port);
outl(0x00d00000, special_port);
value = inl(special_port);
outl(0x00d00001, special_port);
value = inl(special_port);
outl(0x00d0005f, special_port);
value = inl(special_port);
outl(0x00d0007f, special_port);
value = inl(special_port);
outl(0x0090007f, special_port);
value = inl(special_port);
snd_emu10k1_ptr20_write(emu, TINA2_VOLUME, 0, 0xfefefefe); /* Defaults to 0x30303030 */
/* Delay to give time for ADC chip to switch on. It needs 113ms */
msleep(200);
return 0;
}
static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu,
const struct firmware *fw_entry)
{
int n, i;
int reg;
int value;
unsigned int write_post;
unsigned long flags;
if (!fw_entry)
return -EIO;
/* The FPGA is a Xilinx Spartan IIE XC2S50E */
/* GPIO7 -> FPGA PGMN
* GPIO6 -> FPGA CCLK
* GPIO5 -> FPGA DIN
* FPGA CONFIG OFF -> FPGA PGMN
*/
spin_lock_irqsave(&emu->emu_lock, flags);
outl(0x00, emu->port + A_IOCFG); /* Set PGMN low for 1uS. */
write_post = inl(emu->port + A_IOCFG);
udelay(100);
outl(0x80, emu->port + A_IOCFG); /* Leave bit 7 set during netlist setup. */
write_post = inl(emu->port + A_IOCFG);
udelay(100); /* Allow FPGA memory to clean */
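/* Bit-bang the netlist: shift each firmware byte out LSB-first on GPIO5 (DIN),
* pulsing GPIO6 (CCLK) for every bit while GPIO7 (PGMN) stays high. */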
for (n = 0; n < fw_entry->size; n++) {
value = fw_entry->data[n];
for (i = 0; i < 8; i++) {
reg = 0x80;
if (value & 0x1)
reg = reg | 0x20;
value = value >> 1;
outl(reg, emu->port + A_IOCFG);
write_post = inl(emu->port + A_IOCFG);
outl(reg | 0x40, emu->port + A_IOCFG);
write_post = inl(emu->port + A_IOCFG);
}
}
/* After programming, set GPIO bit 4 high again. */
outl(0x10, emu->port + A_IOCFG);
write_post = inl(emu->port + A_IOCFG);
spin_unlock_irqrestore(&emu->emu_lock, flags);
return 0;
}
static int emu1010_firmware_thread(void *data)
{
struct snd_emu10k1 *emu = data;
u32 tmp, tmp2, reg;
u32 last_reg = 0;
int err;
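/* Poll roughly once a second for Audio Dock hot-plug events reported via the
* Hana FPGA IRQ/option-card registers, loading the dock firmware whenever a
* dock appears. */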
for (;;) {
/* Delay to allow Audio Dock to settle */
msleep_interruptible(1000);
if (kthread_should_stop())
break;
#ifdef CONFIG_PM_SLEEP
if (emu->suspend)
continue;
#endif
snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &tmp); /* IRQ Status */
snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg); /* OPTIONS: Which cards are attached to the EMU */
if (reg & EMU_HANA_OPTION_DOCK_OFFLINE) {
/* Audio Dock attached */
/* Return to Audio Dock programming mode */
dev_info(emu->card->dev,
"emu1010: Loading Audio Dock Firmware\n");
snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, EMU_HANA_FPGA_CONFIG_AUDIODOCK);
if (!emu->dock_fw) {
const char *filename = NULL;
switch (emu->card_capabilities->emu_model) {
case EMU_MODEL_EMU1010:
filename = DOCK_FILENAME;
break;
case EMU_MODEL_EMU1010B:
filename = MICRO_DOCK_FILENAME;
break;
case EMU_MODEL_EMU1616:
filename = MICRO_DOCK_FILENAME;
break;
}
if (filename) {
err = request_firmware(&emu->dock_fw,
filename,
&emu->pci->dev);
if (err)
continue;
}
}
if (emu->dock_fw) {
err = snd_emu1010_load_firmware(emu, emu->dock_fw);
if (err)
continue;
}
snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0);
snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &reg);
dev_info(emu->card->dev,
"emu1010: EMU_HANA+DOCK_IRQ_STATUS = 0x%x\n",
reg);
/* ID, should read & 0x7f = 0x55 when FPGA programmed. */
snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
dev_info(emu->card->dev,
"emu1010: EMU_HANA+DOCK_ID = 0x%x\n", reg);
if ((reg & 0x1f) != 0x15) {
/* FPGA failed to be programmed */
dev_info(emu->card->dev,
"emu1010: Loading Audio Dock Firmware file failed, reg = 0x%x\n",
reg);
continue;
}
dev_info(emu->card->dev,
"emu1010: Audio Dock Firmware loaded\n");
snd_emu1010_fpga_read(emu, EMU_DOCK_MAJOR_REV, &tmp);
snd_emu1010_fpga_read(emu, EMU_DOCK_MINOR_REV, &tmp2);
dev_info(emu->card->dev, "Audio Dock ver: %u.%u\n",
tmp, tmp2);
/* Sync clocking between 1010 and Dock */
/* Allow DLL to settle */
msleep(10);
/* Unmute all. Default is muted after a firmware load */
snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
} else if (!reg && last_reg) {
/* Audio Dock removed */
dev_info(emu->card->dev,
"emu1010: Audio Dock detached\n");
/* Unmute all */
snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
}
last_reg = reg;
}
dev_info(emu->card->dev, "emu1010: firmware thread stopping\n");
return 0;
}
/*
* EMU-1010 - details found out from this driver, official MS Win drivers,
* testing the card:
*
* Audigy2 (aka Alice2):
* ---------------------
* * communication over PCI
* * conversion of 32-bit data coming over EMU32 links from HANA FPGA
* to 2 x 16-bit, using internal DSP instructions
* * slave mode, clock supplied by HANA
* * linked to HANA using:
* 32 x 32-bit serial EMU32 output channels
* 16 x EMU32 input channels
* (?) x I2S I/O channels (?)
*
* FPGA (aka HANA):
* ---------------
* * provides all (?) physical inputs and outputs of the card
* (ADC, DAC, SPDIF I/O, ADAT I/O, etc.)
* * provides clock signal for the card and Alice2
* * two crystals - for 44.1kHz and 48kHz multiples
* * provides internal routing of signal sources to signal destinations
* * inputs/outputs to Alice2 - see above
*
* Current status of the driver:
* ----------------------------
* * only 44.1/48kHz supported (the MS Win driver supports up to 192 kHz)
* * PCM device nb. 2:
* 16 x 16-bit playback - snd_emu10k1_fx8010_playback_ops
* 16 x 32-bit capture - snd_emu10k1_capture_efx_ops
*/
static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
{
unsigned int i;
u32 tmp, tmp2, reg;
int err;
dev_info(emu->card->dev, "emu1010: Special config.\n");
/* AC97 2.1, Any 16Meg of 4Gig address, Auto-Mute, EMU32 Slave,
* Lock Sound Memory Cache, Lock Tank Memory Cache,
* Mute all codecs.
*/
outl(0x0005a00c, emu->port + HCFG);
/* AC97 2.1, Any 16Meg of 4Gig address, Auto-Mute, EMU32 Slave,
* Lock Tank Memory Cache,
* Mute all codecs.
*/
outl(0x0005a004, emu->port + HCFG);
/* AC97 2.1, Any 16Meg of 4Gig address, Auto-Mute, EMU32 Slave,
* Mute all codecs.
*/
outl(0x0005a000, emu->port + HCFG);
/* AC97 2.1, Any 16Meg of 4Gig address, Auto-Mute, EMU32 Slave,
* Mute all codecs.
*/
outl(0x0005a000, emu->port + HCFG);
/* Disable 48Volt power to Audio Dock */
snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_PWR, 0);
/* ID, should read & 0x7f = 0x55. (Bit 7 is the IRQ bit) */
snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
dev_dbg(emu->card->dev, "reg1 = 0x%x\n", reg);
if ((reg & 0x3f) == 0x15) {
/* FPGA netlist already present so clear it */
/* Return to programming mode */
snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0x02);
}
snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
dev_dbg(emu->card->dev, "reg2 = 0x%x\n", reg);
if ((reg & 0x3f) == 0x15) {
/* FPGA failed to return to programming mode */
dev_info(emu->card->dev,
"emu1010: FPGA failed to return to programming mode\n");
return -ENODEV;
}
dev_info(emu->card->dev, "emu1010: EMU_HANA_ID = 0x%x\n", reg);
if (!emu->firmware) {
const char *filename;
switch (emu->card_capabilities->emu_model) {
case EMU_MODEL_EMU1010:
filename = HANA_FILENAME;
break;
case EMU_MODEL_EMU1010B:
filename = EMU1010B_FILENAME;
break;
case EMU_MODEL_EMU1616:
filename = EMU1010_NOTEBOOK_FILENAME;
break;
case EMU_MODEL_EMU0404:
filename = EMU0404_FILENAME;
break;
default:
return -ENODEV;
}
err = request_firmware(&emu->firmware, filename, &emu->pci->dev);
if (err != 0) {
dev_info(emu->card->dev,
"emu1010: firmware: %s not found. Err = %d\n",
filename, err);
return err;
}
dev_info(emu->card->dev,
"emu1010: firmware file = %s, size = 0x%zx\n",
filename, emu->firmware->size);
}
err = snd_emu1010_load_firmware(emu, emu->firmware);
if (err != 0) {
dev_info(emu->card->dev, "emu1010: Loading Firmware failed\n");
return err;
}
/* ID, should read & 0x7f = 0x55 when FPGA programmed. */
snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
if ((reg & 0x3f) != 0x15) {
/* FPGA failed to be programmed */
dev_info(emu->card->dev,
"emu1010: Loading Hana Firmware file failed, reg = 0x%x\n",
reg);
return -ENODEV;
}
dev_info(emu->card->dev, "emu1010: Hana Firmware loaded\n");
snd_emu1010_fpga_read(emu, EMU_HANA_MAJOR_REV, &tmp);
snd_emu1010_fpga_read(emu, EMU_HANA_MINOR_REV, &tmp2);
dev_info(emu->card->dev, "emu1010: Hana version: %u.%u\n", tmp, tmp2);
/* Enable 48Volt power to Audio Dock */
snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_PWR, EMU_HANA_DOCK_PWR_ON);
snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg);
dev_info(emu->card->dev, "emu1010: Card options = 0x%x\n", reg);
snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg);
dev_info(emu->card->dev, "emu1010: Card options = 0x%x\n", reg);
snd_emu1010_fpga_read(emu, EMU_HANA_OPTICAL_TYPE, &tmp);
/* Optical -> ADAT I/O */
/* 0 : SPDIF
* 1 : ADAT
*/
emu->emu1010.optical_in = 1; /* IN_ADAT */
emu->emu1010.optical_out = 1; /* OUT_ADAT */
tmp = (emu->emu1010.optical_in ? EMU_HANA_OPTICAL_IN_ADAT : 0) |
(emu->emu1010.optical_out ? EMU_HANA_OPTICAL_OUT_ADAT : 0);
snd_emu1010_fpga_write(emu, EMU_HANA_OPTICAL_TYPE, tmp);
snd_emu1010_fpga_read(emu, EMU_HANA_ADC_PADS, &tmp);
/* Set no attenuation on Audio Dock pads. */
snd_emu1010_fpga_write(emu, EMU_HANA_ADC_PADS, 0x00);
emu->emu1010.adc_pads = 0x00;
snd_emu1010_fpga_read(emu, EMU_HANA_DOCK_MISC, &tmp);
/* Unmute Audio dock DACs, Headphone source DAC-4. */
snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_MISC, 0x30);
snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_LEDS_2, 0x12);
snd_emu1010_fpga_read(emu, EMU_HANA_DAC_PADS, &tmp);
/* DAC PADs. */
snd_emu1010_fpga_write(emu, EMU_HANA_DAC_PADS, 0x0f);
emu->emu1010.dac_pads = 0x0f;
snd_emu1010_fpga_read(emu, EMU_HANA_DOCK_MISC, &tmp);
snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_MISC, 0x30);
snd_emu1010_fpga_read(emu, EMU_HANA_SPDIF_MODE, &tmp);
/* SPDIF Format. Set Consumer mode, 24bit, copy enable */
snd_emu1010_fpga_write(emu, EMU_HANA_SPDIF_MODE, 0x10);
/* MIDI routing */
snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_IN, 0x19);
/* Unknown. */
snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_OUT, 0x0c);
/* IRQ Enable: All on */
/* snd_emu1010_fpga_write(emu, 0x09, 0x0f ); */
/* IRQ Enable: All off */
snd_emu1010_fpga_write(emu, EMU_HANA_IRQ_ENABLE, 0x00);
snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg);
dev_info(emu->card->dev, "emu1010: Card options3 = 0x%x\n", reg);
/* Default WCLK set to 48kHz. */
snd_emu1010_fpga_write(emu, EMU_HANA_DEFCLOCK, 0x00);
/* Word Clock source, Internal 48kHz x1 */
snd_emu1010_fpga_write(emu, EMU_HANA_WCLOCK, EMU_HANA_WCLOCK_INT_48K);
/* snd_emu1010_fpga_write(emu, EMU_HANA_WCLOCK, EMU_HANA_WCLOCK_INT_48K | EMU_HANA_WCLOCK_4X); */
/* Audio Dock LEDs. */
snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_LEDS_2, 0x12);
#if 0
/* For 96kHz */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_0, EMU_SRC_HAMOA_ADC_LEFT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_1, EMU_SRC_HAMOA_ADC_RIGHT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_4, EMU_SRC_HAMOA_ADC_LEFT2);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_5, EMU_SRC_HAMOA_ADC_RIGHT2);
#endif
#if 0
/* For 192kHz */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_0, EMU_SRC_HAMOA_ADC_LEFT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_1, EMU_SRC_HAMOA_ADC_RIGHT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_2, EMU_SRC_HAMOA_ADC_LEFT2);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_3, EMU_SRC_HAMOA_ADC_RIGHT2);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_4, EMU_SRC_HAMOA_ADC_LEFT3);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_5, EMU_SRC_HAMOA_ADC_RIGHT3);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_6, EMU_SRC_HAMOA_ADC_LEFT4);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_7, EMU_SRC_HAMOA_ADC_RIGHT4);
#endif
#if 1
/* For 48kHz */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_0, EMU_SRC_DOCK_MIC_A1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_1, EMU_SRC_DOCK_MIC_B1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_2, EMU_SRC_HAMOA_ADC_LEFT2);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_3, EMU_SRC_HAMOA_ADC_LEFT2);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_4, EMU_SRC_DOCK_ADC1_LEFT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_5, EMU_SRC_DOCK_ADC1_RIGHT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_6, EMU_SRC_DOCK_ADC2_LEFT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_7, EMU_SRC_DOCK_ADC2_RIGHT1);
/* Pavel Hofman - setting defaults for 8 more capture channels
* Defaults only, users will set their own values anyway, let's
* just copy/paste.
*/
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_8, EMU_SRC_DOCK_MIC_A1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_9, EMU_SRC_DOCK_MIC_B1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_A, EMU_SRC_HAMOA_ADC_LEFT2);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_B, EMU_SRC_HAMOA_ADC_LEFT2);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_C, EMU_SRC_DOCK_ADC1_LEFT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_D, EMU_SRC_DOCK_ADC1_RIGHT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_E, EMU_SRC_DOCK_ADC2_LEFT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_F, EMU_SRC_DOCK_ADC2_RIGHT1);
#endif
#if 0
/* Original */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_4, EMU_SRC_HANA_ADAT);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_5, EMU_SRC_HANA_ADAT + 1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_6, EMU_SRC_HANA_ADAT + 2);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_7, EMU_SRC_HANA_ADAT + 3);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_8, EMU_SRC_HANA_ADAT + 4);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_9, EMU_SRC_HANA_ADAT + 5);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_A, EMU_SRC_HANA_ADAT + 6);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_B, EMU_SRC_HANA_ADAT + 7);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_C, EMU_SRC_DOCK_MIC_A1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_D, EMU_SRC_DOCK_MIC_B1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_E, EMU_SRC_HAMOA_ADC_LEFT2);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE2_EMU32_F, EMU_SRC_HAMOA_ADC_LEFT2);
#endif
for (i = 0; i < 0x20; i++) {
/* AudioDock Elink <- Silence */
snd_emu1010_fpga_link_dst_src_write(emu, 0x0100 + i, EMU_SRC_SILENCE);
}
for (i = 0; i < 4; i++) {
/* Hana SPDIF Out <- Silence */
snd_emu1010_fpga_link_dst_src_write(emu, 0x0200 + i, EMU_SRC_SILENCE);
}
for (i = 0; i < 7; i++) {
/* Hamoa DAC <- Silence */
snd_emu1010_fpga_link_dst_src_write(emu, 0x0300 + i, EMU_SRC_SILENCE);
}
for (i = 0; i < 7; i++) {
/* Hana ADAT Out <- Silence */
snd_emu1010_fpga_link_dst_src_write(emu, EMU_DST_HANA_ADAT + i, EMU_SRC_SILENCE);
}
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE_I2S0_LEFT, EMU_SRC_DOCK_ADC1_LEFT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE_I2S0_RIGHT, EMU_SRC_DOCK_ADC1_RIGHT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE_I2S1_LEFT, EMU_SRC_DOCK_ADC2_LEFT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE_I2S1_RIGHT, EMU_SRC_DOCK_ADC2_RIGHT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE_I2S2_LEFT, EMU_SRC_DOCK_ADC3_LEFT1);
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_ALICE_I2S2_RIGHT, EMU_SRC_DOCK_ADC3_RIGHT1);
snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, 0x01); /* Unmute all */
snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &tmp);
/* AC97 1.03, Any 32Meg of 2Gig address, Auto-Mute, EMU32 Slave,
* Lock Sound Memory Cache, Lock Tank Memory Cache,
* Mute all codecs.
*/
outl(0x0000a000, emu->port + HCFG);
/* AC97 1.03, Any 32Meg of 2Gig address, Auto-Mute, EMU32 Slave,
* Lock Sound Memory Cache, Lock Tank Memory Cache,
* Un-Mute all codecs.
*/
outl(0x0000a001, emu->port + HCFG);
/* Initial boot complete. Now patches */
snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &tmp);
snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_IN, 0x19); /* MIDI Route */
snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_OUT, 0x0c); /* Unknown */
snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_IN, 0x19); /* MIDI Route */
snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_OUT, 0x0c); /* Unknown */
snd_emu1010_fpga_read(emu, EMU_HANA_SPDIF_MODE, &tmp);
snd_emu1010_fpga_write(emu, EMU_HANA_SPDIF_MODE, 0x10); /* SPDIF Format spdif (or 0x11 for aes/ebu) */
/* Start Micro/Audio Dock firmware loader thread */
if (!emu->emu1010.firmware_thread) {
emu->emu1010.firmware_thread =
kthread_create(emu1010_firmware_thread, emu,
"emu1010_firmware");
if (IS_ERR(emu->emu1010.firmware_thread)) {
err = PTR_ERR(emu->emu1010.firmware_thread);
emu->emu1010.firmware_thread = NULL;
dev_info(emu->card->dev,
"emu1010: Creating thread failed\n");
return err;
}
wake_up_process(emu->emu1010.firmware_thread);
}
#if 0
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HAMOA_DAC_LEFT1, EMU_SRC_ALICE_EMU32B + 2); /* ALICE2 bus 0xa2 */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HAMOA_DAC_RIGHT1, EMU_SRC_ALICE_EMU32B + 3); /* ALICE2 bus 0xa3 */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HANA_SPDIF_LEFT1, EMU_SRC_ALICE_EMU32A + 2); /* ALICE2 bus 0xb2 */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HANA_SPDIF_RIGHT1, EMU_SRC_ALICE_EMU32A + 3); /* ALICE2 bus 0xb3 */
#endif
/* Default outputs */
if (emu->card_capabilities->emu_model == EMU_MODEL_EMU1616) {
/* 1616(M) cardbus default outputs */
/* ALICE2 bus 0xa0 */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC1_LEFT1, EMU_SRC_ALICE_EMU32A + 0);
emu->emu1010.output_source[0] = 17;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC1_RIGHT1, EMU_SRC_ALICE_EMU32A + 1);
emu->emu1010.output_source[1] = 18;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC2_LEFT1, EMU_SRC_ALICE_EMU32A + 2);
emu->emu1010.output_source[2] = 19;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC2_RIGHT1, EMU_SRC_ALICE_EMU32A + 3);
emu->emu1010.output_source[3] = 20;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC3_LEFT1, EMU_SRC_ALICE_EMU32A + 4);
emu->emu1010.output_source[4] = 21;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC3_RIGHT1, EMU_SRC_ALICE_EMU32A + 5);
emu->emu1010.output_source[5] = 22;
/* ALICE2 bus 0xa0 */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_MANA_DAC_LEFT, EMU_SRC_ALICE_EMU32A + 0);
emu->emu1010.output_source[16] = 17;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_MANA_DAC_RIGHT, EMU_SRC_ALICE_EMU32A + 1);
emu->emu1010.output_source[17] = 18;
} else {
/* ALICE2 bus 0xa0 */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC1_LEFT1, EMU_SRC_ALICE_EMU32A + 0);
emu->emu1010.output_source[0] = 21;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC1_RIGHT1, EMU_SRC_ALICE_EMU32A + 1);
emu->emu1010.output_source[1] = 22;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC2_LEFT1, EMU_SRC_ALICE_EMU32A + 2);
emu->emu1010.output_source[2] = 23;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC2_RIGHT1, EMU_SRC_ALICE_EMU32A + 3);
emu->emu1010.output_source[3] = 24;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC3_LEFT1, EMU_SRC_ALICE_EMU32A + 4);
emu->emu1010.output_source[4] = 25;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC3_RIGHT1, EMU_SRC_ALICE_EMU32A + 5);
emu->emu1010.output_source[5] = 26;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC4_LEFT1, EMU_SRC_ALICE_EMU32A + 6);
emu->emu1010.output_source[6] = 27;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_DAC4_RIGHT1, EMU_SRC_ALICE_EMU32A + 7);
emu->emu1010.output_source[7] = 28;
/* ALICE2 bus 0xa0 */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_PHONES_LEFT1, EMU_SRC_ALICE_EMU32A + 0);
emu->emu1010.output_source[8] = 21;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_PHONES_RIGHT1, EMU_SRC_ALICE_EMU32A + 1);
emu->emu1010.output_source[9] = 22;
/* ALICE2 bus 0xa0 */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_SPDIF_LEFT1, EMU_SRC_ALICE_EMU32A + 0);
emu->emu1010.output_source[10] = 21;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_DOCK_SPDIF_RIGHT1, EMU_SRC_ALICE_EMU32A + 1);
emu->emu1010.output_source[11] = 22;
/* ALICE2 bus 0xa0 */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HANA_SPDIF_LEFT1, EMU_SRC_ALICE_EMU32A + 0);
emu->emu1010.output_source[12] = 21;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HANA_SPDIF_RIGHT1, EMU_SRC_ALICE_EMU32A + 1);
emu->emu1010.output_source[13] = 22;
/* ALICE2 bus 0xa0 */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HAMOA_DAC_LEFT1, EMU_SRC_ALICE_EMU32A + 0);
emu->emu1010.output_source[14] = 21;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HAMOA_DAC_RIGHT1, EMU_SRC_ALICE_EMU32A + 1);
emu->emu1010.output_source[15] = 22;
/* ALICE2 bus 0xa0 */
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HANA_ADAT, EMU_SRC_ALICE_EMU32A + 0);
emu->emu1010.output_source[16] = 21;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HANA_ADAT + 1, EMU_SRC_ALICE_EMU32A + 1);
emu->emu1010.output_source[17] = 22;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HANA_ADAT + 2, EMU_SRC_ALICE_EMU32A + 2);
emu->emu1010.output_source[18] = 23;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HANA_ADAT + 3, EMU_SRC_ALICE_EMU32A + 3);
emu->emu1010.output_source[19] = 24;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HANA_ADAT + 4, EMU_SRC_ALICE_EMU32A + 4);
emu->emu1010.output_source[20] = 25;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HANA_ADAT + 5, EMU_SRC_ALICE_EMU32A + 5);
emu->emu1010.output_source[21] = 26;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HANA_ADAT + 6, EMU_SRC_ALICE_EMU32A + 6);
emu->emu1010.output_source[22] = 27;
snd_emu1010_fpga_link_dst_src_write(emu,
EMU_DST_HANA_ADAT + 7, EMU_SRC_ALICE_EMU32A + 7);
emu->emu1010.output_source[23] = 28;
}
/* TEMP: Select SPDIF in/out */
/* snd_emu1010_fpga_write(emu, EMU_HANA_OPTICAL_TYPE, 0x0); */ /* Output spdif */
/* TEMP: Select 48kHz SPDIF out */
snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, 0x0); /* Mute all */
snd_emu1010_fpga_write(emu, EMU_HANA_DEFCLOCK, 0x0); /* Default fallback clock 48kHz */
/* Word Clock source, Internal 48kHz x1 */
snd_emu1010_fpga_write(emu, EMU_HANA_WCLOCK, EMU_HANA_WCLOCK_INT_48K);
/* snd_emu1010_fpga_write(emu, EMU_HANA_WCLOCK, EMU_HANA_WCLOCK_INT_48K | EMU_HANA_WCLOCK_4X); */
emu->emu1010.internal_clock = 1; /* 48000 */
snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_LEDS_2, 0x12); /* Set LEDs on Audio Dock */
snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, 0x1); /* Unmute all */
/* snd_emu1010_fpga_write(emu, 0x7, 0x0); */ /* Mute all */
/* snd_emu1010_fpga_write(emu, 0x7, 0x1); */ /* Unmute all */
/* snd_emu1010_fpga_write(emu, 0xe, 0x12); */ /* Set LEDs on Audio Dock */
return 0;
}
/*
* Create the EMU10K1 instance
*/
#ifdef CONFIG_PM_SLEEP
static int alloc_pm_buffer(struct snd_emu10k1 *emu);
static void free_pm_buffer(struct snd_emu10k1 *emu);
#endif
static int snd_emu10k1_free(struct snd_emu10k1 *emu)
{
if (emu->port) { /* avoid access to already used hardware */
snd_emu10k1_fx8010_tram_setup(emu, 0);
snd_emu10k1_done(emu);
snd_emu10k1_free_efx(emu);
}
if (emu->card_capabilities->emu_model == EMU_MODEL_EMU1010) {
/* Disable 48Volt power to Audio Dock */
snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_PWR, 0);
}
if (emu->emu1010.firmware_thread)
kthread_stop(emu->emu1010.firmware_thread);
release_firmware(emu->firmware);
release_firmware(emu->dock_fw);
if (emu->irq >= 0)
free_irq(emu->irq, emu);
/* remove reserved page */
if (emu->reserved_page) {
snd_emu10k1_synth_free(emu,
(struct snd_util_memblk *)emu->reserved_page);
emu->reserved_page = NULL;
}
snd_util_memhdr_free(emu->memhdr);
if (emu->silent_page.area)
snd_dma_free_pages(&emu->silent_page);
if (emu->ptb_pages.area)
snd_dma_free_pages(&emu->ptb_pages);
vfree(emu->page_ptr_table);
vfree(emu->page_addr_table);
#ifdef CONFIG_PM_SLEEP
free_pm_buffer(emu);
#endif
if (emu->port)
pci_release_regions(emu->pci);
if (emu->card_capabilities->ca0151_chip) /* P16V */
snd_p16v_free(emu);
pci_disable_device(emu->pci);
kfree(emu);
return 0;
}
static int snd_emu10k1_dev_free(struct snd_device *device)
{
struct snd_emu10k1 *emu = device->device_data;
return snd_emu10k1_free(emu);
}
static struct snd_emu_chip_details emu_chip_details[] = {
/* Audigy 5/Rx SB1550 */
/* Tested by michael@gernoth.net 28 Mar 2015 */
/* DSP: CA10300-IAT LF
* DAC: Cirrus Logic CS4382-KQZ
* ADC: Philips 1361T
* AC97: Sigmatel STAC9750
* CA0151: None
*/
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x10241102,
.driver = "Audigy2", .name = "SB Audigy 5/Rx [SB1550]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0108_chip = 1,
.spk71 = 1,
.adc_1361t = 1, /* 24 bit capture instead of 16bit */
.ac97_chip = 1},
/* Audigy4 (Not PRO) SB0610 */
/* Tested by James@superbug.co.uk 4th April 2006 */
/* A_IOCFG bits
* Output
* 0: ?
* 1: ?
* 2: ?
* 3: 0 - Digital Out, 1 - Line in
* 4: ?
* 5: ?
* 6: ?
* 7: ?
* Input
* 8: ?
* 9: ?
* A: Green jack sense (Front)
* B: ?
* C: Black jack sense (Rear/Side Right)
* D: Yellow jack sense (Center/LFE/Side Left)
* E: ?
* F: ?
*
* Digital Out/Line in switch using A_IOCFG bit 3 (0x08)
* 0 - Digital Out
* 1 - Line in
*/
/* Mic input not tested.
* Analog CD input not tested
* Digital Out not tested.
* Line in working.
* Audio output 5.1 working. Side outputs not working.
*/
/* DSP: CA10300-IAT LF
* DAC: Cirrus Logic CS4382-KQZ
* ADC: Philips 1361T
* AC97: Sigmatel STAC9750
* CA0151: None
*/
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x10211102,
.driver = "Audigy2", .name = "SB Audigy 4 [SB0610]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0108_chip = 1,
.spk71 = 1,
.adc_1361t = 1, /* 24 bit capture instead of 16bit */
.ac97_chip = 1} ,
/* Audigy 2 Value AC3 out does not work yet.
* Need to find out how to turn off interpolators.
*/
/* Tested by James@superbug.co.uk 3rd July 2005 */
/* DSP: CA0108-IAT
* DAC: CS4382-KQ
* ADC: Philips 1361T
* AC97: STAC9750
* CA0151: None
*/
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x10011102,
.driver = "Audigy2", .name = "SB Audigy 2 Value [SB0400]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0108_chip = 1,
.spk71 = 1,
.ac97_chip = 1} ,
/* Audigy 2 ZS Notebook Cardbus card.*/
/* Tested by James@superbug.co.uk 6th November 2006 */
/* Audio output 7.1/Headphones working.
* Digital output working. (AC3 not checked, only PCM)
* Audio Mic/Line inputs working.
* Digital input not tested.
*/
/* DSP: Tina2
* DAC: Wolfson WM8768/WM8568
* ADC: Wolfson WM8775
* AC97: None
* CA0151: None
*/
/* Tested by James@superbug.co.uk 4th April 2006 */
/* A_IOCFG bits
* Output
* 0: Not Used
* 1: 0 = Mute all the 7.1 channel out. 1 = unmute.
* 2: Analog input 0 = line in, 1 = mic in
* 3: Not Used
* 4: Digital output 0 = off, 1 = on.
* 5: Not Used
* 6: Not Used
* 7: Not Used
* Input
* All bits 1 (0x3fxx) means nothing plugged in.
* 8-9: 0 = Line in/Mic, 2 = Optical in, 3 = Nothing.
* A-B: 0 = Headphones, 2 = Optical out, 3 = Nothing.
* C-D: 2 = Front/Rear/etc, 3 = nothing.
* E-F: Always 0
*
*/
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102,
.driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0108_chip = 1,
.ca_cardbus_chip = 1,
.spi_dac = 1,
.i2c_adc = 1,
.spk71 = 1} ,
/* Tested by James@superbug.co.uk 4th Nov 2007. */
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x42011102,
.driver = "Audigy2", .name = "E-mu 1010 Notebook [MAEM8950]",
.id = "EMU1010",
.emu10k2_chip = 1,
.ca0108_chip = 1,
.ca_cardbus_chip = 1,
.spk71 = 1 ,
.emu_model = EMU_MODEL_EMU1616},
/* Tested by James@superbug.co.uk 4th Nov 2007. */
/* This is MAEM8960, 0202 is MAEM 8980 */
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40041102,
.driver = "Audigy2", .name = "E-mu 1010b PCI [MAEM8960]",
.id = "EMU1010",
.emu10k2_chip = 1,
.ca0108_chip = 1,
.spk71 = 1,
.emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 new revision */
/* Tested by Maxim Kachur <mcdebugger@duganet.ru> 17th Oct 2012. */
/* This is MAEM8986, 0202 is MAEM8980 */
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40071102,
.driver = "Audigy2", .name = "E-mu 1010 PCIe [MAEM8986]",
.id = "EMU1010",
.emu10k2_chip = 1,
.ca0108_chip = 1,
.spk71 = 1,
.emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 PCIe */
/* Tested by James@superbug.co.uk 8th July 2005. */
/* This is MAEM8810, 0202 is MAEM8820 */
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40011102,
.driver = "Audigy2", .name = "E-mu 1010 [MAEM8810]",
.id = "EMU1010",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.spk71 = 1,
.emu_model = EMU_MODEL_EMU1010}, /* EMU 1010 old revision */
/* EMU0404b */
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40021102,
.driver = "Audigy2", .name = "E-mu 0404b PCI [MAEM8852]",
.id = "EMU0404",
.emu10k2_chip = 1,
.ca0108_chip = 1,
.spk71 = 1,
.emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 new revision */
/* Tested by James@superbug.co.uk 20-3-2007. */
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40021102,
.driver = "Audigy2", .name = "E-mu 0404 [MAEM8850]",
.id = "EMU0404",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.spk71 = 1,
.emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 */
/* EMU0404 PCIe */
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40051102,
.driver = "Audigy2", .name = "E-mu 0404 PCIe [MAEM8984]",
.id = "EMU0404",
.emu10k2_chip = 1,
.ca0108_chip = 1,
.spk71 = 1,
.emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 PCIe ver_03 */
/* Note that all E-mu cards require kernel 2.6 or newer. */
{.vendor = 0x1102, .device = 0x0008,
.driver = "Audigy2", .name = "SB Audigy 2 Value [Unknown]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0108_chip = 1,
.ac97_chip = 1} ,
/* Tested by James@superbug.co.uk 3rd July 2005 */
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x20071102,
.driver = "Audigy2", .name = "SB Audigy 4 PRO [SB0380]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.ca0151_chip = 1,
.spk71 = 1,
.spdif_bug = 1,
.ac97_chip = 1} ,
/* Tested by shane-alsa@cm.nu 5th Nov 2005 */
/* The 0x20061102 does have SB0350 written on it
* Just like 0x20021102
*/
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x20061102,
.driver = "Audigy2", .name = "SB Audigy 2 [SB0350b]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.ca0151_chip = 1,
.spk71 = 1,
.spdif_bug = 1,
.invert_shared_spdif = 1, /* digital/analog switch swapped */
.ac97_chip = 1} ,
/* 0x20051102 also has SB0350 written on it, treated as Audigy 2 ZS by
Creative's Windows driver */
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x20051102,
.driver = "Audigy2", .name = "SB Audigy 2 ZS [SB0350a]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.ca0151_chip = 1,
.spk71 = 1,
.spdif_bug = 1,
.invert_shared_spdif = 1, /* digital/analog switch swapped */
.ac97_chip = 1} ,
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x20021102,
.driver = "Audigy2", .name = "SB Audigy 2 ZS [SB0350]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.ca0151_chip = 1,
.spk71 = 1,
.spdif_bug = 1,
.invert_shared_spdif = 1, /* digital/analog switch swapped */
.ac97_chip = 1} ,
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x20011102,
.driver = "Audigy2", .name = "SB Audigy 2 ZS [SB0360]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.ca0151_chip = 1,
.spk71 = 1,
.spdif_bug = 1,
.invert_shared_spdif = 1, /* digital/analog switch swapped */
.ac97_chip = 1} ,
/* Audigy 2 */
/* Tested by James@superbug.co.uk 3rd July 2005 */
/* DSP: CA0102-IAT
* DAC: CS4382-KQ
* ADC: Philips 1361T
* AC97: STAC9721
* CA0151: Yes
*/
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10071102,
.driver = "Audigy2", .name = "SB Audigy 2 [SB0240]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.ca0151_chip = 1,
.spk71 = 1,
.spdif_bug = 1,
.adc_1361t = 1, /* 24 bit capture instead of 16bit */
.ac97_chip = 1} ,
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102,
.driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.ca0151_chip = 1,
.spk71 = 1,
.spdif_bug = 1} ,
/* Dell OEM/Creative Labs Audigy 2 ZS */
/* See ALSA bug#1365 */
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10031102,
.driver = "Audigy2", .name = "SB Audigy 2 ZS [SB0353]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.ca0151_chip = 1,
.spk71 = 1,
.spdif_bug = 1,
.invert_shared_spdif = 1, /* digital/analog switch swapped */
.ac97_chip = 1} ,
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10021102,
.driver = "Audigy2", .name = "SB Audigy 2 Platinum [SB0240P]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.ca0151_chip = 1,
.spk71 = 1,
.spdif_bug = 1,
.invert_shared_spdif = 1, /* digital/analog switch swapped */
.adc_1361t = 1, /* 24 bit capture instead of 16bit. Fixes ALSA bug#324 */
.ac97_chip = 1} ,
{.vendor = 0x1102, .device = 0x0004, .revision = 0x04,
.driver = "Audigy2", .name = "SB Audigy 2 [Unknown]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.ca0151_chip = 1,
.spdif_bug = 1,
.ac97_chip = 1} ,
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x00531102,
.driver = "Audigy", .name = "SB Audigy 1 [SB0092]",
.id = "Audigy",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.ac97_chip = 1} ,
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x00521102,
.driver = "Audigy", .name = "SB Audigy 1 ES [SB0160]",
.id = "Audigy",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.spdif_bug = 1,
.ac97_chip = 1} ,
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x00511102,
.driver = "Audigy", .name = "SB Audigy 1 [SB0090]",
.id = "Audigy",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.ac97_chip = 1} ,
{.vendor = 0x1102, .device = 0x0004,
.driver = "Audigy", .name = "Audigy 1 [Unknown]",
.id = "Audigy",
.emu10k2_chip = 1,
.ca0102_chip = 1,
.ac97_chip = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x100a1102,
.driver = "EMU10K1", .name = "SB Live! 5.1 [SB0220]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x806b1102,
.driver = "EMU10K1", .name = "SB Live! [SB0105]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x806a1102,
.driver = "EMU10K1", .name = "SB Live! Value [SB0103]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80691102,
.driver = "EMU10K1", .name = "SB Live! Value [SB0101]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
/* Tested by ALSA bug#1680 26th December 2005 */
/* note: It really has SB0220 written on the card, */
/* but it's SB0228 according to kx.inf */
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80661102,
.driver = "EMU10K1", .name = "SB Live! 5.1 Dell OEM [SB0228]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
/* Tested by Thomas Zehetbauer 27th Aug 2005 */
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80651102,
.driver = "EMU10K1", .name = "SB Live! 5.1 [SB0220]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80641102,
.driver = "EMU10K1", .name = "SB Live! 5.1",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
/* Tested by alsa bugtrack user "hus" bug #1297 12th Aug 2005 */
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80611102,
.driver = "EMU10K1", .name = "SB Live! 5.1 [SB0060]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 2, /* ac97 is optional; both SBLive 5.1 and platinum
* share the same IDs!
*/
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80511102,
.driver = "EMU10K1", .name = "SB Live! Value [CT4850]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80401102,
.driver = "EMU10K1", .name = "SB Live! Platinum [CT4760P]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80321102,
.driver = "EMU10K1", .name = "SB Live! Value [CT4871]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80311102,
.driver = "EMU10K1", .name = "SB Live! Value [CT4831]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80281102,
.driver = "EMU10K1", .name = "SB Live! Value [CT4870]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
/* Tested by James@superbug.co.uk 3rd July 2005 */
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80271102,
.driver = "EMU10K1", .name = "SB Live! Value [CT4832]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80261102,
.driver = "EMU10K1", .name = "SB Live! Value [CT4830]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80231102,
.driver = "EMU10K1", .name = "SB PCI512 [CT4790]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80221102,
.driver = "EMU10K1", .name = "SB Live! Value [CT4780]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x40011102,
.driver = "EMU10K1", .name = "E-mu APS [PC545]",
.id = "APS",
.emu10k1_chip = 1,
.ecard = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x00211102,
.driver = "EMU10K1", .name = "SB Live! [CT4620]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x00201102,
.driver = "EMU10K1", .name = "SB Live! Value [CT4670]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002,
.driver = "EMU10K1", .name = "SB Live! [Unknown]",
.id = "Live",
.emu10k1_chip = 1,
.ac97_chip = 1,
.sblive51 = 1} ,
{ } /* terminator */
};
int snd_emu10k1_create(struct snd_card *card,
struct pci_dev *pci,
unsigned short extin_mask,
unsigned short extout_mask,
long max_cache_bytes,
int enable_ir,
uint subsystem,
struct snd_emu10k1 **remu)
{
struct snd_emu10k1 *emu;
int idx, err;
int is_audigy;
unsigned int silent_page;
const struct snd_emu_chip_details *c;
static struct snd_device_ops ops = {
.dev_free = snd_emu10k1_dev_free,
};
*remu = NULL;
/* enable PCI device */
err = pci_enable_device(pci);
if (err < 0)
return err;
emu = kzalloc(sizeof(*emu), GFP_KERNEL);
if (emu == NULL) {
pci_disable_device(pci);
return -ENOMEM;
}
emu->card = card;
spin_lock_init(&emu->reg_lock);
spin_lock_init(&emu->emu_lock);
spin_lock_init(&emu->spi_lock);
spin_lock_init(&emu->i2c_lock);
spin_lock_init(&emu->voice_lock);
spin_lock_init(&emu->synth_lock);
spin_lock_init(&emu->memblk_lock);
mutex_init(&emu->fx8010.lock);
INIT_LIST_HEAD(&emu->mapped_link_head);
INIT_LIST_HEAD(&emu->mapped_order_link_head);
emu->pci = pci;
emu->irq = -1;
emu->synth = NULL;
emu->get_synth_voice = NULL;
/* read revision & serial */
emu->revision = pci->revision;
pci_read_config_dword(pci, PCI_SUBSYSTEM_VENDOR_ID, &emu->serial);
pci_read_config_word(pci, PCI_SUBSYSTEM_ID, &emu->model);
dev_dbg(card->dev,
"vendor = 0x%x, device = 0x%x, subsystem_vendor_id = 0x%x, subsystem_id = 0x%x\n",
pci->vendor, pci->device, emu->serial, emu->model);
for (c = emu_chip_details; c->vendor; c++) {
if (c->vendor == pci->vendor && c->device == pci->device) {
if (subsystem) {
if (c->subsystem && (c->subsystem == subsystem))
break;
else
continue;
} else {
if (c->subsystem && (c->subsystem != emu->serial))
continue;
if (c->revision && c->revision != emu->revision)
continue;
}
break;
}
}
if (c->vendor == 0) {
dev_err(card->dev, "emu10k1: Card not recognised\n");
kfree(emu);
pci_disable_device(pci);
return -ENOENT;
}
emu->card_capabilities = c;
if (c->subsystem && !subsystem)
dev_dbg(card->dev, "Sound card name = %s\n", c->name);
else if (subsystem)
dev_dbg(card->dev, "Sound card name = %s, "
"vendor = 0x%x, device = 0x%x, subsystem = 0x%x. "
"Forced to subsystem = 0x%x\n", c->name,
pci->vendor, pci->device, emu->serial, c->subsystem);
else
dev_dbg(card->dev, "Sound card name = %s, "
"vendor = 0x%x, device = 0x%x, subsystem = 0x%x.\n",
c->name, pci->vendor, pci->device,
emu->serial);
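/* Pick the default card id from the chip details and, if another registered
* card already uses it, append a _<n> suffix until the id is unique. */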
if (!*card->id && c->id) {
int i, n = 0;
strlcpy(card->id, c->id, sizeof(card->id));
for (;;) {
for (i = 0; i < snd_ecards_limit; i++) {
if (snd_cards[i] && !strcmp(snd_cards[i]->id, card->id))
break;
}
if (i >= snd_ecards_limit)
break;
n++;
if (n >= SNDRV_CARDS)
break;
snprintf(card->id, sizeof(card->id), "%s_%d", c->id, n);
}
}
is_audigy = emu->audigy = c->emu10k2_chip;
/* set addressing mode */
emu->address_mode = is_audigy ? 0 : 1;
/* set the DMA transfer mask */
emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK;
if (dma_set_mask(&pci->dev, emu->dma_mask) < 0 ||
dma_set_coherent_mask(&pci->dev, emu->dma_mask) < 0) {
dev_err(card->dev,
"architecture does not support PCI busmaster DMA with mask 0x%lx\n",
emu->dma_mask);
kfree(emu);
pci_disable_device(pci);
return -ENXIO;
}
if (is_audigy)
emu->gpr_base = A_FXGPREGBASE;
else
emu->gpr_base = FXGPREGBASE;
err = pci_request_regions(pci, "EMU10K1");
if (err < 0) {
kfree(emu);
pci_disable_device(pci);
return err;
}
emu->port = pci_resource_start(pci, 0);
emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT;
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
(emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) {
err = -ENOMEM;
goto error;
}
emu->page_ptr_table = vmalloc(emu->max_cache_pages * sizeof(void *));
emu->page_addr_table = vmalloc(emu->max_cache_pages *
sizeof(unsigned long));
if (emu->page_ptr_table == NULL || emu->page_addr_table == NULL) {
err = -ENOMEM;
goto error;
}
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
EMUPAGESIZE, &emu->silent_page) < 0) {
err = -ENOMEM;
goto error;
}
emu->memhdr = snd_util_memhdr_new(emu->max_cache_pages * PAGE_SIZE);
if (emu->memhdr == NULL) {
err = -ENOMEM;
goto error;
}
emu->memhdr->block_extra_size = sizeof(struct snd_emu10k1_memblk) -
sizeof(struct snd_util_memblk);
pci_set_master(pci);
emu->fx8010.fxbus_mask = 0x303f;
if (extin_mask == 0)
extin_mask = 0x3fcf;
if (extout_mask == 0)
extout_mask = 0x7fff;
emu->fx8010.extin_mask = extin_mask;
emu->fx8010.extout_mask = extout_mask;
emu->enable_ir = enable_ir;
if (emu->card_capabilities->ca_cardbus_chip) {
err = snd_emu10k1_cardbus_init(emu);
if (err < 0)
goto error;
}
if (emu->card_capabilities->ecard) {
err = snd_emu10k1_ecard_init(emu);
if (err < 0)
goto error;
} else if (emu->card_capabilities->emu_model) {
err = snd_emu10k1_emu1010_init(emu);
if (err < 0) {
snd_emu10k1_free(emu);
return err;
}
} else {
/* 5.1: Enable the additional AC97 Slots. If the emu10k1 version
does not support this, it shouldn't do any harm */
snd_emu10k1_ptr_write(emu, AC97SLOT, 0,
AC97SLOT_CNTR|AC97SLOT_LFE);
}
/* initialize TRAM setup */
emu->fx8010.itram_size = (16 * 1024)/2;
emu->fx8010.etram_pages.area = NULL;
emu->fx8010.etram_pages.bytes = 0;
/* irq handler must be registered after I/O ports are activated */
if (request_irq(pci->irq, snd_emu10k1_interrupt, IRQF_SHARED,
KBUILD_MODNAME, emu)) {
err = -EBUSY;
goto error;
}
emu->irq = pci->irq;
/*
* Init to 0x02109204 :
* Clock accuracy = 0 (1000ppm)
* Sample Rate = 2 (48kHz)
* Audio Channel = 1 (Left of 2)
* Source Number = 0 (Unspecified)
* Generation Status = 1 (Original for Cat Code 12)
* Cat Code = 12 (Digital Signal Mixer)
* Mode = 0 (Mode 0)
* Emphasis = 0 (None)
* CP = 1 (Copyright unasserted)
* AN = 0 (Audio data)
* P = 0 (Consumer)
*/
emu->spdif_bits[0] = emu->spdif_bits[1] =
emu->spdif_bits[2] = SPCS_CLKACCY_1000PPM | SPCS_SAMPLERATE_48 |
SPCS_CHANNELNUM_LEFT | SPCS_SOURCENUM_UNSPEC |
SPCS_GENERATIONSTATUS | 0x00001200 |
0x00000000 | SPCS_EMPHASIS_NONE | SPCS_COPYRIGHT;
emu->reserved_page = (struct snd_emu10k1_memblk *)
snd_emu10k1_synth_alloc(emu, 4096);
if (emu->reserved_page)
emu->reserved_page->map_locked = 1;
/* Clear silent pages and set up pointers */
memset(emu->silent_page.area, 0, PAGE_SIZE);
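/* Every page-table entry initially points at the silent page (OR-ed with its
* index); real sample pages are mapped in later by the memory allocator. */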
silent_page = emu->silent_page.addr << emu->address_mode;
for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++)
((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx);
/* set up voice indices */
for (idx = 0; idx < NUM_G; idx++) {
emu->voices[idx].emu = emu;
emu->voices[idx].number = idx;
}
err = snd_emu10k1_init(emu, enable_ir, 0);
if (err < 0)
goto error;
#ifdef CONFIG_PM_SLEEP
err = alloc_pm_buffer(emu);
if (err < 0)
goto error;
#endif
/* Initialize the effect engine */
err = snd_emu10k1_init_efx(emu);
if (err < 0)
goto error;
snd_emu10k1_audio_enable(emu);
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, emu, &ops);
if (err < 0)
goto error;
#ifdef CONFIG_SND_PROC_FS
snd_emu10k1_proc_init(emu);
#endif
*remu = emu;
return 0;
error:
snd_emu10k1_free(emu);
return err;
}
#ifdef CONFIG_PM_SLEEP
static unsigned char saved_regs[] = {
CPF, PTRX, CVCF, VTFT, Z1, Z2, PSST, DSL, CCCA, CCR, CLP,
FXRT, MAPA, MAPB, ENVVOL, ATKHLDV, DCYSUSV, LFOVAL1, ENVVAL,
ATKHLDM, DCYSUSM, LFOVAL2, IP, IFATN, PEFE, FMMOD, TREMFRQ, FM2FRQ2,
TEMPENV, ADCCR, FXWC, MICBA, ADCBA, FXBA,
MICBS, ADCBS, FXBS, CDCS, GPSCS, SPCS0, SPCS1, SPCS2,
SPBYPASS, AC97SLOT, CDSRCS, GPSRCS, ZVSRCS, MICIDX, ADCIDX, FXIDX,
0xff /* end */
};
static unsigned char saved_regs_audigy[] = {
A_ADCIDX, A_MICIDX, A_FXWC1, A_FXWC2, A_SAMPLE_RATE,
A_FXRT2, A_SENDAMOUNTS, A_FXRT1,
0xff /* end */
};
static int alloc_pm_buffer(struct snd_emu10k1 *emu)
{
int size;
size = ARRAY_SIZE(saved_regs);
if (emu->audigy)
size += ARRAY_SIZE(saved_regs_audigy);
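/* One 32-bit word per voice (NUM_G) for every register in the save list(s). */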
emu->saved_ptr = vmalloc(4 * NUM_G * size);
if (!emu->saved_ptr)
return -ENOMEM;
if (snd_emu10k1_efx_alloc_pm_buffer(emu) < 0)
return -ENOMEM;
if (emu->card_capabilities->ca0151_chip &&
snd_p16v_alloc_pm_buffer(emu) < 0)
return -ENOMEM;
return 0;
}
static void free_pm_buffer(struct snd_emu10k1 *emu)
{
vfree(emu->saved_ptr);
snd_emu10k1_efx_free_pm_buffer(emu);
if (emu->card_capabilities->ca0151_chip)
snd_p16v_free_pm_buffer(emu);
}
void snd_emu10k1_suspend_regs(struct snd_emu10k1 *emu)
{
int i;
unsigned char *reg;
unsigned int *val;
val = emu->saved_ptr;
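/* Snapshot every per-voice register in the 0xff-terminated save lists;
* snd_emu10k1_resume_regs() replays them in the same order. */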
for (reg = saved_regs; *reg != 0xff; reg++)
for (i = 0; i < NUM_G; i++, val++)
*val = snd_emu10k1_ptr_read(emu, *reg, i);
if (emu->audigy) {
for (reg = saved_regs_audigy; *reg != 0xff; reg++)
for (i = 0; i < NUM_G; i++, val++)
*val = snd_emu10k1_ptr_read(emu, *reg, i);
}
if (emu->audigy)
emu->saved_a_iocfg = inl(emu->port + A_IOCFG);
emu->saved_hcfg = inl(emu->port + HCFG);
}
void snd_emu10k1_resume_init(struct snd_emu10k1 *emu)
{
if (emu->card_capabilities->ca_cardbus_chip)
snd_emu10k1_cardbus_init(emu);
if (emu->card_capabilities->ecard)
snd_emu10k1_ecard_init(emu);
else if (emu->card_capabilities->emu_model)
snd_emu10k1_emu1010_init(emu);
else
snd_emu10k1_ptr_write(emu, AC97SLOT, 0, AC97SLOT_CNTR|AC97SLOT_LFE);
snd_emu10k1_init(emu, emu->enable_ir, 1);
}
void snd_emu10k1_resume_regs(struct snd_emu10k1 *emu)
{
int i;
unsigned char *reg;
unsigned int *val;
snd_emu10k1_audio_enable(emu);
/* restore for spdif */
if (emu->audigy)
outl(emu->saved_a_iocfg, emu->port + A_IOCFG);
outl(emu->saved_hcfg, emu->port + HCFG);
val = emu->saved_ptr;
for (reg = saved_regs; *reg != 0xff; reg++)
for (i = 0; i < NUM_G; i++, val++)
snd_emu10k1_ptr_write(emu, *reg, i, *val);
if (emu->audigy) {
for (reg = saved_regs_audigy; *reg != 0xff; reg++)
for (i = 0; i < NUM_G; i++, val++)
snd_emu10k1_ptr_write(emu, *reg, i, *val);
}
}
#endif
| gpl-2.0 |
AntaresOne/AntaresCore-Kernel-h815 | drivers/power/qpnp-vm-bms.c | 242 | 111846 | /* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "BMS: %s: " fmt, __func__
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/rtc.h>
#include <linux/power_supply.h>
#include <linux/fcntl.h>
#include <linux/uaccess.h>
#include <linux/spmi.h>
#include <linux/wakelock.h>
#include <linux/debugfs.h>
#include <linux/qpnp/power-on.h>
#include <linux/qpnp/qpnp-adc.h>
#include <linux/of_batterydata.h>
#include <linux/batterydata-interface.h>
#include <linux/qpnp-revid.h>
#include <uapi/linux/vm_bms.h>
#define _BMS_MASK(BITS, POS) \
((unsigned char)(((1 << (BITS)) - 1) << (POS)))
#define BMS_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \
_BMS_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \
(RIGHT_BIT_POS))
/* Config / Data registers */
#define REVISION1_REG 0x0
#define STATUS1_REG 0x8
#define FSM_STATE_MASK BMS_MASK(5, 3)
#define FSM_STATE_SHIFT 3
#define STATUS2_REG 0x9
#define FIFO_CNT_SD_MASK BMS_MASK(7, 4)
#define FIFO_CNT_SD_SHIFT 4
#define MODE_CTL_REG 0x40
#define FORCE_S3_MODE BIT(0)
#define ENABLE_S3_MODE BIT(1)
#define FORCE_S2_MODE BIT(2)
#define ENABLE_S2_MODE BIT(3)
#define S2_MODE_MASK BMS_MASK(3, 2)
#define S3_MODE_MASK BMS_MASK(1, 0)
#define DATA_CTL1_REG 0x42
#define MASTER_HOLD_BIT BIT(0)
#define DATA_CTL2_REG 0x43
#define FIFO_CNT_SD_CLR_BIT BIT(2)
#define ACC_DATA_SD_CLR_BIT BIT(1)
#define ACC_CNT_SD_CLR_BIT BIT(0)
#define S3_OCV_TOL_CTL_REG 0x44
#define EN_CTL_REG 0x46
#define BMS_EN_BIT BIT(7)
#define FIFO_LENGTH_REG 0x47
#define S1_FIFO_LENGTH_MASK BMS_MASK(3, 0)
#define S2_FIFO_LENGTH_MASK BMS_MASK(7, 4)
#define S2_FIFO_LENGTH_SHIFT 4
#define S1_SAMPLE_INTVL_REG 0x55
#define S2_SAMPLE_INTVL_REG 0x56
#define S3_SAMPLE_INTVL_REG 0x57
#define S1_ACC_CNT_REG 0x5E
#define S2_ACC_CNT_REG 0x5F
#define ACC_CNT_MASK BMS_MASK(2, 0)
#define ACC_DATA0_SD_REG 0x63
#define ACC_CNT_SD_REG 0x67
#define OCV_DATA0_REG 0x6A
#define FIFO_0_LSB_REG 0xC0
#define BMS_SOC_REG 0xB0
#define BMS_OCV_REG 0xB1 /* B1 & B2 */
#define SOC_STORAGE_MASK 0xFE
#define CHARGE_INCREASE_STORAGE 0xB3
#define CHARGE_CYCLE_STORAGE_LSB 0xB4 /* B4 & B5 */
#define SEC_ACCESS 0xD0
#define QPNP_CHARGER_PRESENT BIT(7)
/* Constants */
#define OCV_TOL_LSB_UV 300
#define MAX_OCV_TOL_THRESHOLD (OCV_TOL_LSB_UV * 0xFF)
#define MAX_SAMPLE_COUNT 256
#define MAX_SAMPLE_INTERVAL 2550
#define BMS_READ_TIMEOUT 500
#define BMS_DEFAULT_TEMP 250
#define OCV_INVALID 0xFFFF
#define SOC_INVALID 0xFF
#define OCV_UNINITIALIZED 0xFFFF
#define VBATT_ERROR_MARGIN 20000
#define CV_DROP_MARGIN 10000
#define MIN_OCV_UV 2000000
#define TIME_PER_PERCENT_UUC 60
#define IAVG_SAMPLES 16
#define MIN_SOC_UUC 3
#define QPNP_VM_BMS_DEV_NAME "qcom,qpnp-vm-bms"
/* indicates the state of BMS */
enum {
IDLE_STATE,
S1_STATE,
S2_STATE,
S3_STATE,
S7_STATE,
};
enum {
WRKARND_PON_OCV_COMP = BIT(0),
};
struct bms_irq {
int irq;
unsigned long disabled;
};
struct bms_wakeup_source {
struct wakeup_source source;
unsigned long disabled;
};
struct temp_curr_comp_map {
int temp_decideg;
int current_ma;
};
struct bms_dt_cfg {
bool cfg_report_charger_eoc;
bool cfg_force_bms_active_on_charger;
bool cfg_force_s3_on_suspend;
bool cfg_ignore_shutdown_soc;
bool cfg_use_voltage_soc;
int cfg_v_cutoff_uv;
int cfg_max_voltage_uv;
int cfg_r_conn_mohm;
int cfg_shutdown_soc_valid_limit;
int cfg_low_soc_calc_threshold;
int cfg_low_soc_calculate_soc_ms;
int cfg_low_voltage_threshold;
int cfg_low_voltage_calculate_soc_ms;
int cfg_low_soc_fifo_length;
int cfg_calculate_soc_ms;
int cfg_voltage_soc_timeout_ms;
int cfg_s1_sample_interval_ms;
int cfg_s2_sample_interval_ms;
int cfg_s1_sample_count;
int cfg_s2_sample_count;
int cfg_s1_fifo_length;
int cfg_s2_fifo_length;
int cfg_disable_bms;
int cfg_s3_ocv_tol_uv;
int cfg_soc_resume_limit;
int cfg_low_temp_threshold;
int cfg_ibat_avg_samples;
int cfg_battery_aging_comp;
bool cfg_use_reported_soc;
};
struct qpnp_bms_chip {
struct device *dev;
struct spmi_device *spmi;
dev_t dev_no;
u16 base;
u8 revision[2];
u32 batt_pres_addr;
u32 chg_pres_addr;
/* status variables */
u8 current_fsm_state;
bool last_soc_invalid;
bool warm_reset;
bool bms_psy_registered;
bool battery_full;
bool bms_dev_open;
bool data_ready;
bool apply_suspend_config;
bool in_cv_state;
bool low_soc_fifo_set;
int battery_status;
int calculated_soc;
int current_now;
int prev_current_now;
int prev_voltage_based_soc;
int calculate_soc_ms;
int voltage_soc_uv;
int battery_present;
int last_soc;
int last_soc_unbound;
int last_soc_change_sec;
int charge_start_tm_sec;
int catch_up_time_sec;
int delta_time_s;
int uuc_delta_time_s;
int ocv_at_100;
int last_ocv_uv;
int s2_fifo_length;
int last_acc;
int hi_power_state;
unsigned int vadc_v0625;
unsigned int vadc_v1250;
unsigned long tm_sec;
unsigned long workaround_flag;
unsigned long uuc_tm_sec;
u32 seq_num;
u8 shutdown_soc;
bool shutdown_soc_invalid;
u16 last_ocv_raw;
u32 shutdown_ocv;
bool suspend_data_valid;
int iavg_num_samples;
unsigned int iavg_index;
int iavg_samples_ma[IAVG_SAMPLES];
int iavg_ma;
int prev_soc_uuc;
int eoc_reported;
u8 charge_increase;
u16 charge_cycles;
unsigned int start_soc;
unsigned int end_soc;
struct bms_battery_data *batt_data;
struct bms_dt_cfg dt;
struct dentry *debug_root;
struct bms_wakeup_source vbms_lv_wake_source;
struct bms_wakeup_source vbms_cv_wake_source;
struct bms_wakeup_source vbms_soc_wake_source;
wait_queue_head_t bms_wait_q;
struct delayed_work monitor_soc_work;
struct delayed_work voltage_soc_timeout_work;
struct mutex bms_data_mutex;
struct mutex bms_device_mutex;
struct mutex last_soc_mutex;
struct mutex state_change_mutex;
struct class *bms_class;
struct device *bms_device;
struct cdev bms_cdev;
struct qpnp_vm_bms_data bms_data;
struct qpnp_vadc_chip *vadc_dev;
struct qpnp_adc_tm_chip *adc_tm_dev;
struct pmic_revid_data *revid_data;
struct qpnp_adc_tm_btm_param vbat_monitor_params;
struct bms_irq fifo_update_done_irq;
struct bms_irq fsm_state_change_irq;
struct power_supply bms_psy;
struct power_supply *batt_psy;
struct power_supply *usb_psy;
bool reported_soc_in_use;
bool charger_removed_since_full;
bool charger_reinserted;
bool reported_soc_high_current;
int reported_soc;
int reported_soc_change_sec;
int reported_soc_delta;
};
static struct qpnp_bms_chip *the_chip;
static struct temp_curr_comp_map temp_curr_comp_lut[] = {
{-300, 15},
{250, 17},
{850, 28},
};
static void disable_bms_irq(struct bms_irq *irq)
{
if (!__test_and_set_bit(0, &irq->disabled)) {
disable_irq(irq->irq);
pr_debug("disabled irq %d\n", irq->irq);
}
}
static void bms_stay_awake(struct bms_wakeup_source *source)
{
if (__test_and_clear_bit(0, &source->disabled)) {
__pm_stay_awake(&source->source);
pr_debug("enabled source %s\n", source->source.name);
}
}
static void bms_relax(struct bms_wakeup_source *source)
{
if (!__test_and_set_bit(0, &source->disabled)) {
__pm_relax(&source->source);
pr_debug("disabled source %s\n", source->source.name);
}
}
static bool bms_wake_active(struct bms_wakeup_source *source)
{
return !source->disabled;
}
static int bound_soc(int soc)
{
soc = max(0, soc);
soc = min(100, soc);
return soc;
}
static char *qpnp_vm_bms_supplicants[] = {
"battery",
};
static int qpnp_read_wrapper(struct qpnp_bms_chip *chip, u8 *val,
u16 base, int count)
{
int rc;
struct spmi_device *spmi = chip->spmi;
rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, base, val, count);
if (rc)
pr_err("SPMI read failed rc=%d\n", rc);
return rc;
}
static int qpnp_write_wrapper(struct qpnp_bms_chip *chip, u8 *val,
u16 base, int count)
{
int rc;
struct spmi_device *spmi = chip->spmi;
rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, base, val, count);
if (rc)
pr_err("SPMI write failed rc=%d\n", rc);
return rc;
}
static int qpnp_masked_write_base(struct qpnp_bms_chip *chip, u16 addr,
u8 mask, u8 val)
{
int rc;
u8 reg;
rc = qpnp_read_wrapper(chip, &reg, addr, 1);
if (rc) {
pr_err("read failed addr = %03X, rc = %d\n", addr, rc);
return rc;
}
reg &= ~mask;
reg |= val & mask;
rc = qpnp_write_wrapper(chip, &reg, addr, 1);
if (rc)
pr_err("write failed addr = %03X, val = %02x, mask = %02x, reg = %02x, rc = %d\n",
addr, val, mask, reg, rc);
return rc;
}
static int qpnp_secure_write_wrapper(struct qpnp_bms_chip *chip, u8 *val,
u16 base)
{
int rc;
u8 reg;
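/*
 * Secured PMIC registers are unlocked by writing the 0xA5 code to
 * SEC_ACCESS immediately before the protected register write.
 */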
reg = 0xA5;
rc = qpnp_write_wrapper(chip, &reg, chip->base + SEC_ACCESS, 1);
if (rc) {
pr_err("Error %d writing 0xA5 to 0x%x reg\n",
rc, SEC_ACCESS);
return rc;
}
rc = qpnp_write_wrapper(chip, val, base, 1);
if (rc)
pr_err("Error %d writing %d to 0x%x reg\n", rc, *val, base);
return rc;
}
static int backup_ocv_soc(struct qpnp_bms_chip *chip, int ocv_uv, int soc)
{
int rc;
u16 ocv_mv = ocv_uv / 1000;
rc = qpnp_write_wrapper(chip, (u8 *)&ocv_mv,
chip->base + BMS_OCV_REG, 2);
if (rc)
pr_err("Unable to backup OCV rc=%d\n", rc);
rc = qpnp_masked_write_base(chip, chip->base + BMS_SOC_REG,
SOC_STORAGE_MASK, (soc + 1) << 1);
if (rc)
pr_err("Unable to backup SOC rc=%d\n", rc);
pr_debug("ocv_mv=%d soc=%d\n", ocv_mv, soc);
return rc;
}
static int get_current_time(unsigned long *now_tm_sec)
{
struct rtc_time tm;
struct rtc_device *rtc;
int rc;
rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
if (rtc == NULL) {
pr_err("%s: unable to open rtc device (%s)\n",
__FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
return -EINVAL;
}
rc = rtc_read_time(rtc, &tm);
if (rc) {
pr_err("Error reading rtc device (%s) : %d\n",
CONFIG_RTC_HCTOSYS_DEVICE, rc);
goto close_time;
}
rc = rtc_valid_tm(&tm);
if (rc) {
pr_err("Invalid RTC time (%s): %d\n",
CONFIG_RTC_HCTOSYS_DEVICE, rc);
goto close_time;
}
rtc_tm_to_time(&tm, now_tm_sec);
close_time:
rtc_class_close(rtc);
return rc;
}
static int calculate_delta_time(unsigned long *time_stamp, int *delta_time_s)
{
unsigned long now_tm_sec = 0;
/* default to delta time = 0 if anything fails */
*delta_time_s = 0;
if (get_current_time(&now_tm_sec)) {
pr_err("RTC read failed\n");
return 0;
}
*delta_time_s = (now_tm_sec - *time_stamp);
/* remember this time */
*time_stamp = now_tm_sec;
return 0;
}
static bool is_charger_present(struct qpnp_bms_chip *chip)
{
union power_supply_propval ret = {0,};
if (chip->usb_psy == NULL)
chip->usb_psy = power_supply_get_by_name("usb");
if (chip->usb_psy) {
chip->usb_psy->get_property(chip->usb_psy,
POWER_SUPPLY_PROP_PRESENT, &ret);
return ret.intval;
}
return false;
}
static bool is_battery_charging(struct qpnp_bms_chip *chip)
{
union power_supply_propval ret = {0,};
if (chip->batt_psy == NULL)
chip->batt_psy = power_supply_get_by_name("battery");
if (chip->batt_psy) {
/* if battery has been registered, use the type property */
chip->batt_psy->get_property(chip->batt_psy,
POWER_SUPPLY_PROP_CHARGE_TYPE, &ret);
return ret.intval != POWER_SUPPLY_CHARGE_TYPE_NONE;
}
/* Default to false if the battery power supply is not registered. */
pr_debug("battery power supply is not registered\n");
return false;
}
#define BAT_PRES_BIT BIT(7)
static bool is_battery_present(struct qpnp_bms_chip *chip)
{
union power_supply_propval ret = {0,};
int rc;
u8 batt_pres;
/* first try to use the batt_pres register if given */
if (chip->batt_pres_addr) {
rc = qpnp_read_wrapper(chip, &batt_pres,
chip->batt_pres_addr, 1);
if (!rc && (batt_pres & BAT_PRES_BIT))
return true;
else
return false;
}
if (chip->batt_psy == NULL)
chip->batt_psy = power_supply_get_by_name("battery");
if (chip->batt_psy) {
/* if battery has been registered, use the present property */
chip->batt_psy->get_property(chip->batt_psy,
POWER_SUPPLY_PROP_PRESENT, &ret);
return ret.intval;
}
/* Default to false if the battery power supply is not registered. */
pr_debug("battery power supply is not registered\n");
return false;
}
#define BAT_REMOVED_OFFMODE_BIT BIT(6)
static bool is_battery_replaced_in_offmode(struct qpnp_bms_chip *chip)
{
u8 batt_pres;
int rc;
if (chip->batt_pres_addr) {
rc = qpnp_read_wrapper(chip, &batt_pres,
chip->batt_pres_addr, 1);
pr_debug("offmode removed: %02x\n", batt_pres);
if (!rc && (batt_pres & BAT_REMOVED_OFFMODE_BIT))
return true;
}
return false;
}
static bool is_battery_taper_charging(struct qpnp_bms_chip *chip)
{
union power_supply_propval ret = {0,};
if (chip->batt_psy == NULL)
chip->batt_psy = power_supply_get_by_name("battery");
if (chip->batt_psy) {
chip->batt_psy->get_property(chip->batt_psy,
POWER_SUPPLY_PROP_CHARGE_TYPE, &ret);
return ret.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER;
}
return false;
}
static int master_hold_control(struct qpnp_bms_chip *chip, bool enable)
{
u8 reg = 0;
int rc;
reg = enable ? MASTER_HOLD_BIT : 0;
rc = qpnp_secure_write_wrapper(chip, &reg,
chip->base + DATA_CTL1_REG);
if (rc)
pr_err("Unable to write reg=%x rc=%d\n", DATA_CTL1_REG, rc);
return rc;
}
static int force_fsm_state(struct qpnp_bms_chip *chip, u8 state)
{
int rc;
u8 mode_ctl = 0;
switch (state) {
case S2_STATE:
mode_ctl = (FORCE_S2_MODE | ENABLE_S2_MODE);
break;
case S3_STATE:
mode_ctl = (FORCE_S3_MODE | ENABLE_S3_MODE);
break;
default:
pr_debug("Invalid state %d\n", state);
return -EINVAL;
}
rc = qpnp_secure_write_wrapper(chip, &mode_ctl,
chip->base + MODE_CTL_REG);
if (rc) {
pr_err("Unable to write reg=%x rc=%d\n", MODE_CTL_REG, rc);
return rc;
}
/* delay for the FSM state to take effect in hardware */
usleep_range(500, 600);
pr_debug("force_mode=%d mode_cntl_reg=%x\n", state, mode_ctl);
return 0;
}
static int get_sample_interval(struct qpnp_bms_chip *chip,
u8 fsm_state, u32 *interval)
{
int rc;
u8 val = 0, reg;
*interval = 0;
switch (fsm_state) {
case S1_STATE:
reg = S1_SAMPLE_INTVL_REG;
break;
case S2_STATE:
reg = S2_SAMPLE_INTVL_REG;
break;
case S3_STATE:
reg = S3_SAMPLE_INTVL_REG;
break;
default:
pr_err("Invalid state %d\n", fsm_state);
return -EINVAL;
}
rc = qpnp_read_wrapper(chip, &val, chip->base + reg, 1);
if (rc) {
pr_err("Failed to get state(%d) sample_interval, rc=%d\n",
fsm_state, rc);
return rc;
}
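/* interval register LSB is 10 ms, e.g. 0xFF -> 2550 ms (MAX_SAMPLE_INTERVAL) */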
*interval = val * 10;
return 0;
}
static int get_sample_count(struct qpnp_bms_chip *chip,
u8 fsm_state, u32 *count)
{
int rc;
u8 val = 0, reg;
*count = 0;
switch (fsm_state) {
case S1_STATE:
reg = S1_ACC_CNT_REG;
break;
case S2_STATE:
reg = S2_ACC_CNT_REG;
break;
default:
pr_err("Invalid state %d\n", fsm_state);
return -EINVAL;
}
rc = qpnp_read_wrapper(chip, &val, chip->base + reg, 1);
if (rc) {
pr_err("Failed to get state(%d) sample_count, rc=%d\n",
fsm_state, rc);
return rc;
}
val &= ACC_CNT_MASK;
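/*
 * Encoded count: 0 -> 1 sample, otherwise 2^(val + 1) samples, so the
 * 3-bit field tops out at 1 << 8 = 256 (MAX_SAMPLE_COUNT).
 */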
*count = val ? (1 << (val + 1)) : 1;
return 0;
}
static int get_fifo_length(struct qpnp_bms_chip *chip,
u8 fsm_state, u32 *fifo_length)
{
int rc;
u8 val = 0, reg, mask = 0, shift = 0;
*fifo_length = 0;
switch (fsm_state) {
case S1_STATE:
reg = FIFO_LENGTH_REG;
mask = S1_FIFO_LENGTH_MASK;
shift = 0;
break;
case S2_STATE:
reg = FIFO_LENGTH_REG;
mask = S2_FIFO_LENGTH_MASK;
shift = S2_FIFO_LENGTH_SHIFT;
break;
default:
pr_err("Invalid state %d\n", fsm_state);
return -EINVAL;
}
rc = qpnp_read_wrapper(chip, &val, chip->base + reg, 1);
if (rc) {
pr_err("Failed to get state(%d) fifo_length, rc=%d\n",
fsm_state, rc);
return rc;
}
val &= mask;
val >>= shift;
*fifo_length = val;
return 0;
}
static int set_fifo_length(struct qpnp_bms_chip *chip,
u8 fsm_state, u32 fifo_length)
{
int rc;
u8 reg, mask = 0, shift = 0;
/* fifo_length of 1 is not supported due to a hardware issue */
if ((fifo_length <= 1) || (fifo_length > MAX_FIFO_REGS)) {
pr_err("Invalid FIFO length = %d\n", fifo_length);
return -EINVAL;
}
switch (fsm_state) {
case S1_STATE:
reg = FIFO_LENGTH_REG;
mask = S1_FIFO_LENGTH_MASK;
shift = 0;
break;
case S2_STATE:
reg = FIFO_LENGTH_REG;
mask = S2_FIFO_LENGTH_MASK;
shift = S2_FIFO_LENGTH_SHIFT;
break;
default:
pr_err("Invalid state %d\n", fsm_state);
return -EINVAL;
}
rc = master_hold_control(chip, true);
if (rc)
pr_err("Unable to apply master_hold rc=%d\n", rc);
rc = qpnp_masked_write_base(chip, chip->base + reg, mask,
fifo_length << shift);
if (rc)
pr_err("Unable to set fifo length rc=%d\n", rc);
rc = master_hold_control(chip, false);
if (rc)
pr_err("Unable to apply master_hold rc=%d\n", rc);
return rc;
}
static int get_fsm_state(struct qpnp_bms_chip *chip, u8 *state)
{
int rc;
/*
* To read the STATUS1 register, write any value to this register,
* wait for 10 ms and then read the register.
*/
*state = 0;
rc = qpnp_write_wrapper(chip, state, chip->base + STATUS1_REG, 1);
if (rc) {
pr_err("Unable to write STATUS1_REG rc=%d\n", rc);
return rc;
}
usleep_range(10000, 11000);
/* read the current FSM state */
rc = qpnp_read_wrapper(chip, state, chip->base + STATUS1_REG, 1);
if (rc) {
pr_err("Unable to read STATUS1_REG rc=%d\n", rc);
return rc;
}
*state = (*state & FSM_STATE_MASK) >> FSM_STATE_SHIFT;
return rc;
}
static int update_fsm_state(struct qpnp_bms_chip *chip)
{
u8 state = 0;
int rc;
mutex_lock(&chip->state_change_mutex);
rc = get_fsm_state(chip, &state);
if (rc) {
pr_err("Unable to get fsm_state rc=%d\n", rc);
goto fail_fsm;
}
chip->current_fsm_state = state;
fail_fsm:
mutex_unlock(&chip->state_change_mutex);
return rc;
}
static int backup_charge_cycle(struct qpnp_bms_chip *chip)
{
int rc = 0;
if (chip->charge_increase >= 0) {
rc = qpnp_write_wrapper(chip, &chip->charge_increase,
chip->base + CHARGE_INCREASE_STORAGE, 1);
if (rc)
pr_err("Unable to backup charge_increase rc=%d\n", rc);
}
if (chip->charge_cycles >= 0) {
rc = qpnp_write_wrapper(chip, (u8 *)&chip->charge_cycles,
chip->base + CHARGE_CYCLE_STORAGE_LSB, 2);
if (rc)
pr_err("Unable to backup charge_cycles rc=%d\n", rc);
}
pr_debug("%s storing charge_increase=%u charge_cycle=%u\n",
rc ? "Unable to" : "Sucessfully",
chip->charge_increase, chip->charge_cycles);
return rc;
}
static int read_chgcycle_data_from_backup(struct qpnp_bms_chip *chip)
{
int rc;
uint16_t temp_u16 = 0;
u8 temp_u8 = 0;
rc = qpnp_read_wrapper(chip, &temp_u8,
chip->base + CHARGE_INCREASE_STORAGE, 1);
if (rc) {
pr_err("Unable to read charge_increase rc=%d\n", rc);
return rc;
}
rc = qpnp_read_wrapper(chip, (u8 *)&temp_u16,
chip->base + CHARGE_CYCLE_STORAGE_LSB, 2);
if (rc) {
pr_err("Unable to read charge_cycle rc=%d\n", rc);
return rc;
}
if ((temp_u8 == 0xFF) || (temp_u16 == 0xFFFF)) {
chip->charge_cycles = 0;
chip->charge_increase = 0;
pr_info("rejecting aging data charge_increase=%u charge_cycle=%u\n",
temp_u8, temp_u16);
rc = backup_charge_cycle(chip);
if (rc)
pr_err("Unable to reset charge cycles rc=%d\n", rc);
} else {
chip->charge_increase = temp_u8;
chip->charge_cycles = temp_u16;
}
pr_debug("charge_increase=%u charge_cycle=%u\n",
chip->charge_increase, chip->charge_cycles);
return rc;
}
static int calculate_uuc_iavg(struct qpnp_bms_chip *chip)
{
int i;
int iavg_ma = chip->current_now / 1000;
/* only continue if ibat has changed */
if (chip->current_now == chip->prev_current_now)
goto ibat_unchanged;
else
chip->prev_current_now = chip->current_now;
chip->iavg_samples_ma[chip->iavg_index] = iavg_ma;
chip->iavg_index = (chip->iavg_index + 1) %
chip->dt.cfg_ibat_avg_samples;
chip->iavg_num_samples++;
if (chip->iavg_num_samples >= chip->dt.cfg_ibat_avg_samples)
chip->iavg_num_samples = chip->dt.cfg_ibat_avg_samples;
if (chip->iavg_num_samples) {
iavg_ma = 0;
/* maintain a 16 sample average of ibat */
for (i = 0; i < chip->iavg_num_samples; i++) {
pr_debug("iavg_samples_ma[%d] = %d\n", i,
chip->iavg_samples_ma[i]);
iavg_ma += chip->iavg_samples_ma[i];
}
chip->iavg_ma = DIV_ROUND_CLOSEST(iavg_ma,
chip->iavg_num_samples);
}
ibat_unchanged:
pr_debug("current_now_ma=%d averaged_iavg_ma=%d\n",
chip->current_now / 1000, chip->iavg_ma);
return chip->iavg_ma;
}
static int adjust_uuc(struct qpnp_bms_chip *chip, int soc_uuc)
{
int max_percent_change;
calculate_delta_time(&chip->uuc_tm_sec, &chip->uuc_delta_time_s);
/* make sure that the UUC changes 1% at a time */
max_percent_change = max(chip->uuc_delta_time_s
/ TIME_PER_PERCENT_UUC, 1);
if (chip->prev_soc_uuc == -EINVAL) {
/* start with a minimum UUC if the initial UUC is high */
if (soc_uuc > MIN_SOC_UUC)
chip->prev_soc_uuc = MIN_SOC_UUC;
else
chip->prev_soc_uuc = soc_uuc;
} else {
if (abs(chip->prev_soc_uuc - soc_uuc) <= max_percent_change)
chip->prev_soc_uuc = soc_uuc;
else if (soc_uuc > chip->prev_soc_uuc)
chip->prev_soc_uuc += max_percent_change;
else
chip->prev_soc_uuc -= max_percent_change;
}
pr_debug("soc_uuc=%d new_soc_uuc=%d\n", soc_uuc, chip->prev_soc_uuc);
return chip->prev_soc_uuc;
}
static int lookup_soc_ocv(struct qpnp_bms_chip *chip, int ocv_uv, int batt_temp)
{
int soc_ocv = 0, soc_cutoff = 0, soc_final = 0;
int fcc, acc, soc_uuc = 0, soc_acc = 0, iavg_ma = 0;
soc_ocv = interpolate_pc(chip->batt_data->pc_temp_ocv_lut,
batt_temp, ocv_uv / 1000);
soc_cutoff = interpolate_pc(chip->batt_data->pc_temp_ocv_lut,
batt_temp, chip->dt.cfg_v_cutoff_uv / 1000);
soc_final = DIV_ROUND_CLOSEST(100 * (soc_ocv - soc_cutoff),
(100 - soc_cutoff));
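/*
 * soc_final rescales the OCV-based SOC so the cutoff voltage maps to 0%,
 * e.g. soc_ocv = 55 and soc_cutoff = 10 give 100 * (55 - 10) / (100 - 10) = 50.
 */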
if (chip->batt_data->ibat_acc_lut) {
/* Apply ACC logic only when discharging */
if (!is_battery_charging(chip) && chip->current_now > 0) {
/*
* IBAT averaging is disabled at low temp,
* allowing the SOC to catch up quickly.
*/
if (batt_temp > chip->dt.cfg_low_temp_threshold)
iavg_ma = calculate_uuc_iavg(chip);
else
iavg_ma = chip->current_now / 1000;
fcc = interpolate_fcc(chip->batt_data->fcc_temp_lut,
batt_temp);
acc = interpolate_acc(chip->batt_data->ibat_acc_lut,
batt_temp, iavg_ma);
if (acc <= 0) {
if (chip->last_acc)
acc = chip->last_acc;
else
acc = fcc;
}
soc_uuc = ((fcc - acc) * 100) / fcc;
if (batt_temp > chip->dt.cfg_low_temp_threshold)
soc_uuc = adjust_uuc(chip, soc_uuc);
soc_acc = DIV_ROUND_CLOSEST(100 * (soc_ocv - soc_uuc),
(100 - soc_uuc));
pr_debug("fcc=%d acc=%d soc_final=%d soc_uuc=%d soc_acc=%d current_now=%d iavg_ma=%d\n",
fcc, acc, soc_final, soc_uuc,
soc_acc, chip->current_now / 1000, iavg_ma);
soc_final = soc_acc;
chip->last_acc = acc;
} else {
/* charging - reset all the counters */
chip->last_acc = 0;
chip->iavg_num_samples = 0;
chip->iavg_index = 0;
chip->iavg_ma = 0;
chip->prev_current_now = 0;
chip->prev_soc_uuc = -EINVAL;
}
}
soc_final = bound_soc(soc_final);
pr_debug("soc_final=%d soc_ocv=%d soc_cutoff=%d ocv_uv=%u batt_temp=%d\n",
soc_final, soc_ocv, soc_cutoff, ocv_uv, batt_temp);
return soc_final;
}
#define V_PER_BIT_MUL_FACTOR 97656
#define V_PER_BIT_DIV_FACTOR 1000
#define VADC_INTRINSIC_OFFSET 0x6000
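/*
 * Raw-to-microvolt conversion: uv = reading * 97656 / 1000, i.e. roughly
 * 97.656 uV per code; for example a reading of 32768 works out to about 3.2 V.
 */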
static int vadc_reading_to_uv(int reading, bool vadc_bms)
{
int64_t value;
if (!vadc_bms) {
/*
* All the BMS H/W VADC values are pre-compensated
* for VADC_INTRINSIC_OFFSET, subtract this offset
* only if this reading is not obtained from BMS
*/
if (reading <= VADC_INTRINSIC_OFFSET)
return 0;
reading -= VADC_INTRINSIC_OFFSET;
}
value = (reading * V_PER_BIT_MUL_FACTOR);
return div_u64(value, (u32)V_PER_BIT_DIV_FACTOR);
}
static int get_calculation_delay_ms(struct qpnp_bms_chip *chip)
{
if (bms_wake_active(&chip->vbms_lv_wake_source))
return chip->dt.cfg_low_voltage_calculate_soc_ms;
if (chip->calculated_soc < chip->dt.cfg_low_soc_calc_threshold)
return chip->dt.cfg_low_soc_calculate_soc_ms;
else
return chip->dt.cfg_calculate_soc_ms;
}
#define VADC_CALIB_UV 625000
#define VBATT_MUL_FACTOR 3
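/*
 * Two-point calibration against the 0.625 V and 1.25 V references:
 * uv_cal = 625000 + (reading - v0625) * 625000 / (v1250 - v0625),
 * then scaled by VBATT_MUL_FACTOR (VBAT_SNS is assumed to be divided
 * down by 3 before it reaches the ADC).
 */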
static int adjust_vbatt_reading(struct qpnp_bms_chip *chip, int reading_uv)
{
s64 numerator, denominator;
if (reading_uv == 0)
return 0;
/* don't adjust if not calibrated */
if (chip->vadc_v0625 == 0 || chip->vadc_v1250 == 0) {
pr_debug("No cal yet return %d\n",
VBATT_MUL_FACTOR * reading_uv);
return VBATT_MUL_FACTOR * reading_uv;
}
numerator = ((s64)reading_uv - chip->vadc_v0625) * VADC_CALIB_UV;
denominator = (s64)chip->vadc_v1250 - chip->vadc_v0625;
if (denominator == 0)
return reading_uv * VBATT_MUL_FACTOR;
return (VADC_CALIB_UV + div_s64(numerator, denominator))
* VBATT_MUL_FACTOR;
}
static int calib_vadc(struct qpnp_bms_chip *chip)
{
int rc, raw_0625, raw_1250;
struct qpnp_vadc_result result;
rc = qpnp_vadc_read(chip->vadc_dev, REF_625MV, &result);
if (rc) {
pr_debug("vadc read failed with rc = %d\n", rc);
return rc;
}
raw_0625 = result.adc_code;
rc = qpnp_vadc_read(chip->vadc_dev, REF_125V, &result);
if (rc) {
pr_debug("vadc read failed with rc = %d\n", rc);
return rc;
}
raw_1250 = result.adc_code;
chip->vadc_v0625 = vadc_reading_to_uv(raw_0625, false);
chip->vadc_v1250 = vadc_reading_to_uv(raw_1250, false);
pr_debug("vadc calib: 0625=%d raw (%d uv), 1250=%d raw (%d uv)\n",
raw_0625, chip->vadc_v0625, raw_1250, chip->vadc_v1250);
return 0;
}
static int convert_vbatt_raw_to_uv(struct qpnp_bms_chip *chip,
u16 reading, bool is_pon_ocv)
{
int64_t uv, vbatt;
int rc;
uv = vadc_reading_to_uv(reading, true);
pr_debug("%u raw converted into %lld uv\n", reading, uv);
uv = adjust_vbatt_reading(chip, uv);
pr_debug("adjusted into %lld uv\n", uv);
vbatt = uv;
rc = qpnp_vbat_sns_comp_result(chip->vadc_dev, &uv, is_pon_ocv);
if (rc) {
pr_debug("Vbatt compensation failed rc = %d\n", rc);
uv = vbatt;
} else {
pr_debug("temp-compensated %lld into %lld uv\n", vbatt, uv);
}
return uv;
}
static void convert_and_store_ocv(struct qpnp_bms_chip *chip,
int batt_temp, bool is_pon_ocv)
{
int rc;
rc = calib_vadc(chip);
if (rc)
pr_err("Vadc reference voltage read failed, rc = %d\n", rc);
chip->last_ocv_uv = convert_vbatt_raw_to_uv(chip,
chip->last_ocv_raw, is_pon_ocv);
pr_debug("last_ocv_uv = %d\n", chip->last_ocv_uv);
}
static int read_and_update_ocv(struct qpnp_bms_chip *chip, int batt_temp,
bool is_pon_ocv)
{
int rc, ocv_uv;
u16 ocv_data = 0;
/* read the BMS h/w OCV */
rc = qpnp_read_wrapper(chip, (u8 *)&ocv_data,
chip->base + OCV_DATA0_REG, 2);
if (rc) {
pr_err("Error reading ocv: rc = %d\n", rc);
return -ENXIO;
}
/* check if OCV is within limits */
ocv_uv = convert_vbatt_raw_to_uv(chip, ocv_data, is_pon_ocv);
if (ocv_uv < MIN_OCV_UV) {
pr_err("OCV too low or invalid (%d)- rejecting it\n", ocv_uv);
return 0;
}
if ((chip->last_ocv_raw == OCV_UNINITIALIZED) ||
(chip->last_ocv_raw != ocv_data)) {
pr_debug("new OCV!\n");
chip->last_ocv_raw = ocv_data;
convert_and_store_ocv(chip, batt_temp, is_pon_ocv);
}
pr_debug("ocv_raw=0x%x last_ocv_raw=0x%x last_ocv_uv=%d\n",
ocv_data, chip->last_ocv_raw, chip->last_ocv_uv);
return 0;
}
static int get_battery_voltage(struct qpnp_bms_chip *chip, int *result_uv)
{
int rc;
struct qpnp_vadc_result adc_result;
rc = qpnp_vadc_read(chip->vadc_dev, VBAT_SNS, &adc_result);
if (rc) {
pr_err("error reading adc channel = %d, rc = %d\n",
VBAT_SNS, rc);
return rc;
}
pr_debug("mvolts phy=%lld meas=0x%llx\n", adc_result.physical,
adc_result.measurement);
*result_uv = (int)adc_result.physical;
return 0;
}
static int get_battery_status(struct qpnp_bms_chip *chip)
{
union power_supply_propval ret = {0,};
if (chip->batt_psy == NULL)
chip->batt_psy = power_supply_get_by_name("battery");
if (chip->batt_psy) {
/* if battery has been registered, use the status property */
chip->batt_psy->get_property(chip->batt_psy,
POWER_SUPPLY_PROP_STATUS, &ret);
return ret.intval;
}
/* Default to UNKNOWN if the battery power supply is not registered. */
pr_debug("battery power supply is not registered\n");
return POWER_SUPPLY_STATUS_UNKNOWN;
}
static int get_batt_therm(struct qpnp_bms_chip *chip, int *batt_temp)
{
int rc;
struct qpnp_vadc_result result;
rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM, &result);
if (rc) {
pr_err("error reading adc channel = %d, rc = %d\n",
LR_MUX1_BATT_THERM, rc);
return rc;
}
pr_debug("batt_temp phy = %lld meas = 0x%llx\n",
result.physical, result.measurement);
*batt_temp = (int)result.physical;
return 0;
}
static int get_prop_bms_rbatt(struct qpnp_bms_chip *chip)
{
return chip->batt_data->default_rbatt_mohm;
}
static int get_rbatt(struct qpnp_bms_chip *chip, int soc, int batt_temp)
{
int rbatt_mohm, scalefactor;
rbatt_mohm = chip->batt_data->default_rbatt_mohm;
if (chip->batt_data->rbatt_sf_lut == NULL) {
pr_debug("RBATT = %d\n", rbatt_mohm);
return rbatt_mohm;
}
scalefactor = interpolate_scalingfactor(chip->batt_data->rbatt_sf_lut,
batt_temp, soc);
rbatt_mohm = (rbatt_mohm * scalefactor) / 100;
if (chip->dt.cfg_r_conn_mohm > 0)
rbatt_mohm += chip->dt.cfg_r_conn_mohm;
return rbatt_mohm;
}
static void charging_began(struct qpnp_bms_chip *chip)
{
int rc;
u8 state;
mutex_lock(&chip->last_soc_mutex);
chip->charge_start_tm_sec = 0;
chip->catch_up_time_sec = 0;
chip->start_soc = chip->last_soc;
/*
* reset ocv_at_100 to -EINVAL to indicate
* start of charging.
*/
chip->ocv_at_100 = -EINVAL;
mutex_unlock(&chip->last_soc_mutex);
/*
* If the BMS state is not in S2, force it in S2. Such
* a condition can only occur if we are coming out of
* suspend.
*/
mutex_lock(&chip->state_change_mutex);
rc = get_fsm_state(chip, &state);
if (rc)
pr_err("Unable to get FSM state rc=%d\n", rc);
if (rc || (state != S2_STATE)) {
pr_debug("Forcing S2 state\n");
rc = force_fsm_state(chip, S2_STATE);
if (rc)
pr_err("Unable to set FSM state rc=%d\n", rc);
}
mutex_unlock(&chip->state_change_mutex);
}
static void charging_ended(struct qpnp_bms_chip *chip)
{
u8 state;
int rc, status = get_battery_status(chip);
mutex_lock(&chip->last_soc_mutex);
chip->charge_start_tm_sec = 0;
chip->catch_up_time_sec = 0;
chip->end_soc = chip->last_soc;
if (status == POWER_SUPPLY_STATUS_FULL)
chip->last_soc_invalid = true;
mutex_unlock(&chip->last_soc_mutex);
/*
* If the BMS state is not in S2, force it in S2. Such
* a condition can only occur if we are coming out of
* suspend.
*/
mutex_lock(&chip->state_change_mutex);
rc = get_fsm_state(chip, &state);
if (rc)
pr_err("Unable to get FSM state rc=%d\n", rc);
if (rc || (state != S2_STATE)) {
pr_debug("Forcing S2 state\n");
rc = force_fsm_state(chip, S2_STATE);
if (rc)
pr_err("Unable to set FSM state rc=%d\n", rc);
}
mutex_unlock(&chip->state_change_mutex);
/* Calculate charge accumulated and update charge cycle */
if (chip->dt.cfg_battery_aging_comp &&
(chip->end_soc > chip->start_soc)) {
chip->charge_increase += (chip->end_soc - chip->start_soc);
if (chip->charge_increase > 100) {
chip->charge_cycles++;
chip->charge_increase %= 100;
}
pr_debug("start_soc=%u end_soc=%u charge_cycles=%u charge_increase=%u\n",
chip->start_soc, chip->end_soc,
chip->charge_cycles, chip->charge_increase);
rc = backup_charge_cycle(chip);
if (rc)
pr_err("Unable to store charge cycles rc=%d\n", rc);
}
}
static int estimate_ocv(struct qpnp_bms_chip *chip)
{
int i, rc, vbatt = 0, vbatt_final = 0;
for (i = 0; i < 5; i++) {
rc = get_battery_voltage(chip, &vbatt);
if (rc) {
pr_err("Unable to read battery-voltage rc=%d\n", rc);
return rc;
}
/*
* Conservatively select the lowest vbatt to avoid reporting
* a higher ocv due to variations in bootup current.
*/
if (i == 0)
vbatt_final = vbatt;
else if (vbatt < vbatt_final)
vbatt_final = vbatt;
msleep(20);
}
/*
* TODO: Revisit the OCV calculations to use approximate ibatt
* and rbatt.
*/
return vbatt_final;
}
static int scale_soc_while_chg(struct qpnp_bms_chip *chip, int chg_time_sec,
int catch_up_sec, int new_soc, int prev_soc)
{
int scaled_soc;
int numerator;
/*
* Don't report a high value immediately; slowly scale the
* value from prev_soc to the new soc based on a charge-time
* weighted average.
*/
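/*
 * Example: prev_soc = 80, new_soc = 90, catch_up_sec = 600 and
 * chg_time_sec = 300 give ((600 - 300) * 80 + 300 * 90) / 600 = 85.
 */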
pr_debug("cts=%d catch_up_sec=%d\n", chg_time_sec, catch_up_sec);
if (catch_up_sec == 0)
return new_soc;
if (chg_time_sec > catch_up_sec)
return new_soc;
numerator = (catch_up_sec - chg_time_sec) * prev_soc
+ chg_time_sec * new_soc;
scaled_soc = numerator / catch_up_sec;
pr_debug("cts=%d new_soc=%d prev_soc=%d scaled_soc=%d\n",
chg_time_sec, new_soc, prev_soc, scaled_soc);
return scaled_soc;
}
static int report_eoc(struct qpnp_bms_chip *chip)
{
int rc = -EINVAL;
union power_supply_propval ret = {0,};
if (chip->batt_psy == NULL)
chip->batt_psy = power_supply_get_by_name("battery");
if (chip->batt_psy) {
rc = chip->batt_psy->get_property(chip->batt_psy,
POWER_SUPPLY_PROP_STATUS, &ret);
if (rc) {
pr_err("Unable to get battery 'STATUS' rc=%d\n", rc);
} else if (ret.intval != POWER_SUPPLY_STATUS_FULL) {
pr_debug("Report EOC to charger\n");
ret.intval = POWER_SUPPLY_STATUS_FULL;
rc = chip->batt_psy->set_property(chip->batt_psy,
POWER_SUPPLY_PROP_STATUS, &ret);
if (rc) {
pr_err("Unable to set 'STATUS' rc=%d\n", rc);
return rc;
}
chip->eoc_reported = true;
}
} else {
pr_err("battery psy not registered\n");
}
return rc;
}
static void check_recharge_condition(struct qpnp_bms_chip *chip)
{
int rc;
union power_supply_propval ret = {0,};
int status = get_battery_status(chip);
if (chip->last_soc > chip->dt.cfg_soc_resume_limit)
return;
if (status == POWER_SUPPLY_STATUS_UNKNOWN) {
pr_debug("Unable to read battery status\n");
return;
}
/* Report recharge to charger for SOC based resume of charging */
if ((status != POWER_SUPPLY_STATUS_CHARGING) && chip->eoc_reported) {
ret.intval = POWER_SUPPLY_STATUS_CHARGING;
rc = chip->batt_psy->set_property(chip->batt_psy,
POWER_SUPPLY_PROP_STATUS, &ret);
if (rc < 0) {
pr_err("Unable to set battery property rc=%d\n", rc);
} else {
pr_info("soc dropped below resume_soc soc=%d resume_soc=%d, restart charging\n",
chip->last_soc,
chip->dt.cfg_soc_resume_limit);
chip->eoc_reported = false;
}
}
}
static void check_eoc_condition(struct qpnp_bms_chip *chip)
{
int rc;
int status = get_battery_status(chip);
union power_supply_propval ret = {0,};
if (status == POWER_SUPPLY_STATUS_UNKNOWN) {
pr_err("Unable to read battery status\n");
return;
}
/*
* Check battery status:
* if last_soc is 100 and battery status is still charging
* reset ocv_at_100 and force reporting of eoc to charger.
*/
if ((chip->last_soc == 100) &&
(status == POWER_SUPPLY_STATUS_CHARGING))
chip->ocv_at_100 = -EINVAL;
/*
* Store the OCV value at 100. If the new ocv is greater than
* ocv_at_100 (battery settles), update ocv_at_100. Else
* if the SOC drops, reset ocv_at_100.
*/
if (chip->ocv_at_100 == -EINVAL) {
if (chip->last_soc == 100) {
if (chip->dt.cfg_report_charger_eoc) {
rc = report_eoc(chip);
if (!rc) {
/*
* update ocv_at_100 only if EOC is
* reported successfully.
*/
chip->ocv_at_100 = chip->last_ocv_uv;
pr_debug("Battery FULL\n");
} else {
pr_err("Unable to report eoc rc=%d\n",
rc);
chip->ocv_at_100 = -EINVAL;
}
}
if (chip->dt.cfg_use_reported_soc) {
/* begin reported_soc process */
chip->reported_soc_in_use = true;
chip->charger_removed_since_full = false;
chip->charger_reinserted = false;
chip->reported_soc = 100;
pr_debug("Begin reported_soc process\n");
}
}
} else {
if (chip->last_ocv_uv >= chip->ocv_at_100) {
pr_debug("new_ocv(%d) > ocv_at_100(%d) maintaining SOC to 100\n",
chip->last_ocv_uv, chip->ocv_at_100);
chip->ocv_at_100 = chip->last_ocv_uv;
chip->last_soc = 100;
} else if (chip->last_soc != 100) {
/*
* Report that the battery is discharging.
* This gets called once when the SOC falls
* below 100.
*/
if (chip->reported_soc_in_use
&& chip->reported_soc == 100) {
pr_debug("reported_soc=100, last_soc=%d, do not send DISCHARING status\n",
chip->last_soc);
} else {
ret.intval = POWER_SUPPLY_STATUS_DISCHARGING;
chip->batt_psy->set_property(chip->batt_psy,
POWER_SUPPLY_PROP_STATUS, &ret);
}
pr_debug("SOC dropped (%d) discarding ocv_at_100\n",
chip->last_soc);
chip->ocv_at_100 = -EINVAL;
}
}
}
static int report_voltage_based_soc(struct qpnp_bms_chip *chip)
{
pr_debug("Reported voltage based soc = %d\n",
chip->prev_voltage_based_soc);
return chip->prev_voltage_based_soc;
}
static int prepare_reported_soc(struct qpnp_bms_chip *chip)
{
if (chip->charger_removed_since_full == false) {
/*
* charger is not removed since full,
* keep reported_soc as 100 and calculate the delta soc
* between reported_soc and last_soc
*/
chip->reported_soc = 100;
chip->reported_soc_delta = 100 - chip->last_soc;
pr_debug("Keep at reported_soc 100, reported_soc_delta=%d, last_soc=%d\n",
chip->reported_soc_delta,
chip->last_soc);
} else {
/* charger is removed since full */
if (chip->charger_reinserted) {
/*
* charger reinserted, keep the reported_soc
* until it equals last_soc.
*/
if (chip->reported_soc == chip->last_soc) {
chip->reported_soc_in_use = false;
chip->reported_soc_high_current = false;
pr_debug("reported_soc equals to last_soc, stop reported_soc process\n");
}
chip->reported_soc_change_sec = 0;
}
}
pr_debug("Reporting reported_soc=%d, last_soc=%d\n",
chip->reported_soc, chip->last_soc);
return chip->reported_soc;
}
#define SOC_CATCHUP_SEC_MAX 600
#define SOC_CATCHUP_SEC_PER_PERCENT 60
#define MAX_CATCHUP_SOC (SOC_CATCHUP_SEC_MAX / SOC_CATCHUP_SEC_PER_PERCENT)
#define SOC_CHANGE_PER_SEC 5
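/*
 * With the values above: catch-up is capped at 600 s, a charging SOC jump
 * of MAX_CATCHUP_SOC (10%) or more uses the full cap, and outside of the
 * low-voltage/low-temperature paths the reported SOC is allowed to move
 * by at most 1% per SOC_CHANGE_PER_SEC (5) seconds of elapsed time.
 */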
static int report_vm_bms_soc(struct qpnp_bms_chip *chip)
{
int soc, soc_change, batt_temp, rc;
int time_since_last_change_sec = 0, charge_time_sec = 0;
unsigned long last_change_sec;
bool charging;
soc = chip->calculated_soc;
last_change_sec = chip->last_soc_change_sec;
calculate_delta_time(&last_change_sec, &time_since_last_change_sec);
charging = is_battery_charging(chip);
pr_debug("charging=%d last_soc=%d last_soc_unbound=%d\n",
charging, chip->last_soc, chip->last_soc_unbound);
/*
* account for charge time - limit it to SOC_CATCHUP_SEC to
* avoid overflows when charging continues for extended periods
*/
if (charging && chip->last_soc != -EINVAL) {
if (chip->charge_start_tm_sec == 0) {
/*
* calculating soc for the first time
* after start of chg. Initialize catchup time
*/
if (abs(soc - chip->last_soc) < MAX_CATCHUP_SOC)
chip->catch_up_time_sec =
(soc - chip->last_soc)
* SOC_CATCHUP_SEC_PER_PERCENT;
else
chip->catch_up_time_sec = SOC_CATCHUP_SEC_MAX;
if (chip->catch_up_time_sec < 0)
chip->catch_up_time_sec = 0;
chip->charge_start_tm_sec = last_change_sec;
}
charge_time_sec = min(SOC_CATCHUP_SEC_MAX, (int)last_change_sec
- chip->charge_start_tm_sec);
/* end catchup if calculated soc and last soc are the same */
if (chip->last_soc == soc)
chip->catch_up_time_sec = 0;
}
if (chip->last_soc != -EINVAL) {
/*
* last_soc < soc ... if we have not been charging at all
* since the last time this was called, report previous SoC.
* Otherwise, scale and catch up.
*/
rc = get_batt_therm(chip, &batt_temp);
if (rc)
batt_temp = BMS_DEFAULT_TEMP;
if (chip->last_soc < soc && !charging)
soc = chip->last_soc;
else if (chip->last_soc < soc && soc != 100)
soc = scale_soc_while_chg(chip, charge_time_sec,
chip->catch_up_time_sec,
soc, chip->last_soc);
/*
* if the battery is close to cutoff or if the batt_temp
* is under the low-temp threshold allow bigger change
*/
if (bms_wake_active(&chip->vbms_lv_wake_source) ||
(batt_temp <= chip->dt.cfg_low_temp_threshold))
soc_change = min((int)abs(chip->last_soc - soc),
time_since_last_change_sec);
else
soc_change = min((int)abs(chip->last_soc - soc),
time_since_last_change_sec
/ SOC_CHANGE_PER_SEC);
if (chip->last_soc_unbound) {
chip->last_soc_unbound = false;
} else {
/*
* if soc has not been unbound by resume,
* only change the reported SoC by 1.
*/
soc_change = min(1, soc_change);
}
if (soc < chip->last_soc && soc != 0)
soc = chip->last_soc - soc_change;
if (soc > chip->last_soc && soc != 100)
soc = chip->last_soc + soc_change;
}
if (chip->last_soc != soc && !chip->last_soc_unbound)
chip->last_soc_change_sec = last_change_sec;
/*
* Check/update EOC under the following conditions:
* if there is a change in soc (soc != chip->last_soc), or
* during bootup if soc is 100.
*/
soc = bound_soc(soc);
if ((soc != chip->last_soc) || (soc == 100)) {
chip->last_soc = soc;
check_eoc_condition(chip);
if ((chip->dt.cfg_soc_resume_limit > 0) && !charging)
check_recharge_condition(chip);
}
pr_debug("last_soc=%d calculated_soc=%d soc=%d time_since_last_change=%d\n",
chip->last_soc, chip->calculated_soc,
soc, time_since_last_change_sec);
/*
* Backup the actual ocv (last_ocv_uv) and not the
* last_soc-interpolated ocv. This makes sure that
* the BMS algorithm always uses the correct ocv and
* can catch up on the last_soc (across reboots).
* We do not want the algorithm to be based on a wrong
* initial OCV.
*/
backup_ocv_soc(chip, chip->last_ocv_uv, chip->last_soc);
if (chip->reported_soc_in_use)
return prepare_reported_soc(chip);
pr_debug("Reported SOC=%d\n", chip->last_soc);
return chip->last_soc;
}
static int report_state_of_charge(struct qpnp_bms_chip *chip)
{
int soc;
mutex_lock(&chip->last_soc_mutex);
if (chip->dt.cfg_use_voltage_soc)
soc = report_voltage_based_soc(chip);
else
soc = report_vm_bms_soc(chip);
mutex_unlock(&chip->last_soc_mutex);
return soc;
}
static void btm_notify_vbat(enum qpnp_tm_state state, void *ctx)
{
struct qpnp_bms_chip *chip = ctx;
int vbat_uv;
int rc;
rc = get_battery_voltage(chip, &vbat_uv);
if (rc) {
pr_err("error reading vbat_sns adc channel=%d, rc=%d\n",
VBAT_SNS, rc);
goto out;
}
pr_debug("vbat is at %d, state is at %d\n", vbat_uv, state);
if (state == ADC_TM_LOW_STATE) {
pr_debug("low voltage btm notification triggered\n");
if (vbat_uv <= (chip->vbat_monitor_params.low_thr
+ VBATT_ERROR_MARGIN)) {
if (!bms_wake_active(&chip->vbms_lv_wake_source))
bms_stay_awake(&chip->vbms_lv_wake_source);
chip->vbat_monitor_params.state_request =
ADC_TM_HIGH_THR_ENABLE;
} else {
pr_debug("faulty btm trigger, discarding\n");
goto out;
}
} else if (state == ADC_TM_HIGH_STATE) {
pr_debug("high voltage btm notification triggered\n");
if (vbat_uv > chip->vbat_monitor_params.high_thr) {
chip->vbat_monitor_params.state_request =
ADC_TM_LOW_THR_ENABLE;
if (bms_wake_active(&chip->vbms_lv_wake_source))
bms_relax(&chip->vbms_lv_wake_source);
} else {
pr_debug("faulty btm trigger, discarding\n");
goto out;
}
} else {
pr_debug("unknown voltage notification state: %d\n", state);
goto out;
}
if (chip->bms_psy_registered)
power_supply_changed(&chip->bms_psy);
out:
qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
&chip->vbat_monitor_params);
}
static int reset_vbat_monitoring(struct qpnp_bms_chip *chip)
{
int rc;
chip->vbat_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_DISABLE;
rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
&chip->vbat_monitor_params);
if (rc) {
pr_err("tm disable failed: %d\n", rc);
return rc;
}
if (bms_wake_active(&chip->vbms_lv_wake_source))
bms_relax(&chip->vbms_lv_wake_source);
return 0;
}
static int setup_vbat_monitoring(struct qpnp_bms_chip *chip)
{
int rc;
chip->vbat_monitor_params.low_thr =
chip->dt.cfg_low_voltage_threshold;
chip->vbat_monitor_params.high_thr =
chip->dt.cfg_low_voltage_threshold
+ VBATT_ERROR_MARGIN;
chip->vbat_monitor_params.state_request = ADC_TM_LOW_THR_ENABLE;
chip->vbat_monitor_params.channel = VBAT_SNS;
chip->vbat_monitor_params.btm_ctx = chip;
chip->vbat_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S;
chip->vbat_monitor_params.threshold_notification = &btm_notify_vbat;
pr_debug("set low thr to %d and high to %d\n",
chip->vbat_monitor_params.low_thr,
chip->vbat_monitor_params.high_thr);
rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
&chip->vbat_monitor_params);
if (rc) {
pr_err("adc-tm setup failed: %d\n", rc);
return rc;
}
pr_debug("vbat monitoring setup complete\n");
return 0;
}
static void very_low_voltage_check(struct qpnp_bms_chip *chip, int vbat_uv)
{
if (!bms_wake_active(&chip->vbms_lv_wake_source)
&& (vbat_uv <= chip->dt.cfg_low_voltage_threshold)) {
pr_debug("voltage=%d holding low voltage ws\n", vbat_uv);
bms_stay_awake(&chip->vbms_lv_wake_source);
} else if (bms_wake_active(&chip->vbms_lv_wake_source)
&& (vbat_uv > chip->dt.cfg_low_voltage_threshold)) {
pr_debug("voltage=%d releasing low voltage ws\n", vbat_uv);
bms_relax(&chip->vbms_lv_wake_source);
}
}
static void cv_voltage_check(struct qpnp_bms_chip *chip, int vbat_uv)
{
if (bms_wake_active(&chip->vbms_cv_wake_source)) {
if ((vbat_uv < (chip->dt.cfg_max_voltage_uv -
VBATT_ERROR_MARGIN + CV_DROP_MARGIN))
&& !is_battery_taper_charging(chip)) {
pr_debug("Fell below CV, releasing cv ws\n");
chip->in_cv_state = false;
bms_relax(&chip->vbms_cv_wake_source);
} else if (!is_battery_charging(chip)) {
pr_debug("charging stopped, releasing cv ws\n");
chip->in_cv_state = false;
bms_relax(&chip->vbms_cv_wake_source);
}
} else if (!bms_wake_active(&chip->vbms_cv_wake_source)
&& is_battery_charging(chip)
&& ((vbat_uv > (chip->dt.cfg_max_voltage_uv -
VBATT_ERROR_MARGIN))
|| is_battery_taper_charging(chip))) {
pr_debug("CC_TO_CV voltage=%d holding cv ws\n", vbat_uv);
chip->in_cv_state = true;
bms_stay_awake(&chip->vbms_cv_wake_source);
}
}
static void low_soc_check(struct qpnp_bms_chip *chip)
{
int rc;
if (chip->dt.cfg_low_soc_fifo_length < 1)
return;
mutex_lock(&chip->state_change_mutex);
if (chip->calculated_soc <= chip->dt.cfg_low_soc_calc_threshold) {
if (!chip->low_soc_fifo_set) {
pr_debug("soc=%d (low-soc) setting fifo_length to %d\n",
chip->calculated_soc,
chip->dt.cfg_low_soc_fifo_length);
rc = get_fifo_length(chip, S2_STATE,
&chip->s2_fifo_length);
if (rc) {
pr_err("Unable to get_fifo_length rc=%d", rc);
goto low_soc_exit;
}
rc = set_fifo_length(chip, S2_STATE,
chip->dt.cfg_low_soc_fifo_length);
if (rc) {
pr_err("Unable to set_fifo_length rc=%d", rc);
goto low_soc_exit;
}
chip->low_soc_fifo_set = true;
}
} else {
if (chip->low_soc_fifo_set) {
pr_debug("soc=%d setting back fifo_length to %d\n",
chip->calculated_soc,
chip->s2_fifo_length);
rc = set_fifo_length(chip, S2_STATE,
chip->s2_fifo_length);
if (rc) {
pr_err("Unable to set_fifo_length rc=%d", rc);
goto low_soc_exit;
}
chip->low_soc_fifo_set = false;
}
}
low_soc_exit:
mutex_unlock(&chip->state_change_mutex);
}
static int calculate_soc_from_voltage(struct qpnp_bms_chip *chip)
{
int voltage_range_uv, voltage_remaining_uv, voltage_based_soc;
int rc, vbat_uv;
/* check if we have the averaged fifo data */
if (chip->voltage_soc_uv) {
vbat_uv = chip->voltage_soc_uv;
} else {
rc = get_battery_voltage(chip, &vbat_uv);
if (rc < 0) {
pr_err("adc vbat failed err = %d\n", rc);
return rc;
}
pr_debug("instant-voltage based voltage-soc\n");
}
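/*
 * Linear voltage-to-SOC mapping, e.g. with a hypothetical 3.4 V cutoff and
 * 4.35 V max, vbat = 3.875 V gives
 * (3875000 - 3400000) * 100 / (4350000 - 3400000) = 50%.
 */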
voltage_range_uv = chip->dt.cfg_max_voltage_uv -
chip->dt.cfg_v_cutoff_uv;
voltage_remaining_uv = vbat_uv - chip->dt.cfg_v_cutoff_uv;
voltage_based_soc = voltage_remaining_uv * 100 / voltage_range_uv;
voltage_based_soc = clamp(voltage_based_soc, 0, 100);
if (chip->prev_voltage_based_soc != voltage_based_soc
&& chip->bms_psy_registered) {
pr_debug("update bms_psy\n");
power_supply_changed(&chip->bms_psy);
}
chip->prev_voltage_based_soc = voltage_based_soc;
pr_debug("vbat used = %duv\n", vbat_uv);
pr_debug("Calculated voltage based soc=%d\n", voltage_based_soc);
if (voltage_based_soc == 100)
if (chip->dt.cfg_report_charger_eoc)
report_eoc(chip);
return 0;
}
static void calculate_reported_soc(struct qpnp_bms_chip *chip)
{
union power_supply_propval ret = {0,};
if (chip->reported_soc > chip->last_soc) {
/* send DISCHARGING status if the reported_soc drops from 100 */
if (chip->reported_soc == 100) {
ret.intval = POWER_SUPPLY_STATUS_DISCHARGING;
chip->batt_psy->set_property(chip->batt_psy,
POWER_SUPPLY_PROP_STATUS, &ret);
pr_debug("Report discharging status, reported_soc=%d, last_soc=%d\n",
chip->reported_soc, chip->last_soc);
}
/*
* reported_soc_delta is used to prevent a big jump
* in last_soc; it is not used in high current mode.
*/
if (chip->reported_soc_delta > 0)
chip->reported_soc_delta--;
if (chip->reported_soc_high_current)
chip->reported_soc--;
else
chip->reported_soc = chip->last_soc
+ chip->reported_soc_delta;
pr_debug("New reported_soc=%d, last_soc is=%d\n",
chip->reported_soc, chip->last_soc);
} else {
chip->reported_soc_in_use = false;
chip->reported_soc_high_current = false;
pr_debug("reported_soc equals last_soc,stop reported_soc process\n");
}
pr_debug("bms power_supply_changed\n");
power_supply_changed(&chip->bms_psy);
}
static int clamp_soc_based_on_voltage(struct qpnp_bms_chip *chip, int soc)
{
int rc, vbat_uv;
rc = get_battery_voltage(chip, &vbat_uv);
if (rc < 0) {
pr_err("adc vbat failed err = %d\n", rc);
return soc;
}
/* only clamp when discharging */
if (is_battery_charging(chip))
return soc;
if (soc <= 0 && vbat_uv > chip->dt.cfg_v_cutoff_uv) {
pr_debug("clamping soc to 1, vbat (%d) > cutoff (%d)\n",
vbat_uv, chip->dt.cfg_v_cutoff_uv);
return 1;
} else {
pr_debug("not clamping, using soc = %d, vbat = %d and cutoff = %d\n",
soc, vbat_uv, chip->dt.cfg_v_cutoff_uv);
return soc;
}
}
#define UI_SOC_CATCHUP_TIME (60)
static void monitor_soc_work(struct work_struct *work)
{
struct qpnp_bms_chip *chip = container_of(work,
struct qpnp_bms_chip,
monitor_soc_work.work);
int rc, vbat_uv = 0, new_soc = 0, batt_temp;
bms_stay_awake(&chip->vbms_soc_wake_source);
calculate_delta_time(&chip->tm_sec, &chip->delta_time_s);
pr_debug("elapsed_time=%d\n", chip->delta_time_s);
mutex_lock(&chip->last_soc_mutex);
if (!is_battery_present(chip)) {
/* if battery is not present, report 100% SOC */
pr_debug("battery gone, reporting 100\n");
chip->last_soc_invalid = true;
chip->last_soc = -EINVAL;
new_soc = 100;
} else {
rc = get_battery_voltage(chip, &vbat_uv);
if (rc < 0) {
pr_err("Failed to read battery-voltage rc=%d\n", rc);
} else {
very_low_voltage_check(chip, vbat_uv);
cv_voltage_check(chip, vbat_uv);
}
if (chip->dt.cfg_use_voltage_soc) {
calculate_soc_from_voltage(chip);
} else {
rc = get_batt_therm(chip, &batt_temp);
if (rc < 0) {
pr_err("Unable to read batt temp rc=%d, using default=%d\n",
rc, BMS_DEFAULT_TEMP);
batt_temp = BMS_DEFAULT_TEMP;
}
if (chip->last_soc_invalid) {
chip->last_soc_invalid = false;
chip->last_soc = -EINVAL;
}
new_soc = lookup_soc_ocv(chip, chip->last_ocv_uv,
batt_temp);
/* clamp soc due to BMS hw/sw immaturities */
new_soc = clamp_soc_based_on_voltage(chip, new_soc);
if (chip->calculated_soc != new_soc) {
pr_debug("SOC changed! new_soc=%d prev_soc=%d\n",
new_soc, chip->calculated_soc);
chip->calculated_soc = new_soc;
if (chip->calculated_soc == 100)
/* update last_soc immediately */
report_vm_bms_soc(chip);
pr_debug("update bms_psy\n");
power_supply_changed(&chip->bms_psy);
} else if (chip->last_soc != chip->calculated_soc) {
pr_debug("update bms_psy\n");
power_supply_changed(&chip->bms_psy);
} else {
report_vm_bms_soc(chip);
}
}
/* low SOC configuration */
low_soc_check(chip);
}
/*
* schedule the work only if last_soc has not caught up with
* the calculated soc or if we are using voltage based soc
*/
if ((chip->last_soc != chip->calculated_soc) ||
chip->dt.cfg_use_voltage_soc)
schedule_delayed_work(&chip->monitor_soc_work,
msecs_to_jiffies(get_calculation_delay_ms(chip)));
if (chip->reported_soc_in_use && chip->charger_removed_since_full
&& !chip->charger_reinserted) {
/* record the elapsed time after last reported_soc change */
chip->reported_soc_change_sec += chip->delta_time_s;
pr_debug("reported_soc_change_sec=%d\n",
chip->reported_soc_change_sec);
/* above the catch up time, calculate new reported_soc */
if (chip->reported_soc_change_sec > UI_SOC_CATCHUP_TIME) {
calculate_reported_soc(chip);
chip->reported_soc_change_sec = 0;
}
}
mutex_unlock(&chip->last_soc_mutex);
bms_relax(&chip->vbms_soc_wake_source);
}
static void voltage_soc_timeout_work(struct work_struct *work)
{
struct qpnp_bms_chip *chip = container_of(work,
struct qpnp_bms_chip,
voltage_soc_timeout_work.work);
mutex_lock(&chip->bms_device_mutex);
if (!chip->bms_dev_open) {
pr_warn("BMS device not opened, using voltage based SOC\n");
chip->dt.cfg_use_voltage_soc = true;
}
mutex_unlock(&chip->bms_device_mutex);
}
static int get_prop_bms_capacity(struct qpnp_bms_chip *chip)
{
return report_state_of_charge(chip);
}
static bool is_hi_power_state_requested(struct qpnp_bms_chip *chip)
{
pr_debug("hi_power_state=0x%x\n", chip->hi_power_state);
if (chip->hi_power_state & VMBMS_IGNORE_ALL_BIT)
return false;
else
return !!chip->hi_power_state;
}
static int qpnp_vm_bms_config_power_state(struct qpnp_bms_chip *chip,
int usecase, bool hi_power_enable)
{
if (usecase < 0) {
pr_err("Invalid power-usecase %x\n", usecase);
return -EINVAL;
}
if (hi_power_enable)
chip->hi_power_state |= usecase;
else
chip->hi_power_state &= ~usecase;
pr_debug("hi_power_state=%x usecase=%x hi_power_enable=%d\n",
chip->hi_power_state, usecase, hi_power_enable);
return 0;
}
static int get_prop_bms_current_now(struct qpnp_bms_chip *chip)
{
return chip->current_now;
}
static enum power_supply_property bms_power_props[] = {
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_RESISTANCE,
POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE,
POWER_SUPPLY_PROP_RESISTANCE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_VOLTAGE_OCV,
POWER_SUPPLY_PROP_HI_POWER,
POWER_SUPPLY_PROP_LOW_POWER,
POWER_SUPPLY_PROP_BATTERY_TYPE,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_CYCLE_COUNT,
};
static int
qpnp_vm_bms_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
switch (psp) {
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_VOLTAGE_OCV:
case POWER_SUPPLY_PROP_HI_POWER:
case POWER_SUPPLY_PROP_LOW_POWER:
return 1;
default:
break;
}
return 0;
}
static int qpnp_vm_bms_power_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct qpnp_bms_chip *chip = container_of(psy,
struct qpnp_bms_chip, bms_psy);
int value = 0, rc;
val->intval = 0;
switch (psp) {
case POWER_SUPPLY_PROP_CAPACITY:
val->intval = get_prop_bms_capacity(chip);
break;
case POWER_SUPPLY_PROP_STATUS:
val->intval = chip->battery_status;
break;
case POWER_SUPPLY_PROP_RESISTANCE:
val->intval = get_prop_bms_rbatt(chip);
break;
case POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE:
if (chip->batt_data->rbatt_capacitive_mohm > 0)
val->intval = chip->batt_data->rbatt_capacitive_mohm;
if (chip->dt.cfg_r_conn_mohm > 0)
val->intval += chip->dt.cfg_r_conn_mohm;
break;
case POWER_SUPPLY_PROP_RESISTANCE_NOW:
rc = get_batt_therm(chip, &value);
if (rc < 0)
value = BMS_DEFAULT_TEMP;
val->intval = get_rbatt(chip, chip->calculated_soc, value);
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
val->intval = get_prop_bms_current_now(chip);
break;
case POWER_SUPPLY_PROP_BATTERY_TYPE:
val->strval = chip->batt_data->battery_type;
break;
case POWER_SUPPLY_PROP_VOLTAGE_OCV:
val->intval = chip->last_ocv_uv;
break;
case POWER_SUPPLY_PROP_TEMP:
rc = get_batt_therm(chip, &value);
if (rc < 0)
value = BMS_DEFAULT_TEMP;
val->intval = value;
break;
case POWER_SUPPLY_PROP_HI_POWER:
val->intval = is_hi_power_state_requested(chip);
break;
case POWER_SUPPLY_PROP_LOW_POWER:
val->intval = !is_hi_power_state_requested(chip);
break;
case POWER_SUPPLY_PROP_CYCLE_COUNT:
if (chip->dt.cfg_battery_aging_comp)
val->intval = chip->charge_cycles;
else
val->intval = -EINVAL;
break;
default:
return -EINVAL;
}
return 0;
}
static int qpnp_vm_bms_power_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
{
int rc = 0;
struct qpnp_bms_chip *chip = container_of(psy,
struct qpnp_bms_chip, bms_psy);
switch (psp) {
case POWER_SUPPLY_PROP_CURRENT_NOW:
chip->current_now = val->intval;
pr_debug("IBATT = %d\n", val->intval);
break;
case POWER_SUPPLY_PROP_VOLTAGE_OCV:
cancel_delayed_work_sync(&chip->monitor_soc_work);
chip->last_ocv_uv = val->intval;
pr_debug("OCV = %d\n", val->intval);
schedule_delayed_work(&chip->monitor_soc_work, 0);
break;
case POWER_SUPPLY_PROP_HI_POWER:
rc = qpnp_vm_bms_config_power_state(chip, val->intval, true);
if (rc)
pr_err("Unable to set power-state rc=%d\n", rc);
break;
case POWER_SUPPLY_PROP_LOW_POWER:
rc = qpnp_vm_bms_config_power_state(chip, val->intval, false);
if (rc)
pr_err("Unable to set power-state rc=%d\n", rc);
break;
default:
return -EINVAL;
}
return rc;
}
static void bms_new_battery_setup(struct qpnp_bms_chip *chip)
{
int rc;
mutex_lock(&chip->bms_data_mutex);
chip->last_soc_invalid = true;
/*
* disable and re-enable the BMS hardware to reset
* the realtime-FIFO data and restart accumulation
*/
rc = qpnp_masked_write_base(chip, chip->base + EN_CTL_REG,
BMS_EN_BIT, 0);
/* delay for the BMS hardware to reset its state */
msleep(200);
rc |= qpnp_masked_write_base(chip, chip->base + EN_CTL_REG,
BMS_EN_BIT, BMS_EN_BIT);
/* delay for the BMS hardware to re-start */
msleep(200);
if (rc)
pr_err("Unable to reset BMS rc=%d\n", rc);
chip->last_ocv_uv = estimate_ocv(chip);
memset(&chip->bms_data, 0, sizeof(chip->bms_data));
/* update the sequence number */
chip->bms_data.seq_num = chip->seq_num++;
/* signal the read thread */
chip->data_ready = 1;
wake_up_interruptible(&chip->bms_wait_q);
/* hold a wake lock until the read thread is scheduled */
if (chip->bms_dev_open)
pm_stay_awake(chip->dev);
mutex_unlock(&chip->bms_data_mutex);
/* reset aging variables */
if (chip->dt.cfg_battery_aging_comp) {
chip->charge_cycles = 0;
chip->charge_increase = 0;
rc = backup_charge_cycle(chip);
if (rc)
pr_err("Unable to reset aging data rc=%d\n", rc);
}
}
static void battery_insertion_check(struct qpnp_bms_chip *chip)
{
int present = (int)is_battery_present(chip);
if (chip->battery_present != present) {
pr_debug("shadow_sts=%d status=%d\n",
chip->battery_present, present);
if (chip->battery_present != -EINVAL) {
if (present) {
/* new battery inserted */
bms_new_battery_setup(chip);
setup_vbat_monitoring(chip);
pr_debug("New battery inserted!\n");
} else {
/* battery removed */
reset_vbat_monitoring(chip);
pr_debug("Battery removed\n");
}
}
chip->battery_present = present;
}
}
static void battery_status_check(struct qpnp_bms_chip *chip)
{
int status = get_battery_status(chip);
if (chip->battery_status != status) {
if (status == POWER_SUPPLY_STATUS_CHARGING) {
pr_debug("charging started\n");
charging_began(chip);
} else if (chip->battery_status ==
POWER_SUPPLY_STATUS_CHARGING) {
pr_debug("charging stopped\n");
charging_ended(chip);
}
if (status == POWER_SUPPLY_STATUS_FULL) {
pr_debug("battery full\n");
chip->battery_full = true;
} else if (chip->battery_status == POWER_SUPPLY_STATUS_FULL) {
pr_debug("battery not-full anymore\n");
chip->battery_full = false;
}
chip->battery_status = status;
}
}
#define HIGH_CURRENT_TH 2
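/*
 * High-current mode is entered when reported_soc runs ahead of last_soc by
 * more than (100 - cfg_soc_resume_limit + HIGH_CURRENT_TH) percent.
 */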
static void reported_soc_check_status(struct qpnp_bms_chip *chip)
{
u8 present;
present = is_charger_present(chip);
pr_debug("usb_present=%d\n", present);
if (!present && !chip->charger_removed_since_full) {
chip->charger_removed_since_full = true;
pr_debug("reported_soc: charger removed since full\n");
return;
}
if (chip->reported_soc_high_current) {
pr_debug("reported_soc in high current mode, return\n");
return;
}
if ((chip->reported_soc - chip->last_soc) >
(100 - chip->dt.cfg_soc_resume_limit
+ HIGH_CURRENT_TH)) {
chip->reported_soc_high_current = true;
chip->charger_removed_since_full = true;
chip->charger_reinserted = false;
pr_debug("reported_soc enters high current mode\n");
return;
}
if (present && chip->charger_removed_since_full) {
chip->charger_reinserted = true;
pr_debug("reported_soc: charger reinserted\n");
}
if (!present && chip->charger_removed_since_full) {
chip->charger_reinserted = false;
pr_debug("reported_soc: charger removed again\n");
}
}
static void qpnp_vm_bms_ext_power_changed(struct power_supply *psy)
{
struct qpnp_bms_chip *chip = container_of(psy, struct qpnp_bms_chip,
bms_psy);
pr_debug("Triggered!\n");
battery_status_check(chip);
battery_insertion_check(chip);
if (chip->reported_soc_in_use)
reported_soc_check_status(chip);
}
static void dump_bms_data(const char *func, struct qpnp_bms_chip *chip)
{
int i;
pr_debug("%s: fifo_count=%d acc_count=%d seq_num=%d\n",
func, chip->bms_data.num_fifo,
chip->bms_data.acc_count,
chip->bms_data.seq_num);
for (i = 0; i < chip->bms_data.num_fifo; i++)
pr_debug("fifo=%d fifo_uv=%d sample_interval=%d sample_count=%d\n",
i, chip->bms_data.fifo_uv[i],
chip->bms_data.sample_interval_ms,
chip->bms_data.sample_count);
pr_debug("avg_acc_data=%d\n", chip->bms_data.acc_uv);
}
static int read_and_populate_fifo_data(struct qpnp_bms_chip *chip)
{
u8 fifo_count = 0, val = 0;
u8 fifo_data_raw[MAX_FIFO_REGS * 2];
u16 fifo_data;
int rc, i, j;
int64_t voltage_soc_avg = 0;
/* read the completed FIFO count */
rc = qpnp_read_wrapper(chip, &val, chip->base + STATUS2_REG, 1);
if (rc) {
pr_err("Unable to read STATUS2 register rc=%d\n", rc);
return rc;
}
fifo_count = (val & FIFO_CNT_SD_MASK) >> FIFO_CNT_SD_SHIFT;
pr_debug("fifo_count=%d\n", fifo_count);
if (!fifo_count) {
pr_debug("No data in FIFO\n");
return 0;
} else if (fifo_count > MAX_FIFO_REGS) {
pr_err("Invalid fifo-length %d rejecting data\n", fifo_count);
chip->bms_data.num_fifo = 0;
return 0;
}
/* read the FIFO data */
for (i = 0; i < fifo_count * 2; i++) {
rc = qpnp_read_wrapper(chip, &fifo_data_raw[i],
chip->base + FIFO_0_LSB_REG + i, 1);
if (rc) {
pr_err("Unable to read FIFO register(%d) rc=%d\n",
i, rc);
return rc;
}
}
/* populate the structure */
chip->bms_data.num_fifo = fifo_count;
rc = get_sample_interval(chip, chip->current_fsm_state,
&chip->bms_data.sample_interval_ms);
if (rc) {
pr_err("Unable to read state=%d sample_interval rc=%d\n",
chip->current_fsm_state, rc);
return rc;
}
rc = get_sample_count(chip, chip->current_fsm_state,
&chip->bms_data.sample_count);
if (rc) {
pr_err("Unable to read state=%d sample_count rc=%d\n",
chip->current_fsm_state, rc);
return rc;
}
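/* each FIFO sample spans two consecutive registers, LSB first */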
for (i = 0, j = 0; i < fifo_count * 2; i = i + 2, j++) {
fifo_data = fifo_data_raw[i] | (fifo_data_raw[i + 1] << 8);
chip->bms_data.fifo_uv[j] = convert_vbatt_raw_to_uv(chip,
fifo_data, 0);
voltage_soc_avg += chip->bms_data.fifo_uv[j];
}
/* store the fifo average for voltage-based-soc */
chip->voltage_soc_uv = div_u64(voltage_soc_avg, fifo_count);
return 0;
}
static int read_and_populate_acc_data(struct qpnp_bms_chip *chip)
{
int rc;
u32 acc_data_sd = 0, acc_count_sd = 0, avg_acc_data = 0;
/* read ACC SD count */
rc = qpnp_read_wrapper(chip, (u8 *)&acc_count_sd,
chip->base + ACC_CNT_SD_REG, 1);
if (rc) {
pr_err("Unable to read ACC_CNT_SD_REG rc=%d\n", rc);
return rc;
}
if (!acc_count_sd) {
pr_debug("No data in accumulator\n");
return 0;
}
/* read ACC SD data */
rc = qpnp_read_wrapper(chip, (u8 *)&acc_data_sd,
chip->base + ACC_DATA0_SD_REG, 3);
if (rc) {
pr_err("Unable to read ACC_DATA0_SD_REG rc=%d\n", rc);
return rc;
}
avg_acc_data = div_u64(acc_data_sd, acc_count_sd);
chip->bms_data.acc_uv = convert_vbatt_raw_to_uv(chip,
avg_acc_data, 0);
chip->bms_data.acc_count = acc_count_sd;
rc = get_sample_interval(chip, chip->current_fsm_state,
&chip->bms_data.sample_interval_ms);
if (rc) {
pr_err("Unable to read state=%d sample_interval rc=%d\n",
chip->current_fsm_state, rc);
return rc;
}
rc = get_sample_count(chip, chip->current_fsm_state,
&chip->bms_data.sample_count);
if (rc) {
pr_err("Unable to read state=%d sample_count rc=%d\n",
chip->current_fsm_state, rc);
return rc;
}
return 0;
}
static int clear_fifo_acc_data(struct qpnp_bms_chip *chip)
{
int rc;
u8 reg = 0;
reg = FIFO_CNT_SD_CLR_BIT | ACC_DATA_SD_CLR_BIT | ACC_CNT_SD_CLR_BIT;
rc = qpnp_masked_write_base(chip, chip->base + DATA_CTL2_REG, reg, reg);
if (rc)
pr_err("Unable to write DATA_CTL2_REG rc=%d\n", rc);
return rc;
}
static irqreturn_t bms_fifo_update_done_irq_handler(int irq, void *_chip)
{
int rc;
struct qpnp_bms_chip *chip = _chip;
pr_debug("fifo_update_done triggered\n");
mutex_lock(&chip->bms_data_mutex);
if (chip->suspend_data_valid) {
pr_debug("Suspend data not processed yet\n");
goto fail_fifo;
}
rc = calib_vadc(chip);
if (rc)
pr_err("Unable to calibrate vadc rc=%d\n", rc);
/* clear old data */
memset(&chip->bms_data, 0, sizeof(chip->bms_data));
/*
* 1. Read FIFO and populate the bms_data
* 2. Clear FIFO data
* 3. Notify userspace
*/
rc = update_fsm_state(chip);
if (rc) {
pr_err("Unable to read FSM state rc=%d\n", rc);
goto fail_fifo;
}
pr_debug("fsm_state=%d\n", chip->current_fsm_state);
rc = read_and_populate_fifo_data(chip);
if (rc) {
pr_err("Unable to read FIFO data rc=%d\n", rc);
goto fail_fifo;
}
rc = clear_fifo_acc_data(chip);
if (rc)
pr_err("Unable to clear FIFO/ACC data rc=%d\n", rc);
/* update the sequence number */
chip->bms_data.seq_num = chip->seq_num++;
dump_bms_data(__func__, chip);
/* signal the read thread */
chip->data_ready = 1;
wake_up_interruptible(&chip->bms_wait_q);
/* hold a wake lock until the read thread is scheduled */
if (chip->bms_dev_open)
pm_stay_awake(chip->dev);
fail_fifo:
mutex_unlock(&chip->bms_data_mutex);
return IRQ_HANDLED;
}
static irqreturn_t bms_fsm_state_change_irq_handler(int irq, void *_chip)
{
int rc;
struct qpnp_bms_chip *chip = _chip;
pr_debug("fsm_state_changed triggered\n");
mutex_lock(&chip->bms_data_mutex);
if (chip->suspend_data_valid) {
pr_debug("Suspend data not processed yet\n");
goto fail_state;
}
rc = calib_vadc(chip);
if (rc)
pr_err("Unable to calibrate vadc rc=%d\n", rc);
/* clear old data */
memset(&chip->bms_data, 0, sizeof(chip->bms_data));
/*
* 1. Read FIFO and ACC_DATA and populate the bms_data
* 2. Clear FIFO & ACC data
* 3. Notify userspace
*/
pr_debug("prev_fsm_state=%d\n", chip->current_fsm_state);
rc = read_and_populate_fifo_data(chip);
if (rc) {
pr_err("Unable to read FIFO data rc=%d\n", rc);
goto fail_state;
}
/* read accumulator data */
rc = read_and_populate_acc_data(chip);
if (rc) {
pr_err("Unable to read ACC_SD data rc=%d\n", rc);
goto fail_state;
}
rc = update_fsm_state(chip);
if (rc) {
pr_err("Unable to read FSM state rc=%d\n", rc);
goto fail_state;
}
rc = clear_fifo_acc_data(chip);
if (rc)
pr_err("Unable to clear FIFO/ACC data rc=%d\n", rc);
/* update the sequence number */
chip->bms_data.seq_num = chip->seq_num++;
dump_bms_data(__func__, chip);
/* signal the read thread */
chip->data_ready = 1;
wake_up_interruptible(&chip->bms_wait_q);
/* hold a wake lock until the read thread is scheduled */
if (chip->bms_dev_open)
pm_stay_awake(chip->dev);
fail_state:
mutex_unlock(&chip->bms_data_mutex);
return IRQ_HANDLED;
}
static int read_shutdown_ocv_soc(struct qpnp_bms_chip *chip)
{
u8 stored_soc = 0;
u16 stored_ocv = 0;
int rc;
rc = qpnp_read_wrapper(chip, (u8 *)&stored_ocv,
chip->base + BMS_OCV_REG, 2);
if (rc) {
pr_err("failed to read addr = %d %d\n",
chip->base + BMS_OCV_REG, rc);
return -EINVAL;
}
/* if shutdown OCV is invalid, reject the shutdown SOC too */
if (!stored_ocv || (stored_ocv == OCV_INVALID)) {
pr_debug("shutdown OCV %d - invalid\n", stored_ocv);
chip->shutdown_ocv = OCV_INVALID;
chip->shutdown_soc = SOC_INVALID;
return -EINVAL;
}
chip->shutdown_ocv = stored_ocv * 1000;
/*
* The previous SOC is stored in the first 7 bits of the register as
* (Shutdown SOC + 1). This allows for register reset values of both
* 0x00 and 0xFF.
*/
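/*
* Worked example (illustrative): a stored value of 0x64 decodes as
* (0x64 >> 1) - 1 = 50 - 1 = 49% SOC, while 0x00 and SOC_INVALID (0xFF)
* are rejected as register reset values.
*/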
rc = qpnp_read_wrapper(chip, &stored_soc, chip->base + BMS_SOC_REG, 1);
if (rc) {
pr_err("failed to read addr = %d %d\n",
chip->base + BMS_SOC_REG, rc);
return -EINVAL;
}
if (!stored_soc || stored_soc == SOC_INVALID) {
chip->shutdown_soc = SOC_INVALID;
chip->shutdown_ocv = OCV_INVALID;
return -EINVAL;
} else {
chip->shutdown_soc = (stored_soc >> 1) - 1;
}
pr_debug("shutdown_ocv=%d shutdown_soc=%d\n",
chip->shutdown_ocv, chip->shutdown_soc);
return 0;
}
static int interpolate_current_comp(int die_temp)
{
int i;
int num_rows = ARRAY_SIZE(temp_curr_comp_lut);
if (die_temp <= (temp_curr_comp_lut[0].temp_decideg))
return temp_curr_comp_lut[0].current_ma;
if (die_temp >= (temp_curr_comp_lut[num_rows - 1].temp_decideg))
return temp_curr_comp_lut[num_rows - 1].current_ma;
for (i = 0; i < num_rows - 1; i++)
if (die_temp <= (temp_curr_comp_lut[i].temp_decideg))
break;
if (die_temp == (temp_curr_comp_lut[i].temp_decideg))
return temp_curr_comp_lut[i].current_ma;
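/*
* Between two LUT rows the compensation current is linearly
* interpolated; a sketch of the math, assuming linear_interpolate()
* takes (y0, x0, y1, x1, x):
* current_ma = y0 + (y1 - y0) * (die_temp - x0) / (x1 - x0)
*/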
return linear_interpolate(
temp_curr_comp_lut[i - 1].current_ma,
temp_curr_comp_lut[i - 1].temp_decideg,
temp_curr_comp_lut[i].current_ma,
temp_curr_comp_lut[i].temp_decideg,
die_temp);
}
static void adjust_pon_ocv(struct qpnp_bms_chip *chip, int batt_temp)
{
int rc, current_ma, rbatt_mohm, die_temp, delta_uv, pc;
struct qpnp_vadc_result result;
rc = qpnp_vadc_read(chip->vadc_dev, DIE_TEMP, &result);
if (rc) {
pr_err("error reading adc channel=%d, rc=%d\n", DIE_TEMP, rc);
} else {
pc = interpolate_pc(chip->batt_data->pc_temp_ocv_lut,
batt_temp, chip->last_ocv_uv / 1000);
/*
* For pc < 2, use the rbatt of pc = 2. This is to avoid
* the huge rbatt values at pc < 2 which can disrupt the pon_ocv
* calculations.
*/
if (pc < 2)
pc = 2;
rbatt_mohm = get_rbatt(chip, pc, batt_temp);
/* convert die_temp to DECIDEGC */
die_temp = (int)result.physical / 100;
current_ma = interpolate_current_comp(die_temp);
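/*
* Unit note: rbatt is in milliohm and the compensation current in
* milliampere, so the product below is directly in microvolt
* (1 mohm * 1 mA = 1 uV); e.g. 150 mohm * 20 mA = 3000 uV (values
* illustrative).
*/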
delta_uv = rbatt_mohm * current_ma;
pr_debug("PON OCV changed from %d to %d pc=%d rbatt=%d current_ma=%d die_temp=%d batt_temp=%d delta_uv=%d\n",
chip->last_ocv_uv, chip->last_ocv_uv + delta_uv, pc,
rbatt_mohm, current_ma, die_temp, batt_temp, delta_uv);
chip->last_ocv_uv += delta_uv;
}
}
static int calculate_initial_soc(struct qpnp_bms_chip *chip)
{
int rc, batt_temp = 0, est_ocv = 0;
rc = get_batt_therm(chip, &batt_temp);
if (rc < 0) {
pr_err("Unable to read batt temp, using default=%d\n",
BMS_DEFAULT_TEMP);
batt_temp = BMS_DEFAULT_TEMP;
}
rc = read_and_update_ocv(chip, batt_temp, true);
if (rc) {
pr_err("Unable to read PON OCV rc=%d\n", rc);
return rc;
}
rc = read_shutdown_ocv_soc(chip);
if (rc < 0 || chip->dt.cfg_ignore_shutdown_soc)
chip->shutdown_soc_invalid = true;
if (chip->warm_reset) {
/*
* If we have powered on from a warm reset, always use the shutdown
* SOC. If the shutdown SOC is invalid, estimate the OCV instead.
*/
if (chip->shutdown_soc_invalid) {
pr_debug("Estimate OCV\n");
est_ocv = estimate_ocv(chip);
if (est_ocv <= 0) {
pr_err("Unable to estimate OCV rc=%d\n",
est_ocv);
return -EINVAL;
}
chip->last_ocv_uv = est_ocv;
chip->calculated_soc = lookup_soc_ocv(chip, est_ocv,
batt_temp);
} else {
chip->last_ocv_uv = chip->shutdown_ocv;
chip->last_soc = chip->shutdown_soc;
chip->calculated_soc = lookup_soc_ocv(chip,
chip->shutdown_ocv, batt_temp);
pr_debug("Using shutdown SOC\n");
}
} else {
/*
* In PM8916 2.0 PON OCV calculation is delayed due to
* change in the ordering of power-on sequence of LDO6.
* Adjust PON OCV to include current during PON.
*/
if (chip->workaround_flag & WRKARND_PON_OCV_COMP)
adjust_pon_ocv(chip, batt_temp);
/* !warm_reset use PON OCV only if shutdown SOC is invalid */
chip->calculated_soc = lookup_soc_ocv(chip,
chip->last_ocv_uv, batt_temp);
if (!chip->shutdown_soc_invalid &&
(abs(chip->shutdown_soc - chip->calculated_soc) <
chip->dt.cfg_shutdown_soc_valid_limit)) {
chip->last_ocv_uv = chip->shutdown_ocv;
chip->last_soc = chip->shutdown_soc;
chip->calculated_soc = lookup_soc_ocv(chip,
chip->shutdown_ocv, batt_temp);
pr_debug("Using shutdown SOC\n");
} else {
chip->shutdown_soc_invalid = true;
pr_debug("Using PON SOC\n");
}
}
/* store the start-up OCV for voltage-based-soc */
chip->voltage_soc_uv = chip->last_ocv_uv;
pr_info("warm_reset=%d est_ocv=%d shutdown_soc_invalid=%d shutdown_ocv=%d shutdown_soc=%d last_soc=%d calculated_soc=%d last_ocv_uv=%d\n",
chip->warm_reset, est_ocv, chip->shutdown_soc_invalid,
chip->shutdown_ocv, chip->shutdown_soc, chip->last_soc,
chip->calculated_soc, chip->last_ocv_uv);
return 0;
}
static int calculate_initial_aging_comp(struct qpnp_bms_chip *chip)
{
int rc;
bool battery_removed = is_battery_replaced_in_offmode(chip);
if (battery_removed || chip->shutdown_soc_invalid) {
pr_info("Clearing aging data battery_removed=%d shutdown_soc_invalid=%d\n",
battery_removed, chip->shutdown_soc_invalid);
chip->charge_cycles = 0;
chip->charge_increase = 0;
rc = backup_charge_cycle(chip);
if (rc)
pr_err("Unable to reset aging data rc=%d\n", rc);
} else {
rc = read_chgcycle_data_from_backup(chip);
if (rc)
pr_err("Unable to read aging data rc=%d\n", rc);
}
pr_debug("Initial aging data charge_cycles=%u charge_increase=%u\n",
chip->charge_cycles, chip->charge_increase);
return rc;
}
static int bms_load_hw_defaults(struct qpnp_bms_chip *chip)
{
u8 val, state, bms_en = 0;
u32 interval[2], count[2], fifo[2];
int rc;
/* S3 OCV tolerance threshold */
if (chip->dt.cfg_s3_ocv_tol_uv >= 0 &&
chip->dt.cfg_s3_ocv_tol_uv <= MAX_OCV_TOL_THRESHOLD) {
val = chip->dt.cfg_s3_ocv_tol_uv / OCV_TOL_LSB_UV;
rc = qpnp_masked_write_base(chip,
chip->base + S3_OCV_TOL_CTL_REG, 0xFF, val);
if (rc) {
pr_err("Unable to write s3_ocv_tol_threshold rc=%d\n",
rc);
return rc;
}
}
/* S1 accumulator threshold */
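/*
* The driver encodes a sample count of 2^n as register value (n - 1),
* e.g. (illustrative) a DT count of 16 becomes ilog2(16) - 1 = 3,
* while a count of 1 is written as 0; this presumably mirrors a
* power-of-two field in the ACC_CNT register.
*/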
if (chip->dt.cfg_s1_sample_count >= 1 &&
chip->dt.cfg_s1_sample_count <= MAX_SAMPLE_COUNT) {
val = (chip->dt.cfg_s1_sample_count > 1) ?
(ilog2(chip->dt.cfg_s1_sample_count) - 1) : 0;
rc = qpnp_masked_write_base(chip,
chip->base + S1_ACC_CNT_REG,
ACC_CNT_MASK, val);
if (rc) {
pr_err("Unable to write s1 sample count rc=%d\n", rc);
return rc;
}
}
/* S2 accumulator threshold */
if (chip->dt.cfg_s2_sample_count >= 1 &&
chip->dt.cfg_s2_sample_count <= MAX_SAMPLE_COUNT) {
val = (chip->dt.cfg_s2_sample_count > 1) ?
(ilog2(chip->dt.cfg_s2_sample_count) - 1) : 0;
rc = qpnp_masked_write_base(chip,
chip->base + S2_ACC_CNT_REG,
ACC_CNT_MASK, val);
if (rc) {
pr_err("Unable to write s2 sample count rc=%d\n", rc);
return rc;
}
}
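/*
* The interval registers are written as interval_ms / 10, i.e. they
* appear to use a 10 ms LSB; e.g. (illustrative) a DT value of
* 2000 ms is written as 200.
*/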
if (chip->dt.cfg_s1_sample_interval_ms >= 0 &&
chip->dt.cfg_s1_sample_interval_ms <= MAX_SAMPLE_INTERVAL) {
val = chip->dt.cfg_s1_sample_interval_ms / 10;
rc = qpnp_write_wrapper(chip, &val,
chip->base + S1_SAMPLE_INTVL_REG, 1);
if (rc) {
pr_err("Unable to write s1 sample inteval rc=%d\n", rc);
return rc;
}
}
if (chip->dt.cfg_s2_sample_interval_ms >= 0 &&
chip->dt.cfg_s2_sample_interval_ms <= MAX_SAMPLE_INTERVAL) {
val = chip->dt.cfg_s2_sample_interval_ms / 10;
rc = qpnp_write_wrapper(chip, &val,
chip->base + S2_SAMPLE_INTVL_REG, 1);
if (rc) {
pr_err("Unable to write s2 sample inteval rc=%d\n", rc);
return rc;
}
}
if (chip->dt.cfg_s1_fifo_length >= 0 &&
chip->dt.cfg_s1_fifo_length <= MAX_FIFO_REGS) {
rc = qpnp_masked_write_base(chip, chip->base + FIFO_LENGTH_REG,
S1_FIFO_LENGTH_MASK,
chip->dt.cfg_s1_fifo_length);
if (rc) {
pr_err("Unable to write s1 fifo length rc=%d\n", rc);
return rc;
}
}
if (chip->dt.cfg_s2_fifo_length >= 0 &&
chip->dt.cfg_s2_fifo_length <= MAX_FIFO_REGS) {
rc = qpnp_masked_write_base(chip, chip->base +
FIFO_LENGTH_REG, S2_FIFO_LENGTH_MASK,
chip->dt.cfg_s2_fifo_length
<< S2_FIFO_LENGTH_SHIFT);
if (rc) {
pr_err("Unable to write s2 fifo length rc=%d\n", rc);
return rc;
}
}
get_sample_interval(chip, S1_STATE, &interval[0]);
get_sample_interval(chip, S2_STATE, &interval[1]);
get_sample_count(chip, S1_STATE, &count[0]);
get_sample_count(chip, S2_STATE, &count[1]);
get_fifo_length(chip, S1_STATE, &fifo[0]);
get_fifo_length(chip, S2_STATE, &fifo[1]);
/* Force the BMS state to S2 at boot-up */
rc = get_fsm_state(chip, &state);
if (rc)
pr_err("Unable to get FSM state rc=%d\n", rc);
if (rc || (state != S2_STATE)) {
pr_debug("Forcing S2 state\n");
rc = force_fsm_state(chip, S2_STATE);
if (rc)
pr_err("Unable to set FSM state rc=%d\n", rc);
}
rc = qpnp_read_wrapper(chip, &bms_en, chip->base + EN_CTL_REG, 1);
if (rc) {
pr_err("Unable to read BMS_EN state rc=%d\n", rc);
return rc;
}
rc = update_fsm_state(chip);
if (rc) {
pr_err("Unable to read FSM state rc=%d\n", rc);
return rc;
}
pr_info("BMS_EN=%d Sample_Interval-S1=[%d]S2=[%d] Sample_Count-S1=[%d]S2=[%d] Fifo_Length-S1=[%d]S2=[%d] FSM_state=%d\n",
!!bms_en, interval[0], interval[1], count[0],
count[1], fifo[0], fifo[1],
chip->current_fsm_state);
return 0;
}
static ssize_t vm_bms_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
int rc;
struct qpnp_bms_chip *chip = file->private_data;
if (!chip->data_ready && (file->f_flags & O_NONBLOCK)) {
rc = -EAGAIN;
goto fail_read;
}
rc = wait_event_interruptible(chip->bms_wait_q, chip->data_ready);
if (rc) {
pr_debug("wait failed! rc=%d\n", rc);
goto fail_read;
}
if (!chip->data_ready) {
pr_debug("No Data, false wakeup\n");
rc = -EFAULT;
goto fail_read;
}
mutex_lock(&chip->bms_data_mutex);
if (copy_to_user(buf, &chip->bms_data, sizeof(chip->bms_data))) {
pr_err("Failed in copy_to_user\n");
mutex_unlock(&chip->bms_data_mutex);
rc = -EFAULT;
goto fail_read;
}
pr_debug("Data copied!!\n");
chip->data_ready = 0;
mutex_unlock(&chip->bms_data_mutex);
/* wakelock-timeout for userspace to pick up */
pm_wakeup_event(chip->dev, BMS_READ_TIMEOUT);
return sizeof(chip->bms_data);
fail_read:
pm_relax(chip->dev);
return rc;
}
static int vm_bms_open(struct inode *inode, struct file *file)
{
struct qpnp_bms_chip *chip = container_of(inode->i_cdev,
struct qpnp_bms_chip, bms_cdev);
mutex_lock(&chip->bms_device_mutex);
if (chip->bms_dev_open) {
pr_debug("BMS device already open\n");
mutex_unlock(&chip->bms_device_mutex);
return -EBUSY;
}
chip->bms_dev_open = true;
file->private_data = chip;
pr_debug("BMS device opened\n");
mutex_unlock(&chip->bms_device_mutex);
return 0;
}
static int vm_bms_release(struct inode *inode, struct file *file)
{
struct qpnp_bms_chip *chip = container_of(inode->i_cdev,
struct qpnp_bms_chip, bms_cdev);
mutex_lock(&chip->bms_device_mutex);
chip->bms_dev_open = false;
pm_relax(chip->dev);
pr_debug("BMS device closed\n");
mutex_unlock(&chip->bms_device_mutex);
return 0;
}
static const struct file_operations bms_fops = {
.owner = THIS_MODULE,
.open = vm_bms_open,
.read = vm_bms_read,
.release = vm_bms_release,
};
static void bms_init_defaults(struct qpnp_bms_chip *chip)
{
chip->data_ready = 0;
chip->last_ocv_raw = OCV_UNINITIALIZED;
chip->battery_status = POWER_SUPPLY_STATUS_UNKNOWN;
chip->battery_present = -EINVAL;
chip->calculated_soc = -EINVAL;
chip->last_soc = -EINVAL;
chip->vbms_lv_wake_source.disabled = 1;
chip->vbms_cv_wake_source.disabled = 1;
chip->vbms_soc_wake_source.disabled = 1;
chip->ocv_at_100 = -EINVAL;
chip->prev_soc_uuc = -EINVAL;
chip->charge_cycles = 0;
chip->start_soc = 0;
chip->end_soc = 0;
chip->charge_increase = 0;
}
#define SPMI_REQUEST_IRQ(chip, rc, irq_name) \
do { \
rc = devm_request_threaded_irq(chip->dev, \
chip->irq_name##_irq.irq, NULL, \
bms_##irq_name##_irq_handler, \
IRQF_TRIGGER_RISING | IRQF_ONESHOT, \
#irq_name, chip); \
if (rc < 0) \
pr_err("Unable to request " #irq_name " irq: %d\n", rc);\
} while (0)
#define SPMI_FIND_IRQ(chip, irq_name, rc) \
do { \
chip->irq_name##_irq.irq = spmi_get_irq_byname(chip->spmi, \
resource, #irq_name); \
if (chip->irq_name##_irq.irq < 0) { \
rc = chip->irq_name##_irq.irq; \
pr_err("Unable to get " #irq_name " irq rc=%d\n", rc); \
} \
} while (0)
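/*
* Illustrative expansion (not generated code): SPMI_REQUEST_IRQ(chip, rc,
* fifo_update_done) pastes the token so that chip->fifo_update_done_irq.irq
* is requested with bms_fifo_update_done_irq_handler as its threaded
* handler and "fifo_update_done" as the irq name.
*/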
static int bms_request_irqs(struct qpnp_bms_chip *chip)
{
int rc;
SPMI_REQUEST_IRQ(chip, rc, fifo_update_done);
if (rc < 0)
return rc;
SPMI_REQUEST_IRQ(chip, rc, fsm_state_change);
if (rc < 0)
return rc;
/* Disable the state change IRQ */
disable_bms_irq(&chip->fsm_state_change_irq);
enable_irq_wake(chip->fifo_update_done_irq.irq);
return 0;
}
static int bms_find_irqs(struct qpnp_bms_chip *chip,
struct spmi_resource *resource)
{
int rc = 0;
SPMI_FIND_IRQ(chip, fifo_update_done, rc);
if (rc < 0)
return rc;
SPMI_FIND_IRQ(chip, fsm_state_change, rc);
if (rc < 0)
return rc;
return 0;
}
static int64_t read_battery_id(struct qpnp_bms_chip *chip)
{
int rc;
struct qpnp_vadc_result result;
rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX2_BAT_ID, &result);
if (rc) {
pr_err("error reading batt id channel = %d, rc = %d\n",
LR_MUX2_BAT_ID, rc);
return rc;
}
return result.physical;
}
static int show_bms_config(struct seq_file *m, void *data)
{
struct qpnp_bms_chip *chip = m->private;
int s1_sample_interval, s2_sample_interval;
int s1_sample_count, s2_sample_count;
int s1_fifo_length, s2_fifo_length;
get_sample_interval(chip, S1_STATE, &s1_sample_interval);
get_sample_interval(chip, S2_STATE, &s2_sample_interval);
get_sample_count(chip, S1_STATE, &s1_sample_count);
get_sample_count(chip, S2_STATE, &s2_sample_count);
get_fifo_length(chip, S1_STATE, &s1_fifo_length);
get_fifo_length(chip, S2_STATE, &s2_fifo_length);
seq_printf(m, "r_conn_mohm\t=\t%d\n"
"v_cutoff_uv\t=\t%d\n"
"max_voltage_uv\t=\t%d\n"
"use_voltage_soc\t=\t%d\n"
"low_soc_calc_threshold\t=\t%d\n"
"low_soc_calculate_soc_ms\t=\t%d\n"
"low_voltage_threshold\t=\t%d\n"
"low_voltage_calculate_soc_ms\t=\t%d\n"
"calculate_soc_ms\t=\t%d\n"
"voltage_soc_timeout_ms\t=\t%d\n"
"ignore_shutdown_soc\t=\t%d\n"
"shutdown_soc_valid_limit\t=\t%d\n"
"force_s3_on_suspend\t=\t%d\n"
"report_charger_eoc\t=\t%d\n"
"aging_compensation\t=\t%d\n"
"use_reported_soc\t=\t%d\n"
"s1_sample_interval_ms\t=\t%d\n"
"s2_sample_interval_ms\t=\t%d\n"
"s1_sample_count\t=\t%d\n"
"s2_sample_count\t=\t%d\n"
"s1_fifo_length\t=\t%d\n"
"s2_fifo_length\t=\t%d\n",
chip->dt.cfg_r_conn_mohm,
chip->dt.cfg_v_cutoff_uv,
chip->dt.cfg_max_voltage_uv,
chip->dt.cfg_use_voltage_soc,
chip->dt.cfg_low_soc_calc_threshold,
chip->dt.cfg_low_soc_calculate_soc_ms,
chip->dt.cfg_low_voltage_threshold,
chip->dt.cfg_low_voltage_calculate_soc_ms,
chip->dt.cfg_calculate_soc_ms,
chip->dt.cfg_voltage_soc_timeout_ms,
chip->dt.cfg_ignore_shutdown_soc,
chip->dt.cfg_shutdown_soc_valid_limit,
chip->dt.cfg_force_s3_on_suspend,
chip->dt.cfg_report_charger_eoc,
chip->dt.cfg_battery_aging_comp,
chip->dt.cfg_use_reported_soc,
s1_sample_interval,
s2_sample_interval,
s1_sample_count,
s2_sample_count,
s1_fifo_length,
s2_fifo_length);
return 0;
}
static int bms_config_open(struct inode *inode, struct file *file)
{
struct qpnp_bms_chip *chip = inode->i_private;
return single_open(file, show_bms_config, chip);
}
static const struct file_operations bms_config_debugfs_ops = {
.owner = THIS_MODULE,
.open = bms_config_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int show_bms_status(struct seq_file *m, void *data)
{
struct qpnp_bms_chip *chip = m->private;
seq_printf(m, "bms_psy_registered\t=\t%d\n"
"bms_dev_open\t=\t%d\n"
"warm_reset\t=\t%d\n"
"battery_status\t=\t%d\n"
"battery_present\t=\t%d\n"
"in_cv_state\t=\t%d\n"
"calculated_soc\t=\t%d\n"
"last_soc\t=\t%d\n"
"last_ocv_uv\t=\t%d\n"
"last_ocv_raw\t=\t%d\n"
"last_soc_unbound\t=\t%d\n"
"current_fsm_state\t=\t%d\n"
"current_now\t=\t%d\n"
"ocv_at_100\t=\t%d\n"
"low_voltage_ws_active\t=\t%d\n"
"cv_ws_active\t=\t%d\n",
chip->bms_psy_registered,
chip->bms_dev_open,
chip->warm_reset,
chip->battery_status,
chip->battery_present,
chip->in_cv_state,
chip->calculated_soc,
chip->last_soc,
chip->last_ocv_uv,
chip->last_ocv_raw,
chip->last_soc_unbound,
chip->current_fsm_state,
chip->current_now,
chip->ocv_at_100,
bms_wake_active(&chip->vbms_lv_wake_source),
bms_wake_active(&chip->vbms_cv_wake_source));
return 0;
}
static int bms_status_open(struct inode *inode, struct file *file)
{
struct qpnp_bms_chip *chip = inode->i_private;
return single_open(file, show_bms_status, chip);
}
static const struct file_operations bms_status_debugfs_ops = {
.owner = THIS_MODULE,
.open = bms_status_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int show_bms_data(struct seq_file *m, void *data)
{
struct qpnp_bms_chip *chip = m->private;
int i;
mutex_lock(&chip->bms_data_mutex);
seq_printf(m, "seq_num=%d\n", chip->bms_data.seq_num);
for (i = 0; i < chip->bms_data.num_fifo; i++)
seq_printf(m, "fifo_uv[%d]=%d sample_count=%d interval_ms=%d\n",
i, chip->bms_data.fifo_uv[i],
chip->bms_data.sample_count,
chip->bms_data.sample_interval_ms);
seq_printf(m, "acc_uv=%d sample_count=%d sample_interval=%d\n",
chip->bms_data.acc_uv, chip->bms_data.acc_count,
chip->bms_data.sample_interval_ms);
mutex_unlock(&chip->bms_data_mutex);
return 0;
}
static int bms_data_open(struct inode *inode, struct file *file)
{
struct qpnp_bms_chip *chip = inode->i_private;
return single_open(file, show_bms_data, chip);
}
static const struct file_operations bms_data_debugfs_ops = {
.owner = THIS_MODULE,
.open = bms_data_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int set_battery_data(struct qpnp_bms_chip *chip)
{
int64_t battery_id;
int rc = 0;
struct bms_battery_data *batt_data;
struct device_node *node;
battery_id = read_battery_id(chip);
if (battery_id < 0) {
pr_err("cannot read battery id err = %lld\n", battery_id);
return battery_id;
}
node = of_find_node_by_name(chip->spmi->dev.of_node,
"qcom,battery-data");
if (!node) {
pr_err("No available batterydata\n");
return -EINVAL;
}
batt_data = devm_kzalloc(chip->dev,
sizeof(struct bms_battery_data), GFP_KERNEL);
if (!batt_data) {
pr_err("Could not alloc battery data\n");
return -EINVAL;
}
batt_data->fcc_temp_lut = devm_kzalloc(chip->dev,
sizeof(struct single_row_lut), GFP_KERNEL);
batt_data->pc_temp_ocv_lut = devm_kzalloc(chip->dev,
sizeof(struct pc_temp_ocv_lut), GFP_KERNEL);
batt_data->rbatt_sf_lut = devm_kzalloc(chip->dev,
sizeof(struct sf_lut), GFP_KERNEL);
batt_data->ibat_acc_lut = devm_kzalloc(chip->dev,
sizeof(struct ibat_temp_acc_lut), GFP_KERNEL);
batt_data->max_voltage_uv = -1;
batt_data->cutoff_uv = -1;
batt_data->iterm_ua = -1;
/*
* If the allocated LUTs are zeroed out, of_batterydata_read_data
* ignores them.
*/
rc = of_batterydata_read_data(node, batt_data, battery_id);
if (rc || !batt_data->pc_temp_ocv_lut
|| !batt_data->fcc_temp_lut
|| !batt_data->rbatt_sf_lut) {
pr_err("battery data load failed\n");
devm_kfree(chip->dev, batt_data->fcc_temp_lut);
devm_kfree(chip->dev, batt_data->pc_temp_ocv_lut);
devm_kfree(chip->dev, batt_data->rbatt_sf_lut);
devm_kfree(chip->dev, batt_data->ibat_acc_lut);
devm_kfree(chip->dev, batt_data);
return rc;
}
if (batt_data->pc_temp_ocv_lut == NULL) {
pr_err("temp ocv lut table has not been loaded\n");
devm_kfree(chip->dev, batt_data->fcc_temp_lut);
devm_kfree(chip->dev, batt_data->pc_temp_ocv_lut);
devm_kfree(chip->dev, batt_data->rbatt_sf_lut);
devm_kfree(chip->dev, batt_data->ibat_acc_lut);
devm_kfree(chip->dev, batt_data);
return -EINVAL;
}
/* check if ibat_acc_lut is valid */
if (!batt_data->ibat_acc_lut->rows) {
pr_info("ibat_acc_lut not present\n");
devm_kfree(chip->dev, batt_data->ibat_acc_lut);
batt_data->ibat_acc_lut = NULL;
}
/* Override battery properties if specified in the battery profile */
if (batt_data->max_voltage_uv >= 0)
chip->dt.cfg_max_voltage_uv = batt_data->max_voltage_uv;
if (batt_data->cutoff_uv >= 0)
chip->dt.cfg_v_cutoff_uv = batt_data->cutoff_uv;
chip->batt_data = batt_data;
return 0;
}
static int parse_spmi_dt_properties(struct qpnp_bms_chip *chip,
struct spmi_device *spmi)
{
struct spmi_resource *spmi_resource;
struct resource *resource;
int rc;
chip->dev = &(spmi->dev);
chip->spmi = spmi;
spmi_for_each_container_dev(spmi_resource, spmi) {
if (!spmi_resource) {
pr_err("qpnp_vm_bms: spmi resource absent\n");
return -ENXIO;
}
resource = spmi_get_resource(spmi, spmi_resource,
IORESOURCE_MEM, 0);
if (!(resource && resource->start)) {
pr_err("node %s IO resource absent!\n",
spmi->dev.of_node->full_name);
return -ENXIO;
}
pr_debug("Node name = %s\n", spmi_resource->of_node->name);
if (strcmp("qcom,batt-pres-status",
spmi_resource->of_node->name) == 0) {
chip->batt_pres_addr = resource->start;
continue;
}
if (strcmp("qcom,qpnp-chg-pres",
spmi_resource->of_node->name) == 0) {
chip->chg_pres_addr = resource->start;
continue;
}
chip->base = resource->start;
rc = bms_find_irqs(chip, spmi_resource);
if (rc) {
pr_err("Could not find irqs rc=%d\n", rc);
return rc;
}
}
if (chip->base == 0) {
dev_err(&spmi->dev, "BMS peripheral was not registered\n");
return -EINVAL;
}
pr_debug("bms-base=0x%04x bat-pres-reg=0x%04x qpnp-chg-pres=0x%04x\n",
chip->base, chip->batt_pres_addr, chip->chg_pres_addr);
return 0;
}
#define SPMI_PROP_READ(chip_prop, qpnp_spmi_property, retval) \
do { \
if (retval) \
break; \
retval = of_property_read_u32(chip->spmi->dev.of_node, \
"qcom," qpnp_spmi_property, \
&chip->dt.chip_prop); \
if (retval) { \
pr_err("Error reading " #qpnp_spmi_property \
" property %d\n", retval); \
} \
} while (0)
#define SPMI_PROP_READ_OPTIONAL(chip_prop, qpnp_spmi_property, retval) \
do { \
retval = of_property_read_u32(chip->spmi->dev.of_node, \
"qcom," qpnp_spmi_property, \
&chip->dt.chip_prop); \
if (retval) \
chip->dt.chip_prop = -EINVAL; \
} while (0)
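/*
* Illustrative expansion: SPMI_PROP_READ(cfg_v_cutoff_uv, "v-cutoff-uv", rc)
* reads the "qcom,v-cutoff-uv" devicetree property into
* chip->dt.cfg_v_cutoff_uv and logs an error if it is missing, whereas the
* _OPTIONAL variant silently falls back to -EINVAL.
*/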
static int parse_bms_dt_properties(struct qpnp_bms_chip *chip)
{
int rc = 0;
SPMI_PROP_READ(cfg_v_cutoff_uv, "v-cutoff-uv", rc);
SPMI_PROP_READ(cfg_max_voltage_uv, "max-voltage-uv", rc);
SPMI_PROP_READ(cfg_r_conn_mohm, "r-conn-mohm", rc);
SPMI_PROP_READ(cfg_shutdown_soc_valid_limit,
"shutdown-soc-valid-limit", rc);
SPMI_PROP_READ(cfg_low_soc_calc_threshold,
"low-soc-calculate-soc-threshold", rc);
SPMI_PROP_READ(cfg_low_soc_calculate_soc_ms,
"low-soc-calculate-soc-ms", rc);
SPMI_PROP_READ(cfg_low_voltage_calculate_soc_ms,
"low-voltage-calculate-soc-ms", rc);
SPMI_PROP_READ(cfg_calculate_soc_ms, "calculate-soc-ms", rc);
SPMI_PROP_READ(cfg_low_voltage_threshold, "low-voltage-threshold", rc);
SPMI_PROP_READ(cfg_voltage_soc_timeout_ms,
"volatge-soc-timeout-ms", rc);
if (rc) {
pr_err("Missing required properties rc=%d\n", rc);
return rc;
}
SPMI_PROP_READ_OPTIONAL(cfg_s1_sample_interval_ms,
"s1-sample-interval-ms", rc);
SPMI_PROP_READ_OPTIONAL(cfg_s2_sample_interval_ms,
"s2-sample-interval-ms", rc);
SPMI_PROP_READ_OPTIONAL(cfg_s1_sample_count, "s1-sample-count", rc);
SPMI_PROP_READ_OPTIONAL(cfg_s2_sample_count, "s2-sample-count", rc);
SPMI_PROP_READ_OPTIONAL(cfg_s1_fifo_length, "s1-fifo-length", rc);
SPMI_PROP_READ_OPTIONAL(cfg_s2_fifo_length, "s2-fifo-length", rc);
SPMI_PROP_READ_OPTIONAL(cfg_s3_ocv_tol_uv, "s3-ocv-tolerence-uv", rc);
SPMI_PROP_READ_OPTIONAL(cfg_low_soc_fifo_length,
"low-soc-fifo-length", rc);
SPMI_PROP_READ_OPTIONAL(cfg_soc_resume_limit, "resume-soc", rc);
SPMI_PROP_READ_OPTIONAL(cfg_low_temp_threshold,
"low-temp-threshold", rc);
if (rc)
chip->dt.cfg_low_temp_threshold = 0;
SPMI_PROP_READ_OPTIONAL(cfg_ibat_avg_samples,
"ibat-avg-samples", rc);
if (rc || (chip->dt.cfg_ibat_avg_samples <= 0) ||
(chip->dt.cfg_ibat_avg_samples > IAVG_SAMPLES))
chip->dt.cfg_ibat_avg_samples = IAVG_SAMPLES;
chip->dt.cfg_ignore_shutdown_soc = of_property_read_bool(
chip->spmi->dev.of_node, "qcom,ignore-shutdown-soc");
chip->dt.cfg_use_voltage_soc = of_property_read_bool(
chip->spmi->dev.of_node, "qcom,use-voltage-soc");
chip->dt.cfg_force_s3_on_suspend = of_property_read_bool(
chip->spmi->dev.of_node, "qcom,force-s3-on-suspend");
chip->dt.cfg_report_charger_eoc = of_property_read_bool(
chip->spmi->dev.of_node, "qcom,report-charger-eoc");
chip->dt.cfg_disable_bms = of_property_read_bool(
chip->spmi->dev.of_node, "qcom,disable-bms");
chip->dt.cfg_force_bms_active_on_charger = of_property_read_bool(
chip->spmi->dev.of_node,
"qcom,force-bms-active-on-charger");
chip->dt.cfg_battery_aging_comp = of_property_read_bool(
chip->spmi->dev.of_node, "qcom,batt-aging-comp");
chip->dt.cfg_use_reported_soc = of_property_read_bool(
chip->spmi->dev.of_node, "qcom,use-reported-soc");
pr_debug("v_cutoff_uv=%d, max_v=%d\n", chip->dt.cfg_v_cutoff_uv,
chip->dt.cfg_max_voltage_uv);
pr_debug("r_conn=%d shutdown_soc_valid_limit=%d low_temp_threshold=%d ibat_avg_samples=%d\n",
chip->dt.cfg_r_conn_mohm,
chip->dt.cfg_shutdown_soc_valid_limit,
chip->dt.cfg_low_temp_threshold,
chip->dt.cfg_ibat_avg_samples);
pr_debug("ignore_shutdown_soc=%d, use_voltage_soc=%d low_soc_fifo_length=%d\n",
chip->dt.cfg_ignore_shutdown_soc,
chip->dt.cfg_use_voltage_soc,
chip->dt.cfg_low_soc_fifo_length);
pr_debug("force-s3-on-suspend=%d report-charger-eoc=%d disable-bms=%d disable-suspend-on-usb=%d aging_compensation=%d\n",
chip->dt.cfg_force_s3_on_suspend,
chip->dt.cfg_report_charger_eoc,
chip->dt.cfg_disable_bms,
chip->dt.cfg_force_bms_active_on_charger,
chip->dt.cfg_battery_aging_comp);
pr_debug("use-reported-soc is %d\n",
chip->dt.cfg_use_reported_soc);
return 0;
}
static int bms_get_adc(struct qpnp_bms_chip *chip,
struct spmi_device *spmi)
{
int rc = 0;
chip->vadc_dev = qpnp_get_vadc(&spmi->dev, "bms");
if (IS_ERR(chip->vadc_dev)) {
rc = PTR_ERR(chip->vadc_dev);
if (rc == -EPROBE_DEFER)
pr_err("vadc not found - defer probe rc=%d\n", rc);
else
pr_err("vadc property missing, rc=%d\n", rc);
return rc;
}
chip->adc_tm_dev = qpnp_get_adc_tm(&spmi->dev, "bms");
if (IS_ERR(chip->adc_tm_dev)) {
rc = PTR_ERR(chip->adc_tm_dev);
if (rc == -EPROBE_DEFER)
pr_err("adc-tm not found - defer probe rc=%d\n", rc);
else
pr_err("adc-tm property missing, rc=%d\n", rc);
}
return rc;
}
static int register_bms_char_device(struct qpnp_bms_chip *chip)
{
int rc;
rc = alloc_chrdev_region(&chip->dev_no, 0, 1, "vm_bms");
if (rc) {
pr_err("Unable to allocate chrdev rc=%d\n", rc);
return rc;
}
cdev_init(&chip->bms_cdev, &bms_fops);
rc = cdev_add(&chip->bms_cdev, chip->dev_no, 1);
if (rc) {
pr_err("Unable to add bms_cdev rc=%d\n", rc);
goto unregister_chrdev;
}
chip->bms_class = class_create(THIS_MODULE, "vm_bms");
if (IS_ERR_OR_NULL(chip->bms_class)) {
pr_err("Fail to create bms class\n");
rc = -EINVAL;
goto delete_cdev;
}
chip->bms_device = device_create(chip->bms_class,
NULL, chip->dev_no,
NULL, "vm_bms");
if (IS_ERR(chip->bms_device)) {
pr_err("Fail to create bms_device device\n");
rc = -EINVAL;
goto delete_cdev;
}
return 0;
delete_cdev:
cdev_del(&chip->bms_cdev);
unregister_chrdev:
unregister_chrdev_region(chip->dev_no, 1);
return rc;
}
static int qpnp_vm_bms_probe(struct spmi_device *spmi)
{
struct qpnp_bms_chip *chip;
struct device_node *revid_dev_node;
int rc, vbatt = 0;
chip = devm_kzalloc(&spmi->dev, sizeof(*chip), GFP_KERNEL);
if (!chip) {
pr_err("kzalloc() failed.\n");
return -ENOMEM;
}
rc = bms_get_adc(chip, spmi);
if (rc < 0) {
pr_err("Failed to get adc rc=%d\n", rc);
return rc;
}
revid_dev_node = of_parse_phandle(spmi->dev.of_node,
"qcom,pmic-revid", 0);
if (!revid_dev_node) {
pr_err("Missing qcom,pmic-revid property\n");
return -EINVAL;
}
chip->revid_data = get_revid_data(revid_dev_node);
if (IS_ERR(chip->revid_data)) {
pr_err("revid error rc = %ld\n", PTR_ERR(chip->revid_data));
return -EINVAL;
}
if ((chip->revid_data->pmic_subtype == PM8916_V2P0_SUBTYPE) &&
chip->revid_data->rev4 == PM8916_V2P0_REV4)
chip->workaround_flag |= WRKARND_PON_OCV_COMP;
rc = qpnp_pon_is_warm_reset();
if (rc < 0) {
pr_err("Error reading warm reset status rc=%d\n", rc);
return rc;
}
chip->warm_reset = !!rc;
rc = parse_spmi_dt_properties(chip, spmi);
if (rc) {
pr_err("Error registering spmi resource rc=%d\n", rc);
return rc;
}
rc = parse_bms_dt_properties(chip);
if (rc) {
pr_err("Unable to read all bms properties, rc = %d\n", rc);
return rc;
}
if (chip->dt.cfg_disable_bms) {
pr_info("VMBMS disabled (disable-bms = 1)\n");
rc = qpnp_masked_write_base(chip, chip->base + EN_CTL_REG,
BMS_EN_BIT, 0);
if (rc)
pr_err("Unable to disable VMBMS rc=%d\n", rc);
return -ENODEV;
}
rc = qpnp_read_wrapper(chip, chip->revision,
chip->base + REVISION1_REG, 2);
if (rc) {
pr_err("Error reading version register rc=%d\n", rc);
return rc;
}
pr_debug("BMS version: %hhu.%hhu\n",
chip->revision[1], chip->revision[0]);
dev_set_drvdata(&spmi->dev, chip);
device_init_wakeup(&spmi->dev, 1);
mutex_init(&chip->bms_data_mutex);
mutex_init(&chip->bms_device_mutex);
mutex_init(&chip->last_soc_mutex);
mutex_init(&chip->state_change_mutex);
init_waitqueue_head(&chip->bms_wait_q);
/* read battery-id and select the battery profile */
rc = set_battery_data(chip);
if (rc) {
pr_err("Unable to read battery data %d\n", rc);
goto fail_init;
}
/* set the battery profile */
rc = config_battery_data(chip->batt_data);
if (rc) {
pr_err("Unable to config battery data %d\n", rc);
goto fail_init;
}
wakeup_source_init(&chip->vbms_lv_wake_source.source, "vbms_lv_wake");
wakeup_source_init(&chip->vbms_cv_wake_source.source, "vbms_cv_wake");
wakeup_source_init(&chip->vbms_soc_wake_source.source, "vbms_soc_wake");
INIT_DELAYED_WORK(&chip->monitor_soc_work, monitor_soc_work);
INIT_DELAYED_WORK(&chip->voltage_soc_timeout_work,
voltage_soc_timeout_work);
bms_init_defaults(chip);
bms_load_hw_defaults(chip);
if (is_battery_present(chip)) {
rc = setup_vbat_monitoring(chip);
if (rc) {
pr_err("fail to configure vbat monitoring rc=%d\n",
rc);
goto fail_setup;
}
}
rc = bms_request_irqs(chip);
if (rc) {
pr_err("error requesting bms irqs, rc = %d\n", rc);
goto fail_irq;
}
battery_insertion_check(chip);
battery_status_check(chip);
/* character device to pass data to the userspace */
rc = register_bms_char_device(chip);
if (rc) {
pr_err("Unable to regiter '/dev/vm_bms' rc=%d\n", rc);
goto fail_bms_device;
}
the_chip = chip;
calculate_initial_soc(chip);
if (chip->dt.cfg_battery_aging_comp) {
rc = calculate_initial_aging_comp(chip);
if (rc)
pr_err("Unable to calculate initial aging data rc=%d\n",
rc);
}
/* setup & register the battery power supply */
chip->bms_psy.name = "bms";
chip->bms_psy.type = POWER_SUPPLY_TYPE_BMS;
chip->bms_psy.properties = bms_power_props;
chip->bms_psy.num_properties = ARRAY_SIZE(bms_power_props);
chip->bms_psy.get_property = qpnp_vm_bms_power_get_property;
chip->bms_psy.set_property = qpnp_vm_bms_power_set_property;
chip->bms_psy.external_power_changed = qpnp_vm_bms_ext_power_changed;
chip->bms_psy.property_is_writeable = qpnp_vm_bms_property_is_writeable;
chip->bms_psy.supplied_to = qpnp_vm_bms_supplicants;
chip->bms_psy.num_supplicants = ARRAY_SIZE(qpnp_vm_bms_supplicants);
rc = power_supply_register(chip->dev, &chip->bms_psy);
if (rc < 0) {
pr_err("power_supply_register bms failed rc = %d\n", rc);
goto fail_psy;
}
chip->bms_psy_registered = true;
rc = get_battery_voltage(chip, &vbatt);
if (rc) {
pr_err("error reading vbat_sns adc channel=%d, rc=%d\n",
VBAT_SNS, rc);
goto fail_get_vtg;
}
chip->debug_root = debugfs_create_dir("qpnp_vmbms", NULL);
if (!chip->debug_root)
pr_err("Couldn't create debug dir\n");
if (chip->debug_root) {
struct dentry *ent;
ent = debugfs_create_file("bms_data", S_IFREG | S_IRUGO,
chip->debug_root, chip,
&bms_data_debugfs_ops);
if (!ent)
pr_err("Couldn't create bms_data debug file\n");
ent = debugfs_create_file("bms_config", S_IFREG | S_IRUGO,
chip->debug_root, chip,
&bms_config_debugfs_ops);
if (!ent)
pr_err("Couldn't create bms_config debug file\n");
ent = debugfs_create_file("bms_status", S_IFREG | S_IRUGO,
chip->debug_root, chip,
&bms_status_debugfs_ops);
if (!ent)
pr_err("Couldn't create bms_status debug file\n");
}
schedule_delayed_work(&chip->monitor_soc_work, 0);
/*
* schedule a work to check if the userspace vmbms module
* has registered. Fall-back to voltage-based-soc reporting
* if it has not.
*/
schedule_delayed_work(&chip->voltage_soc_timeout_work,
msecs_to_jiffies(chip->dt.cfg_voltage_soc_timeout_ms));
pr_info("probe success: soc=%d vbatt=%d ocv=%d warm_reset=%d\n",
get_prop_bms_capacity(chip), vbatt,
chip->last_ocv_uv, chip->warm_reset);
return rc;
fail_get_vtg:
power_supply_unregister(&chip->bms_psy);
fail_psy:
device_destroy(chip->bms_class, chip->dev_no);
cdev_del(&chip->bms_cdev);
unregister_chrdev_region(chip->dev_no, 1);
fail_bms_device:
chip->bms_psy_registered = false;
fail_irq:
reset_vbat_monitoring(chip);
fail_setup:
wakeup_source_trash(&chip->vbms_lv_wake_source.source);
wakeup_source_trash(&chip->vbms_cv_wake_source.source);
wakeup_source_trash(&chip->vbms_soc_wake_source.source);
fail_init:
mutex_destroy(&chip->bms_data_mutex);
mutex_destroy(&chip->last_soc_mutex);
mutex_destroy(&chip->state_change_mutex);
mutex_destroy(&chip->bms_device_mutex);
the_chip = NULL;
return rc;
}
static int qpnp_vm_bms_remove(struct spmi_device *spmi)
{
struct qpnp_bms_chip *chip = dev_get_drvdata(&spmi->dev);
cancel_delayed_work_sync(&chip->monitor_soc_work);
debugfs_remove_recursive(chip->debug_root);
device_destroy(chip->bms_class, chip->dev_no);
cdev_del(&chip->bms_cdev);
unregister_chrdev_region(chip->dev_no, 1);
reset_vbat_monitoring(chip);
wakeup_source_trash(&chip->vbms_lv_wake_source.source);
wakeup_source_trash(&chip->vbms_cv_wake_source.source);
wakeup_source_trash(&chip->vbms_soc_wake_source.source);
mutex_destroy(&chip->bms_data_mutex);
mutex_destroy(&chip->last_soc_mutex);
mutex_destroy(&chip->state_change_mutex);
mutex_destroy(&chip->bms_device_mutex);
power_supply_unregister(&chip->bms_psy);
dev_set_drvdata(&spmi->dev, NULL);
the_chip = NULL;
return 0;
}
static void process_suspend_data(struct qpnp_bms_chip *chip)
{
int rc;
mutex_lock(&chip->bms_data_mutex);
chip->suspend_data_valid = false;
memset(&chip->bms_data, 0, sizeof(chip->bms_data));
rc = read_and_populate_fifo_data(chip);
if (rc)
pr_err("Unable to read FIFO data rc=%d\n", rc);
rc = read_and_populate_acc_data(chip);
if (rc)
pr_err("Unable to read ACC_SD data rc=%d\n", rc);
rc = clear_fifo_acc_data(chip);
if (rc)
pr_err("Unable to clear FIFO/ACC data rc=%d\n", rc);
if (chip->bms_data.num_fifo || chip->bms_data.acc_count) {
pr_debug("suspend data valid\n");
chip->suspend_data_valid = true;
}
mutex_unlock(&chip->bms_data_mutex);
}
static void process_resume_data(struct qpnp_bms_chip *chip)
{
int rc, batt_temp = 0;
int old_ocv = 0;
bool ocv_updated = false;
rc = get_batt_therm(chip, &batt_temp);
if (rc < 0) {
pr_err("Unable to read batt temp, using default=%d\n",
BMS_DEFAULT_TEMP);
batt_temp = BMS_DEFAULT_TEMP;
}
mutex_lock(&chip->bms_data_mutex);
/*
* We can get a h/w OCV update when the sleep_b
* is low, which is possible when APPS is suspended.
* So check for an OCV update only in bms_resume
*/
old_ocv = chip->last_ocv_uv;
rc = read_and_update_ocv(chip, batt_temp, false);
if (rc)
pr_err("Unable to read/upadate OCV rc=%d\n", rc);
if (old_ocv != chip->last_ocv_uv) {
ocv_updated = true;
/* new OCV, clear suspended data */
chip->suspend_data_valid = false;
memset(&chip->bms_data, 0, sizeof(chip->bms_data));
chip->calculated_soc = lookup_soc_ocv(chip,
chip->last_ocv_uv, batt_temp);
pr_debug("OCV in sleep SOC=%d\n", chip->calculated_soc);
chip->last_soc_unbound = true;
chip->voltage_soc_uv = chip->last_ocv_uv;
pr_debug("update bms_psy\n");
power_supply_changed(&chip->bms_psy);
}
if (ocv_updated || chip->suspend_data_valid) {
/* there is data to be sent */
pr_debug("ocv_updated=%d suspend_data_valid=%d\n",
ocv_updated, chip->suspend_data_valid);
chip->bms_data.seq_num = chip->seq_num++;
dump_bms_data(__func__, chip);
chip->data_ready = 1;
wake_up_interruptible(&chip->bms_wait_q);
if (chip->bms_dev_open)
pm_stay_awake(chip->dev);
}
chip->suspend_data_valid = false;
mutex_unlock(&chip->bms_data_mutex);
}
static int bms_suspend(struct device *dev)
{
struct qpnp_bms_chip *chip = dev_get_drvdata(dev);
bool battery_charging = is_battery_charging(chip);
bool hi_power_state = is_hi_power_state_requested(chip);
bool charger_present = is_charger_present(chip);
bool bms_suspend_config;
/*
* Keep BMS FSM active if 'cfg_force_bms_active_on_charger' property
* is present and charger inserted. This ensures that recharge
* starts once battery SOC falls below resume_soc.
*/
bms_suspend_config = chip->dt.cfg_force_bms_active_on_charger
&& charger_present;
chip->apply_suspend_config = false;
if (!battery_charging && !hi_power_state && !bms_suspend_config)
chip->apply_suspend_config = true;
pr_debug("battery_charging=%d power_state=%s hi_power_state=0x%x apply_suspend_config=%d bms_suspend_config=%d usb_present=%d\n",
battery_charging, hi_power_state ? "hi" : "low",
chip->hi_power_state,
chip->apply_suspend_config, bms_suspend_config,
charger_present);
if (chip->apply_suspend_config) {
if (chip->dt.cfg_force_s3_on_suspend) {
pr_debug("Forcing S3 state\n");
mutex_lock(&chip->state_change_mutex);
force_fsm_state(chip, S3_STATE);
mutex_unlock(&chip->state_change_mutex);
/* Store accumulated data if any */
process_suspend_data(chip);
}
}
cancel_delayed_work_sync(&chip->monitor_soc_work);
return 0;
}
static int bms_resume(struct device *dev)
{
u8 state = 0;
int rc, monitor_soc_delay = 0;
unsigned long tm_now_sec;
struct qpnp_bms_chip *chip = dev_get_drvdata(dev);
if (chip->apply_suspend_config) {
if (chip->dt.cfg_force_s3_on_suspend) {
/*
* Update the state to S2 only if we are in S3. There is
* a possibility of being in S2 if we resumed on
* a charger insertion
*/
mutex_lock(&chip->state_change_mutex);
rc = get_fsm_state(chip, &state);
if (rc)
pr_err("Unable to get FSM state rc=%d\n", rc);
if (rc || (state == S3_STATE)) {
pr_debug("Unforcing S3 state, setting S2 state\n");
force_fsm_state(chip, S2_STATE);
}
mutex_unlock(&chip->state_change_mutex);
/*
* if we were charging while suspended, we will
* be woken up by the fifo done interrupt and no
* additional processing is needed.
*/
process_resume_data(chip);
}
}
/* Start monitor_soc_work based on when it last executed */
rc = get_current_time(&tm_now_sec);
if (rc) {
pr_err("Could not read current time: %d\n", rc);
} else {
monitor_soc_delay = get_calculation_delay_ms(chip) -
((tm_now_sec - chip->tm_sec) * 1000);
monitor_soc_delay = max(0, monitor_soc_delay);
}
pr_debug("monitor_soc_delay_sec=%d tm_now_sec=%ld chip->tm_sec=%ld\n",
monitor_soc_delay / 1000, tm_now_sec, chip->tm_sec);
schedule_delayed_work(&chip->monitor_soc_work,
msecs_to_jiffies(monitor_soc_delay));
return 0;
}
static const struct dev_pm_ops qpnp_vm_bms_pm_ops = {
.suspend = bms_suspend,
.resume = bms_resume,
};
static struct of_device_id qpnp_vm_bms_match_table[] = {
{ .compatible = QPNP_VM_BMS_DEV_NAME },
{}
};
static struct spmi_driver qpnp_vm_bms_driver = {
.probe = qpnp_vm_bms_probe,
.remove = qpnp_vm_bms_remove,
.driver = {
.name = QPNP_VM_BMS_DEV_NAME,
.owner = THIS_MODULE,
.of_match_table = qpnp_vm_bms_match_table,
.pm = &qpnp_vm_bms_pm_ops,
},
};
static int __init qpnp_vm_bms_init(void)
{
return spmi_driver_register(&qpnp_vm_bms_driver);
}
module_init(qpnp_vm_bms_init);
static void __exit qpnp_vm_bms_exit(void)
{
return spmi_driver_unregister(&qpnp_vm_bms_driver);
}
module_exit(qpnp_vm_bms_exit);
MODULE_DESCRIPTION("QPNP VM-BMS Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" QPNP_VM_BMS_DEV_NAME);
| gpl-2.0 |
redstar3894/android-gcc-4.6 | zlib/contrib/minizip/zip.c | 498 | 37353 | /* zip.c -- IO on .zip files using zlib
Version 1.01e, February 12th, 2005
27 Dec 2004 Rolf Kalbermatter
Modification to zipOpen2 to support globalComment retrieval.
Copyright (C) 1998-2005 Gilles Vollant
Read zip.h for more info
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "zlib.h"
#include "zip.h"
#ifdef STDC
# include <stddef.h>
# include <string.h>
# include <stdlib.h>
#endif
#ifdef NO_ERRNO_H
extern int errno;
#else
# include <errno.h>
#endif
#ifndef local
# define local static
#endif
/* compile with -Dlocal if your debugger can't find static symbols */
#ifndef VERSIONMADEBY
# define VERSIONMADEBY (0x0) /* platform dependent */
#endif
#ifndef Z_BUFSIZE
#define Z_BUFSIZE (16384)
#endif
#ifndef Z_MAXFILENAMEINZIP
#define Z_MAXFILENAMEINZIP (256)
#endif
#ifndef ALLOC
# define ALLOC(size) (malloc(size))
#endif
#ifndef TRYFREE
# define TRYFREE(p) {if (p) free(p);}
#endif
/*
#define SIZECENTRALDIRITEM (0x2e)
#define SIZEZIPLOCALHEADER (0x1e)
*/
/* I've found an old Unix (a SunOS 4.1.3_U1) without all SEEK_* defined.... */
#ifndef SEEK_CUR
#define SEEK_CUR 1
#endif
#ifndef SEEK_END
#define SEEK_END 2
#endif
#ifndef SEEK_SET
#define SEEK_SET 0
#endif
#ifndef DEF_MEM_LEVEL
#if MAX_MEM_LEVEL >= 8
# define DEF_MEM_LEVEL 8
#else
# define DEF_MEM_LEVEL MAX_MEM_LEVEL
#endif
#endif
const char zip_copyright[] =
" zip 1.01 Copyright 1998-2004 Gilles Vollant - http://www.winimage.com/zLibDll";
#define SIZEDATA_INDATABLOCK (4096-(4*4))
#define LOCALHEADERMAGIC (0x04034b50)
#define CENTRALHEADERMAGIC (0x02014b50)
#define ENDHEADERMAGIC (0x06054b50)
#define FLAG_LOCALHEADER_OFFSET (0x06)
#define CRC_LOCALHEADER_OFFSET (0x0e)
#define SIZECENTRALHEADER (0x2e) /* 46 */
typedef struct linkedlist_datablock_internal_s
{
struct linkedlist_datablock_internal_s* next_datablock;
uLong avail_in_this_block;
uLong filled_in_this_block;
uLong unused; /* for future use and alignment */
unsigned char data[SIZEDATA_INDATABLOCK];
} linkedlist_datablock_internal;
typedef struct linkedlist_data_s
{
linkedlist_datablock_internal* first_block;
linkedlist_datablock_internal* last_block;
} linkedlist_data;
typedef struct
{
z_stream stream; /* zLib stream structure for inflate */
int stream_initialised; /* 1 if the stream is initialised */
uInt pos_in_buffered_data; /* last written byte in buffered_data */
uLong pos_local_header; /* offset of the local header of the file
currently being written */
char* central_header; /* central header data for the current file */
uLong size_centralheader; /* size of the central header for cur file */
uLong flag; /* flag of the file currently writing */
int method; /* compression method of the file currently written */
int raw; /* 1 for directly writing raw data */
Byte buffered_data[Z_BUFSIZE];/* buffer containing compressed data to be written */
uLong dosDate;
uLong crc32;
int encrypt;
#ifndef NOCRYPT
unsigned long keys[3]; /* keys defining the pseudo-random sequence */
const unsigned long* pcrc_32_tab;
int crypt_header_size;
#endif
} curfile_info;
typedef struct
{
zlib_filefunc_def z_filefunc;
voidpf filestream; /* io structure of the zipfile */
linkedlist_data central_dir;/* datablocks with the central dir under construction */
int in_opened_file_inzip; /* 1 if a file in the zip is currently being written */
curfile_info ci; /* info on the file currently being written */
uLong begin_pos; /* position of the beginning of the zipfile */
uLong add_position_when_writting_offset;
uLong number_entry;
#ifndef NO_ADDFILEINEXISTINGZIP
char *globalcomment;
#endif
} zip_internal;
#ifndef NOCRYPT
#define INCLUDECRYPTINGCODE_IFCRYPTALLOWED
#include "crypt.h"
#endif
local linkedlist_datablock_internal* allocate_new_datablock()
{
linkedlist_datablock_internal* ldi;
ldi = (linkedlist_datablock_internal*)
ALLOC(sizeof(linkedlist_datablock_internal));
if (ldi!=NULL)
{
ldi->next_datablock = NULL ;
ldi->filled_in_this_block = 0 ;
ldi->avail_in_this_block = SIZEDATA_INDATABLOCK ;
}
return ldi;
}
local void free_datablock(ldi)
linkedlist_datablock_internal* ldi;
{
while (ldi!=NULL)
{
linkedlist_datablock_internal* ldinext = ldi->next_datablock;
TRYFREE(ldi);
ldi = ldinext;
}
}
local void init_linkedlist(ll)
linkedlist_data* ll;
{
ll->first_block = ll->last_block = NULL;
}
local void free_linkedlist(ll)
linkedlist_data* ll;
{
free_datablock(ll->first_block);
ll->first_block = ll->last_block = NULL;
}
local int add_data_in_datablock(ll,buf,len)
linkedlist_data* ll;
const void* buf;
uLong len;
{
linkedlist_datablock_internal* ldi;
const unsigned char* from_copy;
if (ll==NULL)
return ZIP_INTERNALERROR;
if (ll->last_block == NULL)
{
ll->first_block = ll->last_block = allocate_new_datablock();
if (ll->first_block == NULL)
return ZIP_INTERNALERROR;
}
ldi = ll->last_block;
from_copy = (unsigned char*)buf;
while (len>0)
{
uInt copy_this;
uInt i;
unsigned char* to_copy;
if (ldi->avail_in_this_block==0)
{
ldi->next_datablock = allocate_new_datablock();
if (ldi->next_datablock == NULL)
return ZIP_INTERNALERROR;
ldi = ldi->next_datablock ;
ll->last_block = ldi;
}
if (ldi->avail_in_this_block < len)
copy_this = (uInt)ldi->avail_in_this_block;
else
copy_this = (uInt)len;
to_copy = &(ldi->data[ldi->filled_in_this_block]);
for (i=0;i<copy_this;i++)
*(to_copy+i)=*(from_copy+i);
ldi->filled_in_this_block += copy_this;
ldi->avail_in_this_block -= copy_this;
from_copy += copy_this ;
len -= copy_this;
}
return ZIP_OK;
}
/****************************************************************************/
#ifndef NO_ADDFILEINEXISTINGZIP
/* ===========================================================================
Writes a value in LSB (little-endian) order to the given file
nbByte == 1, 2 or 4 (byte, short or long)
*/
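/*
Illustrative example (values assumed): x = 0x01020304 with nbByte = 4 is
written as the byte sequence 04 03 02 01; a value that does not fit in
nbByte bytes is saturated to all 0xff (the ZIP64 escape used below).
*/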
local int ziplocal_putValue OF((const zlib_filefunc_def* pzlib_filefunc_def,
voidpf filestream, uLong x, int nbByte));
local int ziplocal_putValue (pzlib_filefunc_def, filestream, x, nbByte)
const zlib_filefunc_def* pzlib_filefunc_def;
voidpf filestream;
uLong x;
int nbByte;
{
unsigned char buf[4];
int n;
for (n = 0; n < nbByte; n++)
{
buf[n] = (unsigned char)(x & 0xff);
x >>= 8;
}
if (x != 0)
{ /* data overflow - hack for ZIP64 (X Roche) */
for (n = 0; n < nbByte; n++)
{
buf[n] = 0xff;
}
}
if (ZWRITE(*pzlib_filefunc_def,filestream,buf,nbByte)!=(uLong)nbByte)
return ZIP_ERRNO;
else
return ZIP_OK;
}
local void ziplocal_putValue_inmemory OF((void* dest, uLong x, int nbByte));
local void ziplocal_putValue_inmemory (dest, x, nbByte)
void* dest;
uLong x;
int nbByte;
{
unsigned char* buf=(unsigned char*)dest;
int n;
for (n = 0; n < nbByte; n++) {
buf[n] = (unsigned char)(x & 0xff);
x >>= 8;
}
if (x != 0)
{ /* data overflow - hack for ZIP64 */
for (n = 0; n < nbByte; n++)
{
buf[n] = 0xff;
}
}
}
/****************************************************************************/
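/*
DOS date/time packing used below (derived from the formula in
ziplocal_TmzDateToDosDate): the high 16 bits hold the date as
day + 32*month + 512*(year - 1980) and the low 16 bits hold the time as
seconds/2 + 32*minutes + 2048*hours. Illustrative example: 2004-02-12
13:30:00 packs as ((12 + 32*2 + 512*24) << 16) | (0 + 32*30 + 2048*13).
*/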
local uLong ziplocal_TmzDateToDosDate(ptm,dosDate)
const tm_zip* ptm;
uLong dosDate;
{
uLong year = (uLong)ptm->tm_year;
if (year>1980)
year-=1980;
else if (year>80)
year-=80;
return
(uLong) (((ptm->tm_mday) + (32 * (ptm->tm_mon+1)) + (512 * year)) << 16) |
((ptm->tm_sec/2) + (32* ptm->tm_min) + (2048 * (uLong)ptm->tm_hour));
}
/****************************************************************************/
local int ziplocal_getByte OF((
const zlib_filefunc_def* pzlib_filefunc_def,
voidpf filestream,
int *pi));
local int ziplocal_getByte(pzlib_filefunc_def,filestream,pi)
const zlib_filefunc_def* pzlib_filefunc_def;
voidpf filestream;
int *pi;
{
unsigned char c;
int err = (int)ZREAD(*pzlib_filefunc_def,filestream,&c,1);
if (err==1)
{
*pi = (int)c;
return ZIP_OK;
}
else
{
if (ZERROR(*pzlib_filefunc_def,filestream))
return ZIP_ERRNO;
else
return ZIP_EOF;
}
}
/* ===========================================================================
Reads a short (2 bytes) in LSB order from the given file stream and stores it in *pX (set to 0 on error).
*/
local int ziplocal_getShort OF((
const zlib_filefunc_def* pzlib_filefunc_def,
voidpf filestream,
uLong *pX));
local int ziplocal_getShort (pzlib_filefunc_def,filestream,pX)
const zlib_filefunc_def* pzlib_filefunc_def;
voidpf filestream;
uLong *pX;
{
uLong x ;
int i;
int err;
err = ziplocal_getByte(pzlib_filefunc_def,filestream,&i);
x = (uLong)i;
if (err==ZIP_OK)
err = ziplocal_getByte(pzlib_filefunc_def,filestream,&i);
x += ((uLong)i)<<8;
if (err==ZIP_OK)
*pX = x;
else
*pX = 0;
return err;
}
local int ziplocal_getLong OF((
const zlib_filefunc_def* pzlib_filefunc_def,
voidpf filestream,
uLong *pX));
local int ziplocal_getLong (pzlib_filefunc_def,filestream,pX)
const zlib_filefunc_def* pzlib_filefunc_def;
voidpf filestream;
uLong *pX;
{
uLong x ;
int i;
int err;
err = ziplocal_getByte(pzlib_filefunc_def,filestream,&i);
x = (uLong)i;
if (err==ZIP_OK)
err = ziplocal_getByte(pzlib_filefunc_def,filestream,&i);
x += ((uLong)i)<<8;
if (err==ZIP_OK)
err = ziplocal_getByte(pzlib_filefunc_def,filestream,&i);
x += ((uLong)i)<<16;
if (err==ZIP_OK)
err = ziplocal_getByte(pzlib_filefunc_def,filestream,&i);
x += ((uLong)i)<<24;
if (err==ZIP_OK)
*pX = x;
else
*pX = 0;
return err;
}
#ifndef BUFREADCOMMENT
#define BUFREADCOMMENT (0x400)
#endif
/*
Locate the Central directory of a zipfile (at the end, just before
the global comment)
*/
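/*
The scan below walks backwards from the end of the file (at most
0xffff bytes, the maximum global comment size, plus the record itself)
looking for the end-of-central-directory signature 0x06054b50, i.e. the
byte sequence 50 4b 05 06 ("PK\005\006") checked in the inner loop.
*/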
local uLong ziplocal_SearchCentralDir OF((
const zlib_filefunc_def* pzlib_filefunc_def,
voidpf filestream));
local uLong ziplocal_SearchCentralDir(pzlib_filefunc_def,filestream)
const zlib_filefunc_def* pzlib_filefunc_def;
voidpf filestream;
{
unsigned char* buf;
uLong uSizeFile;
uLong uBackRead;
uLong uMaxBack=0xffff; /* maximum size of global comment */
uLong uPosFound=0;
if (ZSEEK(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 0)
return 0;
uSizeFile = ZTELL(*pzlib_filefunc_def,filestream);
if (uMaxBack>uSizeFile)
uMaxBack = uSizeFile;
buf = (unsigned char*)ALLOC(BUFREADCOMMENT+4);
if (buf==NULL)
return 0;
uBackRead = 4;
while (uBackRead<uMaxBack)
{
uLong uReadSize,uReadPos ;
int i;
if (uBackRead+BUFREADCOMMENT>uMaxBack)
uBackRead = uMaxBack;
else
uBackRead+=BUFREADCOMMENT;
uReadPos = uSizeFile-uBackRead ;
uReadSize = ((BUFREADCOMMENT+4) < (uSizeFile-uReadPos)) ?
(BUFREADCOMMENT+4) : (uSizeFile-uReadPos);
if (ZSEEK(*pzlib_filefunc_def,filestream,uReadPos,ZLIB_FILEFUNC_SEEK_SET)!=0)
break;
if (ZREAD(*pzlib_filefunc_def,filestream,buf,uReadSize)!=uReadSize)
break;
for (i=(int)uReadSize-3; (i--)>0;)
if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) &&
((*(buf+i+2))==0x05) && ((*(buf+i+3))==0x06))
{
uPosFound = uReadPos+i;
break;
}
if (uPosFound!=0)
break;
}
TRYFREE(buf);
return uPosFound;
}
#endif /* !NO_ADDFILEINEXISTINGZIP*/
/************************************************************/
extern zipFile ZEXPORT zipOpen2 (pathname, append, globalcomment, pzlib_filefunc_def)
const char *pathname;
int append;
zipcharpc* globalcomment;
zlib_filefunc_def* pzlib_filefunc_def;
{
zip_internal ziinit;
zip_internal* zi;
int err=ZIP_OK;
if (pzlib_filefunc_def==NULL)
fill_fopen_filefunc(&ziinit.z_filefunc);
else
ziinit.z_filefunc = *pzlib_filefunc_def;
ziinit.filestream = (*(ziinit.z_filefunc.zopen_file))
(ziinit.z_filefunc.opaque,
pathname,
(append == APPEND_STATUS_CREATE) ?
(ZLIB_FILEFUNC_MODE_READ | ZLIB_FILEFUNC_MODE_WRITE | ZLIB_FILEFUNC_MODE_CREATE) :
(ZLIB_FILEFUNC_MODE_READ | ZLIB_FILEFUNC_MODE_WRITE | ZLIB_FILEFUNC_MODE_EXISTING));
if (ziinit.filestream == NULL)
return NULL;
ziinit.begin_pos = ZTELL(ziinit.z_filefunc,ziinit.filestream);
ziinit.in_opened_file_inzip = 0;
ziinit.ci.stream_initialised = 0;
ziinit.number_entry = 0;
ziinit.add_position_when_writting_offset = 0;
init_linkedlist(&(ziinit.central_dir));
zi = (zip_internal*)ALLOC(sizeof(zip_internal));
if (zi==NULL)
{
ZCLOSE(ziinit.z_filefunc,ziinit.filestream);
return NULL;
}
/* now we add files into an existing zipfile */
# ifndef NO_ADDFILEINEXISTINGZIP
ziinit.globalcomment = NULL;
if (append == APPEND_STATUS_ADDINZIP)
{
uLong byte_before_the_zipfile;/* bytes before the zipfile (>0 for a self-extracting archive) */
uLong size_central_dir; /* size of the central directory */
uLong offset_central_dir; /* offset of start of central directory */
uLong central_pos,uL;
uLong number_disk; /* number of the current disk, used for
spanning ZIP, unsupported, always 0*/
uLong number_disk_with_CD; /* number of the disk with the central dir, used
for spanning ZIP, unsupported, always 0*/
uLong number_entry;
uLong number_entry_CD; /* total number of entries in
the central dir
(same as number_entry for a non-spanned archive) */
uLong size_comment;
central_pos = ziplocal_SearchCentralDir(&ziinit.z_filefunc,ziinit.filestream);
if (central_pos==0)
err=ZIP_ERRNO;
if (ZSEEK(ziinit.z_filefunc, ziinit.filestream,
central_pos,ZLIB_FILEFUNC_SEEK_SET)!=0)
err=ZIP_ERRNO;
/* the signature, already checked */
if (ziplocal_getLong(&ziinit.z_filefunc, ziinit.filestream,&uL)!=ZIP_OK)
err=ZIP_ERRNO;
/* number of this disk */
if (ziplocal_getShort(&ziinit.z_filefunc, ziinit.filestream,&number_disk)!=ZIP_OK)
err=ZIP_ERRNO;
/* number of the disk with the start of the central directory */
if (ziplocal_getShort(&ziinit.z_filefunc, ziinit.filestream,&number_disk_with_CD)!=ZIP_OK)
err=ZIP_ERRNO;
/* total number of entries in the central dir on this disk */
if (ziplocal_getShort(&ziinit.z_filefunc, ziinit.filestream,&number_entry)!=ZIP_OK)
err=ZIP_ERRNO;
/* total number of entries in the central dir */
if (ziplocal_getShort(&ziinit.z_filefunc, ziinit.filestream,&number_entry_CD)!=ZIP_OK)
err=ZIP_ERRNO;
if ((number_entry_CD!=number_entry) ||
(number_disk_with_CD!=0) ||
(number_disk!=0))
err=ZIP_BADZIPFILE;
/* size of the central directory */
if (ziplocal_getLong(&ziinit.z_filefunc, ziinit.filestream,&size_central_dir)!=ZIP_OK)
err=ZIP_ERRNO;
/* offset of start of central directory with respect to the
starting disk number */
if (ziplocal_getLong(&ziinit.z_filefunc, ziinit.filestream,&offset_central_dir)!=ZIP_OK)
err=ZIP_ERRNO;
/* zipfile global comment length */
if (ziplocal_getShort(&ziinit.z_filefunc, ziinit.filestream,&size_comment)!=ZIP_OK)
err=ZIP_ERRNO;
if ((central_pos<offset_central_dir+size_central_dir) &&
(err==ZIP_OK))
err=ZIP_BADZIPFILE;
if (err!=ZIP_OK)
{
ZCLOSE(ziinit.z_filefunc, ziinit.filestream);
return NULL;
}
if (size_comment>0)
{
ziinit.globalcomment = ALLOC(size_comment+1);
if (ziinit.globalcomment)
{
size_comment = ZREAD(ziinit.z_filefunc, ziinit.filestream,ziinit.globalcomment,size_comment);
ziinit.globalcomment[size_comment]=0;
}
}
byte_before_the_zipfile = central_pos -
(offset_central_dir+size_central_dir);
ziinit.add_position_when_writting_offset = byte_before_the_zipfile;
{
uLong size_central_dir_to_read = size_central_dir;
size_t buf_size = SIZEDATA_INDATABLOCK;
void* buf_read = (void*)ALLOC(buf_size);
if (ZSEEK(ziinit.z_filefunc, ziinit.filestream,
offset_central_dir + byte_before_the_zipfile,
ZLIB_FILEFUNC_SEEK_SET) != 0)
err=ZIP_ERRNO;
while ((size_central_dir_to_read>0) && (err==ZIP_OK))
{
uLong read_this = SIZEDATA_INDATABLOCK;
if (read_this > size_central_dir_to_read)
read_this = size_central_dir_to_read;
if (ZREAD(ziinit.z_filefunc, ziinit.filestream,buf_read,read_this) != read_this)
err=ZIP_ERRNO;
if (err==ZIP_OK)
err = add_data_in_datablock(&ziinit.central_dir,buf_read,
(uLong)read_this);
size_central_dir_to_read-=read_this;
}
TRYFREE(buf_read);
}
ziinit.begin_pos = byte_before_the_zipfile;
ziinit.number_entry = number_entry_CD;
if (ZSEEK(ziinit.z_filefunc, ziinit.filestream,
offset_central_dir+byte_before_the_zipfile,ZLIB_FILEFUNC_SEEK_SET)!=0)
err=ZIP_ERRNO;
}
if (globalcomment)
{
*globalcomment = ziinit.globalcomment;
}
# endif /* !NO_ADDFILEINEXISTINGZIP*/
if (err != ZIP_OK)
{
# ifndef NO_ADDFILEINEXISTINGZIP
TRYFREE(ziinit.globalcomment);
# endif /* !NO_ADDFILEINEXISTINGZIP*/
TRYFREE(zi);
return NULL;
}
else
{
*zi = ziinit;
return (zipFile)zi;
}
}
extern zipFile ZEXPORT zipOpen (pathname, append)
const char *pathname;
int append;
{
return zipOpen2(pathname,append,NULL,NULL);
}
extern int ZEXPORT zipOpenNewFileInZip3 (file, filename, zipfi,
extrafield_local, size_extrafield_local,
extrafield_global, size_extrafield_global,
comment, method, level, raw,
windowBits, memLevel, strategy,
password, crcForCrypting)
zipFile file;
const char* filename;
const zip_fileinfo* zipfi;
const void* extrafield_local;
uInt size_extrafield_local;
const void* extrafield_global;
uInt size_extrafield_global;
const char* comment;
int method;
int level;
int raw;
int windowBits;
int memLevel;
int strategy;
const char* password;
uLong crcForCrypting;
{
zip_internal* zi;
uInt size_filename;
uInt size_comment;
uInt i;
int err = ZIP_OK;
# ifdef NOCRYPT
if (password != NULL)
return ZIP_PARAMERROR;
# endif
if (file == NULL)
return ZIP_PARAMERROR;
if ((method!=0) && (method!=Z_DEFLATED))
return ZIP_PARAMERROR;
zi = (zip_internal*)file;
if (zi->in_opened_file_inzip == 1)
{
err = zipCloseFileInZip (file);
if (err != ZIP_OK)
return err;
}
if (filename==NULL)
filename="-";
if (comment==NULL)
size_comment = 0;
else
size_comment = (uInt)strlen(comment);
size_filename = (uInt)strlen(filename);
if (zipfi == NULL)
zi->ci.dosDate = 0;
else
{
if (zipfi->dosDate != 0)
zi->ci.dosDate = zipfi->dosDate;
else zi->ci.dosDate = ziplocal_TmzDateToDosDate(&zipfi->tmz_date,zipfi->dosDate);
}
zi->ci.flag = 0;
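/* bits 1-2 of the general purpose flag describe the deflate option:
   2 = maximum (levels 8-9), 4 = fast (level 2), 6 = super fast (level 1);
   bit 0 marks the entry as encrypted */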
if ((level==8) || (level==9))
zi->ci.flag |= 2;
if ((level==2))
zi->ci.flag |= 4;
if ((level==1))
zi->ci.flag |= 6;
if (password != NULL)
zi->ci.flag |= 1;
zi->ci.crc32 = 0;
zi->ci.method = method;
zi->ci.encrypt = 0;
zi->ci.stream_initialised = 0;
zi->ci.pos_in_buffered_data = 0;
zi->ci.raw = raw;
zi->ci.pos_local_header = ZTELL(zi->z_filefunc,zi->filestream) ;
zi->ci.size_centralheader = SIZECENTRALHEADER + size_filename +
size_extrafield_global + size_comment;
zi->ci.central_header = (char*)ALLOC((uInt)zi->ci.size_centralheader);
if (zi->ci.central_header == NULL)
return ZIP_INTERNALERROR;
ziplocal_putValue_inmemory(zi->ci.central_header,(uLong)CENTRALHEADERMAGIC,4);
/* version info */
ziplocal_putValue_inmemory(zi->ci.central_header+4,(uLong)VERSIONMADEBY,2);
ziplocal_putValue_inmemory(zi->ci.central_header+6,(uLong)20,2);
ziplocal_putValue_inmemory(zi->ci.central_header+8,(uLong)zi->ci.flag,2);
ziplocal_putValue_inmemory(zi->ci.central_header+10,(uLong)zi->ci.method,2);
ziplocal_putValue_inmemory(zi->ci.central_header+12,(uLong)zi->ci.dosDate,4);
ziplocal_putValue_inmemory(zi->ci.central_header+16,(uLong)0,4); /*crc*/
ziplocal_putValue_inmemory(zi->ci.central_header+20,(uLong)0,4); /*compr size*/
ziplocal_putValue_inmemory(zi->ci.central_header+24,(uLong)0,4); /*uncompr size*/
ziplocal_putValue_inmemory(zi->ci.central_header+28,(uLong)size_filename,2);
ziplocal_putValue_inmemory(zi->ci.central_header+30,(uLong)size_extrafield_global,2);
ziplocal_putValue_inmemory(zi->ci.central_header+32,(uLong)size_comment,2);
ziplocal_putValue_inmemory(zi->ci.central_header+34,(uLong)0,2); /*disk nm start*/
if (zipfi==NULL)
ziplocal_putValue_inmemory(zi->ci.central_header+36,(uLong)0,2);
else
ziplocal_putValue_inmemory(zi->ci.central_header+36,(uLong)zipfi->internal_fa,2);
if (zipfi==NULL)
ziplocal_putValue_inmemory(zi->ci.central_header+38,(uLong)0,4);
else
ziplocal_putValue_inmemory(zi->ci.central_header+38,(uLong)zipfi->external_fa,4);
ziplocal_putValue_inmemory(zi->ci.central_header+42,(uLong)zi->ci.pos_local_header- zi->add_position_when_writting_offset,4);
for (i=0;i<size_filename;i++)
*(zi->ci.central_header+SIZECENTRALHEADER+i) = *(filename+i);
for (i=0;i<size_extrafield_global;i++)
*(zi->ci.central_header+SIZECENTRALHEADER+size_filename+i) =
*(((const char*)extrafield_global)+i);
for (i=0;i<size_comment;i++)
*(zi->ci.central_header+SIZECENTRALHEADER+size_filename+
size_extrafield_global+i) = *(comment+i);
/* write the local header */
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)LOCALHEADERMAGIC,4);
if (err==ZIP_OK)
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)20,2);/* version needed to extract */
if (err==ZIP_OK)
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->ci.flag,2);
if (err==ZIP_OK)
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->ci.method,2);
if (err==ZIP_OK)
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->ci.dosDate,4);
if (err==ZIP_OK)
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); /* crc 32, unknown */
if (err==ZIP_OK)
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); /* compressed size, unknown */
if (err==ZIP_OK)
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); /* uncompressed size, unknown */
if (err==ZIP_OK)
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_filename,2);
if (err==ZIP_OK)
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_extrafield_local,2);
if ((err==ZIP_OK) && (size_filename>0))
if (ZWRITE(zi->z_filefunc,zi->filestream,filename,size_filename)!=size_filename)
err = ZIP_ERRNO;
if ((err==ZIP_OK) && (size_extrafield_local>0))
if (ZWRITE(zi->z_filefunc,zi->filestream,extrafield_local,size_extrafield_local)
!=size_extrafield_local)
err = ZIP_ERRNO;
zi->ci.stream.avail_in = (uInt)0;
zi->ci.stream.avail_out = (uInt)Z_BUFSIZE;
zi->ci.stream.next_out = zi->ci.buffered_data;
zi->ci.stream.total_in = 0;
zi->ci.stream.total_out = 0;
if ((err==ZIP_OK) && (zi->ci.method == Z_DEFLATED) && (!zi->ci.raw))
{
zi->ci.stream.zalloc = (alloc_func)0;
zi->ci.stream.zfree = (free_func)0;
zi->ci.stream.opaque = (voidpf)0;
if (windowBits>0)
windowBits = -windowBits;
err = deflateInit2(&zi->ci.stream, level,
Z_DEFLATED, windowBits, memLevel, strategy);
if (err==Z_OK)
zi->ci.stream_initialised = 1;
}
# ifndef NOCRYPT
zi->ci.crypt_header_size = 0;
if ((err==Z_OK) && (password != NULL))
{
unsigned char bufHead[RAND_HEAD_LEN];
unsigned int sizeHead;
zi->ci.encrypt = 1;
zi->ci.pcrc_32_tab = get_crc_table();
/*init_keys(password,zi->ci.keys,zi->ci.pcrc_32_tab);*/
sizeHead=crypthead(password,bufHead,RAND_HEAD_LEN,zi->ci.keys,zi->ci.pcrc_32_tab,crcForCrypting);
zi->ci.crypt_header_size = sizeHead;
if (ZWRITE(zi->z_filefunc,zi->filestream,bufHead,sizeHead) != sizeHead)
err = ZIP_ERRNO;
}
# endif
if (err==Z_OK)
zi->in_opened_file_inzip = 1;
return err;
}
extern int ZEXPORT zipOpenNewFileInZip2(file, filename, zipfi,
extrafield_local, size_extrafield_local,
extrafield_global, size_extrafield_global,
comment, method, level, raw)
zipFile file;
const char* filename;
const zip_fileinfo* zipfi;
const void* extrafield_local;
uInt size_extrafield_local;
const void* extrafield_global;
uInt size_extrafield_global;
const char* comment;
int method;
int level;
int raw;
{
return zipOpenNewFileInZip3 (file, filename, zipfi,
extrafield_local, size_extrafield_local,
extrafield_global, size_extrafield_global,
comment, method, level, raw,
-MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY,
NULL, 0);
}
extern int ZEXPORT zipOpenNewFileInZip (file, filename, zipfi,
extrafield_local, size_extrafield_local,
extrafield_global, size_extrafield_global,
comment, method, level)
zipFile file;
const char* filename;
const zip_fileinfo* zipfi;
const void* extrafield_local;
uInt size_extrafield_local;
const void* extrafield_global;
uInt size_extrafield_global;
const char* comment;
int method;
int level;
{
return zipOpenNewFileInZip2 (file, filename, zipfi,
extrafield_local, size_extrafield_local,
extrafield_global, size_extrafield_global,
comment, method, level, 0);
}
local int zipFlushWriteBuffer(zi)
zip_internal* zi;
{
int err=ZIP_OK;
if (zi->ci.encrypt != 0)
{
#ifndef NOCRYPT
uInt i;
int t;
for (i=0;i<zi->ci.pos_in_buffered_data;i++)
zi->ci.buffered_data[i] = zencode(zi->ci.keys, zi->ci.pcrc_32_tab,
zi->ci.buffered_data[i],t);
#endif
}
if (ZWRITE(zi->z_filefunc,zi->filestream,zi->ci.buffered_data,zi->ci.pos_in_buffered_data)
!=zi->ci.pos_in_buffered_data)
err = ZIP_ERRNO;
zi->ci.pos_in_buffered_data = 0;
return err;
}
extern int ZEXPORT zipWriteInFileInZip (file, buf, len)
zipFile file;
const void* buf;
unsigned len;
{
zip_internal* zi;
int err=ZIP_OK;
if (file == NULL)
return ZIP_PARAMERROR;
zi = (zip_internal*)file;
if (zi->in_opened_file_inzip == 0)
return ZIP_PARAMERROR;
zi->ci.stream.next_in = (void*)buf;
zi->ci.stream.avail_in = len;
zi->ci.crc32 = crc32(zi->ci.crc32,buf,len);
while ((err==ZIP_OK) && (zi->ci.stream.avail_in>0))
{
if (zi->ci.stream.avail_out == 0)
{
if (zipFlushWriteBuffer(zi) == ZIP_ERRNO)
err = ZIP_ERRNO;
zi->ci.stream.avail_out = (uInt)Z_BUFSIZE;
zi->ci.stream.next_out = zi->ci.buffered_data;
}
if(err != ZIP_OK)
break;
if ((zi->ci.method == Z_DEFLATED) && (!zi->ci.raw))
{
uLong uTotalOutBefore = zi->ci.stream.total_out;
err=deflate(&zi->ci.stream, Z_NO_FLUSH);
zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ;
}
else
{
uInt copy_this,i;
if (zi->ci.stream.avail_in < zi->ci.stream.avail_out)
copy_this = zi->ci.stream.avail_in;
else
copy_this = zi->ci.stream.avail_out;
for (i=0;i<copy_this;i++)
*(((char*)zi->ci.stream.next_out)+i) =
*(((const char*)zi->ci.stream.next_in)+i);
{
zi->ci.stream.avail_in -= copy_this;
zi->ci.stream.avail_out-= copy_this;
zi->ci.stream.next_in+= copy_this;
zi->ci.stream.next_out+= copy_this;
zi->ci.stream.total_in+= copy_this;
zi->ci.stream.total_out+= copy_this;
zi->ci.pos_in_buffered_data += copy_this;
}
}
}
return err;
}
extern int ZEXPORT zipCloseFileInZipRaw (file, uncompressed_size, crc32)
zipFile file;
uLong uncompressed_size;
uLong crc32;
{
zip_internal* zi;
uLong compressed_size;
int err=ZIP_OK;
if (file == NULL)
return ZIP_PARAMERROR;
zi = (zip_internal*)file;
if (zi->in_opened_file_inzip == 0)
return ZIP_PARAMERROR;
zi->ci.stream.avail_in = 0;
if ((zi->ci.method == Z_DEFLATED) && (!zi->ci.raw))
while (err==ZIP_OK)
{
uLong uTotalOutBefore;
if (zi->ci.stream.avail_out == 0)
{
if (zipFlushWriteBuffer(zi) == ZIP_ERRNO)
err = ZIP_ERRNO;
zi->ci.stream.avail_out = (uInt)Z_BUFSIZE;
zi->ci.stream.next_out = zi->ci.buffered_data;
}
uTotalOutBefore = zi->ci.stream.total_out;
err=deflate(&zi->ci.stream, Z_FINISH);
zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ;
}
if (err==Z_STREAM_END)
err=ZIP_OK; /* this is normal */
if ((zi->ci.pos_in_buffered_data>0) && (err==ZIP_OK))
if (zipFlushWriteBuffer(zi)==ZIP_ERRNO)
err = ZIP_ERRNO;
if ((zi->ci.method == Z_DEFLATED) && (!zi->ci.raw))
{
err=deflateEnd(&zi->ci.stream);
zi->ci.stream_initialised = 0;
}
if (!zi->ci.raw)
{
crc32 = (uLong)zi->ci.crc32;
uncompressed_size = (uLong)zi->ci.stream.total_in;
}
compressed_size = (uLong)zi->ci.stream.total_out;
# ifndef NOCRYPT
compressed_size += zi->ci.crypt_header_size;
# endif
ziplocal_putValue_inmemory(zi->ci.central_header+16,crc32,4); /*crc*/
ziplocal_putValue_inmemory(zi->ci.central_header+20,
compressed_size,4); /*compr size*/
if (zi->ci.stream.data_type == Z_ASCII)
ziplocal_putValue_inmemory(zi->ci.central_header+36,(uLong)Z_ASCII,2);
ziplocal_putValue_inmemory(zi->ci.central_header+24,
uncompressed_size,4); /*uncompr size*/
if (err==ZIP_OK)
err = add_data_in_datablock(&zi->central_dir,zi->ci.central_header,
(uLong)zi->ci.size_centralheader);
free(zi->ci.central_header);
if (err==ZIP_OK)
{
long cur_pos_inzip = ZTELL(zi->z_filefunc,zi->filestream);
if (ZSEEK(zi->z_filefunc,zi->filestream,
zi->ci.pos_local_header + 14,ZLIB_FILEFUNC_SEEK_SET)!=0)
err = ZIP_ERRNO;
if (err==ZIP_OK)
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,crc32,4); /* crc 32, unknown */
if (err==ZIP_OK) /* compressed size, unknown */
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,compressed_size,4);
if (err==ZIP_OK) /* uncompressed size, unknown */
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,uncompressed_size,4);
if (ZSEEK(zi->z_filefunc,zi->filestream,
cur_pos_inzip,ZLIB_FILEFUNC_SEEK_SET)!=0)
err = ZIP_ERRNO;
}
zi->number_entry ++;
zi->in_opened_file_inzip = 0;
return err;
}
extern int ZEXPORT zipCloseFileInZip (file)
zipFile file;
{
return zipCloseFileInZipRaw (file,0,0);
}
extern int ZEXPORT zipClose (file, global_comment)
zipFile file;
const char* global_comment;
{
zip_internal* zi;
int err = 0;
uLong size_centraldir = 0;
uLong centraldir_pos_inzip;
uInt size_global_comment;
if (file == NULL)
return ZIP_PARAMERROR;
zi = (zip_internal*)file;
if (zi->in_opened_file_inzip == 1)
{
err = zipCloseFileInZip (file);
}
#ifndef NO_ADDFILEINEXISTINGZIP
if (global_comment==NULL)
global_comment = zi->globalcomment;
#endif
if (global_comment==NULL)
size_global_comment = 0;
else
size_global_comment = (uInt)strlen(global_comment);
centraldir_pos_inzip = ZTELL(zi->z_filefunc,zi->filestream);
if (err==ZIP_OK)
{
linkedlist_datablock_internal* ldi = zi->central_dir.first_block ;
while (ldi!=NULL)
{
if ((err==ZIP_OK) && (ldi->filled_in_this_block>0))
if (ZWRITE(zi->z_filefunc,zi->filestream,
ldi->data,ldi->filled_in_this_block)
!=ldi->filled_in_this_block )
err = ZIP_ERRNO;
size_centraldir += ldi->filled_in_this_block;
ldi = ldi->next_datablock;
}
}
free_datablock(zi->central_dir.first_block);
if (err==ZIP_OK) /* Magic End */
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)ENDHEADERMAGIC,4);
if (err==ZIP_OK) /* number of this disk */
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,2);
if (err==ZIP_OK) /* number of the disk with the start of the central directory */
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,2);
if (err==ZIP_OK) /* total number of entries in the central dir on this disk */
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->number_entry,2);
if (err==ZIP_OK) /* total number of entries in the central dir */
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->number_entry,2);
if (err==ZIP_OK) /* size of the central directory */
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_centraldir,4);
if (err==ZIP_OK) /* offset of start of central directory with respect to the
starting disk number */
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,
(uLong)(centraldir_pos_inzip - zi->add_position_when_writting_offset),4);
if (err==ZIP_OK) /* zipfile comment length */
err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_global_comment,2);
if ((err==ZIP_OK) && (size_global_comment>0))
if (ZWRITE(zi->z_filefunc,zi->filestream,
global_comment,size_global_comment) != size_global_comment)
err = ZIP_ERRNO;
if (ZCLOSE(zi->z_filefunc,zi->filestream) != 0)
if (err == ZIP_OK)
err = ZIP_ERRNO;
#ifndef NO_ADDFILEINEXISTINGZIP
TRYFREE(zi->globalcomment);
#endif
TRYFREE(zi);
return err;
}
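/* A minimal usage sketch for the write API defined above: create an archive,
 * add one deflated entry, write its payload and close everything. The archive
 * name, entry name and payload are illustrative only and error handling is
 * reduced to early bail-out; this helper is not part of the library itself. */
static int example_write_zip(void)
{
    const char data[] = "hello from the zip writer";
    zipFile zf = zipOpen("example.zip", APPEND_STATUS_CREATE);
    if (zf == NULL)
        return ZIP_ERRNO;
    if (zipOpenNewFileInZip(zf, "hello.txt", NULL, NULL, 0, NULL, 0,
                            "example entry", Z_DEFLATED,
                            Z_DEFAULT_COMPRESSION) != ZIP_OK ||
        zipWriteInFileInZip(zf, data, (unsigned)(sizeof(data) - 1)) != ZIP_OK ||
        zipCloseFileInZip(zf) != ZIP_OK)
    {
        zipClose(zf, NULL);
        return ZIP_ERRNO;
    }
    return zipClose(zf, "example archive");
}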
| gpl-2.0 |
jstotero/Old_Cucciolone | drivers/net/wireless/ath/ath9k/gpio.c | 754 | 12154 | /*
* Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "ath9k.h"
/********************************/
/* LED functions */
/********************************/
static void ath_led_blink_work(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc,
ath_led_blink_work.work);
if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
return;
if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
(sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
else
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
(sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
ieee80211_queue_delayed_work(sc->hw,
&sc->ath_led_blink_work,
(sc->sc_flags & SC_OP_LED_ON) ?
msecs_to_jiffies(sc->led_off_duration) :
msecs_to_jiffies(sc->led_on_duration));
sc->led_on_duration = sc->led_on_cnt ?
max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
ATH_LED_ON_DURATION_IDLE;
sc->led_off_duration = sc->led_off_cnt ?
max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
ATH_LED_OFF_DURATION_IDLE;
sc->led_on_cnt = sc->led_off_cnt = 0;
if (sc->sc_flags & SC_OP_LED_ON)
sc->sc_flags &= ~SC_OP_LED_ON;
else
sc->sc_flags |= SC_OP_LED_ON;
}
static void ath_led_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
struct ath_softc *sc = led->sc;
switch (brightness) {
case LED_OFF:
if (led->led_type == ATH_LED_ASSOC ||
led->led_type == ATH_LED_RADIO) {
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
(led->led_type == ATH_LED_RADIO));
sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
if (led->led_type == ATH_LED_RADIO)
sc->sc_flags &= ~SC_OP_LED_ON;
} else {
sc->led_off_cnt++;
}
break;
case LED_FULL:
if (led->led_type == ATH_LED_ASSOC) {
sc->sc_flags |= SC_OP_LED_ASSOCIATED;
ieee80211_queue_delayed_work(sc->hw,
&sc->ath_led_blink_work, 0);
} else if (led->led_type == ATH_LED_RADIO) {
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
sc->sc_flags |= SC_OP_LED_ON;
} else {
sc->led_on_cnt++;
}
break;
default:
break;
}
}
static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
char *trigger)
{
int ret;
led->sc = sc;
led->led_cdev.name = led->name;
led->led_cdev.default_trigger = trigger;
led->led_cdev.brightness_set = ath_led_brightness;
ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
if (ret)
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
"Failed to register led:%s", led->name);
else
led->registered = 1;
return ret;
}
static void ath_unregister_led(struct ath_led *led)
{
if (led->registered) {
led_classdev_unregister(&led->led_cdev);
led->registered = 0;
}
}
void ath_deinit_leds(struct ath_softc *sc)
{
ath_unregister_led(&sc->assoc_led);
sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
ath_unregister_led(&sc->tx_led);
ath_unregister_led(&sc->rx_led);
ath_unregister_led(&sc->radio_led);
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
}
void ath_init_leds(struct ath_softc *sc)
{
char *trigger;
int ret;
if (AR_SREV_9287(sc->sc_ah))
sc->sc_ah->led_pin = ATH_LED_PIN_9287;
else
sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
/* Configure gpio 1 for output */
ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
/* LED off, active low */
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
trigger = ieee80211_get_radio_led_name(sc->hw);
snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
"ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
ret = ath_register_led(sc, &sc->radio_led, trigger);
sc->radio_led.led_type = ATH_LED_RADIO;
if (ret)
goto fail;
trigger = ieee80211_get_assoc_led_name(sc->hw);
snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
"ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
ret = ath_register_led(sc, &sc->assoc_led, trigger);
sc->assoc_led.led_type = ATH_LED_ASSOC;
if (ret)
goto fail;
trigger = ieee80211_get_tx_led_name(sc->hw);
snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
"ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
ret = ath_register_led(sc, &sc->tx_led, trigger);
sc->tx_led.led_type = ATH_LED_TX;
if (ret)
goto fail;
trigger = ieee80211_get_rx_led_name(sc->hw);
snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
"ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
ret = ath_register_led(sc, &sc->rx_led, trigger);
sc->rx_led.led_type = ATH_LED_RX;
if (ret)
goto fail;
return;
fail:
cancel_delayed_work_sync(&sc->ath_led_blink_work);
ath_deinit_leds(sc);
}
/*******************/
/* Rfkill */
/*******************/
static bool ath_is_rfkill_set(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
ah->rfkill_polarity;
}
void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
bool blocked = !!ath_is_rfkill_set(sc);
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
}
void ath_start_rfkill_poll(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
wiphy_rfkill_start_polling(sc->hw->wiphy);
}
/******************/
/* BTCOEX */
/******************/
/*
* Detects if there is any priority bt traffic
*/
static void ath_detect_bt_priority(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah;
if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
btcoex->bt_priority_cnt++;
if (time_after(jiffies, btcoex->bt_priority_time +
msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
/* Detect if colocated bt started scanning */
if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
"BT scan detected");
sc->sc_flags |= (SC_OP_BT_SCAN |
SC_OP_BT_PRIORITY_DETECTED);
} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
"BT priority traffic detected");
sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
}
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
}
}
/*
* Configures appropriate weight based on stomp type.
*/
static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
enum ath_stomp_type stomp_type)
{
struct ath_hw *ah = sc->sc_ah;
switch (stomp_type) {
case ATH_BTCOEX_STOMP_ALL:
ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
AR_STOMP_ALL_WLAN_WGHT);
break;
case ATH_BTCOEX_STOMP_LOW:
ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
AR_STOMP_LOW_WLAN_WGHT);
break;
case ATH_BTCOEX_STOMP_NONE:
ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
AR_STOMP_NONE_WLAN_WGHT);
break;
default:
ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
"Invalid Stomptype\n");
break;
}
ath9k_hw_btcoex_enable(ah);
}
static void ath9k_gen_timer_start(struct ath_hw *ah,
struct ath_gen_timer *timer,
u32 timer_next,
u32 timer_period)
{
ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
ath9k_hw_set_interrupts(ah, 0);
ah->imask |= ATH9K_INT_GENTIMER;
ath9k_hw_set_interrupts(ah, ah->imask);
}
}
static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
{
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
ath9k_hw_gen_timer_stop(ah, timer);
/* if no timer is enabled, turn off interrupt mask */
if (timer_table->timer_mask.val == 0) {
ath9k_hw_set_interrupts(ah, 0);
ah->imask &= ~ATH9K_INT_GENTIMER;
ath9k_hw_set_interrupts(ah, ah->imask);
}
}
/*
* This is the master bt coex timer which runs every 45 ms;
* bt traffic is given priority during 55% of this period
* while wlan gets the remaining 45%.
*/
static void ath_btcoex_period_timer(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *) data;
struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex;
u32 timer_period;
bool is_btscan;
ath_detect_bt_priority(sc);
is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
spin_lock_bh(&btcoex->btcoex_lock);
ath9k_btcoex_bt_stomp(sc, is_btscan ? ATH_BTCOEX_STOMP_ALL :
btcoex->bt_stomp_type);
spin_unlock_bh(&btcoex->btcoex_lock);
if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
if (btcoex->hw_timer_enabled)
ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
timer_period = is_btscan ? btcoex->btscan_no_stomp :
btcoex->btcoex_no_stomp;
ath9k_gen_timer_start(ah,
btcoex->no_stomp_timer,
(ath9k_hw_gettsf32(ah) +
timer_period), timer_period * 10);
btcoex->hw_timer_enabled = true;
}
mod_timer(&btcoex->period_timer, jiffies +
msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
}
/*
* Generic tsf based hw timer which configures weight
* registers to time slice between wlan and bt traffic
*/
static void ath_btcoex_no_stomp_timer(void *arg)
{
struct ath_softc *sc = (struct ath_softc *)arg;
struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex;
bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
"no stomp timer running\n");
spin_lock_bh(&btcoex->btcoex_lock);
if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
spin_unlock_bh(&btcoex->btcoex_lock);
}
int ath_init_btcoex_timer(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
(unsigned long) sc);
spin_lock_init(&btcoex->btcoex_lock);
btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
ath_btcoex_no_stomp_timer,
ath_btcoex_no_stomp_timer,
(void *) sc, AR_FIRST_NDP_TIMER);
if (!btcoex->no_stomp_timer)
return -ENOMEM;
return 0;
}
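/*
 * Worked example of the duty-cycle arithmetic above. The concrete macro
 * values are assumptions inferred from the "45 ms" / "55%" figures in the
 * period-timer comment, not read from this file:
 *
 *   btcoex_period   = 45 * 1000                  = 45000 us
 *   btcoex_no_stomp = (100 - 55) * 45000 / 100   = 20250 us
 *
 * i.e. wlan gets roughly 20.25 ms (45%) of every 45 ms period during which
 * bt traffic is not stomped.
 */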
/*
* (Re)start btcoex timers
*/
void ath9k_btcoex_timer_resume(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah;
ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
"Starting btcoex timers");
/* make sure duty cycle timer is also stopped when resuming */
if (btcoex->hw_timer_enabled)
ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
mod_timer(&btcoex->period_timer, jiffies);
}
/*
* Pause btcoex timer and bt duty cycle timer
*/
void ath9k_btcoex_timer_pause(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah;
del_timer_sync(&btcoex->period_timer);
if (btcoex->hw_timer_enabled)
ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
btcoex->hw_timer_enabled = false;
}
| gpl-2.0 |
V-KING/g3_kernel | arch/x86/mm/ioremap.c | 754 | 15768 | /*
* Re-map IO memory to kernel address space so that we can access it.
* This is needed for high PCI addresses that aren't mapped in the
* 640k-1MB IO memory area on PC's
*
* (C) Copyright 1995 1996 Linus Torvalds
*/
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>
#include "physaddr.h"
/*
* Fix up the linear direct mapping of the kernel to avoid cache attribute
* conflicts.
*/
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
unsigned long prot_val)
{
unsigned long nrpages = size >> PAGE_SHIFT;
int err;
switch (prot_val) {
case _PAGE_CACHE_UC:
default:
err = _set_memory_uc(vaddr, nrpages);
break;
case _PAGE_CACHE_WC:
err = _set_memory_wc(vaddr, nrpages);
break;
case _PAGE_CACHE_WB:
err = _set_memory_wb(vaddr, nrpages);
break;
}
return err;
}
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
void *arg)
{
unsigned long i;
for (i = 0; i < nr_pages; ++i)
if (pfn_valid(start_pfn + i) &&
!PageReserved(pfn_to_page(start_pfn + i)))
return 1;
WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
return 0;
}
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses
* directly.
*
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
unsigned long size, unsigned long prot_val, void *caller)
{
unsigned long offset, vaddr;
resource_size_t pfn, last_pfn, last_addr;
const resource_size_t unaligned_phys_addr = phys_addr;
const unsigned long unaligned_size = size;
struct vm_struct *area;
unsigned long new_prot_val;
pgprot_t prot;
int retval;
void __iomem *ret_addr;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;
if (!phys_addr_valid(phys_addr)) {
printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
(unsigned long long)phys_addr);
WARN_ON_ONCE(1);
return NULL;
}
/*
* Don't remap the low PCI/ISA area, it's always mapped..
*/
if (is_ISA_range(phys_addr, last_addr))
return (__force void __iomem *)phys_to_virt(phys_addr);
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
pfn = phys_addr >> PAGE_SHIFT;
last_pfn = last_addr >> PAGE_SHIFT;
if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
__ioremap_check_ram) == 1)
return NULL;
/*
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PHYSICAL_PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
prot_val, &new_prot_val);
if (retval) {
printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
return NULL;
}
if (prot_val != new_prot_val) {
if (!is_new_memtype_allowed(phys_addr, size,
prot_val, new_prot_val)) {
printk(KERN_ERR
"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
(unsigned long long)phys_addr,
(unsigned long long)(phys_addr + size),
prot_val, new_prot_val);
goto err_free_memtype;
}
prot_val = new_prot_val;
}
switch (prot_val) {
case _PAGE_CACHE_UC:
default:
prot = PAGE_KERNEL_IO_NOCACHE;
break;
case _PAGE_CACHE_UC_MINUS:
prot = PAGE_KERNEL_IO_UC_MINUS;
break;
case _PAGE_CACHE_WC:
prot = PAGE_KERNEL_IO_WC;
break;
case _PAGE_CACHE_WB:
prot = PAGE_KERNEL_IO;
break;
}
/*
* Ok, go for it..
*/
area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (!area)
goto err_free_memtype;
area->phys_addr = phys_addr;
vaddr = (unsigned long) area->addr;
if (kernel_map_sync_memtype(phys_addr, size, prot_val))
goto err_free_area;
if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
goto err_free_area;
ret_addr = (void __iomem *) (vaddr + offset);
mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
/*
* Check if the request spans more than any BAR in the iomem resource
* tree.
*/
WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");
return ret_addr;
err_free_area:
free_vm_area(area);
err_free_memtype:
free_memtype(phys_addr, phys_addr + size);
return NULL;
}
/**
* ioremap_nocache - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
* ioremap_nocache performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*
* This version of ioremap ensures that the memory is marked uncachable
* on the CPU as well as honouring existing caching rules from things like
* the PCI bus. Note that there are other caches and buffers on many
* busses. In particular driver authors should read up on PCI writes
*
* It's useful if some control registers are in such an area and
* write combining or read caching is not desirable:
*
* Must be freed with iounmap.
*/
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
/*
* Ideally, this should be:
* pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
*
* Till we fix all X drivers to use ioremap_wc(), we will use
* UC MINUS.
*/
unsigned long val = _PAGE_CACHE_UC_MINUS;
return __ioremap_caller(phys_addr, size, val,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
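/*
 * Minimal usage sketch for the ioremap_nocache()/iounmap() pair exported
 * here. The physical base address and the register offset are made-up
 * values; a real driver would take them from its PCI BAR or platform
 * resource.
 */
static inline u32 example_read_mmio_reg(void)
{
    void __iomem *regs;
    u32 val = 0;

    /* hypothetical MMIO window: one page at physical 0xfed00000 */
    regs = ioremap_nocache(0xfed00000, PAGE_SIZE);
    if (!regs)
        return 0;

    val = readl(regs + 0x10);   /* read an illustrative register */
    iounmap(regs);              /* every successful ioremap needs an iounmap */
    return val;
}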
/**
* ioremap_wc - map memory into CPU space write combined
* @offset: bus address of the memory
* @size: size of the resource to map
*
* This version of ioremap ensures that the memory is marked write combining.
* Write combining allows faster writes to some hardware devices.
*
* Must be freed with iounmap.
*/
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
if (pat_enabled)
return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
__builtin_return_address(0));
else
return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
unsigned long prot_val)
{
return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
/**
* iounmap - Free a IO remapping
* @addr: virtual address from ioremap_*
*
* Caller must ensure there is only one unmapping for the same pointer.
*/
void iounmap(volatile void __iomem *addr)
{
struct vm_struct *p, *o;
if ((void __force *)addr <= high_memory)
return;
/*
* __ioremap special-cases the PCI/ISA range by not instantiating a
* vm_area and by simply returning an address into the kernel mapping
* of ISA space. So handle that here.
*/
if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
(void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
return;
addr = (volatile void __iomem *)
(PAGE_MASK & (unsigned long __force)addr);
mmiotrace_iounmap(addr);
/* Use the vm area unlocked, assuming the caller
ensures there isn't another iounmap for the same address
in parallel. Reuse of the virtual address is prevented by
leaving it in the global lists until we're done with it.
cpa takes care of the direct mappings. */
read_lock(&vmlist_lock);
for (p = vmlist; p; p = p->next) {
if (p->addr == (void __force *)addr)
break;
}
read_unlock(&vmlist_lock);
if (!p) {
printk(KERN_ERR "iounmap: bad address %p\n", addr);
dump_stack();
return;
}
free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
/* Finally remove it */
o = remove_vm_area((void __force *)addr);
BUG_ON(p != o || o == NULL);
kfree(p);
}
EXPORT_SYMBOL(iounmap);
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
void *xlate_dev_mem_ptr(unsigned long phys)
{
void *addr;
unsigned long start = phys & PAGE_MASK;
/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
if (page_is_ram(start >> PAGE_SHIFT))
return __va(phys);
addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
if (addr)
addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
return addr;
}
void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
if (page_is_ram(phys >> PAGE_SHIFT))
return;
iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
return;
}
static int __initdata early_ioremap_debug;
static int __init early_ioremap_debug_setup(char *str)
{
early_ioremap_debug = 1;
return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
/* Don't assume we're using swapper_pg_dir at this point */
pgd_t *base = __va(read_cr3());
pgd_t *pgd = &base[pgd_index(addr)];
pud_t *pud = pud_offset(pgd, addr);
pmd_t *pmd = pmd_offset(pud, addr);
return pmd;
}
static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
return &bm_pte[pte_index(addr)];
}
bool __init is_early_ioremap_ptep(pte_t *ptep)
{
return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
void __init early_ioremap_init(void)
{
pmd_t *pmd;
int i;
if (early_ioremap_debug)
printk(KERN_INFO "early_ioremap_init()\n");
for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
memset(bm_pte, 0, sizeof(bm_pte));
pmd_populate_kernel(&init_mm, pmd, bm_pte);
/*
* The boot-ioremap range spans multiple pmds, for which
* we are not prepared:
*/
#define __FIXADDR_TOP (-PAGE_SIZE)
BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
WARN_ON(1);
printk(KERN_WARNING "pmd %p != %p\n",
pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
fix_to_virt(FIX_BTMAP_BEGIN));
printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
fix_to_virt(FIX_BTMAP_END));
printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
FIX_BTMAP_BEGIN);
}
}
void __init early_ioremap_reset(void)
{
after_paging_init = 1;
}
static void __init __early_set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
unsigned long addr = __fix_to_virt(idx);
pte_t *pte;
if (idx >= __end_of_fixed_addresses) {
BUG();
return;
}
pte = early_ioremap_pte(addr);
if (pgprot_val(flags))
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
else
pte_clear(&init_mm, addr, pte);
__flush_tlb_one(addr);
}
static inline void __init early_set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t prot)
{
if (after_paging_init)
__set_fixmap(idx, phys, prot);
else
__early_set_fixmap(idx, phys, prot);
}
static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
if (after_paging_init)
clear_fixmap(idx);
else
__early_set_fixmap(idx, 0, __pgprot(0));
}
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
void __init fixup_early_ioremap(void)
{
int i;
for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
if (prev_map[i]) {
WARN_ON(1);
break;
}
}
early_ioremap_init();
}
static int __init check_early_ioremap_leak(void)
{
int count = 0;
int i;
for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
if (prev_map[i])
count++;
if (!count)
return 0;
WARN(1, KERN_WARNING
"Debug warning: early ioremap leak of %d areas detected.\n",
count);
printk(KERN_WARNING
"please boot with early_ioremap_debug and report the dmesg.\n");
return 1;
}
late_initcall(check_early_ioremap_leak);
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
unsigned long offset;
resource_size_t last_addr;
unsigned int nrpages;
enum fixed_addresses idx0, idx;
int i, slot;
WARN_ON(system_state != SYSTEM_BOOTING);
slot = -1;
for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
if (!prev_map[i]) {
slot = i;
break;
}
}
if (slot < 0) {
printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n",
(u64)phys_addr, size);
WARN_ON(1);
return NULL;
}
if (early_ioremap_debug) {
printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
(u64)phys_addr, size, slot);
dump_stack();
}
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr) {
WARN_ON(1);
return NULL;
}
prev_size[slot] = size;
/*
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr + 1) - phys_addr;
/*
* Mappings have to fit in the FIX_BTMAP area.
*/
nrpages = size >> PAGE_SHIFT;
if (nrpages > NR_FIX_BTMAPS) {
WARN_ON(1);
return NULL;
}
/*
* Ok, go for it..
*/
idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
idx = idx0;
while (nrpages > 0) {
early_set_fixmap(idx, phys_addr, prot);
phys_addr += PAGE_SIZE;
--idx;
--nrpages;
}
if (early_ioremap_debug)
printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);
prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
return prev_map[slot];
}
/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}
/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
unsigned long virt_addr;
unsigned long offset;
unsigned int nrpages;
enum fixed_addresses idx;
int i, slot;
slot = -1;
for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
if (prev_map[i] == addr) {
slot = i;
break;
}
}
if (slot < 0) {
printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
addr, size);
WARN_ON(1);
return;
}
if (prev_size[slot] != size) {
printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
addr, size, slot, prev_size[slot]);
WARN_ON(1);
return;
}
if (early_ioremap_debug) {
printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
size, slot);
dump_stack();
}
virt_addr = (unsigned long)addr;
if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
WARN_ON(1);
return;
}
offset = virt_addr & ~PAGE_MASK;
nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
while (nrpages > 0) {
early_clear_fixmap(idx);
--idx;
--nrpages;
}
prev_map[slot] = NULL;
}
| gpl-2.0 |
viaembedded/vab820-kernel-bsp-old | drivers/infiniband/core/umem_rbtree.c | 1266 | 3136 | /*
* Copyright (c) 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <rdma/ib_umem_odp.h>
/*
* The ib_umem list keeps track of memory regions for which the HW
* device requested to receive notification when the related memory
* mapping is changed.
*
* ib_umem_lock protects the list.
*/
static inline u64 node_start(struct umem_odp_node *n)
{
struct ib_umem_odp *umem_odp =
container_of(n, struct ib_umem_odp, interval_tree);
return ib_umem_start(umem_odp->umem);
}
/* Note that the representation of the intervals in the interval tree
* considers the ending point as contained in the interval, while the
* function ib_umem_end returns the first address which is not contained
* in the umem.
*/
static inline u64 node_last(struct umem_odp_node *n)
{
struct ib_umem_odp *umem_odp =
container_of(n, struct ib_umem_odp, interval_tree);
return ib_umem_end(umem_odp->umem) - 1;
}
INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
node_start, node_last, , rbt_ib_umem)
/* @last is not a part of the interval. See comment for function
* node_last.
*/
int rbt_ib_umem_for_each_in_range(struct rb_root *root,
u64 start, u64 last,
umem_call_back cb,
void *cookie)
{
int ret_val = 0;
struct umem_odp_node *node;
struct ib_umem_odp *umem;
if (unlikely(start == last))
return ret_val;
for (node = rbt_ib_umem_iter_first(root, start, last - 1); node;
node = rbt_ib_umem_iter_next(node, start, last - 1)) {
umem = container_of(node, struct ib_umem_odp, interval_tree);
ret_val = cb(umem->umem, start, last, cookie) || ret_val;
}
return ret_val;
}
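/*
 * Illustrative caller for the range iterator above. The callback and the
 * address range handling are hypothetical; a real user (for instance an
 * MMU notifier handler) would pass its own invalidation callback and cookie.
 */
static int example_invalidate_cb(struct ib_umem *item, u64 start, u64 end,
                                 void *cookie)
{
    /* pretend to invalidate the part of @item overlapping [start, end) */
    return 0;
}

static void example_invalidate_range(struct rb_root *root, u64 start, u64 end)
{
    rbt_ib_umem_for_each_in_range(root, start, end,
                                  example_invalidate_cb, NULL);
}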
| gpl-2.0 |
martyborya/htc-kernel-msm7x30 | arch/arm/plat-stmp3xxx/core.c | 1778 | 3168 | /*
* Freescale STMP37XX/STMP378X core routines
*
* Embedded Alley Solutions, Inc <source@embeddedalley.com>
*
* Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
*/
/*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <mach/stmp3xxx.h>
#include <mach/platform.h>
#include <mach/dma.h>
#include <mach/regs-clkctrl.h>
static int __stmp3xxx_reset_block(void __iomem *hwreg, int just_enable)
{
u32 c;
int timeout;
/* the software reset of an IP block is done in several steps:
- clear SFTRST and wait until the block is enabled;
- clear clock gating (the CLKGATE bit);
- set SFTRST again and wait until the block is in reset;
- clear SFTRST and wait for the reset to complete.
*/
c = __raw_readl(hwreg);
c &= ~(1<<31); /* clear SFTRST */
__raw_writel(c, hwreg);
for (timeout = 1000000; timeout > 0; timeout--)
/* still in SFTRST state ? */
if ((__raw_readl(hwreg) & (1<<31)) == 0)
break;
if (timeout <= 0) {
printk(KERN_ERR"%s(%p): timeout when enabling\n",
__func__, hwreg);
return -ETIME;
}
c = __raw_readl(hwreg);
c &= ~(1<<30); /* clear CLKGATE */
__raw_writel(c, hwreg);
if (!just_enable) {
c = __raw_readl(hwreg);
c |= (1<<31); /* now again set SFTRST */
__raw_writel(c, hwreg);
for (timeout = 1000000; timeout > 0; timeout--)
/* poll until CLKGATE set */
if (__raw_readl(hwreg) & (1<<30))
break;
if (timeout <= 0) {
printk(KERN_ERR"%s(%p): timeout when resetting\n",
__func__, hwreg);
return -ETIME;
}
c = __raw_readl(hwreg);
c &= ~(1<<31); /* clear SFTRST */
__raw_writel(c, hwreg);
for (timeout = 1000000; timeout > 0; timeout--)
/* still in SFTRST state ? */
if ((__raw_readl(hwreg) & (1<<31)) == 0)
break;
if (timeout <= 0) {
printk(KERN_ERR"%s(%p): timeout when enabling "
"after reset\n", __func__, hwreg);
return -ETIME;
}
c = __raw_readl(hwreg);
c &= ~(1<<30); /* clear CLKGATE */
__raw_writel(c, hwreg);
}
for (timeout = 1000000; timeout > 0; timeout--)
/* still clock gated ? */
if ((__raw_readl(hwreg) & (1<<30)) == 0)
break;
if (timeout <= 0) {
printk(KERN_ERR"%s(%p): timeout when unclockgating\n",
__func__, hwreg);
return -ETIME;
}
return 0;
}
int stmp3xxx_reset_block(void __iomem *hwreg, int just_enable)
{
int try = 10;
int r;
while (try--) {
r = __stmp3xxx_reset_block(hwreg, just_enable);
if (!r)
break;
pr_debug("%s: try %d failed\n", __func__, 10 - try);
}
return r;
}
EXPORT_SYMBOL(stmp3xxx_reset_block);
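/*
 * Illustrative call sites for the helper exported above; the register
 * pointer is hypothetical and would normally be the CTRL register of the
 * block a driver owns.
 */
static inline void example_use_reset_block(void __iomem *block_ctrl)
{
    /* full soft-reset sequence, leaving the block enabled and ungated */
    stmp3xxx_reset_block(block_ctrl, 0);

    /* or: just ungate the clock and bring the block out of reset */
    stmp3xxx_reset_block(block_ctrl, 1);
}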
struct platform_device stmp3xxx_dbguart = {
.name = "stmp3xxx-dbguart",
.id = -1,
};
void __init stmp3xxx_init(void)
{
/* Turn off auto-slow and other tricks */
stmp3xxx_clearl(0x7f00000, REGS_CLKCTRL_BASE + HW_CLKCTRL_HBUS);
stmp3xxx_dma_init();
}
| gpl-2.0 |
Elite-Kernels/elite_shamu | drivers/i2c/busses/i2c-piix4.c | 2034 | 18676 | /*
Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl> and
Philip Edelbrock <phil@netroedge.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Supports:
Intel PIIX4, 440MX
Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800
AMD Hudson-2, CZ
SMSC Victory66
Note: we assume there can only be one device, with one or more
SMBus interfaces.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/io.h>
/* PIIX4 SMBus address offsets */
#define SMBHSTSTS (0 + piix4_smba)
#define SMBHSLVSTS (1 + piix4_smba)
#define SMBHSTCNT (2 + piix4_smba)
#define SMBHSTCMD (3 + piix4_smba)
#define SMBHSTADD (4 + piix4_smba)
#define SMBHSTDAT0 (5 + piix4_smba)
#define SMBHSTDAT1 (6 + piix4_smba)
#define SMBBLKDAT (7 + piix4_smba)
#define SMBSLVCNT (8 + piix4_smba)
#define SMBSHDWCMD (9 + piix4_smba)
#define SMBSLVEVT (0xA + piix4_smba)
#define SMBSLVDAT (0xC + piix4_smba)
/* count for request_region */
#define SMBIOSIZE 8
/* PCI Address Constants */
#define SMBBA 0x090
#define SMBHSTCFG 0x0D2
#define SMBSLVC 0x0D3
#define SMBSHDW1 0x0D4
#define SMBSHDW2 0x0D5
#define SMBREV 0x0D6
/* Other settings */
#define MAX_TIMEOUT 500
#define ENABLE_INT9 0
/* PIIX4 constants */
#define PIIX4_QUICK 0x00
#define PIIX4_BYTE 0x04
#define PIIX4_BYTE_DATA 0x08
#define PIIX4_WORD_DATA 0x0C
#define PIIX4_BLOCK_DATA 0x14
/* insmod parameters */
/* If force is set to anything different from 0, we forcibly enable the
PIIX4. DANGEROUS! */
static int force;
module_param (force, int, 0);
MODULE_PARM_DESC(force, "Forcibly enable the PIIX4. DANGEROUS!");
/* If force_addr is set to anything different from 0, we forcibly enable
the PIIX4 at the given address. VERY DANGEROUS! */
static int force_addr;
module_param (force_addr, int, 0);
MODULE_PARM_DESC(force_addr,
"Forcibly enable the PIIX4 at the given address. "
"EXTREMELY DANGEROUS!");
static int srvrworks_csb5_delay;
static struct pci_driver piix4_driver;
static const struct dmi_system_id piix4_dmi_blacklist[] = {
{
.ident = "Sapphire AM2RD790",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "SAPPHIRE Inc."),
DMI_MATCH(DMI_BOARD_NAME, "PC-AM2RD790"),
},
},
{
.ident = "DFI Lanparty UT 790FX",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "DFI Inc."),
DMI_MATCH(DMI_BOARD_NAME, "LP UT 790FX"),
},
},
{ }
};
/* The IBM entry is in a separate table because we only check it
on Intel-based systems */
static const struct dmi_system_id piix4_dmi_ibm[] = {
{
.ident = "IBM",
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
},
{ },
};
struct i2c_piix4_adapdata {
unsigned short smba;
};
static int piix4_setup(struct pci_dev *PIIX4_dev,
const struct pci_device_id *id)
{
unsigned char temp;
unsigned short piix4_smba;
if ((PIIX4_dev->vendor == PCI_VENDOR_ID_SERVERWORKS) &&
(PIIX4_dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5))
srvrworks_csb5_delay = 1;
/* On some motherboards, it was reported that accessing the SMBus
caused severe hardware problems */
if (dmi_check_system(piix4_dmi_blacklist)) {
dev_err(&PIIX4_dev->dev,
"Accessing the SMBus on this system is unsafe!\n");
return -EPERM;
}
/* Don't access SMBus on IBM systems which get corrupted eeproms */
if (dmi_check_system(piix4_dmi_ibm) &&
PIIX4_dev->vendor == PCI_VENDOR_ID_INTEL) {
dev_err(&PIIX4_dev->dev, "IBM system detected; this module "
"may corrupt your serial eeprom! Refusing to load "
"module!\n");
return -EPERM;
}
/* Determine the address of the SMBus areas */
if (force_addr) {
piix4_smba = force_addr & 0xfff0;
force = 0;
} else {
pci_read_config_word(PIIX4_dev, SMBBA, &piix4_smba);
piix4_smba &= 0xfff0;
if(piix4_smba == 0) {
dev_err(&PIIX4_dev->dev, "SMBus base address "
"uninitialized - upgrade BIOS or use "
"force_addr=0xaddr\n");
return -ENODEV;
}
}
if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
return -ENODEV;
if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
piix4_smba);
return -EBUSY;
}
pci_read_config_byte(PIIX4_dev, SMBHSTCFG, &temp);
/* If force_addr is set, we program the new address here. Just to make
sure, we disable the PIIX4 first. */
if (force_addr) {
pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp & 0xfe);
pci_write_config_word(PIIX4_dev, SMBBA, piix4_smba);
pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp | 0x01);
dev_info(&PIIX4_dev->dev, "WARNING: SMBus interface set to "
"new address %04x!\n", piix4_smba);
} else if ((temp & 1) == 0) {
if (force) {
/* This should never need to be done, but it has been
* noted that many Dell machines have the SMBus
* interface on the PIIX4 disabled!? NOTE: This assumes
* I/O space and other allocations WERE done by the
* Bios! Don't complain if your hardware does weird
* things after enabling this. :') Check for Bios
* updates before resorting to this.
*/
pci_write_config_byte(PIIX4_dev, SMBHSTCFG,
temp | 1);
dev_notice(&PIIX4_dev->dev,
"WARNING: SMBus interface has been FORCEFULLY ENABLED!\n");
} else {
dev_err(&PIIX4_dev->dev,
"Host SMBus controller not enabled!\n");
release_region(piix4_smba, SMBIOSIZE);
return -ENODEV;
}
}
if (((temp & 0x0E) == 8) || ((temp & 0x0E) == 2))
dev_dbg(&PIIX4_dev->dev, "Using Interrupt 9 for SMBus.\n");
else if ((temp & 0x0E) == 0)
dev_dbg(&PIIX4_dev->dev, "Using Interrupt SMI# for SMBus.\n");
else
dev_err(&PIIX4_dev->dev, "Illegal Interrupt configuration "
"(or code out of date)!\n");
pci_read_config_byte(PIIX4_dev, SMBREV, &temp);
dev_info(&PIIX4_dev->dev,
"SMBus Host Controller at 0x%x, revision %d\n",
piix4_smba, temp);
return piix4_smba;
}
static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
const struct pci_device_id *id)
{
unsigned short piix4_smba;
unsigned short smba_idx = 0xcd6;
u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en = 0x2c;
/* SB800 and later SMBus does not support forcing address */
if (force || force_addr) {
dev_err(&PIIX4_dev->dev, "SMBus does not support "
"forcing address!\n");
return -EINVAL;
}
/* Determine the address of the SMBus areas */
if (!request_region(smba_idx, 2, "smba_idx")) {
dev_err(&PIIX4_dev->dev, "SMBus base address index region "
"0x%x already in use!\n", smba_idx);
return -EBUSY;
}
outb_p(smb_en, smba_idx);
smba_en_lo = inb_p(smba_idx + 1);
outb_p(smb_en + 1, smba_idx);
smba_en_hi = inb_p(smba_idx + 1);
release_region(smba_idx, 2);
if ((smba_en_lo & 1) == 0) {
dev_err(&PIIX4_dev->dev,
"Host SMBus controller not enabled!\n");
return -ENODEV;
}
piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
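/* Illustrative example with hypothetical values (not read from real
* hardware): if the two index/data reads above returned smba_en_lo = 0x21
* and smba_en_hi = 0x0b, bit 0 of smba_en_lo shows the host is enabled and
* the decode above yields ((0x0b << 8) | 0x21) & 0xffe0 = 0x0b20 as the
* SMBus I/O base.
*/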
if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
return -ENODEV;
if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
piix4_smba);
return -EBUSY;
}
/* Request the SMBus I2C bus config region */
if (!request_region(piix4_smba + i2ccfg_offset, 1, "i2ccfg")) {
dev_err(&PIIX4_dev->dev, "SMBus I2C bus config region "
"0x%x already in use!\n", piix4_smba + i2ccfg_offset);
release_region(piix4_smba, SMBIOSIZE);
return -EBUSY;
}
i2ccfg = inb_p(piix4_smba + i2ccfg_offset);
release_region(piix4_smba + i2ccfg_offset, 1);
if (i2ccfg & 1)
dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus.\n");
else
dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus.\n");
dev_info(&PIIX4_dev->dev,
"SMBus Host Controller at 0x%x, revision %d\n",
piix4_smba, i2ccfg >> 4);
return piix4_smba;
}
static int piix4_setup_aux(struct pci_dev *PIIX4_dev,
const struct pci_device_id *id,
unsigned short base_reg_addr)
{
/* Set up auxiliary SMBus controllers found on some
* AMD chipsets, e.g. the SP5100 (an SB700 derivative) */
unsigned short piix4_smba;
/* Read address of auxiliary SMBus controller */
pci_read_config_word(PIIX4_dev, base_reg_addr, &piix4_smba);
if ((piix4_smba & 1) == 0) {
dev_dbg(&PIIX4_dev->dev,
"Auxiliary SMBus controller not enabled\n");
return -ENODEV;
}
piix4_smba &= 0xfff0;
if (piix4_smba == 0) {
dev_dbg(&PIIX4_dev->dev,
"Auxiliary SMBus base address uninitialized\n");
return -ENODEV;
}
if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
return -ENODEV;
if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
dev_err(&PIIX4_dev->dev, "Auxiliary SMBus region 0x%x "
"already in use!\n", piix4_smba);
return -EBUSY;
}
dev_info(&PIIX4_dev->dev,
"Auxiliary SMBus Host Controller at 0x%x\n",
piix4_smba);
return piix4_smba;
}
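/* Reader's note, summarizing the status handling in piix4_transaction()
* below (inferred from the code, not quoted from a datasheet): SMBHSTSTS
* bit 0x01 means the host is busy and is polled until it clears, 0x04
* means the addressed device did not respond, 0x08 means a bus collision,
* and 0x10 means a failed bus transaction. Any bits left set are cleared
* by writing them back to SMBHSTSTS.
*/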
static int piix4_transaction(struct i2c_adapter *piix4_adapter)
{
struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(piix4_adapter);
unsigned short piix4_smba = adapdata->smba;
int temp;
int result = 0;
int timeout = 0;
dev_dbg(&piix4_adapter->dev, "Transaction (pre): CNT=%02x, CMD=%02x, "
"ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTCNT),
inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0),
inb_p(SMBHSTDAT1));
/* Make sure the SMBus host is ready to start transmitting */
if ((temp = inb_p(SMBHSTSTS)) != 0x00) {
dev_dbg(&piix4_adapter->dev, "SMBus busy (%02x). "
"Resetting...\n", temp);
outb_p(temp, SMBHSTSTS);
if ((temp = inb_p(SMBHSTSTS)) != 0x00) {
dev_err(&piix4_adapter->dev, "Failed! (%02x)\n", temp);
return -EBUSY;
} else {
dev_dbg(&piix4_adapter->dev, "Successful!\n");
}
}
/* start the transaction by setting bit 6 */
outb_p(inb(SMBHSTCNT) | 0x040, SMBHSTCNT);
/* We will always wait for a fraction of a second! (See PIIX4 docs errata) */
if (srvrworks_csb5_delay) /* Extra delay for SERVERWORKS_CSB5 */
msleep(2);
else
msleep(1);
while ((++timeout < MAX_TIMEOUT) &&
((temp = inb_p(SMBHSTSTS)) & 0x01))
msleep(1);
/* If the SMBus is still busy, we give up */
if (timeout == MAX_TIMEOUT) {
dev_err(&piix4_adapter->dev, "SMBus Timeout!\n");
result = -ETIMEDOUT;
}
if (temp & 0x10) {
result = -EIO;
dev_err(&piix4_adapter->dev, "Error: Failed bus transaction\n");
}
if (temp & 0x08) {
result = -EIO;
dev_dbg(&piix4_adapter->dev, "Bus collision! SMBus may be "
"locked until next hard reset. (sorry!)\n");
/* Clock stops and slave is stuck in mid-transmission */
}
if (temp & 0x04) {
result = -ENXIO;
dev_dbg(&piix4_adapter->dev, "Error: no response!\n");
}
if (inb_p(SMBHSTSTS) != 0x00)
outb_p(inb(SMBHSTSTS), SMBHSTSTS);
if ((temp = inb_p(SMBHSTSTS)) != 0x00) {
dev_err(&piix4_adapter->dev, "Failed reset at end of "
"transaction (%02x)\n", temp);
}
dev_dbg(&piix4_adapter->dev, "Transaction (post): CNT=%02x, CMD=%02x, "
"ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTCNT),
inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0),
inb_p(SMBHSTDAT1));
return result;
}
/* Return negative errno on error. */
static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data * data)
{
struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
unsigned short piix4_smba = adapdata->smba;
int i, len;
int status;
switch (size) {
case I2C_SMBUS_QUICK:
outb_p((addr << 1) | read_write,
SMBHSTADD);
size = PIIX4_QUICK;
break;
case I2C_SMBUS_BYTE:
outb_p((addr << 1) | read_write,
SMBHSTADD);
if (read_write == I2C_SMBUS_WRITE)
outb_p(command, SMBHSTCMD);
size = PIIX4_BYTE;
break;
case I2C_SMBUS_BYTE_DATA:
outb_p((addr << 1) | read_write,
SMBHSTADD);
outb_p(command, SMBHSTCMD);
if (read_write == I2C_SMBUS_WRITE)
outb_p(data->byte, SMBHSTDAT0);
size = PIIX4_BYTE_DATA;
break;
case I2C_SMBUS_WORD_DATA:
outb_p((addr << 1) | read_write,
SMBHSTADD);
outb_p(command, SMBHSTCMD);
if (read_write == I2C_SMBUS_WRITE) {
outb_p(data->word & 0xff, SMBHSTDAT0);
outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1);
}
size = PIIX4_WORD_DATA;
break;
case I2C_SMBUS_BLOCK_DATA:
outb_p((addr << 1) | read_write,
SMBHSTADD);
outb_p(command, SMBHSTCMD);
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
if (len == 0 || len > I2C_SMBUS_BLOCK_MAX)
return -EINVAL;
outb_p(len, SMBHSTDAT0);
i = inb_p(SMBHSTCNT); /* Reset SMBBLKDAT */
for (i = 1; i <= len; i++)
outb_p(data->block[i], SMBBLKDAT);
}
size = PIIX4_BLOCK_DATA;
break;
default:
dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
return -EOPNOTSUPP;
}
outb_p((size & 0x1C) + (ENABLE_INT9 & 1), SMBHSTCNT);
status = piix4_transaction(adap);
if (status)
return status;
if ((read_write == I2C_SMBUS_WRITE) || (size == PIIX4_QUICK))
return 0;
switch (size) {
case PIIX4_BYTE:
case PIIX4_BYTE_DATA:
data->byte = inb_p(SMBHSTDAT0);
break;
case PIIX4_WORD_DATA:
data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8);
break;
case PIIX4_BLOCK_DATA:
data->block[0] = inb_p(SMBHSTDAT0);
if (data->block[0] == 0 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
i = inb_p(SMBHSTCNT); /* Reset SMBBLKDAT */
for (i = 1; i <= data->block[0]; i++)
data->block[i] = inb_p(SMBBLKDAT);
break;
}
return 0;
}
static u32 piix4_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_BLOCK_DATA;
}
static const struct i2c_algorithm smbus_algorithm = {
.smbus_xfer = piix4_access,
.functionality = piix4_func,
};
static DEFINE_PCI_DEVICE_TABLE(piix4_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x790b) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_OSB4) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_CSB5) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_CSB6) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_HT1000SB) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_HT1100LD) },
{ 0, }
};
MODULE_DEVICE_TABLE (pci, piix4_ids);
static struct i2c_adapter *piix4_main_adapter;
static struct i2c_adapter *piix4_aux_adapter;
static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
struct i2c_adapter **padap)
{
struct i2c_adapter *adap;
struct i2c_piix4_adapdata *adapdata;
int retval;
adap = kzalloc(sizeof(*adap), GFP_KERNEL);
if (adap == NULL) {
release_region(smba, SMBIOSIZE);
return -ENOMEM;
}
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->algo = &smbus_algorithm;
adapdata = kzalloc(sizeof(*adapdata), GFP_KERNEL);
if (adapdata == NULL) {
kfree(adap);
release_region(smba, SMBIOSIZE);
return -ENOMEM;
}
adapdata->smba = smba;
/* set up the sysfs linkage to our parent device */
adap->dev.parent = &dev->dev;
snprintf(adap->name, sizeof(adap->name),
"SMBus PIIX4 adapter at %04x", smba);
i2c_set_adapdata(adap, adapdata);
retval = i2c_add_adapter(adap);
if (retval) {
dev_err(&dev->dev, "Couldn't register adapter!\n");
kfree(adapdata);
kfree(adap);
release_region(smba, SMBIOSIZE);
return retval;
}
*padap = adap;
return 0;
}
static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int retval;
if ((dev->vendor == PCI_VENDOR_ID_ATI &&
dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
dev->revision >= 0x40) ||
dev->vendor == PCI_VENDOR_ID_AMD)
/* base address location etc changed in SB800 */
retval = piix4_setup_sb800(dev, id);
else
retval = piix4_setup(dev, id);
/* If no main SMBus found, give up */
if (retval < 0)
return retval;
/* Try to register main SMBus adapter, give up if we can't */
retval = piix4_add_adapter(dev, retval, &piix4_main_adapter);
if (retval < 0)
return retval;
/* Check for auxiliary SMBus on some AMD chipsets */
if (dev->vendor == PCI_VENDOR_ID_ATI &&
dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
dev->revision < 0x40) {
retval = piix4_setup_aux(dev, id, 0x58);
if (retval > 0) {
/* Try to add the aux adapter if it exists,
* piix4_add_adapter will clean up if this fails */
piix4_add_adapter(dev, retval, &piix4_aux_adapter);
}
}
return 0;
}
static void piix4_adap_remove(struct i2c_adapter *adap)
{
struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
if (adapdata->smba) {
i2c_del_adapter(adap);
release_region(adapdata->smba, SMBIOSIZE);
kfree(adapdata);
kfree(adap);
}
}
static void piix4_remove(struct pci_dev *dev)
{
if (piix4_main_adapter) {
piix4_adap_remove(piix4_main_adapter);
piix4_main_adapter = NULL;
}
if (piix4_aux_adapter) {
piix4_adap_remove(piix4_aux_adapter);
piix4_aux_adapter = NULL;
}
}
static struct pci_driver piix4_driver = {
.name = "piix4_smbus",
.id_table = piix4_ids,
.probe = piix4_probe,
.remove = piix4_remove,
};
module_pci_driver(piix4_driver);
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
"Philip Edelbrock <phil@netroedge.com>");
MODULE_DESCRIPTION("PIIX4 SMBus driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
redmi/android_kernel_HM2014811 | lib/div64.c | 2290 | 3158 | /*
* Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
*
* Based on former do_div() implementation from asm-parisc/div64.h:
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*
*
* Generic C version of 64bit/32bit division and modulo, with
* 64bit result and 32bit remainder.
*
* The fast case for (n>>32 == 0) is handled inline by do_div().
*
* Code generated for this function might be very inefficient
* for some CPUs. __div64_32() can be overridden by linking arch-specific
* assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
*/
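/*
* Usage sketch (illustrative, not part of the original file): do_div(n, base)
* divides the 64-bit lvalue n in place and evaluates to the 32-bit remainder:
*
*	u64 bytes = 1000000007ULL;
*	u32 rem = do_div(bytes, 1000);
*
* afterwards bytes == 1000000 and rem == 7. __div64_32() below is only the
* slow path taken when the upper 32 bits of n are non-zero.
*/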
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/math64.h>
/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
uint64_t rem = *n;
uint64_t b = base;
uint64_t res, d = 1;
uint32_t high = rem >> 32;
/* Reduce the thing a bit first */
res = 0;
if (high >= base) {
high /= base;
res = (uint64_t) high << 32;
rem -= (uint64_t) (high*base) << 32;
}
while ((int64_t)b > 0 && b < rem) {
b = b+b;
d = d+d;
}
do {
if (rem >= b) {
rem -= b;
res += d;
}
b >>= 1;
d >>= 1;
} while (d);
*n = res;
return rem;
}
EXPORT_SYMBOL(__div64_32);
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
u64 quotient;
if (dividend < 0) {
quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
*remainder = -*remainder;
if (divisor > 0)
quotient = -quotient;
} else {
quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
if (divisor < 0)
quotient = -quotient;
}
return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif
/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
* @dividend: 64bit dividend
* @divisor: 64bit divisor
*
* This implementation is a modified version of the algorithm proposed
* in the book 'Hacker's Delight'. The original source and full proof
* can be found at the URL below and are available for use without restriction.
*
* 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
*/
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
u32 high = divisor >> 32;
u64 quot;
if (high == 0) {
quot = div_u64(dividend, divisor);
} else {
int n = 1 + fls(high);
quot = div_u64(dividend >> n, divisor >> n);
if (quot != 0)
quot--;
if ((dividend - quot * divisor) >= divisor)
quot++;
}
return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif
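/*
* Worked example of the estimate-and-correct step above (illustrative
* numbers, not taken from the original source): dividing 2^63 by
* 0x200000001 gives high = 2 and n = 3, so the first estimate is
* div_u64(2^60, 0x40000000) = 0x40000000, reduced to 0x3fffffff by the
* decrement. The follow-up check computes
* 2^63 - 0x3fffffff * 0x200000001 = 0x1c0000001, which is smaller than
* the divisor, so no increment is applied and 0x3fffffff is the exact
* quotient. After the decrement the estimate is never more than one
* below the true quotient, which the single conditional increment repairs.
*/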
/**
* div64_s64 - signed 64bit divide with 64bit divisor
* @dividend: 64bit dividend
* @divisor: 64bit divisor
*/
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
s64 quot, t;
quot = div64_u64(abs64(dividend), abs64(divisor));
t = (dividend ^ divisor) >> 63;
return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif
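/*
* Note on the sign handling above (derived from the code itself): t is 0
* when dividend and divisor have the same sign and ~0 (all ones) when they
* differ, because the XOR of the two sign bits is arithmetically shifted
* across all 64 bits. (quot ^ t) - t then leaves quot unchanged for t == 0
* and yields its two's-complement negation for t == -1; for example
* div64_s64(-10, 3) computes div64_u64(10, 3) = 3 and negates it to -3.
*/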
#endif /* BITS_PER_LONG == 32 */
/*
* Iterative div/mod for use when dividend is not expected to be much
* bigger than divisor.
*/
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
| gpl-2.0 |
allwinner-ics/lichee_linux-3.0 | drivers/staging/msm/mddi_toshiba.c | 3058 | 63018 | /* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include "msm_fb.h"
#include "mddihost.h"
#include "mddihosti.h"
#include "mddi_toshiba.h"
#define TM_GET_DID(id) ((id) & 0xff)
#define TM_GET_PID(id) (((id) & 0xff00)>>8)
#define MDDI_CLIENT_CORE_BASE 0x108000
#define LCD_CONTROL_BLOCK_BASE 0x110000
#define SPI_BLOCK_BASE 0x120000
#define PWM_BLOCK_BASE 0x140000
#define GPIO_BLOCK_BASE 0x150000 /* referenced below; value assumed from the upstream driver (missing from this excerpt) */
#define SYSTEM_BLOCK1_BASE 0x160000
#define SYSTEM_BLOCK2_BASE 0x170000 /* referenced below; value assumed from the upstream driver (missing from this excerpt) */
#define TTBUSSEL (MDDI_CLIENT_CORE_BASE|0x18)
#define DPSET0 (MDDI_CLIENT_CORE_BASE|0x1C)
#define DPSET1 (MDDI_CLIENT_CORE_BASE|0x20)
#define DPSUS (MDDI_CLIENT_CORE_BASE|0x24)
#define DPRUN (MDDI_CLIENT_CORE_BASE|0x28)
#define SYSCKENA (MDDI_CLIENT_CORE_BASE|0x2C)
#define BITMAP0 (MDDI_CLIENT_CORE_BASE|0x44)
#define BITMAP1 (MDDI_CLIENT_CORE_BASE|0x48)
#define BITMAP2 (MDDI_CLIENT_CORE_BASE|0x4C)
#define BITMAP3 (MDDI_CLIENT_CORE_BASE|0x50)
#define BITMAP4 (MDDI_CLIENT_CORE_BASE|0x54)
#define SRST (LCD_CONTROL_BLOCK_BASE|0x00)
#define PORT_ENB (LCD_CONTROL_BLOCK_BASE|0x04)
#define START (LCD_CONTROL_BLOCK_BASE|0x08)
#define PORT (LCD_CONTROL_BLOCK_BASE|0x0C)
#define INTFLG (LCD_CONTROL_BLOCK_BASE|0x18)
#define INTMSK (LCD_CONTROL_BLOCK_BASE|0x1C)
#define MPLFBUF (LCD_CONTROL_BLOCK_BASE|0x20)
#define PXL (LCD_CONTROL_BLOCK_BASE|0x30)
#define HCYCLE (LCD_CONTROL_BLOCK_BASE|0x34)
#define HSW (LCD_CONTROL_BLOCK_BASE|0x38)
#define HDE_START (LCD_CONTROL_BLOCK_BASE|0x3C)
#define HDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x40)
#define VCYCLE (LCD_CONTROL_BLOCK_BASE|0x44)
#define VSW (LCD_CONTROL_BLOCK_BASE|0x48)
#define VDE_START (LCD_CONTROL_BLOCK_BASE|0x4C)
#define VDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x50)
#define WAKEUP (LCD_CONTROL_BLOCK_BASE|0x54)
#define REGENB (LCD_CONTROL_BLOCK_BASE|0x5C)
#define VSYNIF (LCD_CONTROL_BLOCK_BASE|0x60)
#define WRSTB (LCD_CONTROL_BLOCK_BASE|0x64)
#define RDSTB (LCD_CONTROL_BLOCK_BASE|0x68)
#define ASY_DATA (LCD_CONTROL_BLOCK_BASE|0x6C)
#define ASY_DATB (LCD_CONTROL_BLOCK_BASE|0x70)
#define ASY_DATC (LCD_CONTROL_BLOCK_BASE|0x74)
#define ASY_DATD (LCD_CONTROL_BLOCK_BASE|0x78)
#define ASY_DATE (LCD_CONTROL_BLOCK_BASE|0x7C)
#define ASY_DATF (LCD_CONTROL_BLOCK_BASE|0x80)
#define ASY_DATG (LCD_CONTROL_BLOCK_BASE|0x84)
#define ASY_DATH (LCD_CONTROL_BLOCK_BASE|0x88)
#define ASY_CMDSET (LCD_CONTROL_BLOCK_BASE|0x8C)
#define MONI (LCD_CONTROL_BLOCK_BASE|0xB0)
#define VPOS (LCD_CONTROL_BLOCK_BASE|0xC0)
#define SSICTL (SPI_BLOCK_BASE|0x00)
#define SSITIME (SPI_BLOCK_BASE|0x04)
#define SSITX (SPI_BLOCK_BASE|0x08)
#define SSIINTS (SPI_BLOCK_BASE|0x14)
#define TIMER0LOAD (PWM_BLOCK_BASE|0x00)
#define TIMER0CTRL (PWM_BLOCK_BASE|0x08)
#define PWM0OFF (PWM_BLOCK_BASE|0x1C)
#define TIMER1LOAD (PWM_BLOCK_BASE|0x20)
#define TIMER1CTRL (PWM_BLOCK_BASE|0x28)
#define PWM1OFF (PWM_BLOCK_BASE|0x3C)
#define TIMER2LOAD (PWM_BLOCK_BASE|0x40)
#define TIMER2CTRL (PWM_BLOCK_BASE|0x48)
#define PWM2OFF (PWM_BLOCK_BASE|0x5C)
#define PWMCR (PWM_BLOCK_BASE|0x68)
#define GPIOIS (GPIO_BLOCK_BASE|0x08)
#define GPIOIEV (GPIO_BLOCK_BASE|0x10)
#define GPIOIC (GPIO_BLOCK_BASE|0x20)
#define WKREQ (SYSTEM_BLOCK1_BASE|0x00)
#define CLKENB (SYSTEM_BLOCK1_BASE|0x04)
#define DRAMPWR (SYSTEM_BLOCK1_BASE|0x08)
#define INTMASK (SYSTEM_BLOCK1_BASE|0x0C)
#define CNT_DIS (SYSTEM_BLOCK1_BASE|0x10)
typedef enum {
TOSHIBA_STATE_OFF,
TOSHIBA_STATE_PRIM_SEC_STANDBY,
TOSHIBA_STATE_PRIM_SEC_READY,
TOSHIBA_STATE_PRIM_NORMAL_MODE,
TOSHIBA_STATE_SEC_NORMAL_MODE
} mddi_toshiba_state_t;
static uint32 mddi_toshiba_curr_vpos;
static boolean mddi_toshiba_monitor_refresh_value = FALSE;
static boolean mddi_toshiba_report_refresh_measurements = FALSE;
boolean mddi_toshiba_61Hz_refresh = TRUE;
/* Modifications to timing to increase refresh rate to > 60Hz.
* 20MHz dot clock.
* 646 total rows.
* 506 total columns.
* refresh rate = 61.19Hz
*/
static uint32 mddi_toshiba_rows_per_second = 39526;
static uint32 mddi_toshiba_usecs_per_refresh = 16344;
static uint32 mddi_toshiba_rows_per_refresh = 646;
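/* Sanity check of the constants above (arithmetic only): with a 20MHz dot
* clock and 506 clocks per row, rows per second is 20,000,000 / 506 = 39,526
* (rounded); a frame of 646 rows takes 646 * 506 / 20 = 16,344 usec, i.e. a
* refresh rate of 20,000,000 / (646 * 506) = 61.19Hz, matching the figure
* quoted in the comment above.
*/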
extern boolean mddi_vsync_detect_enabled;
static msm_fb_vsync_handler_type mddi_toshiba_vsync_handler;
static void *mddi_toshiba_vsync_handler_arg;
static uint16 mddi_toshiba_vsync_attempts;
static mddi_toshiba_state_t toshiba_state = TOSHIBA_STATE_OFF;
static struct msm_panel_common_pdata *mddi_toshiba_pdata;
static int mddi_toshiba_lcd_on(struct platform_device *pdev);
static int mddi_toshiba_lcd_off(struct platform_device *pdev);
static void mddi_toshiba_state_transition(mddi_toshiba_state_t a,
mddi_toshiba_state_t b)
{
if (toshiba_state != a) {
MDDI_MSG_ERR("toshiba state trans. (%d->%d) found %d\n", a, b,
toshiba_state);
}
toshiba_state = b;
}
#define GORDON_REG_IMGCTL1 0x10 /* Image interface control 1 */
#define GORDON_REG_IMGCTL2 0x11 /* Image interface control 2 */
#define GORDON_REG_IMGSET1 0x12 /* Image interface settings 1 */
#define GORDON_REG_IMGSET2 0x13 /* Image interface settings 2 */
#define GORDON_REG_IVBP1 0x14 /* DM0: Vert back porch */
#define GORDON_REG_IHBP1 0x15 /* DM0: Horiz back porch */
#define GORDON_REG_IVNUM1 0x16 /* DM0: Num of vert lines */
#define GORDON_REG_IHNUM1 0x17 /* DM0: Num of pixels per line */
#define GORDON_REG_IVBP2 0x18 /* DM1: Vert back porch */
#define GORDON_REG_IHBP2 0x19 /* DM1: Horiz back porch */
#define GORDON_REG_IVNUM2 0x1A /* DM1: Num of vert lines */
#define GORDON_REG_IHNUM2 0x1B /* DM1: Num of pixels per line */
#define GORDON_REG_LCDIFCTL1 0x30 /* LCD interface control 1 */
#define GORDON_REG_VALTRAN 0x31 /* LCD IF ctl: VALTRAN sync flag */
#define GORDON_REG_AVCTL 0x33
#define GORDON_REG_LCDIFCTL2 0x34 /* LCD interface control 2 */
#define GORDON_REG_LCDIFCTL3 0x35 /* LCD interface control 3 */
#define GORDON_REG_LCDIFSET1 0x36 /* LCD interface settings 1 */
#define GORDON_REG_PCCTL 0x3C
#define GORDON_REG_TPARAM1 0x40
#define GORDON_REG_TLCDIF1 0x41
#define GORDON_REG_TSSPB_ST1 0x42
#define GORDON_REG_TSSPB_ED1 0x43
#define GORDON_REG_TSCK_ST1 0x44
#define GORDON_REG_TSCK_WD1 0x45
#define GORDON_REG_TGSPB_VST1 0x46
#define GORDON_REG_TGSPB_VED1 0x47
#define GORDON_REG_TGSPB_CH1 0x48
#define GORDON_REG_TGCK_ST1 0x49
#define GORDON_REG_TGCK_ED1 0x4A
#define GORDON_REG_TPCTL_ST1 0x4B
#define GORDON_REG_TPCTL_ED1 0x4C
#define GORDON_REG_TPCHG_ED1 0x4D
#define GORDON_REG_TCOM_CH1 0x4E
#define GORDON_REG_THBP1 0x4F
#define GORDON_REG_TPHCTL1 0x50
#define GORDON_REG_EVPH1 0x51
#define GORDON_REG_EVPL1 0x52
#define GORDON_REG_EVNH1 0x53
#define GORDON_REG_EVNL1 0x54
#define GORDON_REG_TBIAS1 0x55
#define GORDON_REG_TPARAM2 0x56
#define GORDON_REG_TLCDIF2 0x57
#define GORDON_REG_TSSPB_ST2 0x58
#define GORDON_REG_TSSPB_ED2 0x59
#define GORDON_REG_TSCK_ST2 0x5A
#define GORDON_REG_TSCK_WD2 0x5B
#define GORDON_REG_TGSPB_VST2 0x5C
#define GORDON_REG_TGSPB_VED2 0x5D
#define GORDON_REG_TGSPB_CH2 0x5E
#define GORDON_REG_TGCK_ST2 0x5F
#define GORDON_REG_TGCK_ED2 0x60
#define GORDON_REG_TPCTL_ST2 0x61
#define GORDON_REG_TPCTL_ED2 0x62
#define GORDON_REG_TPCHG_ED2 0x63
#define GORDON_REG_TCOM_CH2 0x64
#define GORDON_REG_THBP2 0x65
#define GORDON_REG_TPHCTL2 0x66
#define GORDON_REG_EVPH2 0x67
#define GORDON_REG_EVPL2 0x68
#define GORDON_REG_EVNH2 0x69
#define GORDON_REG_EVNL2 0x6A
#define GORDON_REG_TBIAS2 0x6B
#define GORDON_REG_POWCTL 0x80
#define GORDON_REG_POWOSC1 0x81
#define GORDON_REG_POWOSC2 0x82
#define GORDON_REG_POWSET 0x83
#define GORDON_REG_POWTRM1 0x85
#define GORDON_REG_POWTRM2 0x86
#define GORDON_REG_POWTRM3 0x87
#define GORDON_REG_POWTRMSEL 0x88
#define GORDON_REG_POWHIZ 0x89
void serigo(uint16 reg, uint8 data)
{
uint32 mddi_val = 0;
mddi_queue_register_read(SSIINTS, &mddi_val, TRUE, 0);
if (mddi_val & (1 << 8))
mddi_wait(1);
/* No De-assert of CS and send 2 bytes */
mddi_val = 0x90000 | ((0x00FF & reg) << 8) | data;
mddi_queue_register_write(SSITX, mddi_val, TRUE, 0);
}
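/* Layout of the SSITX word built above (inferred from the masks in serigo(),
* not from a datasheet): bits [19:16] carry the 0x9 transfer-control nibble,
* bits [15:8] the 8-bit register index, and bits [7:0] the data byte, so
* serigo(GORDON_REG_IMGSET1, 0x01) queues the word 0x00091201.
*/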
void gordon_init(void)
{
/* Image interface settings */
serigo(GORDON_REG_IMGCTL2, 0x00);
serigo(GORDON_REG_IMGSET1, 0x01);
/* Exchange the RGB signal for J510(Softbank mobile) */
serigo(GORDON_REG_IMGSET2, 0x12);
serigo(GORDON_REG_LCDIFSET1, 0x00);
mddi_wait(2);
/* Pre-charge settings */
serigo(GORDON_REG_PCCTL, 0x09);
serigo(GORDON_REG_LCDIFCTL2, 0x1B);
mddi_wait(1);
}
void gordon_disp_on(void)
{
/* gordon_dispmode setting */
/* VGA settings */
serigo(GORDON_REG_TPARAM1, 0x30);
serigo(GORDON_REG_TLCDIF1, 0x00);
serigo(GORDON_REG_TSSPB_ST1, 0x8B);
serigo(GORDON_REG_TSSPB_ED1, 0x93);
mddi_wait(2);
serigo(GORDON_REG_TSCK_ST1, 0x88);
serigo(GORDON_REG_TSCK_WD1, 0x00);
serigo(GORDON_REG_TGSPB_VST1, 0x01);
serigo(GORDON_REG_TGSPB_VED1, 0x02);
mddi_wait(2);
serigo(GORDON_REG_TGSPB_CH1, 0x5E);
serigo(GORDON_REG_TGCK_ST1, 0x80);
serigo(GORDON_REG_TGCK_ED1, 0x3C);
serigo(GORDON_REG_TPCTL_ST1, 0x50);
mddi_wait(2);
serigo(GORDON_REG_TPCTL_ED1, 0x74);
serigo(GORDON_REG_TPCHG_ED1, 0x78);
serigo(GORDON_REG_TCOM_CH1, 0x50);
serigo(GORDON_REG_THBP1, 0x84);
mddi_wait(2);
serigo(GORDON_REG_TPHCTL1, 0x00);
serigo(GORDON_REG_EVPH1, 0x70);
serigo(GORDON_REG_EVPL1, 0x64);
serigo(GORDON_REG_EVNH1, 0x56);
mddi_wait(2);
serigo(GORDON_REG_EVNL1, 0x48);
serigo(GORDON_REG_TBIAS1, 0x88);
mddi_wait(2);
serigo(GORDON_REG_TPARAM2, 0x28);
serigo(GORDON_REG_TLCDIF2, 0x14);
serigo(GORDON_REG_TSSPB_ST2, 0x49);
serigo(GORDON_REG_TSSPB_ED2, 0x4B);
mddi_wait(2);
serigo(GORDON_REG_TSCK_ST2, 0x4A);
serigo(GORDON_REG_TSCK_WD2, 0x02);
serigo(GORDON_REG_TGSPB_VST2, 0x02);
serigo(GORDON_REG_TGSPB_VED2, 0x03);
mddi_wait(2);
serigo(GORDON_REG_TGSPB_CH2, 0x2F);
serigo(GORDON_REG_TGCK_ST2, 0x40);
serigo(GORDON_REG_TGCK_ED2, 0x1E);
serigo(GORDON_REG_TPCTL_ST2, 0x2C);
mddi_wait(2);
serigo(GORDON_REG_TPCTL_ED2, 0x3A);
serigo(GORDON_REG_TPCHG_ED2, 0x3C);
serigo(GORDON_REG_TCOM_CH2, 0x28);
serigo(GORDON_REG_THBP2, 0x4D);
mddi_wait(2);
serigo(GORDON_REG_TPHCTL2, 0x1A);
mddi_wait(2);
serigo(GORDON_REG_IVBP1, 0x02);
serigo(GORDON_REG_IHBP1, 0x90);
serigo(GORDON_REG_IVNUM1, 0xA0);
serigo(GORDON_REG_IHNUM1, 0x78);
mddi_wait(2);
serigo(GORDON_REG_IVBP2, 0x02);
serigo(GORDON_REG_IHBP2, 0x48);
serigo(GORDON_REG_IVNUM2, 0x50);
serigo(GORDON_REG_IHNUM2, 0x3C);
mddi_wait(2);
serigo(GORDON_REG_POWCTL, 0x03);
mddi_wait(15);
serigo(GORDON_REG_POWCTL, 0x07);
mddi_wait(15);
serigo(GORDON_REG_POWCTL, 0x0F);
mddi_wait(15);
serigo(GORDON_REG_AVCTL, 0x03);
mddi_wait(15);
serigo(GORDON_REG_POWCTL, 0x1F);
mddi_wait(15);
serigo(GORDON_REG_POWCTL, 0x5F);
mddi_wait(15);
serigo(GORDON_REG_POWCTL, 0x7F);
mddi_wait(15);
serigo(GORDON_REG_LCDIFCTL1, 0x02);
mddi_wait(15);
serigo(GORDON_REG_IMGCTL1, 0x00);
mddi_wait(15);
serigo(GORDON_REG_LCDIFCTL3, 0x00);
mddi_wait(15);
serigo(GORDON_REG_VALTRAN, 0x01);
mddi_wait(15);
serigo(GORDON_REG_LCDIFCTL1, 0x03);
serigo(GORDON_REG_LCDIFCTL1, 0x03);
mddi_wait(1);
}
void gordon_disp_off(void)
{
serigo(GORDON_REG_LCDIFCTL2, 0x7B);
serigo(GORDON_REG_VALTRAN, 0x01);
serigo(GORDON_REG_LCDIFCTL1, 0x02);
serigo(GORDON_REG_LCDIFCTL3, 0x01);
mddi_wait(20);
serigo(GORDON_REG_VALTRAN, 0x01);
serigo(GORDON_REG_IMGCTL1, 0x01);
serigo(GORDON_REG_LCDIFCTL1, 0x00);
mddi_wait(20);
serigo(GORDON_REG_POWCTL, 0x1F);
mddi_wait(40);
serigo(GORDON_REG_POWCTL, 0x07);
mddi_wait(40);
serigo(GORDON_REG_POWCTL, 0x03);
mddi_wait(40);
serigo(GORDON_REG_POWCTL, 0x00);
mddi_wait(40);
}
void gordon_disp_init(void)
{
gordon_init();
mddi_wait(20);
gordon_disp_on();
}
static void toshiba_common_initial_setup(struct msm_fb_data_type *mfd)
{
if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT) {
write_client_reg(DPSET0 , 0x4bec0066, TRUE);
write_client_reg(DPSET1 , 0x00000113, TRUE);
write_client_reg(DPSUS , 0x00000000, TRUE);
write_client_reg(DPRUN , 0x00000001, TRUE);
mddi_wait(5);
write_client_reg(SYSCKENA , 0x00000001, TRUE);
write_client_reg(CLKENB , 0x0000a0e9, TRUE);
write_client_reg(GPIODATA , 0x03FF0000, TRUE);
write_client_reg(GPIODIR , 0x0000024D, TRUE);
write_client_reg(GPIOSEL , 0x00000173, TRUE);
write_client_reg(GPIOPC , 0x03C300C0, TRUE);
write_client_reg(WKREQ , 0x00000000, TRUE);
write_client_reg(GPIOIS , 0x00000000, TRUE);
write_client_reg(GPIOIEV , 0x00000001, TRUE);
write_client_reg(GPIOIC , 0x000003FF, TRUE);
write_client_reg(GPIODATA , 0x00040004, TRUE);
write_client_reg(GPIODATA , 0x00080008, TRUE);
write_client_reg(DRAMPWR , 0x00000001, TRUE);
write_client_reg(CLKENB , 0x0000a0eb, TRUE);
write_client_reg(PWMCR , 0x00000000, TRUE);
mddi_wait(1);
write_client_reg(SSICTL , 0x00060399, TRUE);
write_client_reg(SSITIME , 0x00000100, TRUE);
write_client_reg(CNT_DIS , 0x00000002, TRUE);
write_client_reg(SSICTL , 0x0006039b, TRUE);
write_client_reg(SSITX , 0x00000000, TRUE);
mddi_wait(7);
write_client_reg(SSITX , 0x00000000, TRUE);
mddi_wait(7);
write_client_reg(SSITX , 0x00000000, TRUE);
mddi_wait(7);
write_client_reg(SSITX , 0x000800BA, TRUE);
write_client_reg(SSITX , 0x00000111, TRUE);
write_client_reg(SSITX , 0x00080036, TRUE);
write_client_reg(SSITX , 0x00000100, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x0008003A, TRUE);
write_client_reg(SSITX , 0x00000160, TRUE);
write_client_reg(SSITX , 0x000800B1, TRUE);
write_client_reg(SSITX , 0x0000015D, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800B2, TRUE);
write_client_reg(SSITX , 0x00000133, TRUE);
write_client_reg(SSITX , 0x000800B3, TRUE);
write_client_reg(SSITX , 0x00000122, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800B4, TRUE);
write_client_reg(SSITX , 0x00000102, TRUE);
write_client_reg(SSITX , 0x000800B5, TRUE);
write_client_reg(SSITX , 0x0000011E, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800B6, TRUE);
write_client_reg(SSITX , 0x00000127, TRUE);
write_client_reg(SSITX , 0x000800B7, TRUE);
write_client_reg(SSITX , 0x00000103, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800B9, TRUE);
write_client_reg(SSITX , 0x00000124, TRUE);
write_client_reg(SSITX , 0x000800BD, TRUE);
write_client_reg(SSITX , 0x000001A1, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800BB, TRUE);
write_client_reg(SSITX , 0x00000100, TRUE);
write_client_reg(SSITX , 0x000800BF, TRUE);
write_client_reg(SSITX , 0x00000101, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800BE, TRUE);
write_client_reg(SSITX , 0x00000100, TRUE);
write_client_reg(SSITX , 0x000800C0, TRUE);
write_client_reg(SSITX , 0x00000111, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800C1, TRUE);
write_client_reg(SSITX , 0x00000111, TRUE);
write_client_reg(SSITX , 0x000800C2, TRUE);
write_client_reg(SSITX , 0x00000111, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800C3, TRUE);
write_client_reg(SSITX , 0x00080132, TRUE);
write_client_reg(SSITX , 0x00000132, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800C4, TRUE);
write_client_reg(SSITX , 0x00080132, TRUE);
write_client_reg(SSITX , 0x00000132, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800C5, TRUE);
write_client_reg(SSITX , 0x00080132, TRUE);
write_client_reg(SSITX , 0x00000132, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800C6, TRUE);
write_client_reg(SSITX , 0x00080132, TRUE);
write_client_reg(SSITX , 0x00000132, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800C7, TRUE);
write_client_reg(SSITX , 0x00080164, TRUE);
write_client_reg(SSITX , 0x00000145, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800C8, TRUE);
write_client_reg(SSITX , 0x00000144, TRUE);
write_client_reg(SSITX , 0x000800C9, TRUE);
write_client_reg(SSITX , 0x00000152, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800CA, TRUE);
write_client_reg(SSITX , 0x00000100, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800EC, TRUE);
write_client_reg(SSITX , 0x00080101, TRUE);
write_client_reg(SSITX , 0x000001FC, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800CF, TRUE);
write_client_reg(SSITX , 0x00000101, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800D0, TRUE);
write_client_reg(SSITX , 0x00080110, TRUE);
write_client_reg(SSITX , 0x00000104, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800D1, TRUE);
write_client_reg(SSITX , 0x00000101, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800D2, TRUE);
write_client_reg(SSITX , 0x00080100, TRUE);
write_client_reg(SSITX , 0x00000128, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800D3, TRUE);
write_client_reg(SSITX , 0x00080100, TRUE);
write_client_reg(SSITX , 0x00000128, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800D4, TRUE);
write_client_reg(SSITX , 0x00080126, TRUE);
write_client_reg(SSITX , 0x000001A4, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800D5, TRUE);
write_client_reg(SSITX , 0x00000120, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800EF, TRUE);
write_client_reg(SSITX , 0x00080132, TRUE);
write_client_reg(SSITX , 0x00000100, TRUE);
mddi_wait(1);
write_client_reg(BITMAP0 , 0x032001E0, TRUE);
write_client_reg(BITMAP1 , 0x032001E0, TRUE);
write_client_reg(BITMAP2 , 0x014000F0, TRUE);
write_client_reg(BITMAP3 , 0x014000F0, TRUE);
write_client_reg(BITMAP4 , 0x014000F0, TRUE);
write_client_reg(CLKENB , 0x0000A1EB, TRUE);
write_client_reg(PORT_ENB , 0x00000001, TRUE);
write_client_reg(PORT , 0x00000004, TRUE);
write_client_reg(PXL , 0x00000002, TRUE);
write_client_reg(MPLFBUF , 0x00000000, TRUE);
write_client_reg(HCYCLE , 0x000000FD, TRUE);
write_client_reg(HSW , 0x00000003, TRUE);
write_client_reg(HDE_START , 0x00000007, TRUE);
write_client_reg(HDE_SIZE , 0x000000EF, TRUE);
write_client_reg(VCYCLE , 0x00000325, TRUE);
write_client_reg(VSW , 0x00000001, TRUE);
write_client_reg(VDE_START , 0x00000003, TRUE);
write_client_reg(VDE_SIZE , 0x0000031F, TRUE);
write_client_reg(START , 0x00000001, TRUE);
mddi_wait(32);
write_client_reg(SSITX , 0x000800BC, TRUE);
write_client_reg(SSITX , 0x00000180, TRUE);
write_client_reg(SSITX , 0x0008003B, TRUE);
write_client_reg(SSITX , 0x00000100, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800B0, TRUE);
write_client_reg(SSITX , 0x00000116, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x000800B8, TRUE);
write_client_reg(SSITX , 0x000801FF, TRUE);
write_client_reg(SSITX , 0x000001F5, TRUE);
mddi_wait(1);
write_client_reg(SSITX , 0x00000011, TRUE);
mddi_wait(5);
write_client_reg(SSITX , 0x00000029, TRUE);
return;
}
if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) {
write_client_reg(DPSET0, 0x4BEC0066, TRUE);
write_client_reg(DPSET1, 0x00000113, TRUE);
write_client_reg(DPSUS, 0x00000000, TRUE);
write_client_reg(DPRUN, 0x00000001, TRUE);
mddi_wait(14);
write_client_reg(SYSCKENA, 0x00000001, TRUE);
write_client_reg(CLKENB, 0x000000EF, TRUE);
write_client_reg(GPIO_BLOCK_BASE, 0x03FF0000, TRUE);
write_client_reg(GPIODIR, 0x0000024D, TRUE);
write_client_reg(SYSTEM_BLOCK2_BASE, 0x00000173, TRUE);
write_client_reg(GPIOPC, 0x03C300C0, TRUE);
write_client_reg(SYSTEM_BLOCK1_BASE, 0x00000000, TRUE);
write_client_reg(GPIOIS, 0x00000000, TRUE);
write_client_reg(GPIOIEV, 0x00000001, TRUE);
write_client_reg(GPIOIC, 0x000003FF, TRUE);
write_client_reg(GPIO_BLOCK_BASE, 0x00060006, TRUE);
write_client_reg(GPIO_BLOCK_BASE, 0x00080008, TRUE);
write_client_reg(GPIO_BLOCK_BASE, 0x02000200, TRUE);
write_client_reg(DRAMPWR, 0x00000001, TRUE);
write_client_reg(TIMER0CTRL, 0x00000060, TRUE);
write_client_reg(PWM_BLOCK_BASE, 0x00001388, TRUE);
write_client_reg(PWM0OFF, 0x00001387, TRUE);
write_client_reg(TIMER1CTRL, 0x00000060, TRUE);
write_client_reg(TIMER1LOAD, 0x00001388, TRUE);
write_client_reg(PWM1OFF, 0x00001387, TRUE);
write_client_reg(TIMER0CTRL, 0x000000E0, TRUE);
write_client_reg(TIMER1CTRL, 0x000000E0, TRUE);
write_client_reg(PWMCR, 0x00000003, TRUE);
mddi_wait(1);
write_client_reg(SPI_BLOCK_BASE, 0x00063111, TRUE);
write_client_reg(SSITIME, 0x00000100, TRUE);
write_client_reg(SPI_BLOCK_BASE, 0x00063113, TRUE);
mddi_wait(1);
write_client_reg(SSITX, 0x00000000, TRUE);
mddi_wait(1);
write_client_reg(SSITX, 0x00000000, TRUE);
mddi_wait(1);
write_client_reg(SSITX, 0x00000000, TRUE);
mddi_wait(1);
write_client_reg(CLKENB, 0x0000A1EF, TRUE);
write_client_reg(START, 0x00000000, TRUE);
write_client_reg(WRSTB, 0x0000003F, TRUE);
write_client_reg(RDSTB, 0x00000432, TRUE);
write_client_reg(PORT_ENB, 0x00000002, TRUE);
write_client_reg(VSYNIF, 0x00000000, TRUE);
write_client_reg(ASY_DATA, 0x80000000, TRUE);
write_client_reg(ASY_DATB, 0x00000001, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(10);
write_client_reg(ASY_DATA, 0x80000000, TRUE);
write_client_reg(ASY_DATB, 0x80000000, TRUE);
write_client_reg(ASY_DATC, 0x80000000, TRUE);
write_client_reg(ASY_DATD, 0x80000000, TRUE);
write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
write_client_reg(ASY_DATA, 0x80000007, TRUE);
write_client_reg(ASY_DATB, 0x00004005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(20);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
write_client_reg(ASY_DATB, 0x00000000, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
write_client_reg(VSYNIF, 0x00000001, TRUE);
write_client_reg(PORT_ENB, 0x00000001, TRUE);
} else {
write_client_reg(DPSET0, 0x4BEC0066, TRUE);
write_client_reg(DPSET1, 0x00000113, TRUE);
write_client_reg(DPSUS, 0x00000000, TRUE);
write_client_reg(DPRUN, 0x00000001, TRUE);
mddi_wait(14);
write_client_reg(SYSCKENA, 0x00000001, TRUE);
write_client_reg(CLKENB, 0x000000EF, TRUE);
write_client_reg(GPIODATA, 0x03FF0000, TRUE);
write_client_reg(GPIODIR, 0x0000024D, TRUE);
write_client_reg(GPIOSEL, 0x00000173, TRUE);
write_client_reg(GPIOPC, 0x03C300C0, TRUE);
write_client_reg(WKREQ, 0x00000000, TRUE);
write_client_reg(GPIOIS, 0x00000000, TRUE);
write_client_reg(GPIOIEV, 0x00000001, TRUE);
write_client_reg(GPIOIC, 0x000003FF, TRUE);
write_client_reg(GPIODATA, 0x00060006, TRUE);
write_client_reg(GPIODATA, 0x00080008, TRUE);
write_client_reg(GPIODATA, 0x02000200, TRUE);
if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA) {
mddi_wait(400);
write_client_reg(DRAMPWR, 0x00000001, TRUE);
write_client_reg(CNT_DIS, 0x00000002, TRUE);
write_client_reg(BITMAP0, 0x01E00320, TRUE);
write_client_reg(PORT_ENB, 0x00000001, TRUE);
write_client_reg(PORT, 0x00000004, TRUE);
write_client_reg(PXL, 0x0000003A, TRUE);
write_client_reg(MPLFBUF, 0x00000000, TRUE);
write_client_reg(HCYCLE, 0x00000253, TRUE);
write_client_reg(HSW, 0x00000003, TRUE);
write_client_reg(HDE_START, 0x00000017, TRUE);
write_client_reg(HDE_SIZE, 0x0000018F, TRUE);
write_client_reg(VCYCLE, 0x000001FF, TRUE);
write_client_reg(VSW, 0x00000001, TRUE);
write_client_reg(VDE_START, 0x00000003, TRUE);
write_client_reg(VDE_SIZE, 0x000001DF, TRUE);
write_client_reg(START, 0x00000001, TRUE);
mddi_wait(1);
write_client_reg(TIMER0CTRL, 0x00000060, TRUE);
write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
write_client_reg(TIMER1CTRL, 0x00000060, TRUE);
write_client_reg(TIMER1LOAD, 0x00001388, TRUE);
write_client_reg(PWM1OFF, 0x00000087, TRUE);
} else {
write_client_reg(DRAMPWR, 0x00000001, TRUE);
write_client_reg(TIMER0CTRL, 0x00000060, TRUE);
write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
write_client_reg(TIMER1CTRL, 0x00000060, TRUE);
write_client_reg(TIMER1LOAD, 0x00001388, TRUE);
write_client_reg(PWM1OFF, 0x00001387, TRUE);
}
write_client_reg(TIMER0CTRL, 0x000000E0, TRUE);
write_client_reg(TIMER1CTRL, 0x000000E0, TRUE);
write_client_reg(PWMCR, 0x00000003, TRUE);
mddi_wait(1);
write_client_reg(SSICTL, 0x00000799, TRUE);
write_client_reg(SSITIME, 0x00000100, TRUE);
write_client_reg(SSICTL, 0x0000079b, TRUE);
write_client_reg(SSITX, 0x00000000, TRUE);
mddi_wait(1);
write_client_reg(SSITX, 0x00000000, TRUE);
mddi_wait(1);
write_client_reg(SSITX, 0x00000000, TRUE);
mddi_wait(1);
write_client_reg(SSITX, 0x000800BA, TRUE);
write_client_reg(SSITX, 0x00000111, TRUE);
write_client_reg(SSITX, 0x00080036, TRUE);
write_client_reg(SSITX, 0x00000100, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800BB, TRUE);
write_client_reg(SSITX, 0x00000100, TRUE);
write_client_reg(SSITX, 0x0008003A, TRUE);
write_client_reg(SSITX, 0x00000160, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800BF, TRUE);
write_client_reg(SSITX, 0x00000100, TRUE);
write_client_reg(SSITX, 0x000800B1, TRUE);
write_client_reg(SSITX, 0x0000015D, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800B2, TRUE);
write_client_reg(SSITX, 0x00000133, TRUE);
write_client_reg(SSITX, 0x000800B3, TRUE);
write_client_reg(SSITX, 0x00000122, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800B4, TRUE);
write_client_reg(SSITX, 0x00000102, TRUE);
write_client_reg(SSITX, 0x000800B5, TRUE);
write_client_reg(SSITX, 0x0000011F, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800B6, TRUE);
write_client_reg(SSITX, 0x00000128, TRUE);
write_client_reg(SSITX, 0x000800B7, TRUE);
write_client_reg(SSITX, 0x00000103, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800B9, TRUE);
write_client_reg(SSITX, 0x00000120, TRUE);
write_client_reg(SSITX, 0x000800BD, TRUE);
write_client_reg(SSITX, 0x00000102, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800BE, TRUE);
write_client_reg(SSITX, 0x00000100, TRUE);
write_client_reg(SSITX, 0x000800C0, TRUE);
write_client_reg(SSITX, 0x00000111, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800C1, TRUE);
write_client_reg(SSITX, 0x00000111, TRUE);
write_client_reg(SSITX, 0x000800C2, TRUE);
write_client_reg(SSITX, 0x00000111, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800C3, TRUE);
write_client_reg(SSITX, 0x0008010A, TRUE);
write_client_reg(SSITX, 0x0000010A, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800C4, TRUE);
write_client_reg(SSITX, 0x00080160, TRUE);
write_client_reg(SSITX, 0x00000160, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800C5, TRUE);
write_client_reg(SSITX, 0x00080160, TRUE);
write_client_reg(SSITX, 0x00000160, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800C6, TRUE);
write_client_reg(SSITX, 0x00080160, TRUE);
write_client_reg(SSITX, 0x00000160, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800C7, TRUE);
write_client_reg(SSITX, 0x00080133, TRUE);
write_client_reg(SSITX, 0x00000143, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800C8, TRUE);
write_client_reg(SSITX, 0x00000144, TRUE);
write_client_reg(SSITX, 0x000800C9, TRUE);
write_client_reg(SSITX, 0x00000133, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800CA, TRUE);
write_client_reg(SSITX, 0x00000100, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800EC, TRUE);
write_client_reg(SSITX, 0x00080102, TRUE);
write_client_reg(SSITX, 0x00000118, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800CF, TRUE);
write_client_reg(SSITX, 0x00000101, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800D0, TRUE);
write_client_reg(SSITX, 0x00080110, TRUE);
write_client_reg(SSITX, 0x00000104, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800D1, TRUE);
write_client_reg(SSITX, 0x00000101, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800D2, TRUE);
write_client_reg(SSITX, 0x00080100, TRUE);
write_client_reg(SSITX, 0x0000013A, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800D3, TRUE);
write_client_reg(SSITX, 0x00080100, TRUE);
write_client_reg(SSITX, 0x0000013A, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800D4, TRUE);
write_client_reg(SSITX, 0x00080124, TRUE);
write_client_reg(SSITX, 0x0000016E, TRUE);
mddi_wait(1);
write_client_reg(SSITX, 0x000800D5, TRUE);
write_client_reg(SSITX, 0x00000124, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800ED, TRUE);
write_client_reg(SSITX, 0x00080101, TRUE);
write_client_reg(SSITX, 0x0000010A, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800D6, TRUE);
write_client_reg(SSITX, 0x00000101, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800D7, TRUE);
write_client_reg(SSITX, 0x00080110, TRUE);
write_client_reg(SSITX, 0x0000010A, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800D8, TRUE);
write_client_reg(SSITX, 0x00000101, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800D9, TRUE);
write_client_reg(SSITX, 0x00080100, TRUE);
write_client_reg(SSITX, 0x00000114, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800DE, TRUE);
write_client_reg(SSITX, 0x00080100, TRUE);
write_client_reg(SSITX, 0x00000114, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800DF, TRUE);
write_client_reg(SSITX, 0x00080112, TRUE);
write_client_reg(SSITX, 0x0000013F, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800E0, TRUE);
write_client_reg(SSITX, 0x0000010B, TRUE);
write_client_reg(SSITX, 0x000800E2, TRUE);
write_client_reg(SSITX, 0x00000101, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800E3, TRUE);
write_client_reg(SSITX, 0x00000136, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800E4, TRUE);
write_client_reg(SSITX, 0x00080100, TRUE);
write_client_reg(SSITX, 0x00000103, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800E5, TRUE);
write_client_reg(SSITX, 0x00080102, TRUE);
write_client_reg(SSITX, 0x00000104, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800E6, TRUE);
write_client_reg(SSITX, 0x00000103, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800E7, TRUE);
write_client_reg(SSITX, 0x00080104, TRUE);
write_client_reg(SSITX, 0x0000010A, TRUE);
mddi_wait(2);
write_client_reg(SSITX, 0x000800E8, TRUE);
write_client_reg(SSITX, 0x00000104, TRUE);
write_client_reg(CLKENB, 0x000001EF, TRUE);
write_client_reg(START, 0x00000000, TRUE);
write_client_reg(WRSTB, 0x0000003F, TRUE);
write_client_reg(RDSTB, 0x00000432, TRUE);
write_client_reg(PORT_ENB, 0x00000002, TRUE);
write_client_reg(VSYNIF, 0x00000000, TRUE);
write_client_reg(ASY_DATA, 0x80000000, TRUE);
write_client_reg(ASY_DATB, 0x00000001, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(10);
write_client_reg(ASY_DATA, 0x80000000, TRUE);
write_client_reg(ASY_DATB, 0x80000000, TRUE);
write_client_reg(ASY_DATC, 0x80000000, TRUE);
write_client_reg(ASY_DATD, 0x80000000, TRUE);
write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
write_client_reg(ASY_DATA, 0x80000007, TRUE);
write_client_reg(ASY_DATB, 0x00004005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(20);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
write_client_reg(ASY_DATB, 0x00000000, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
write_client_reg(VSYNIF, 0x00000001, TRUE);
write_client_reg(PORT_ENB, 0x00000001, TRUE);
}
mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_SEC_STANDBY,
TOSHIBA_STATE_PRIM_SEC_READY);
}
static void toshiba_prim_start(struct msm_fb_data_type *mfd)
{
if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
return;
if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) {
write_client_reg(BITMAP1, 0x01E000F0, TRUE);
write_client_reg(BITMAP2, 0x01E000F0, TRUE);
write_client_reg(BITMAP3, 0x01E000F0, TRUE);
write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
write_client_reg(CLKENB, 0x000001EF, TRUE);
write_client_reg(PORT_ENB, 0x00000001, TRUE);
write_client_reg(PORT, 0x00000016, TRUE);
write_client_reg(PXL, 0x00000002, TRUE);
write_client_reg(MPLFBUF, 0x00000000, TRUE);
write_client_reg(HCYCLE, 0x00000185, TRUE);
write_client_reg(HSW, 0x00000018, TRUE);
write_client_reg(HDE_START, 0x0000004A, TRUE);
write_client_reg(HDE_SIZE, 0x000000EF, TRUE);
write_client_reg(VCYCLE, 0x0000028E, TRUE);
write_client_reg(VSW, 0x00000004, TRUE);
write_client_reg(VDE_START, 0x00000009, TRUE);
write_client_reg(VDE_SIZE, 0x0000027F, TRUE);
write_client_reg(START, 0x00000001, TRUE);
write_client_reg(SYSTEM_BLOCK1_BASE, 0x00000002, TRUE);
} else{
write_client_reg(VSYNIF, 0x00000001, TRUE);
write_client_reg(PORT_ENB, 0x00000001, TRUE);
write_client_reg(BITMAP1, 0x01E000F0, TRUE);
write_client_reg(BITMAP2, 0x01E000F0, TRUE);
write_client_reg(BITMAP3, 0x01E000F0, TRUE);
write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
write_client_reg(CLKENB, 0x000001EF, TRUE);
write_client_reg(PORT_ENB, 0x00000001, TRUE);
write_client_reg(PORT, 0x00000004, TRUE);
write_client_reg(PXL, 0x00000002, TRUE);
write_client_reg(MPLFBUF, 0x00000000, TRUE);
if (mddi_toshiba_61Hz_refresh) {
write_client_reg(HCYCLE, 0x000000FC, TRUE);
mddi_toshiba_rows_per_second = 39526;
mddi_toshiba_rows_per_refresh = 646;
mddi_toshiba_usecs_per_refresh = 16344;
} else {
write_client_reg(HCYCLE, 0x0000010b, TRUE);
mddi_toshiba_rows_per_second = 37313;
mddi_toshiba_rows_per_refresh = 646;
mddi_toshiba_usecs_per_refresh = 17313;
}
write_client_reg(HSW, 0x00000003, TRUE);
write_client_reg(HDE_START, 0x00000007, TRUE);
write_client_reg(HDE_SIZE, 0x000000EF, TRUE);
write_client_reg(VCYCLE, 0x00000285, TRUE);
write_client_reg(VSW, 0x00000001, TRUE);
write_client_reg(VDE_START, 0x00000003, TRUE);
write_client_reg(VDE_SIZE, 0x0000027F, TRUE);
write_client_reg(START, 0x00000001, TRUE);
mddi_wait(10);
write_client_reg(SSITX, 0x000800BC, TRUE);
write_client_reg(SSITX, 0x00000180, TRUE);
write_client_reg(SSITX, 0x0008003B, TRUE);
write_client_reg(SSITX, 0x00000100, TRUE);
mddi_wait(1);
write_client_reg(SSITX, 0x000800B0, TRUE);
write_client_reg(SSITX, 0x00000116, TRUE);
mddi_wait(1);
write_client_reg(SSITX, 0x000800B8, TRUE);
write_client_reg(SSITX, 0x000801FF, TRUE);
write_client_reg(SSITX, 0x000001F5, TRUE);
mddi_wait(1);
write_client_reg(SSITX, 0x00000011, TRUE);
write_client_reg(SSITX, 0x00000029, TRUE);
write_client_reg(WKREQ, 0x00000000, TRUE);
write_client_reg(WAKEUP, 0x00000000, TRUE);
write_client_reg(INTMSK, 0x00000001, TRUE);
}
mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_SEC_READY,
TOSHIBA_STATE_PRIM_NORMAL_MODE);
}
static void toshiba_sec_start(struct msm_fb_data_type *mfd)
{
if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
return;
write_client_reg(VSYNIF, 0x00000000, TRUE);
write_client_reg(PORT_ENB, 0x00000002, TRUE);
write_client_reg(CLKENB, 0x000011EF, TRUE);
write_client_reg(BITMAP0, 0x028001E0, TRUE);
write_client_reg(BITMAP1, 0x00000000, TRUE);
write_client_reg(BITMAP2, 0x00000000, TRUE);
write_client_reg(BITMAP3, 0x00000000, TRUE);
write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
write_client_reg(PORT, 0x00000000, TRUE);
write_client_reg(PXL, 0x00000000, TRUE);
write_client_reg(MPLFBUF, 0x00000004, TRUE);
write_client_reg(HCYCLE, 0x0000006B, TRUE);
write_client_reg(HSW, 0x00000003, TRUE);
write_client_reg(HDE_START, 0x00000007, TRUE);
write_client_reg(HDE_SIZE, 0x00000057, TRUE);
write_client_reg(VCYCLE, 0x000000E6, TRUE);
write_client_reg(VSW, 0x00000001, TRUE);
write_client_reg(VDE_START, 0x00000003, TRUE);
write_client_reg(VDE_SIZE, 0x000000DB, TRUE);
write_client_reg(ASY_DATA, 0x80000001, TRUE);
write_client_reg(ASY_DATB, 0x0000011B, TRUE);
write_client_reg(ASY_DATC, 0x80000002, TRUE);
write_client_reg(ASY_DATD, 0x00000700, TRUE);
write_client_reg(ASY_DATE, 0x80000003, TRUE);
write_client_reg(ASY_DATF, 0x00000230, TRUE);
write_client_reg(ASY_DATG, 0x80000008, TRUE);
write_client_reg(ASY_DATH, 0x00000402, TRUE);
write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
write_client_reg(ASY_DATA, 0x80000009, TRUE);
write_client_reg(ASY_DATB, 0x00000000, TRUE);
write_client_reg(ASY_DATC, 0x8000000B, TRUE);
write_client_reg(ASY_DATD, 0x00000000, TRUE);
write_client_reg(ASY_DATE, 0x8000000C, TRUE);
write_client_reg(ASY_DATF, 0x00000000, TRUE);
write_client_reg(ASY_DATG, 0x8000000D, TRUE);
write_client_reg(ASY_DATH, 0x00000409, TRUE);
write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
write_client_reg(ASY_DATA, 0x8000000E, TRUE);
write_client_reg(ASY_DATB, 0x00000409, TRUE);
write_client_reg(ASY_DATC, 0x80000030, TRUE);
write_client_reg(ASY_DATD, 0x00000000, TRUE);
write_client_reg(ASY_DATE, 0x80000031, TRUE);
write_client_reg(ASY_DATF, 0x00000100, TRUE);
write_client_reg(ASY_DATG, 0x80000032, TRUE);
write_client_reg(ASY_DATH, 0x00000104, TRUE);
write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
write_client_reg(ASY_DATA, 0x80000033, TRUE);
write_client_reg(ASY_DATB, 0x00000400, TRUE);
write_client_reg(ASY_DATC, 0x80000034, TRUE);
write_client_reg(ASY_DATD, 0x00000306, TRUE);
write_client_reg(ASY_DATE, 0x80000035, TRUE);
write_client_reg(ASY_DATF, 0x00000706, TRUE);
write_client_reg(ASY_DATG, 0x80000036, TRUE);
write_client_reg(ASY_DATH, 0x00000707, TRUE);
write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
write_client_reg(ASY_DATA, 0x80000037, TRUE);
write_client_reg(ASY_DATB, 0x00000004, TRUE);
write_client_reg(ASY_DATC, 0x80000038, TRUE);
write_client_reg(ASY_DATD, 0x00000000, TRUE);
write_client_reg(ASY_DATE, 0x80000039, TRUE);
write_client_reg(ASY_DATF, 0x00000000, TRUE);
write_client_reg(ASY_DATG, 0x8000003A, TRUE);
write_client_reg(ASY_DATH, 0x00000001, TRUE);
write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
write_client_reg(ASY_DATA, 0x80000044, TRUE);
write_client_reg(ASY_DATB, 0x0000AF00, TRUE);
write_client_reg(ASY_DATC, 0x80000045, TRUE);
write_client_reg(ASY_DATD, 0x0000DB00, TRUE);
write_client_reg(ASY_DATE, 0x08000042, TRUE);
write_client_reg(ASY_DATF, 0x0000DB00, TRUE);
write_client_reg(ASY_DATG, 0x80000021, TRUE);
write_client_reg(ASY_DATH, 0x00000000, TRUE);
write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
write_client_reg(PXL, 0x0000000C, TRUE);
write_client_reg(VSYNIF, 0x00000001, TRUE);
write_client_reg(ASY_DATA, 0x80000022, TRUE);
write_client_reg(ASY_CMDSET, 0x00000003, TRUE);
write_client_reg(START, 0x00000001, TRUE);
mddi_wait(60);
write_client_reg(PXL, 0x00000000, TRUE);
write_client_reg(VSYNIF, 0x00000000, TRUE);
write_client_reg(START, 0x00000000, TRUE);
write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
write_client_reg(ASY_DATA, 0x80000050, TRUE);
write_client_reg(ASY_DATB, 0x00000000, TRUE);
write_client_reg(ASY_DATC, 0x80000051, TRUE);
write_client_reg(ASY_DATD, 0x00000E00, TRUE);
write_client_reg(ASY_DATE, 0x80000052, TRUE);
write_client_reg(ASY_DATF, 0x00000D01, TRUE);
write_client_reg(ASY_DATG, 0x80000053, TRUE);
write_client_reg(ASY_DATH, 0x00000000, TRUE);
write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
write_client_reg(ASY_DATA, 0x80000058, TRUE);
write_client_reg(ASY_DATB, 0x00000000, TRUE);
write_client_reg(ASY_DATC, 0x8000005A, TRUE);
write_client_reg(ASY_DATD, 0x00000E01, TRUE);
write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
write_client_reg(ASY_DATA, 0x80000011, TRUE);
write_client_reg(ASY_DATB, 0x00000812, TRUE);
write_client_reg(ASY_DATC, 0x80000012, TRUE);
write_client_reg(ASY_DATD, 0x00000003, TRUE);
write_client_reg(ASY_DATE, 0x80000013, TRUE);
write_client_reg(ASY_DATF, 0x00000909, TRUE);
write_client_reg(ASY_DATG, 0x80000010, TRUE);
write_client_reg(ASY_DATH, 0x00000040, TRUE);
write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
mddi_wait(40);
write_client_reg(ASY_DATA, 0x80000010, TRUE);
write_client_reg(ASY_DATB, 0x00000340, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(60);
write_client_reg(ASY_DATA, 0x80000010, TRUE);
write_client_reg(ASY_DATB, 0x00003340, TRUE);
write_client_reg(ASY_DATC, 0x80000007, TRUE);
write_client_reg(ASY_DATD, 0x00004007, TRUE);
write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
mddi_wait(1);
write_client_reg(ASY_DATA, 0x80000007, TRUE);
write_client_reg(ASY_DATB, 0x00004017, TRUE);
write_client_reg(ASY_DATC, 0x8000005B, TRUE);
write_client_reg(ASY_DATD, 0x00000000, TRUE);
write_client_reg(ASY_DATE, 0x80000059, TRUE);
write_client_reg(ASY_DATF, 0x00000011, TRUE);
write_client_reg(ASY_CMDSET, 0x0000000D, TRUE);
write_client_reg(ASY_CMDSET, 0x0000000C, TRUE);
mddi_wait(20);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
/* LTPS I/F control */
write_client_reg(ASY_DATB, 0x00000019, TRUE);
/* Direct cmd transfer enable */
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
/* Direct cmd transfer disable */
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(20);
/* Index setting of SUB LCDD */
write_client_reg(ASY_DATA, 0x80000059, TRUE);
/* LTPS I/F control */
write_client_reg(ASY_DATB, 0x00000079, TRUE);
/* Direct cmd transfer enable */
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
/* Direct cmd transfer disable */
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(20);
/* Index setting of SUB LCDD */
write_client_reg(ASY_DATA, 0x80000059, TRUE);
/* LTPS I/F control */
write_client_reg(ASY_DATB, 0x000003FD, TRUE);
/* Direct cmd transfer enable */
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
/* Direct cmd transfer disable */
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(20);
mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_SEC_READY,
TOSHIBA_STATE_SEC_NORMAL_MODE);
}
static void toshiba_prim_lcd_off(struct msm_fb_data_type *mfd)
{
if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) {
gordon_disp_off();
} else{
/* Main panel power off (Deep standby in) */
write_client_reg(SSITX, 0x000800BC, TRUE);
write_client_reg(SSITX, 0x00000100, TRUE);
write_client_reg(SSITX, 0x00000028, TRUE);
mddi_wait(1);
write_client_reg(SSITX, 0x000800B8, TRUE);
write_client_reg(SSITX, 0x00000180, TRUE);
write_client_reg(SSITX, 0x00000102, TRUE);
write_client_reg(SSITX, 0x00000010, TRUE);
}
write_client_reg(PORT, 0x00000003, TRUE);
write_client_reg(REGENB, 0x00000001, TRUE);
mddi_wait(1);
write_client_reg(PXL, 0x00000000, TRUE);
write_client_reg(START, 0x00000000, TRUE);
write_client_reg(REGENB, 0x00000001, TRUE);
mddi_wait(3);
if (TM_GET_PID(mfd->panel.id) != LCD_SHARP_2P4_VGA) {
write_client_reg(SSITX, 0x000800B0, TRUE);
write_client_reg(SSITX, 0x00000100, TRUE);
}
mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_NORMAL_MODE,
TOSHIBA_STATE_PRIM_SEC_STANDBY);
}
static void toshiba_sec_lcd_off(struct msm_fb_data_type *mfd)
{
if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
return;
write_client_reg(VSYNIF, 0x00000000, TRUE);
write_client_reg(PORT_ENB, 0x00000002, TRUE);
write_client_reg(ASY_DATA, 0x80000007, TRUE);
write_client_reg(ASY_DATB, 0x00004016, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(2);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
write_client_reg(ASY_DATB, 0x00000019, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(2);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
write_client_reg(ASY_DATB, 0x0000000B, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(2);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
write_client_reg(ASY_DATB, 0x00000002, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(4);
write_client_reg(ASY_DATA, 0x80000010, TRUE);
write_client_reg(ASY_DATB, 0x00000300, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(4);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
write_client_reg(ASY_DATB, 0x00000000, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(2);
write_client_reg(ASY_DATA, 0x80000007, TRUE);
write_client_reg(ASY_DATB, 0x00004004, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(2);
write_client_reg(PORT, 0x00000000, TRUE);
write_client_reg(PXL, 0x00000000, TRUE);
write_client_reg(START, 0x00000000, TRUE);
write_client_reg(VSYNIF, 0x00000001, TRUE);
write_client_reg(PORT_ENB, 0x00000001, TRUE);
write_client_reg(REGENB, 0x00000001, TRUE);
mddi_toshiba_state_transition(TOSHIBA_STATE_SEC_NORMAL_MODE,
TOSHIBA_STATE_PRIM_SEC_STANDBY);
}
static void toshiba_sec_cont_update_start(struct msm_fb_data_type *mfd)
{
if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
return;
write_client_reg(VSYNIF, 0x00000000, TRUE);
write_client_reg(PORT_ENB, 0x00000002, TRUE);
write_client_reg(INTMASK, 0x00000001, TRUE);
write_client_reg(TTBUSSEL, 0x0000000B, TRUE);
write_client_reg(MONI, 0x00000008, TRUE);
write_client_reg(CLKENB, 0x000000EF, TRUE);
write_client_reg(CLKENB, 0x000010EF, TRUE);
write_client_reg(CLKENB, 0x000011EF, TRUE);
write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
write_client_reg(HCYCLE, 0x0000006B, TRUE);
write_client_reg(HSW, 0x00000003, TRUE);
write_client_reg(HDE_START, 0x00000002, TRUE);
write_client_reg(HDE_SIZE, 0x00000057, TRUE);
write_client_reg(VCYCLE, 0x000000E6, TRUE);
write_client_reg(VSW, 0x00000001, TRUE);
write_client_reg(VDE_START, 0x00000003, TRUE);
write_client_reg(VDE_SIZE, 0x000000DB, TRUE);
write_client_reg(WRSTB, 0x00000015, TRUE);
write_client_reg(MPLFBUF, 0x00000004, TRUE);
write_client_reg(ASY_DATA, 0x80000021, TRUE);
write_client_reg(ASY_DATB, 0x00000000, TRUE);
write_client_reg(ASY_DATC, 0x80000022, TRUE);
write_client_reg(ASY_CMDSET, 0x00000007, TRUE);
write_client_reg(PXL, 0x00000089, TRUE);
write_client_reg(VSYNIF, 0x00000001, TRUE);
mddi_wait(2);
}
static void toshiba_sec_cont_update_stop(struct msm_fb_data_type *mfd)
{
if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
return;
write_client_reg(PXL, 0x00000000, TRUE);
write_client_reg(VSYNIF, 0x00000000, TRUE);
write_client_reg(START, 0x00000000, TRUE);
write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
mddi_wait(3);
write_client_reg(SRST, 0x00000002, TRUE);
mddi_wait(3);
write_client_reg(SRST, 0x00000003, TRUE);
}
static void toshiba_sec_backlight_on(struct msm_fb_data_type *mfd)
{
if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
return;
write_client_reg(TIMER0CTRL, 0x00000060, TRUE);
write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
write_client_reg(PWM0OFF, 0x00000001, TRUE);
write_client_reg(TIMER1CTRL, 0x00000060, TRUE);
write_client_reg(TIMER1LOAD, 0x00001388, TRUE);
write_client_reg(PWM1OFF, 0x00001387, TRUE);
write_client_reg(TIMER0CTRL, 0x000000E0, TRUE);
write_client_reg(TIMER1CTRL, 0x000000E0, TRUE);
write_client_reg(PWMCR, 0x00000003, TRUE);
}
static void toshiba_sec_sleep_in(struct msm_fb_data_type *mfd)
{
if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
return;
write_client_reg(VSYNIF, 0x00000000, TRUE);
write_client_reg(PORT_ENB, 0x00000002, TRUE);
write_client_reg(ASY_DATA, 0x80000007, TRUE);
write_client_reg(ASY_DATB, 0x00004016, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(2);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
write_client_reg(ASY_DATB, 0x00000019, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(2);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
write_client_reg(ASY_DATB, 0x0000000B, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(2);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
write_client_reg(ASY_DATB, 0x00000002, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(4);
write_client_reg(ASY_DATA, 0x80000010, TRUE);
write_client_reg(ASY_DATB, 0x00000300, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(4);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
write_client_reg(ASY_DATB, 0x00000000, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(2);
write_client_reg(ASY_DATA, 0x80000007, TRUE);
write_client_reg(ASY_DATB, 0x00004004, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(2);
write_client_reg(PORT, 0x00000000, TRUE);
write_client_reg(PXL, 0x00000000, TRUE);
write_client_reg(START, 0x00000000, TRUE);
write_client_reg(REGENB, 0x00000001, TRUE);
/* Sleep in sequence */
write_client_reg(ASY_DATA, 0x80000010, TRUE);
write_client_reg(ASY_DATB, 0x00000302, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
}
static void toshiba_sec_sleep_out(struct msm_fb_data_type *mfd)
{
if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
return;
write_client_reg(VSYNIF, 0x00000000, TRUE);
write_client_reg(PORT_ENB, 0x00000002, TRUE);
write_client_reg(ASY_DATA, 0x80000010, TRUE);
write_client_reg(ASY_DATB, 0x00000300, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
/* Display ON sequence */
write_client_reg(ASY_DATA, 0x80000011, TRUE);
write_client_reg(ASY_DATB, 0x00000812, TRUE);
write_client_reg(ASY_DATC, 0x80000012, TRUE);
write_client_reg(ASY_DATD, 0x00000003, TRUE);
write_client_reg(ASY_DATE, 0x80000013, TRUE);
write_client_reg(ASY_DATF, 0x00000909, TRUE);
write_client_reg(ASY_DATG, 0x80000010, TRUE);
write_client_reg(ASY_DATH, 0x00000040, TRUE);
write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
mddi_wait(4);
write_client_reg(ASY_DATA, 0x80000010, TRUE);
write_client_reg(ASY_DATB, 0x00000340, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(6);
write_client_reg(ASY_DATA, 0x80000010, TRUE);
write_client_reg(ASY_DATB, 0x00003340, TRUE);
write_client_reg(ASY_DATC, 0x80000007, TRUE);
write_client_reg(ASY_DATD, 0x00004007, TRUE);
write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
mddi_wait(1);
write_client_reg(ASY_DATA, 0x80000007, TRUE);
write_client_reg(ASY_DATB, 0x00004017, TRUE);
write_client_reg(ASY_DATC, 0x8000005B, TRUE);
write_client_reg(ASY_DATD, 0x00000000, TRUE);
write_client_reg(ASY_DATE, 0x80000059, TRUE);
write_client_reg(ASY_DATF, 0x00000011, TRUE);
write_client_reg(ASY_CMDSET, 0x0000000D, TRUE);
write_client_reg(ASY_CMDSET, 0x0000000C, TRUE);
mddi_wait(2);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
write_client_reg(ASY_DATB, 0x00000019, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(2);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
write_client_reg(ASY_DATB, 0x00000079, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(2);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
write_client_reg(ASY_DATB, 0x000003FD, TRUE);
write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
mddi_wait(2);
}
static void mddi_toshiba_lcd_set_backlight(struct msm_fb_data_type *mfd)
{
int32 level;
int ret = -EPERM;
int max = mfd->panel_info.bl_max;
int min = mfd->panel_info.bl_min;
if (mddi_toshiba_pdata && mddi_toshiba_pdata->pmic_backlight) {
ret = mddi_toshiba_pdata->pmic_backlight(mfd->bl_level);
if (!ret)
return;
}
if (ret && mddi_toshiba_pdata && mddi_toshiba_pdata->backlight_level) {
level = mddi_toshiba_pdata->backlight_level(mfd->bl_level,
max, min);
if (level < 0)
return;
if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA)
write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
} else {
if (!max)
level = 0;
else
level = (mfd->bl_level * 4999) / max;
}
write_client_reg(PWM0OFF, level, TRUE);
}
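/*
 * Worked example of the fallback scaling above (hypothetical numbers,
 * not from the original source): with panel_info.bl_max = 15 and
 * bl_level = 8, level = (8 * 4999) / 15 = 2666, i.e. roughly half of
 * the 0..4999 PWM compare range implied by the TIMER0LOAD value of
 * 0x1388 (5000) programmed in toshiba_sec_backlight_on().
 */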
static void mddi_toshiba_vsync_set_handler(msm_fb_vsync_handler_type handler, /* ISR to be executed */
void *arg)
{
boolean error = FALSE;
unsigned long flags;
/* Disable interrupts */
spin_lock_irqsave(&mddi_host_spin_lock, flags);
/* INTLOCK(); */
if (mddi_toshiba_vsync_handler != NULL) {
error = TRUE;
} else {
/* Register the handler for this particular GROUP interrupt source */
mddi_toshiba_vsync_handler = handler;
mddi_toshiba_vsync_handler_arg = arg;
}
/* Restore interrupts */
spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
/* MDDI_INTFREE(); */
if (error) {
MDDI_MSG_ERR("MDDI: Previous Vsync handler never called\n");
} else {
/* Enable the vsync wakeup */
mddi_queue_register_write(INTMSK, 0x0000, FALSE, 0);
mddi_toshiba_vsync_attempts = 1;
mddi_vsync_detect_enabled = TRUE;
}
} /* mddi_toshiba_vsync_set_handler */
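/*
 * Usage sketch (hypothetical caller, not part of the original code):
 * msm_fb reaches this routine through
 * toshiba_panel_data.set_vsync_notifier, roughly as in
 *
 *	panel_data->set_vsync_notifier(my_vsync_isr, my_arg);
 *
 * The handler is called once from mddi_toshiba_lcd_vsync_detected() on
 * the next client-initiated wakeup and then cleared, so it has to be
 * re-registered for every frame that needs a vsync callback.
 */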
static void mddi_toshiba_lcd_vsync_detected(boolean detected)
{
/* static timetick_type start_time = 0; */
static struct timeval start_time;
static boolean first_time = TRUE;
/* uint32 mdp_cnt_val = 0; */
/* timetick_type elapsed_us; */
struct timeval now;
uint32 elapsed_us;
uint32 num_vsyncs;
if ((detected) || (mddi_toshiba_vsync_attempts > 5)) {
if ((detected) && (mddi_toshiba_monitor_refresh_value)) {
/* if (start_time != 0) */
if (!first_time) {
jiffies_to_timeval(jiffies, &now);
elapsed_us =
(now.tv_sec - start_time.tv_sec) * 1000000 +
now.tv_usec - start_time.tv_usec;
/*
* LCD is configured for a refresh every
* mddi_toshiba_usecs_per_refresh microseconds, so to determine the
* number of vsyncs that have occurred since the last measurement,
* add half a refresh period to the elapsed time and divide by the
* refresh period.
*/
num_vsyncs = (elapsed_us +
(mddi_toshiba_usecs_per_refresh >>
1)) /
mddi_toshiba_usecs_per_refresh;
/*
* LCD is configured for mddi_toshiba_rows_per_refresh hsyncs (rows)
* per refresh cycle. Calculate a new rows_per_second
* value based upon these new measurements.
* MDP can update with this new value.
*/
mddi_toshiba_rows_per_second =
(mddi_toshiba_rows_per_refresh * 1000 *
num_vsyncs) / (elapsed_us / 1000);
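/*
 * Worked example (hypothetical numbers): with
 * mddi_toshiba_usecs_per_refresh = 16667 and elapsed_us = 1000000,
 * num_vsyncs = (1000000 + 8333) / 16667 = 60; with
 * mddi_toshiba_rows_per_refresh = 480 this gives
 * rows_per_second = (480 * 1000 * 60) / (1000000 / 1000) = 28800.
 */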
}
/* start_time = timetick_get(); */
first_time = FALSE;
jiffies_to_timeval(jiffies, &start_time);
if (mddi_toshiba_report_refresh_measurements) {
(void)mddi_queue_register_read_int(VPOS,
&mddi_toshiba_curr_vpos);
/* mdp_cnt_val = MDP_LINE_COUNT; */
}
}
/* if detected = TRUE, client initiated wakeup was detected */
if (mddi_toshiba_vsync_handler != NULL) {
(*mddi_toshiba_vsync_handler)
(mddi_toshiba_vsync_handler_arg);
mddi_toshiba_vsync_handler = NULL;
}
mddi_vsync_detect_enabled = FALSE;
mddi_toshiba_vsync_attempts = 0;
/* need to disable the interrupt wakeup */
if (!mddi_queue_register_write_int(INTMSK, 0x0001))
MDDI_MSG_ERR("Vsync interrupt disable failed!\n");
if (!detected) {
/* give up after 5 failed attempts but show error */
MDDI_MSG_NOTICE("Vsync detection failed!\n");
} else if ((mddi_toshiba_monitor_refresh_value) &&
(mddi_toshiba_report_refresh_measurements)) {
MDDI_MSG_NOTICE(" Last Line Counter=%d!\n",
mddi_toshiba_curr_vpos);
/* MDDI_MSG_NOTICE(" MDP Line Counter=%d!\n",mdp_cnt_val); */
MDDI_MSG_NOTICE(" Lines Per Second=%d!\n",
mddi_toshiba_rows_per_second);
}
/* clear the interrupt */
if (!mddi_queue_register_write_int(INTFLG, 0x0001))
MDDI_MSG_ERR("Vsync interrupt clear failed!\n");
} else {
/* if detected = FALSE, we woke up from hibernation, but did not
* detect client initiated wakeup.
*/
mddi_toshiba_vsync_attempts++;
}
}
static void mddi_toshiba_prim_init(struct msm_fb_data_type *mfd)
{
switch (toshiba_state) {
case TOSHIBA_STATE_PRIM_SEC_READY:
break;
case TOSHIBA_STATE_OFF:
toshiba_state = TOSHIBA_STATE_PRIM_SEC_STANDBY;
toshiba_common_initial_setup(mfd);
break;
case TOSHIBA_STATE_PRIM_SEC_STANDBY:
toshiba_common_initial_setup(mfd);
break;
case TOSHIBA_STATE_SEC_NORMAL_MODE:
toshiba_sec_cont_update_stop(mfd);
toshiba_sec_sleep_in(mfd);
toshiba_sec_sleep_out(mfd);
toshiba_sec_lcd_off(mfd);
toshiba_common_initial_setup(mfd);
break;
default:
MDDI_MSG_ERR("mddi_toshiba_prim_init from state %d\n",
toshiba_state);
}
toshiba_prim_start(mfd);
if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA)
gordon_disp_init();
mddi_host_write_pix_attr_reg(0x00C3);
}
static void mddi_toshiba_sec_init(struct msm_fb_data_type *mfd)
{
switch (toshiba_state) {
case TOSHIBA_STATE_PRIM_SEC_READY:
break;
case TOSHIBA_STATE_PRIM_SEC_STANDBY:
toshiba_common_initial_setup(mfd);
break;
case TOSHIBA_STATE_PRIM_NORMAL_MODE:
toshiba_prim_lcd_off(mfd);
toshiba_common_initial_setup(mfd);
break;
default:
MDDI_MSG_ERR("mddi_toshiba_sec_init from state %d\n",
toshiba_state);
}
toshiba_sec_start(mfd);
toshiba_sec_backlight_on(mfd);
toshiba_sec_cont_update_start(mfd);
mddi_host_write_pix_attr_reg(0x0400);
}
static void mddi_toshiba_lcd_powerdown(struct msm_fb_data_type *mfd)
{
switch (toshiba_state) {
case TOSHIBA_STATE_PRIM_SEC_READY:
mddi_toshiba_prim_init(mfd);
mddi_toshiba_lcd_powerdown(mfd);
return;
case TOSHIBA_STATE_PRIM_SEC_STANDBY:
break;
case TOSHIBA_STATE_PRIM_NORMAL_MODE:
toshiba_prim_lcd_off(mfd);
break;
case TOSHIBA_STATE_SEC_NORMAL_MODE:
toshiba_sec_cont_update_stop(mfd);
toshiba_sec_sleep_in(mfd);
toshiba_sec_sleep_out(mfd);
toshiba_sec_lcd_off(mfd);
break;
default:
MDDI_MSG_ERR("mddi_toshiba_lcd_powerdown from state %d\n",
toshiba_state);
}
}
static int mddi_sharpgordon_firsttime = 1;
static int mddi_toshiba_lcd_on(struct platform_device *pdev)
{
struct msm_fb_data_type *mfd;
mfd = platform_get_drvdata(pdev);
if (!mfd)
return -ENODEV;
if (mfd->key != MFD_KEY)
return -EINVAL;
if (TM_GET_DID(mfd->panel.id) == TOSHIBA_VGA_PRIM)
mddi_toshiba_prim_init(mfd);
else
mddi_toshiba_sec_init(mfd);
if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) {
if (mddi_sharpgordon_firsttime) {
mddi_sharpgordon_firsttime = 0;
write_client_reg(REGENB, 0x00000001, TRUE);
}
}
return 0;
}
static int mddi_toshiba_lcd_off(struct platform_device *pdev)
{
mddi_toshiba_lcd_powerdown(platform_get_drvdata(pdev));
return 0;
}
static int __init mddi_toshiba_lcd_probe(struct platform_device *pdev)
{
if (pdev->id == 0) {
mddi_toshiba_pdata = pdev->dev.platform_data;
return 0;
}
msm_fb_add_device(pdev);
return 0;
}
static struct platform_driver this_driver = {
.probe = mddi_toshiba_lcd_probe,
.driver = {
.name = "mddi_toshiba",
},
};
static struct msm_fb_panel_data toshiba_panel_data = {
.on = mddi_toshiba_lcd_on,
.off = mddi_toshiba_lcd_off,
};
static int ch_used[3];
int mddi_toshiba_device_register(struct msm_panel_info *pinfo,
u32 channel, u32 panel)
{
struct platform_device *pdev = NULL;
int ret;
if ((channel >= 3) || ch_used[channel])
return -ENODEV;
if ((channel != TOSHIBA_VGA_PRIM) &&
mddi_toshiba_pdata && mddi_toshiba_pdata->panel_num)
if (mddi_toshiba_pdata->panel_num() < 2)
return -ENODEV;
ch_used[channel] = TRUE;
pdev = platform_device_alloc("mddi_toshiba", (panel << 8)|channel);
if (!pdev)
return -ENOMEM;
if (channel == TOSHIBA_VGA_PRIM) {
toshiba_panel_data.set_backlight =
mddi_toshiba_lcd_set_backlight;
if (pinfo->lcd.vsync_enable) {
toshiba_panel_data.set_vsync_notifier =
mddi_toshiba_vsync_set_handler;
mddi_lcd.vsync_detected =
mddi_toshiba_lcd_vsync_detected;
}
} else {
toshiba_panel_data.set_backlight = NULL;
toshiba_panel_data.set_vsync_notifier = NULL;
}
toshiba_panel_data.panel_info = *pinfo;
ret = platform_device_add_data(pdev, &toshiba_panel_data,
sizeof(toshiba_panel_data));
if (ret) {
printk(KERN_ERR
"%s: platform_device_add_data failed!\n", __func__);
goto err_device_put;
}
ret = platform_device_add(pdev);
if (ret) {
printk(KERN_ERR
"%s: platform_device_register failed!\n", __func__);
goto err_device_put;
}
return 0;
err_device_put:
platform_device_put(pdev);
return ret;
}
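/*
 * Registration sketch (hypothetical board code, not part of the
 * original file): a panel setup file fills in a struct msm_panel_info
 * and hands it to the helper above, e.g.
 *
 *	pinfo.lcd.vsync_enable = TRUE;
 *	ret = mddi_toshiba_device_register(&pinfo, TOSHIBA_VGA_PRIM,
 *					   LCD_SHARP_2P4_VGA);
 *
 * Channels other than TOSHIBA_VGA_PRIM are only registered when the
 * platform data reports at least two panels.
 */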
static int __init mddi_toshiba_lcd_init(void)
{
return platform_driver_register(&this_driver);
}
module_init(mddi_toshiba_lcd_init);
| gpl-2.0 |
airikka/denver_tac-70072_kernel | drivers/macintosh/rack-meter.c | 3058 | 15820 | /*
* RackMac vu-meter driver
*
* (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Released under the term of the GNU GPL v2.
*
* Support the CPU-meter LEDs of the Xserve G5
*
* TODO: Implement PWM to do variable intensity and provide userland
* interface for fun. Also, the CPU-meter could be made nicer by being
* a bit less "immediate" but giving instead a more average load over
* time. Patches welcome :-)
*
*/
#undef DEBUG
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel_stat.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/dbdma.h>
#include <asm/macio.h>
#include <asm/keylargo.h>
/* Number of samples in a sample buffer */
#define SAMPLE_COUNT 256
/* CPU meter sampling rate in ms */
#define CPU_SAMPLING_RATE 250
struct rackmeter_dma {
struct dbdma_cmd cmd[4] ____cacheline_aligned;
u32 mark ____cacheline_aligned;
u32 buf1[SAMPLE_COUNT] ____cacheline_aligned;
u32 buf2[SAMPLE_COUNT] ____cacheline_aligned;
} ____cacheline_aligned;
struct rackmeter_cpu {
struct delayed_work sniffer;
struct rackmeter *rm;
cputime64_t prev_wall;
cputime64_t prev_idle;
int zero;
} ____cacheline_aligned;
struct rackmeter {
struct macio_dev *mdev;
unsigned int irq;
struct device_node *i2s;
u8 *ubuf;
struct dbdma_regs __iomem *dma_regs;
void __iomem *i2s_regs;
dma_addr_t dma_buf_p;
struct rackmeter_dma *dma_buf_v;
int stale_irq;
struct rackmeter_cpu cpu[2];
int paused;
struct mutex sem;
};
/* To be set as a tunable */
static int rackmeter_ignore_nice;
/* This GPIO is whacked by the OS X driver when initializing */
#define RACKMETER_MAGIC_GPIO 0x78
/* This is copied from cpufreq_ondemand, maybe we should put it in
* a common header somewhere
*/
static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
cputime64_t retval;
retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
kstat_cpu(cpu).cpustat.iowait);
if (rackmeter_ignore_nice)
retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
return retval;
}
static void rackmeter_setup_i2s(struct rackmeter *rm)
{
struct macio_chip *macio = rm->mdev->bus->chip;
/* First whack magic GPIO */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, RACKMETER_MAGIC_GPIO, 5);
/* Call feature code to enable the sound channel and the proper
* clock sources
*/
pmac_call_feature(PMAC_FTR_SOUND_CHIP_ENABLE, rm->i2s, 0, 1);
/* Power i2s and stop i2s clock. We whack MacIO FCRs directly for now.
* This is a bit racy, thus we should add new platform functions to
* handle that. snd-aoa needs that too
*/
MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_ENABLE);
MACIO_BIC(KEYLARGO_FCR1, KL1_I2S0_CLK_ENABLE_BIT);
(void)MACIO_IN32(KEYLARGO_FCR1);
udelay(10);
/* Then setup i2s. For now, we use the same magic value that
* the OS X driver seems to use. We might want to play around
* with the clock divisors later
*/
out_le32(rm->i2s_regs + 0x10, 0x01fa0000);
(void)in_le32(rm->i2s_regs + 0x10);
udelay(10);
/* Fully restart i2s*/
MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_CELL_ENABLE |
KL1_I2S0_CLK_ENABLE_BIT);
(void)MACIO_IN32(KEYLARGO_FCR1);
udelay(10);
}
static void rackmeter_set_default_pattern(struct rackmeter *rm)
{
int i;
for (i = 0; i < 16; i++) {
if (i < 8)
rm->ubuf[i] = (i & 1) * 255;
else
rm->ubuf[i] = ((~i) & 1) * 255;
}
}
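/*
 * Evaluating the loop above: ubuf[0..7] ends up as 0,255,0,255,... and
 * ubuf[8..15] as 255,0,255,0,..., i.e. every other LED fully lit with
 * the pattern mirrored between the two 8-LED halves.
 */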
static void rackmeter_do_pause(struct rackmeter *rm, int pause)
{
struct rackmeter_dma *rdma = rm->dma_buf_v;
pr_debug("rackmeter: %s\n", pause ? "paused" : "started");
rm->paused = pause;
if (pause) {
DBDMA_DO_STOP(rm->dma_regs);
return;
}
memset(rdma->buf1, 0, SAMPLE_COUNT * sizeof(u32));
memset(rdma->buf2, 0, SAMPLE_COUNT * sizeof(u32));
rm->dma_buf_v->mark = 0;
mb();
out_le32(&rm->dma_regs->cmdptr_hi, 0);
out_le32(&rm->dma_regs->cmdptr, rm->dma_buf_p);
out_le32(&rm->dma_regs->control, (RUN << 16) | RUN);
}
static void rackmeter_setup_dbdma(struct rackmeter *rm)
{
struct rackmeter_dma *db = rm->dma_buf_v;
struct dbdma_cmd *cmd = db->cmd;
/* Make sure dbdma is reset */
DBDMA_DO_RESET(rm->dma_regs);
pr_debug("rackmeter: mark offset=0x%zx\n",
offsetof(struct rackmeter_dma, mark));
pr_debug("rackmeter: buf1 offset=0x%zx\n",
offsetof(struct rackmeter_dma, buf1));
pr_debug("rackmeter: buf2 offset=0x%zx\n",
offsetof(struct rackmeter_dma, buf2));
/* Prepare 4 dbdma commands for the 2 buffers */
memset(cmd, 0, 4 * sizeof(struct dbdma_cmd));
st_le16(&cmd->req_count, 4);
st_le16(&cmd->command, STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
st_le32(&cmd->phy_addr, rm->dma_buf_p +
offsetof(struct rackmeter_dma, mark));
st_le32(&cmd->cmd_dep, 0x02000000);
cmd++;
st_le16(&cmd->req_count, SAMPLE_COUNT * 4);
st_le16(&cmd->command, OUTPUT_MORE);
st_le32(&cmd->phy_addr, rm->dma_buf_p +
offsetof(struct rackmeter_dma, buf1));
cmd++;
st_le16(&cmd->req_count, 4);
st_le16(&cmd->command, STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
st_le32(&cmd->phy_addr, rm->dma_buf_p +
offsetof(struct rackmeter_dma, mark));
st_le32(&cmd->cmd_dep, 0x01000000);
cmd++;
st_le16(&cmd->req_count, SAMPLE_COUNT * 4);
st_le16(&cmd->command, OUTPUT_MORE | BR_ALWAYS);
st_le32(&cmd->phy_addr, rm->dma_buf_p +
offsetof(struct rackmeter_dma, buf2));
st_le32(&cmd->cmd_dep, rm->dma_buf_p);
rackmeter_do_pause(rm, 0);
}
static void rackmeter_do_timer(struct work_struct *work)
{
struct rackmeter_cpu *rcpu =
container_of(work, struct rackmeter_cpu, sniffer.work);
struct rackmeter *rm = rcpu->rm;
unsigned int cpu = smp_processor_id();
cputime64_t cur_jiffies, total_idle_ticks;
unsigned int total_ticks, idle_ticks;
int i, offset, load, cumm, pause;
cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
total_ticks = (unsigned int)cputime64_sub(cur_jiffies,
rcpu->prev_wall);
rcpu->prev_wall = cur_jiffies;
total_idle_ticks = get_cpu_idle_time(cpu);
idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
rcpu->prev_idle);
rcpu->prev_idle = total_idle_ticks;
/* We do a very dumb calculation to update the LEDs for now,
* we'll do better once we have actual PWM implemented
*/
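/*
 * Worked example (hypothetical numbers): with total_ticks = 100 and
 * idle_ticks = 40, load = (9 * 60) / 100 = 5, so the first five of
 * this CPU's eight ubuf slots are set to 0xff and the other three to
 * zero.
 */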
load = (9 * (total_ticks - idle_ticks)) / total_ticks;
offset = cpu << 3;
cumm = 0;
for (i = 0; i < 8; i++) {
u8 ub = (load > i) ? 0xff : 0;
rm->ubuf[i + offset] = ub;
cumm |= ub;
}
rcpu->zero = (cumm == 0);
/* Now check if LEDs are all 0, we can stop DMA */
pause = (rm->cpu[0].zero && rm->cpu[1].zero);
if (pause != rm->paused) {
mutex_lock(&rm->sem);
pause = (rm->cpu[0].zero && rm->cpu[1].zero);
rackmeter_do_pause(rm, pause);
mutex_unlock(&rm->sem);
}
schedule_delayed_work_on(cpu, &rcpu->sniffer,
msecs_to_jiffies(CPU_SAMPLING_RATE));
}
static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm)
{
unsigned int cpu;
/* This driver works only with 1 or 2 CPUs numbered 0 and 1,
* but that's really all we have on Apple Xserve. It doesn't
* play very nicely with CPU hotplug either, but we don't do that
* on those machines yet
*/
rm->cpu[0].rm = rm;
INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer);
rm->cpu[1].rm = rm;
INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer);
for_each_online_cpu(cpu) {
struct rackmeter_cpu *rcpu;
if (cpu > 1)
continue;
rcpu = &rm->cpu[cpu];
rcpu->prev_idle = get_cpu_idle_time(cpu);
rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64());
schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
msecs_to_jiffies(CPU_SAMPLING_RATE));
}
}
static void rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
{
cancel_delayed_work_sync(&rm->cpu[0].sniffer);
cancel_delayed_work_sync(&rm->cpu[1].sniffer);
}
static int __devinit rackmeter_setup(struct rackmeter *rm)
{
pr_debug("rackmeter: setting up i2s..\n");
rackmeter_setup_i2s(rm);
pr_debug("rackmeter: setting up default pattern..\n");
rackmeter_set_default_pattern(rm);
pr_debug("rackmeter: setting up dbdma..\n");
rackmeter_setup_dbdma(rm);
pr_debug("rackmeter: start CPU measurements..\n");
rackmeter_init_cpu_sniffer(rm);
printk(KERN_INFO "RackMeter initialized\n");
return 0;
}
/* XXX FIXME: No PWM yet, this is 0/1 */
static u32 rackmeter_calc_sample(struct rackmeter *rm, unsigned int index)
{
int led;
u32 sample = 0;
for (led = 0; led < 16; led++) {
sample >>= 1;
sample |= ((rm->ubuf[led] >= 0x80) << 15);
}
return (sample << 17) | (sample >> 15);
}
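/*
 * Each LED contributes one on/off bit (ubuf value >= 0x80) to a 16-bit
 * pattern; the return statement is a 32-bit rotate-left by 17 of that
 * pattern, presumably to line the bits up with the positions the i2s
 * lightshow serialiser samples.
 */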
static irqreturn_t rackmeter_irq(int irq, void *arg)
{
struct rackmeter *rm = arg;
struct rackmeter_dma *db = rm->dma_buf_v;
unsigned int mark, i;
u32 *buf;
/* Flush PCI buffers with an MMIO read. Maybe we could actually
* check the status one day ... in case things go wrong, though
* this never happened to me
*/
(void)in_le32(&rm->dma_regs->status);
/* Make sure the CPU gets us in order */
rmb();
/* Read mark */
mark = db->mark;
if (mark != 1 && mark != 2) {
printk(KERN_WARNING "rackmeter: Incorrect DMA mark 0x%08x\n",
mark);
/* We allow for 3 errors like that (stale DBDMA irqs) */
if (++rm->stale_irq > 3) {
printk(KERN_ERR "rackmeter: Too many errors,"
" stopping DMA\n");
DBDMA_DO_RESET(rm->dma_regs);
}
return IRQ_HANDLED;
}
/* Next buffer we need to fill is mark value */
buf = mark == 1 ? db->buf1 : db->buf2;
/* Fill it now. This routine converts the 8 bits depth sample array
* into the PWM bitmap for each LED.
*/
for (i = 0; i < SAMPLE_COUNT; i++)
buf[i] = rackmeter_calc_sample(rm, i);
return IRQ_HANDLED;
}
static int __devinit rackmeter_probe(struct macio_dev* mdev,
const struct of_device_id *match)
{
struct device_node *i2s = NULL, *np = NULL;
struct rackmeter *rm = NULL;
struct resource ri2s, rdma;
int rc = -ENODEV;
pr_debug("rackmeter_probe()\n");
/* Get i2s-a node */
while ((i2s = of_get_next_child(mdev->ofdev.dev.of_node, i2s)) != NULL)
if (strcmp(i2s->name, "i2s-a") == 0)
break;
if (i2s == NULL) {
pr_debug(" i2s-a child not found\n");
goto bail;
}
/* Get lightshow or virtual sound */
while ((np = of_get_next_child(i2s, np)) != NULL) {
if (strcmp(np->name, "lightshow") == 0)
break;
if ((strcmp(np->name, "sound") == 0) &&
of_get_property(np, "virtual", NULL) != NULL)
break;
}
if (np == NULL) {
pr_debug(" lightshow or sound+virtual child not found\n");
goto bail;
}
/* Create and initialize our instance data */
rm = kzalloc(sizeof(struct rackmeter), GFP_KERNEL);
if (rm == NULL) {
printk(KERN_ERR "rackmeter: failed to allocate memory !\n");
rc = -ENOMEM;
goto bail_release;
}
rm->mdev = mdev;
rm->i2s = i2s;
mutex_init(&rm->sem);
dev_set_drvdata(&mdev->ofdev.dev, rm);
/* Check resources availability. We need at least resource 0 and 1 */
#if 0 /* Use that when i2s-a is finally an mdev per-se */
if (macio_resource_count(mdev) < 2 || macio_irq_count(mdev) < 2) {
printk(KERN_ERR
"rackmeter: found match but lacks resources: %s"
" (%d resources, %d interrupts)\n",
mdev->ofdev.node->full_name,
macio_resource_count(mdev), macio_irq_count(mdev));
rc = -ENXIO;
goto bail_free;
}
if (macio_request_resources(mdev, "rackmeter")) {
printk(KERN_ERR
"rackmeter: failed to request resources: %s\n",
mdev->ofdev.node->full_name);
rc = -EBUSY;
goto bail_free;
}
rm->irq = macio_irq(mdev, 1);
#else
rm->irq = irq_of_parse_and_map(i2s, 1);
if (rm->irq == NO_IRQ ||
of_address_to_resource(i2s, 0, &ri2s) ||
of_address_to_resource(i2s, 1, &rdma)) {
printk(KERN_ERR
"rackmeter: found match but lacks resources: %s",
mdev->ofdev.dev.of_node->full_name);
rc = -ENXIO;
goto bail_free;
}
#endif
pr_debug(" i2s @0x%08x\n", (unsigned int)ri2s.start);
pr_debug(" dma @0x%08x\n", (unsigned int)rdma.start);
pr_debug(" irq %d\n", rm->irq);
rm->ubuf = (u8 *)__get_free_page(GFP_KERNEL);
if (rm->ubuf == NULL) {
printk(KERN_ERR
"rackmeter: failed to allocate samples page !\n");
rc = -ENOMEM;
goto bail_release;
}
rm->dma_buf_v = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
sizeof(struct rackmeter_dma),
&rm->dma_buf_p, GFP_KERNEL);
if (rm->dma_buf_v == NULL) {
printk(KERN_ERR
"rackmeter: failed to allocate dma buffer !\n");
rc = -ENOMEM;
goto bail_free_samples;
}
#if 0
rm->i2s_regs = ioremap(macio_resource_start(mdev, 0), 0x1000);
#else
rm->i2s_regs = ioremap(ri2s.start, 0x1000);
#endif
if (rm->i2s_regs == NULL) {
printk(KERN_ERR
"rackmeter: failed to map i2s registers !\n");
rc = -ENXIO;
goto bail_free_dma;
}
#if 0
rm->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x100);
#else
rm->dma_regs = ioremap(rdma.start, 0x100);
#endif
if (rm->dma_regs == NULL) {
printk(KERN_ERR
"rackmeter: failed to map dma registers !\n");
rc = -ENXIO;
goto bail_unmap_i2s;
}
rc = rackmeter_setup(rm);
if (rc) {
printk(KERN_ERR
"rackmeter: failed to initialize !\n");
rc = -ENXIO;
goto bail_unmap_dma;
}
rc = request_irq(rm->irq, rackmeter_irq, 0, "rackmeter", rm);
if (rc != 0) {
printk(KERN_ERR
"rackmeter: failed to request interrupt !\n");
goto bail_stop_dma;
}
of_node_put(np);
return 0;
bail_stop_dma:
DBDMA_DO_RESET(rm->dma_regs);
bail_unmap_dma:
iounmap(rm->dma_regs);
bail_unmap_i2s:
iounmap(rm->i2s_regs);
bail_free_dma:
dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
sizeof(struct rackmeter_dma),
rm->dma_buf_v, rm->dma_buf_p);
bail_free_samples:
free_page((unsigned long)rm->ubuf);
bail_release:
#if 0
macio_release_resources(mdev);
#endif
bail_free:
kfree(rm);
bail:
of_node_put(i2s);
of_node_put(np);
dev_set_drvdata(&mdev->ofdev.dev, NULL);
return rc;
}
static int __devexit rackmeter_remove(struct macio_dev* mdev)
{
struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
/* Stop CPU sniffer timer & work queues */
rackmeter_stop_cpu_sniffer(rm);
/* Clear reference to private data */
dev_set_drvdata(&mdev->ofdev.dev, NULL);
/* Stop/reset dbdma */
DBDMA_DO_RESET(rm->dma_regs);
/* Release the IRQ */
free_irq(rm->irq, rm);
/* Unmap registers */
iounmap(rm->dma_regs);
iounmap(rm->i2s_regs);
/* Free DMA */
dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
sizeof(struct rackmeter_dma),
rm->dma_buf_v, rm->dma_buf_p);
/* Free samples */
free_page((unsigned long)rm->ubuf);
#if 0
/* Release resources */
macio_release_resources(mdev);
#endif
/* Get rid of me */
kfree(rm);
return 0;
}
static int rackmeter_shutdown(struct macio_dev* mdev)
{
struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
if (rm == NULL)
return -ENODEV;
/* Stop CPU sniffer timer & work queues */
rackmeter_stop_cpu_sniffer(rm);
/* Stop/reset dbdma */
DBDMA_DO_RESET(rm->dma_regs);
return 0;
}
static struct of_device_id rackmeter_match[] = {
{ .name = "i2s" },
{ }
};
static struct macio_driver rackmeter_driver = {
.driver = {
.name = "rackmeter",
.owner = THIS_MODULE,
.of_match_table = rackmeter_match,
},
.probe = rackmeter_probe,
.remove = __devexit_p(rackmeter_remove),
.shutdown = rackmeter_shutdown,
};
static int __init rackmeter_init(void)
{
pr_debug("rackmeter_init()\n");
return macio_register_driver(&rackmeter_driver);
}
static void __exit rackmeter_exit(void)
{
pr_debug("rackmeter_exit()\n");
macio_unregister_driver(&rackmeter_driver);
}
module_init(rackmeter_init);
module_exit(rackmeter_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("RackMeter: Support vu-meter on XServe front panel");
| gpl-2.0 |
Tof37/Caf-msm-3.4 | arch/arm/mach-msm/btpintest.c | 3314 | 5465 | /* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <mach/gpio.h>
#include <mach/vreg.h>
#include <mach/gpiomux.h>
#define VERSION "1.0"
struct dentry *pin_debugfs_dent;
/* UART GPIO lines for 8660 */
enum uartpins {
UARTDM_TX = 53,
UARTDM_RX = 54,
UARTDM_CTS = 55,
UARTDM_RFR = 56
};
/* Aux PCM GPIO lines for 8660 */
enum auxpcmpins {
AUX_PCM_CLK = 114,
AUX_PCM_SYNC = 113,
AUX_PCM_DIN = 112,
AUX_PCM_DOUT = 111
};
/* Number of UART and PCM pins */
#define PIN_COUNT 8
static struct gpiomux_setting pin_test_config = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_2MA,
.pull = GPIOMUX_PULL_NONE,
};
/* Static array to initialise the return config */
static struct gpiomux_setting currentconfig[2*PIN_COUNT];
static struct msm_gpiomux_config pin_test_configs[] = {
{
.gpio = AUX_PCM_DOUT,
.settings = {
[GPIOMUX_ACTIVE] = &pin_test_config,
[GPIOMUX_SUSPENDED] = &pin_test_config,
},
},
{
.gpio = AUX_PCM_DIN,
.settings = {
[GPIOMUX_ACTIVE] = &pin_test_config,
[GPIOMUX_SUSPENDED] = &pin_test_config,
},
},
{
.gpio = AUX_PCM_SYNC,
.settings = {
[GPIOMUX_ACTIVE] = &pin_test_config,
[GPIOMUX_SUSPENDED] = &pin_test_config,
},
},
{
.gpio = AUX_PCM_CLK,
.settings = {
[GPIOMUX_ACTIVE] = &pin_test_config,
[GPIOMUX_SUSPENDED] = &pin_test_config,
},
},
{
.gpio = UARTDM_TX,
.settings = {
[GPIOMUX_ACTIVE] = &pin_test_config,
[GPIOMUX_SUSPENDED] = &pin_test_config,
},
},
{
.gpio = UARTDM_RX,
.settings = {
[GPIOMUX_ACTIVE] = &pin_test_config,
[GPIOMUX_SUSPENDED] = &pin_test_config,
},
},
{
.gpio = UARTDM_CTS,
.settings = {
[GPIOMUX_ACTIVE] = &pin_test_config,
[GPIOMUX_SUSPENDED] = &pin_test_config,
},
},
{
.gpio = UARTDM_RFR,
.settings = {
[GPIOMUX_ACTIVE] = &pin_test_config,
[GPIOMUX_SUSPENDED] = &pin_test_config,
},
},
};
static struct msm_gpiomux_config pin_config[PIN_COUNT];
static int pintest_open(struct inode *inode, struct file *file)
{
/* non-seekable */
file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
return 0;
}
static int pintest_release(struct inode *inode, struct file *file)
{
return 0;
}
static int configure_pins(struct msm_gpiomux_config *config,
struct msm_gpiomux_config *oldconfig,
unsigned int num_configs)
{
int rc = 0, j, i;
for (i = 0; i < num_configs; i++) {
for (j = 0; j < GPIOMUX_NSETTINGS; j++) {
(oldconfig + i)->gpio = (config + i)->gpio;
rc = msm_gpiomux_write((config + i)->gpio,
j,
(config + i)->settings[j],
(oldconfig + i)->settings[j]);
if (rc < 0)
break;
}
}
return rc;
}
static void init_current_config_pointers(void)
{
int i = 0, j = 0;
/* The current config variables will hold the current configuration
* which is getting overwritten during a msm_gpiomux_write call
*/
for (i = 0, j = 0; i < PIN_COUNT; i += 1, j += 2) {
pin_config[i].settings[GPIOMUX_ACTIVE] = ¤tconfig[j];
pin_config[i].settings[GPIOMUX_SUSPENDED] =
¤tconfig[j + 1];
}
}
static ssize_t pintest_write(
struct file *file,
const char __user *buff,
size_t count,
loff_t *ppos)
{
char mode;
int rc = 0;
if (count < 1)
return -EINVAL;
if (buff == NULL)
return -EINVAL;
if (copy_from_user(&mode, buff, count))
return -EFAULT;
mode = mode - '0';
init_current_config_pointers();
if (mode) {
/* Configure all pin test gpios for the custom settings */
rc = configure_pins(pin_test_configs, pin_config,
ARRAY_SIZE(pin_test_configs));
if (rc < 0)
return rc;
} else {
/* Configure all pin test gpios for the original settings */
rc = configure_pins(pin_config, pin_test_configs,
ARRAY_SIZE(pin_test_configs));
if (rc < 0)
return rc;
}
return rc;
}
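/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/btpintest/enable
 *
 * switches the eight BT UART/PCM pads to the plain-GPIO test setting
 * and saves the previous gpiomux configuration in pin_config;
 * echo 0 writes the saved configuration back.
 */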
static const struct file_operations pintest_debugfs_fops = {
.open = pintest_open,
.release = pintest_release,
.write = pintest_write,
};
static int __init bluepintest_init(void)
{
pin_debugfs_dent = debugfs_create_dir("btpintest", NULL);
if (IS_ERR(pin_debugfs_dent)) {
printk(KERN_ERR "%s(%d): debugfs_create_dir fail, error %ld\n",
__FILE__, __LINE__, PTR_ERR(pin_debugfs_dent));
return -ENOMEM;
}
if (debugfs_create_file("enable", 0644, pin_debugfs_dent,
0, &pintest_debugfs_fops) == NULL) {
printk(KERN_ERR "%s(%d): debugfs_create_file: index fail\n",
__FILE__, __LINE__);
return -ENOMEM;
}
return 0;
}
static void __exit bluepintest_exit(void)
{
debugfs_remove_recursive(pin_debugfs_dent);
}
module_init(bluepintest_init);
module_exit(bluepintest_exit);
MODULE_DESCRIPTION("Bluetooth Pin Connectivty Test Driver ver %s " VERSION);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
ROM-Jeremy/android_kernel_x5 | arch/x86/kernel/cpu/perf_event_intel_lbr.c | 3570 | 16762 | #include <linux/perf_event.h>
#include <linux/types.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>
#include "perf_event.h"
enum {
LBR_FORMAT_32 = 0x00,
LBR_FORMAT_LIP = 0x01,
LBR_FORMAT_EIP = 0x02,
LBR_FORMAT_EIP_FLAGS = 0x03,
};
/*
* Intel LBR_SELECT bits
* Intel Vol3a, April 2011, Section 16.7 Table 16-10
*
* Hardware branch filter (not available on all CPUs)
*/
#define LBR_KERNEL_BIT 0 /* do not capture at ring0 */
#define LBR_USER_BIT 1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT 2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT 3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT 4 /* do not capture indirect calls */
#define LBR_RETURN_BIT 5 /* do not capture near returns */
#define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */
#define LBR_FAR_BIT 8 /* do not capture far branches */
#define LBR_KERNEL (1 << LBR_KERNEL_BIT)
#define LBR_USER (1 << LBR_USER_BIT)
#define LBR_JCC (1 << LBR_JCC_BIT)
#define LBR_REL_CALL (1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL (1 << LBR_IND_CALL_BIT)
#define LBR_RETURN (1 << LBR_RETURN_BIT)
#define LBR_REL_JMP (1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP (1 << LBR_IND_JMP_BIT)
#define LBR_FAR (1 << LBR_FAR_BIT)
#define LBR_PLM (LBR_KERNEL | LBR_USER)
#define LBR_SEL_MASK 0x1ff /* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP -1 /* LBR filter not supported */
#define LBR_IGN 0 /* ignored */
#define LBR_ANY \
(LBR_JCC |\
LBR_REL_CALL |\
LBR_IND_CALL |\
LBR_RETURN |\
LBR_REL_JMP |\
LBR_IND_JMP |\
LBR_FAR)
#define LBR_FROM_FLAG_MISPRED (1ULL << 63)
#define for_each_branch_sample_type(x) \
for ((x) = PERF_SAMPLE_BRANCH_USER; \
(x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1)
/*
* x86 control flow change classification
* x86 control flow changes include branches, interrupts, traps, faults
*/
enum {
X86_BR_NONE = 0, /* unknown */
X86_BR_USER = 1 << 0, /* branch target is user */
X86_BR_KERNEL = 1 << 1, /* branch target is kernel */
X86_BR_CALL = 1 << 2, /* call */
X86_BR_RET = 1 << 3, /* return */
X86_BR_SYSCALL = 1 << 4, /* syscall */
X86_BR_SYSRET = 1 << 5, /* syscall return */
X86_BR_INT = 1 << 6, /* sw interrupt */
X86_BR_IRET = 1 << 7, /* return from interrupt */
X86_BR_JCC = 1 << 8, /* conditional */
X86_BR_JMP = 1 << 9, /* jump */
X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */
X86_BR_IND_CALL = 1 << 11,/* indirect calls */
};
#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANY \
(X86_BR_CALL |\
X86_BR_RET |\
X86_BR_SYSCALL |\
X86_BR_SYSRET |\
X86_BR_INT |\
X86_BR_IRET |\
X86_BR_JCC |\
X86_BR_JMP |\
X86_BR_IRQ |\
X86_BR_IND_CALL)
#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
#define X86_BR_ANY_CALL \
(X86_BR_CALL |\
X86_BR_IND_CALL |\
X86_BR_SYSCALL |\
X86_BR_IRQ |\
X86_BR_INT)
static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
/*
* We only support LBR implementations that have FREEZE_LBRS_ON_PMI
* otherwise it becomes near impossible to get a reliable stack.
*/
static void __intel_pmu_lbr_enable(void)
{
u64 debugctl;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->lbr_sel)
wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
static void __intel_pmu_lbr_disable(void)
{
u64 debugctl;
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
static void intel_pmu_lbr_reset_32(void)
{
int i;
for (i = 0; i < x86_pmu.lbr_nr; i++)
wrmsrl(x86_pmu.lbr_from + i, 0);
}
static void intel_pmu_lbr_reset_64(void)
{
int i;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
wrmsrl(x86_pmu.lbr_from + i, 0);
wrmsrl(x86_pmu.lbr_to + i, 0);
}
}
void intel_pmu_lbr_reset(void)
{
if (!x86_pmu.lbr_nr)
return;
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
intel_pmu_lbr_reset_32();
else
intel_pmu_lbr_reset_64();
}
void intel_pmu_lbr_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!x86_pmu.lbr_nr)
return;
/*
* Reset the LBR stack if we changed task context to
* avoid data leaks.
*/
if (event->ctx->task && cpuc->lbr_context != event->ctx) {
intel_pmu_lbr_reset();
cpuc->lbr_context = event->ctx;
}
cpuc->br_sel = event->hw.branch_reg.reg;
cpuc->lbr_users++;
}
void intel_pmu_lbr_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!x86_pmu.lbr_nr)
return;
cpuc->lbr_users--;
WARN_ON_ONCE(cpuc->lbr_users < 0);
if (cpuc->enabled && !cpuc->lbr_users) {
__intel_pmu_lbr_disable();
/* avoid stale pointer */
cpuc->lbr_context = NULL;
}
}
void intel_pmu_lbr_enable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->lbr_users)
__intel_pmu_lbr_enable();
}
void intel_pmu_lbr_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->lbr_users)
__intel_pmu_lbr_disable();
}
/*
* TOS = most recently recorded branch
*/
static inline u64 intel_pmu_lbr_tos(void)
{
u64 tos;
rdmsrl(x86_pmu.lbr_tos, tos);
return tos;
}
static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
unsigned long mask = x86_pmu.lbr_nr - 1;
u64 tos = intel_pmu_lbr_tos();
int i;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
unsigned long lbr_idx = (tos - i) & mask;
union {
struct {
u32 from;
u32 to;
};
u64 lbr;
} msr_lastbranch;
rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
cpuc->lbr_entries[i].from = msr_lastbranch.from;
cpuc->lbr_entries[i].to = msr_lastbranch.to;
cpuc->lbr_entries[i].mispred = 0;
cpuc->lbr_entries[i].predicted = 0;
cpuc->lbr_entries[i].reserved = 0;
}
cpuc->lbr_stack.nr = i;
}
/*
* Due to lack of segmentation in Linux the effective address (offset)
* is the same as the linear address, allowing us to merge the LIP and EIP
* LBR formats.
*/
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
unsigned long mask = x86_pmu.lbr_nr - 1;
int lbr_format = x86_pmu.intel_cap.lbr_format;
u64 tos = intel_pmu_lbr_tos();
int i;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
unsigned long lbr_idx = (tos - i) & mask;
u64 from, to, mis = 0, pred = 0;
rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
rdmsrl(x86_pmu.lbr_to + lbr_idx, to);
if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
mis = !!(from & LBR_FROM_FLAG_MISPRED);
pred = !mis;
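/*
 * Bit 63 of the LBR_FROM MSR holds the mispredict flag in this
 * format; the shift pair below drops that flag and sign-extends
 * bit 62 so "from" is a plain kernel/user address again.
 */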
from = (u64)((((s64)from) << 1) >> 1);
}
cpuc->lbr_entries[i].from = from;
cpuc->lbr_entries[i].to = to;
cpuc->lbr_entries[i].mispred = mis;
cpuc->lbr_entries[i].predicted = pred;
cpuc->lbr_entries[i].reserved = 0;
}
cpuc->lbr_stack.nr = i;
}
void intel_pmu_lbr_read(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!cpuc->lbr_users)
return;
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
intel_pmu_lbr_read_32(cpuc);
else
intel_pmu_lbr_read_64(cpuc);
intel_pmu_lbr_filter(cpuc);
}
/*
* SW filter is used:
* - in case there is no HW filter
* - in case the HW filter has errata or limitations
*/
static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
u64 br_type = event->attr.branch_sample_type;
int mask = 0;
if (br_type & PERF_SAMPLE_BRANCH_USER)
mask |= X86_BR_USER;
if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
mask |= X86_BR_KERNEL;
/* we ignore BRANCH_HV here */
if (br_type & PERF_SAMPLE_BRANCH_ANY)
mask |= X86_BR_ANY;
if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
mask |= X86_BR_ANY_CALL;
if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;
if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
mask |= X86_BR_IND_CALL;
/*
* stash actual user request into reg, it may
* be used by fixup code for some CPU
*/
event->hw.branch_reg.reg = mask;
}
/*
* setup the HW LBR filter
* Used only when available, may not be enough to disambiguate
* all branches, may need the help of the SW filter
*/
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
struct hw_perf_event_extra *reg;
u64 br_type = event->attr.branch_sample_type;
u64 mask = 0, m;
u64 v;
for_each_branch_sample_type(m) {
if (!(br_type & m))
continue;
v = x86_pmu.lbr_sel_map[m];
if (v == LBR_NOT_SUPP)
return -EOPNOTSUPP;
if (v != LBR_IGN)
mask |= v;
}
reg = &event->hw.branch_reg;
reg->idx = EXTRA_REG_LBR;
/* LBR_SELECT operates in suppress mode so invert mask */
reg->config = ~mask & x86_pmu.lbr_sel_mask;
return 0;
}
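/*
 * Worked example against the NHM/WSM map defined below: a request for
 * PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_ANY_CALL collects
 * mask = LBR_KERNEL | LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP |
 * LBR_IND_JMP | LBR_FAR = 0x1d9, so reg->config = ~0x1d9 & LBR_SEL_MASK
 * = 0x026, i.e. LBR_SELECT suppresses user-level, conditional and
 * near-return branches and records everything else.
 */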
int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
int ret = 0;
/*
* no LBR on this PMU
*/
if (!x86_pmu.lbr_nr)
return -EOPNOTSUPP;
/*
* setup SW LBR filter
*/
intel_pmu_setup_sw_lbr_filter(event);
/*
* setup HW LBR filter, if any
*/
if (x86_pmu.lbr_sel_map)
ret = intel_pmu_setup_hw_lbr_filter(event);
return ret;
}
/*
* return the type of control flow change at address "from"
* The instruction is not necessarily a branch (in case of interrupt).
*
* The branch type returned also includes the priv level of the
* target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
*
* If a branch type is unknown OR the instruction cannot be
* decoded (e.g., text page not present), then X86_BR_NONE is
* returned.
*/
static int branch_type(unsigned long from, unsigned long to)
{
struct insn insn;
void *addr;
int bytes, size = MAX_INSN_SIZE;
int ret = X86_BR_NONE;
int ext, to_plm, from_plm;
u8 buf[MAX_INSN_SIZE];
int is64 = 0;
to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;
/*
* maybe zero if lbr did not fill up after a reset by the time
* we get a PMU interrupt
*/
if (from == 0 || to == 0)
return X86_BR_NONE;
if (from_plm == X86_BR_USER) {
/*
* can happen if measuring at the user level only
* and we interrupt in a kernel thread, e.g., idle.
*/
if (!current->mm)
return X86_BR_NONE;
/* may fail if text not present */
bytes = copy_from_user_nmi(buf, (void __user *)from, size);
if (bytes != size)
return X86_BR_NONE;
addr = buf;
} else
addr = (void *)from;
/*
* decoder needs to know the ABI especially
* on 64-bit systems running 32-bit apps
*/
#ifdef CONFIG_X86_64
is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
insn_init(&insn, addr, is64);
insn_get_opcode(&insn);
switch (insn.opcode.bytes[0]) {
case 0xf:
switch (insn.opcode.bytes[1]) {
case 0x05: /* syscall */
case 0x34: /* sysenter */
ret = X86_BR_SYSCALL;
break;
case 0x07: /* sysret */
case 0x35: /* sysexit */
ret = X86_BR_SYSRET;
break;
case 0x80 ... 0x8f: /* conditional */
ret = X86_BR_JCC;
break;
default:
ret = X86_BR_NONE;
}
break;
case 0x70 ... 0x7f: /* conditional */
ret = X86_BR_JCC;
break;
case 0xc2: /* near ret */
case 0xc3: /* near ret */
case 0xca: /* far ret */
case 0xcb: /* far ret */
ret = X86_BR_RET;
break;
case 0xcf: /* iret */
ret = X86_BR_IRET;
break;
case 0xcc ... 0xce: /* int */
ret = X86_BR_INT;
break;
case 0xe8: /* call near rel */
case 0x9a: /* call far absolute */
ret = X86_BR_CALL;
break;
case 0xe0 ... 0xe3: /* loop jmp */
ret = X86_BR_JCC;
break;
case 0xe9 ... 0xeb: /* jmp */
ret = X86_BR_JMP;
break;
case 0xff: /* call near absolute, call far absolute ind */
insn_get_modrm(&insn);
ext = (insn.modrm.bytes[0] >> 3) & 0x7;
switch (ext) {
case 2: /* near ind call */
case 3: /* far ind call */
ret = X86_BR_IND_CALL;
break;
case 4:
case 5:
ret = X86_BR_JMP;
break;
}
break;
default:
ret = X86_BR_NONE;
}
/*
* interrupts, traps, faults (and thus ring transition) may
* occur on any instructions. Thus, to classify them correctly,
* we need to first look at the from and to priv levels. If they
* are different and to is in the kernel, then it indicates
* a ring transition. If the from instruction is not a ring
* transition instr (syscall, sysenter, int), then it means
* it was an irq, trap or fault.
*
* we have no way of detecting kernel to kernel faults.
*/
if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
&& ret != X86_BR_SYSCALL && ret != X86_BR_INT)
ret = X86_BR_IRQ;
/*
* branch priv level determined by target as
* is done by HW when LBR_SELECT is implemented
*/
if (ret != X86_BR_NONE)
ret |= to_plm;
return ret;
}
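/*
 * Example of the classification above: a conditional jump (opcodes
 * 0x70..0x7f) taken entirely in user space is reported as
 * X86_BR_JCC | X86_BR_USER, while the same source instruction with a
 * kernel-space target (and no syscall/int opcode) comes back as
 * X86_BR_IRQ | X86_BR_KERNEL.
 */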
/*
* implement actual branch filter based on user demand.
* Hardware may not exactly satisfy that request, thus
* we need to inspect opcodes. Mismatched branches are
* discarded. Therefore, the number of branches returned
* in PERF_SAMPLE_BRANCH_STACK sample may vary.
*/
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
u64 from, to;
int br_sel = cpuc->br_sel;
int i, j, type;
bool compress = false;
/* if sampling all branches, then nothing to filter */
if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
return;
for (i = 0; i < cpuc->lbr_stack.nr; i++) {
from = cpuc->lbr_entries[i].from;
to = cpuc->lbr_entries[i].to;
type = branch_type(from, to);
/* if type does not correspond, then discard */
if (type == X86_BR_NONE || (br_sel & type) != type) {
cpuc->lbr_entries[i].from = 0;
compress = true;
}
}
if (!compress)
return;
/* remove all entries with from=0 */
for (i = 0; i < cpuc->lbr_stack.nr; ) {
if (!cpuc->lbr_entries[i].from) {
j = i;
while (++j < cpuc->lbr_stack.nr)
cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
cpuc->lbr_stack.nr--;
if (!cpuc->lbr_entries[i].from)
continue;
}
i++;
}
}
/*
* Map interface branch filters onto LBR filters
*/
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
[PERF_SAMPLE_BRANCH_ANY] = LBR_ANY,
[PERF_SAMPLE_BRANCH_USER] = LBR_USER,
[PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL,
[PERF_SAMPLE_BRANCH_HV] = LBR_IGN,
[PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_REL_JMP
| LBR_IND_JMP | LBR_FAR,
/*
* NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
*/
[PERF_SAMPLE_BRANCH_ANY_CALL] =
LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
/*
* NHM/WSM erratum: must include IND_JMP to capture IND_CALL
*/
[PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP,
};
static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
[PERF_SAMPLE_BRANCH_ANY] = LBR_ANY,
[PERF_SAMPLE_BRANCH_USER] = LBR_USER,
[PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL,
[PERF_SAMPLE_BRANCH_HV] = LBR_IGN,
[PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_FAR,
[PERF_SAMPLE_BRANCH_ANY_CALL] = LBR_REL_CALL | LBR_IND_CALL
| LBR_FAR,
[PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL,
};
/* core */
void intel_pmu_lbr_init_core(void)
{
x86_pmu.lbr_nr = 4;
x86_pmu.lbr_tos = MSR_LBR_TOS;
x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
x86_pmu.lbr_to = MSR_LBR_CORE_TO;
/*
* SW branch filter usage:
* - compensate for lack of HW filter
*/
pr_cont("4-deep LBR, ");
}
/* nehalem/westmere */
void intel_pmu_lbr_init_nhm(void)
{
x86_pmu.lbr_nr = 16;
x86_pmu.lbr_tos = MSR_LBR_TOS;
x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
x86_pmu.lbr_to = MSR_LBR_NHM_TO;
x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu.lbr_sel_map = nhm_lbr_sel_map;
/*
* SW branch filter usage:
* - workaround LBR_SEL errata (see above)
* - support syscall, sysret capture.
* That requires LBR_FAR but that means far
* jmp need to be filtered out
*/
pr_cont("16-deep LBR, ");
}
/* sandy bridge */
void intel_pmu_lbr_init_snb(void)
{
x86_pmu.lbr_nr = 16;
x86_pmu.lbr_tos = MSR_LBR_TOS;
x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
x86_pmu.lbr_to = MSR_LBR_NHM_TO;
x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu.lbr_sel_map = snb_lbr_sel_map;
/*
* SW branch filter usage:
* - support syscall, sysret capture.
* That requires LBR_FAR but that means far
* jmp need to be filtered out
*/
pr_cont("16-deep LBR, ");
}
/* atom */
void intel_pmu_lbr_init_atom(void)
{
/*
* only models starting at stepping 10 seem
* to have an operational LBR which can freeze
* on PMU interrupt
*/
if (boot_cpu_data.x86_mask < 10) {
pr_cont("LBR disabled due to erratum");
return;
}
x86_pmu.lbr_nr = 8;
x86_pmu.lbr_tos = MSR_LBR_TOS;
x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
x86_pmu.lbr_to = MSR_LBR_CORE_TO;
/*
* SW branch filter usage:
* - compensate for lack of HW filter
*/
pr_cont("8-deep LBR, ");
}
| gpl-2.0 |
AuxXxi/caf_kernel | drivers/net/ethernet/amd/declance.c | 4850 | 35510 | /*
* Lance ethernet driver for the MIPS processor based
* DECstation family
*
*
* adopted from sunlance.c by Richard van den Berg
*
* Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
*
* additional sources:
* - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
* Revision 1.2
*
* History:
*
* v0.001: The kernel accepts the code and it shows the hardware address.
*
* v0.002: Removed most sparc stuff, left only some module and dma stuff.
*
* v0.003: Enhanced base address calculation from proposals by
* Harald Koerfgen and Thomas Riemer.
*
* v0.004: lance-regs is pointing at the right addresses, added prom
* check. First start of address mapping and DMA.
*
* v0.005: started to play around with LANCE-DMA. This driver will not
* work for non IOASIC lances. HK
*
* v0.006: added pointer arrays to lance_private and setup routine for
* them in dec_lance_init. HK
*
* v0.007: Big shit. The LANCE seems to use a different DMA mechanism to
* access the init block. This looks like one (short) word at a
* time, but the smallest amount the IOASIC can transfer is a
* (long) word. So we have a 2-2 padding here. Changed
* lance_init_block accordingly. The 16-16 padding for the buffers
* seems to be correct. HK
*
* v0.008: mods to make PMAX_LANCE work. 01/09/1999 triemer
*
* v0.009: Module support fixes, multiple interfaces support, various
* bits. macro
*
* v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
* PMAX requirement to only use halfword accesses to the
* buffer. macro
*
* v0.011: Converted the PMAD to the driver model. macro
*/
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/tc.h>
#include <linux/types.h>
#include <asm/addrspace.h>
#include <asm/dec/interrupts.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/kn01.h>
#include <asm/dec/machtype.h>
#include <asm/dec/system.h>
static char version[] __devinitdata =
"declance.c: v0.011 by Linux MIPS DECstation task force\n";
MODULE_AUTHOR("Linux MIPS DECstation task force");
MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
MODULE_LICENSE("GPL");
#define __unused __attribute__ ((unused))
/*
* card types
*/
#define ASIC_LANCE 1
#define PMAD_LANCE 2
#define PMAX_LANCE 3
#define LE_CSR0 0
#define LE_CSR1 1
#define LE_CSR2 2
#define LE_CSR3 3
#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */
#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
#define LE_C0_MERR 0x0800 /* ME: Memory error */
#define LE_C0_RINT 0x0400 /* Received interrupt */
#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
#define LE_C0_INTR 0x0080 /* Interrupt or error */
#define LE_C0_INEA 0x0040 /* Interrupt enable */
#define LE_C0_RXON 0x0020 /* Receiver on */
#define LE_C0_TXON 0x0010 /* Transmitter on */
#define LE_C0_TDMD 0x0008 /* Transmitter demand */
#define LE_C0_STOP 0x0004 /* Stop the card */
#define LE_C0_STRT 0x0002 /* Start the card */
#define LE_C0_INIT 0x0001 /* Init the card */
#define LE_C3_BSWP 0x4 /* SWAP */
#define LE_C3_ACON 0x2 /* ALE Control */
#define LE_C3_BCON 0x1 /* Byte control */
/* Receive message descriptor 1 */
#define LE_R1_OWN 0x8000 /* Who owns the entry */
#define LE_R1_ERR 0x4000 /* Error: if FRA, OFL, CRC or BUF is set */
#define LE_R1_FRA 0x2000 /* FRA: Frame error */
#define LE_R1_OFL 0x1000 /* OFL: Frame overflow */
#define LE_R1_CRC 0x0800 /* CRC error */
#define LE_R1_BUF 0x0400 /* BUF: Buffer error */
#define LE_R1_SOP 0x0200 /* Start of packet */
#define LE_R1_EOP 0x0100 /* End of packet */
#define LE_R1_POK 0x0300 /* Packet is complete: SOP + EOP */
/* Transmit message descriptor 1 */
#define LE_T1_OWN 0x8000 /* Lance owns the packet */
#define LE_T1_ERR 0x4000 /* Error summary */
#define LE_T1_EMORE 0x1000 /* Error: more than one retry needed */
#define LE_T1_EONE 0x0800 /* Error: one retry needed */
#define LE_T1_EDEF 0x0400 /* Error: deferred */
#define LE_T1_SOP 0x0200 /* Start of packet */
#define LE_T1_EOP 0x0100 /* End of packet */
#define LE_T1_POK 0x0300 /* Packet is complete: SOP + EOP */
#define LE_T3_BUF 0x8000 /* Buffer error */
#define LE_T3_UFL 0x4000 /* Error underflow */
#define LE_T3_LCOL 0x1000 /* Error late collision */
#define LE_T3_CLOS 0x0800 /* Error carrier loss */
#define LE_T3_RTY 0x0400 /* Error retry */
#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif
#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
#define PKT_BUF_SZ 1536
#define RX_BUFF_SIZE PKT_BUF_SZ
#define TX_BUFF_SIZE PKT_BUF_SZ
#undef TEST_HITS
#define ZERO 0
/*
* The DS2100/3100 have a linear 64 kB buffer which supports halfword
* accesses only. Each halfword of the buffer is word-aligned in the
* CPU address space.
*
* The PMAD-AA has a 128 kB buffer on-board.
*
* The IOASIC LANCE devices use a shared memory region. This region
 * as seen from the CPU is (max) 128 kB long and has to be on a 128 kB
 * boundary. The LANCE sees this as a 64 kB long contiguous memory
* region.
*
* The LANCE's DMA address is used as an index in this buffer and DMA
* takes place in bursts of eight 16-bit words which are packed into
* four 32-bit words by the IOASIC. This leads to a strange padding:
* 16 bytes of valid data followed by a 16 byte gap :-(.
*/
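/*
 * Editor's illustration (not part of the original driver): with the IOASIC
 * padding described above, moving frame data means handling 16 valid bytes
 * and then skipping the following 16-byte gap.  That is what the "tp += 8"
 * and "fp += 8" steps (eight u16 slots == 16 bytes) in cp_to_buf() and
 * cp_from_buf() below implement.
 */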
struct lance_rx_desc {
unsigned short rmd0; /* low address of packet */
unsigned short rmd1; /* high address of packet
and descriptor bits */
short length; /* 2s complement (negative!)
of buffer length */
unsigned short mblength; /* actual number of bytes received */
};
struct lance_tx_desc {
unsigned short tmd0; /* low address of packet */
unsigned short tmd1; /* high address of packet
and descriptor bits */
short length; /* 2s complement (negative!)
of buffer length */
unsigned short misc;
};
/* First part of the LANCE initialization block, described in databook. */
struct lance_init_block {
unsigned short mode; /* pre-set mode (reg. 15) */
unsigned short phys_addr[3]; /* physical ethernet address */
unsigned short filter[4]; /* multicast filter */
/* Receive and transmit ring base, along with extra bits. */
unsigned short rx_ptr; /* receive descriptor addr */
unsigned short rx_len; /* receive len and high addr */
unsigned short tx_ptr; /* transmit descriptor addr */
unsigned short tx_len; /* transmit len and high addr */
short gap[4];
/* The buffer descriptors */
struct lance_rx_desc brx_ring[RX_RING_SIZE];
struct lance_tx_desc btx_ring[TX_RING_SIZE];
};
#define BUF_OFFSET_CPU sizeof(struct lance_init_block)
#define BUF_OFFSET_LNC sizeof(struct lance_init_block)
#define shift_off(off, type) \
(type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off)
#define lib_off(rt, type) \
shift_off(offsetof(struct lance_init_block, rt), type)
#define lib_ptr(ib, rt, type) \
((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))
#define rds_off(rt, type) \
shift_off(offsetof(struct lance_rx_desc, rt), type)
#define rds_ptr(rd, rt, type) \
((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))
#define tds_off(rt, type) \
shift_off(offsetof(struct lance_tx_desc, rt), type)
#define tds_ptr(td, rt, type) \
((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
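/*
 * Editor's illustration (not part of the original driver): the accessor
 * macros above hide the platform-dependent padding, so a field of the
 * shared init block is always read or written through a volatile u16
 * pointer, e.g. the descriptor bits of RX ring entry 0:
 *
 *	volatile u16 *ib = (volatile u16 *)dev->mem_start;
 *	u16 bits = *lib_ptr(ib, brx_ring[0].rmd1, lp->type);
 *
 * which is exactly how lance_rx() below walks the ring.
 */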
struct lance_private {
struct net_device *next;
int type;
int dma_irq;
volatile struct lance_regs *ll;
spinlock_t lock;
int rx_new, tx_new;
int rx_old, tx_old;
unsigned short busmaster_regval;
struct timer_list multicast_timer;
/* Pointers to the ring buffers as seen from the CPU */
char *rx_buf_ptr_cpu[RX_RING_SIZE];
char *tx_buf_ptr_cpu[TX_RING_SIZE];
/* Pointers to the ring buffers as seen from the LANCE */
uint rx_buf_ptr_lnc[RX_RING_SIZE];
uint tx_buf_ptr_lnc[TX_RING_SIZE];
};
#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
lp->tx_old - lp->tx_new-1)
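/*
 * Editor's sketch (not part of the original driver): the TX_BUFFS_AVAIL
 * macro above, written out as a function.  One slot is always kept unused
 * so that tx_old == tx_new unambiguously means "ring empty".
 */
static inline int tx_buffs_avail_sketch(int tx_old, int tx_new)
{
	if (tx_old <= tx_new)
		return tx_old + TX_RING_MOD_MASK - tx_new;
	return tx_old - tx_new - 1;
}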
/* The lance control ports are at an absolute address, machine and tc-slot
* dependent.
 * DECstations do only 32-bit accesses and the LANCE uses 16-bit addresses,
 * so we have to give the structure an extra padding member to make rap
 * point at the right address.
*/
struct lance_regs {
volatile unsigned short rdp; /* register data port */
unsigned short pad;
volatile unsigned short rap; /* register address port */
};
int dec_lance_debug = 2;
static struct tc_driver dec_lance_tc_driver;
static struct net_device *root_lance_dev;
static inline void writereg(volatile unsigned short *regptr, short value)
{
*regptr = value;
iob();
}
/* Load the CSR registers */
static void load_csrs(struct lance_private *lp)
{
volatile struct lance_regs *ll = lp->ll;
uint leptr;
/* The address space as seen from the LANCE
* begins at address 0. HK
*/
leptr = 0;
writereg(&ll->rap, LE_CSR1);
writereg(&ll->rdp, (leptr & 0xFFFF));
writereg(&ll->rap, LE_CSR2);
writereg(&ll->rdp, leptr >> 16);
writereg(&ll->rap, LE_CSR3);
writereg(&ll->rdp, lp->busmaster_regval);
/* Point back to csr0 */
writereg(&ll->rap, LE_CSR0);
}
/*
* Our specialized copy routines
*
*/
static void cp_to_buf(const int type, void *to, const void *from, int len)
{
unsigned short *tp;
const unsigned short *fp;
unsigned short clen;
unsigned char *rtp;
const unsigned char *rfp;
if (type == PMAD_LANCE) {
memcpy(to, from, len);
} else if (type == PMAX_LANCE) {
clen = len >> 1;
tp = to;
fp = from;
while (clen--) {
*tp++ = *fp++;
tp++;
}
clen = len & 1;
rtp = tp;
rfp = fp;
while (clen--) {
*rtp++ = *rfp++;
}
} else {
/*
* copy 16 Byte chunks
*/
clen = len >> 4;
tp = to;
fp = from;
while (clen--) {
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
tp += 8;
}
/*
* do the rest, if any.
*/
clen = len & 15;
rtp = (unsigned char *) tp;
rfp = (unsigned char *) fp;
while (clen--) {
*rtp++ = *rfp++;
}
}
iob();
}
static void cp_from_buf(const int type, void *to, const void *from, int len)
{
unsigned short *tp;
const unsigned short *fp;
unsigned short clen;
unsigned char *rtp;
const unsigned char *rfp;
if (type == PMAD_LANCE) {
memcpy(to, from, len);
} else if (type == PMAX_LANCE) {
clen = len >> 1;
tp = to;
fp = from;
while (clen--) {
*tp++ = *fp++;
fp++;
}
clen = len & 1;
rtp = tp;
rfp = fp;
while (clen--) {
*rtp++ = *rfp++;
}
} else {
/*
* copy 16 Byte chunks
*/
clen = len >> 4;
tp = to;
fp = from;
while (clen--) {
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
fp += 8;
}
/*
* do the rest, if any.
*/
clen = len & 15;
rtp = (unsigned char *) tp;
rfp = (unsigned char *) fp;
while (clen--) {
*rtp++ = *rfp++;
}
}
}
/* Setup the Lance Rx and Tx rings */
static void lance_init_ring(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
uint leptr;
int i;
/* Lock out other processes while setting up hardware */
netif_stop_queue(dev);
lp->rx_new = lp->tx_new = 0;
lp->rx_old = lp->tx_old = 0;
/* Copy the ethernet address to the lance init block.
* XXX bit 0 of the physical address registers has to be zero
*/
*lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) |
dev->dev_addr[0];
*lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) |
dev->dev_addr[2];
*lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) |
dev->dev_addr[4];
/* Setup the initialization block */
/* Setup rx descriptor pointer */
leptr = offsetof(struct lance_init_block, brx_ring);
*lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) |
(leptr >> 16);
*lib_ptr(ib, rx_ptr, lp->type) = leptr;
if (ZERO)
printk("RX ptr: %8.8x(%8.8x)\n",
leptr, lib_off(brx_ring, lp->type));
/* Setup tx descriptor pointer */
leptr = offsetof(struct lance_init_block, btx_ring);
*lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) |
(leptr >> 16);
*lib_ptr(ib, tx_ptr, lp->type) = leptr;
if (ZERO)
printk("TX ptr: %8.8x(%8.8x)\n",
leptr, lib_off(btx_ring, lp->type));
if (ZERO)
printk("TX rings:\n");
/* Setup the Tx ring entries */
for (i = 0; i < TX_RING_SIZE; i++) {
leptr = lp->tx_buf_ptr_lnc[i];
*lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr;
*lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) &
0xff;
*lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000;
/* The ones required by tmd2 */
*lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
if (i < 3 && ZERO)
printk("%d: 0x%8.8x(0x%8.8x)\n",
i, leptr, (uint)lp->tx_buf_ptr_cpu[i]);
}
/* Setup the Rx ring entries */
if (ZERO)
printk("RX rings:\n");
for (i = 0; i < RX_RING_SIZE; i++) {
leptr = lp->rx_buf_ptr_lnc[i];
*lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr;
*lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) &
0xff) |
LE_R1_OWN;
*lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE |
0xf000;
*lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
if (i < 3 && ZERO)
printk("%d: 0x%8.8x(0x%8.8x)\n",
i, leptr, (uint)lp->rx_buf_ptr_cpu[i]);
}
iob();
}
static int init_restart_lance(struct lance_private *lp)
{
volatile struct lance_regs *ll = lp->ll;
int i;
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_INIT);
/* Wait for the lance to complete initialization */
for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) {
udelay(10);
}
if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
i, ll->rdp);
return -1;
}
if ((ll->rdp & LE_C0_ERR)) {
printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
i, ll->rdp);
return -1;
}
writereg(&ll->rdp, LE_C0_IDON);
writereg(&ll->rdp, LE_C0_STRT);
writereg(&ll->rdp, LE_C0_INEA);
return 0;
}
static int lance_rx(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
volatile u16 *rd;
unsigned short bits;
int entry, len;
struct sk_buff *skb;
#ifdef TEST_HITS
{
int i;
printk("[");
for (i = 0; i < RX_RING_SIZE; i++) {
if (i == lp->rx_new)
printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
lp->type) &
LE_R1_OWN ? "_" : "X");
else
printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
lp->type) &
LE_R1_OWN ? "." : "1");
}
printk("]");
}
#endif
for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type);
!((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN);
rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) {
entry = lp->rx_new;
/* We got an incomplete frame? */
if ((bits & LE_R1_POK) != LE_R1_POK) {
dev->stats.rx_over_errors++;
dev->stats.rx_errors++;
} else if (bits & LE_R1_ERR) {
/* Count only the end frame as a rx error,
* not the beginning
*/
if (bits & LE_R1_BUF)
dev->stats.rx_fifo_errors++;
if (bits & LE_R1_CRC)
dev->stats.rx_crc_errors++;
if (bits & LE_R1_OFL)
dev->stats.rx_over_errors++;
if (bits & LE_R1_FRA)
dev->stats.rx_frame_errors++;
if (bits & LE_R1_EOP)
dev->stats.rx_errors++;
} else {
len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
if (skb == 0) {
printk("%s: Memory squeeze, deferring packet.\n",
dev->name);
dev->stats.rx_dropped++;
*rds_ptr(rd, mblength, lp->type) = 0;
*rds_ptr(rd, rmd1, lp->type) =
((lp->rx_buf_ptr_lnc[entry] >> 16) &
0xff) | LE_R1_OWN;
lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
return 0;
}
dev->stats.rx_bytes += len;
skb_reserve(skb, 2); /* 16 byte align */
skb_put(skb, len); /* make room */
cp_from_buf(lp->type, skb->data,
(char *)lp->rx_buf_ptr_cpu[entry], len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
}
/* Return the packet to the pool */
*rds_ptr(rd, mblength, lp->type) = 0;
*rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000;
*rds_ptr(rd, rmd1, lp->type) =
((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN;
lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
}
return 0;
}
static void lance_tx(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
volatile struct lance_regs *ll = lp->ll;
volatile u16 *td;
int i, j;
int status;
j = lp->tx_old;
spin_lock(&lp->lock);
for (i = j; i != lp->tx_new; i = j) {
td = lib_ptr(ib, btx_ring[i], lp->type);
/* If we hit a packet not owned by us, stop */
if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN)
break;
if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) {
status = *tds_ptr(td, misc, lp->type);
dev->stats.tx_errors++;
if (status & LE_T3_RTY)
dev->stats.tx_aborted_errors++;
if (status & LE_T3_LCOL)
dev->stats.tx_window_errors++;
if (status & LE_T3_CLOS) {
dev->stats.tx_carrier_errors++;
printk("%s: Carrier Lost\n", dev->name);
/* Stop the lance */
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_STOP);
lance_init_ring(dev);
load_csrs(lp);
init_restart_lance(lp);
goto out;
}
/* Buffer errors and underflows turn off the
* transmitter, restart the adapter.
*/
if (status & (LE_T3_BUF | LE_T3_UFL)) {
dev->stats.tx_fifo_errors++;
printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
dev->name);
/* Stop the lance */
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_STOP);
lance_init_ring(dev);
load_csrs(lp);
init_restart_lance(lp);
goto out;
}
} else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) ==
LE_T1_POK) {
/*
* So we don't count the packet more than once.
*/
*tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK);
/* One collision before packet was sent. */
if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE)
dev->stats.collisions++;
/* More than one collision, be optimistic. */
if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE)
dev->stats.collisions += 2;
dev->stats.tx_packets++;
}
j = (j + 1) & TX_RING_MOD_MASK;
}
lp->tx_old = j;
out:
if (netif_queue_stopped(dev) &&
TX_BUFFS_AVAIL > 0)
netif_wake_queue(dev);
spin_unlock(&lp->lock);
}
static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
printk(KERN_ERR "%s: DMA error\n", dev->name);
return IRQ_HANDLED;
}
static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
int csr0;
writereg(&ll->rap, LE_CSR0);
csr0 = ll->rdp;
/* Acknowledge all the interrupt sources ASAP */
writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT));
if ((csr0 & LE_C0_ERR)) {
/* Clear the error condition */
writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
LE_C0_CERR | LE_C0_MERR);
}
if (csr0 & LE_C0_RINT)
lance_rx(dev);
if (csr0 & LE_C0_TINT)
lance_tx(dev);
if (csr0 & LE_C0_BABL)
dev->stats.tx_errors++;
if (csr0 & LE_C0_MISS)
dev->stats.rx_errors++;
if (csr0 & LE_C0_MERR) {
printk("%s: Memory error, status %04x\n", dev->name, csr0);
writereg(&ll->rdp, LE_C0_STOP);
lance_init_ring(dev);
load_csrs(lp);
init_restart_lance(lp);
netif_wake_queue(dev);
}
writereg(&ll->rdp, LE_C0_INEA);
writereg(&ll->rdp, LE_C0_INEA);
return IRQ_HANDLED;
}
static int lance_open(struct net_device *dev)
{
volatile u16 *ib = (volatile u16 *)dev->mem_start;
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
int status = 0;
/* Stop the Lance */
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_STOP);
/* Set mode and clear multicast filter only at device open,
* so that lance_init_ring() called at any error will not
* forget multicast filters.
*
 * BTW it is a common bug in all lance drivers! --ANK
*/
*lib_ptr(ib, mode, lp->type) = 0;
*lib_ptr(ib, filter[0], lp->type) = 0;
*lib_ptr(ib, filter[1], lp->type) = 0;
*lib_ptr(ib, filter[2], lp->type) = 0;
*lib_ptr(ib, filter[3], lp->type) = 0;
lance_init_ring(dev);
load_csrs(lp);
netif_start_queue(dev);
/* Associate IRQ with lance_interrupt */
if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) {
printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
return -EAGAIN;
}
if (lp->dma_irq >= 0) {
unsigned long flags;
if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
"lance error", dev)) {
free_irq(dev->irq, dev);
printk("%s: Can't get DMA IRQ %d\n", dev->name,
lp->dma_irq);
return -EAGAIN;
}
spin_lock_irqsave(&ioasic_ssr_lock, flags);
fast_mb();
/* Enable I/O ASIC LANCE DMA. */
ioasic_write(IO_REG_SSR,
ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN);
fast_mb();
spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
}
status = init_restart_lance(lp);
return status;
}
static int lance_close(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
netif_stop_queue(dev);
del_timer_sync(&lp->multicast_timer);
/* Stop the card */
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_STOP);
if (lp->dma_irq >= 0) {
unsigned long flags;
spin_lock_irqsave(&ioasic_ssr_lock, flags);
fast_mb();
/* Disable I/O ASIC LANCE DMA. */
ioasic_write(IO_REG_SSR,
ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN);
fast_iob();
spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
free_irq(lp->dma_irq, dev);
}
free_irq(dev->irq, dev);
return 0;
}
static inline int lance_reset(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
int status;
/* Stop the lance */
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_STOP);
lance_init_ring(dev);
load_csrs(lp);
dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
static void lance_tx_timeout(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
dev->name, ll->rdp);
lance_reset(dev);
netif_wake_queue(dev);
}
static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
volatile u16 *ib = (volatile u16 *)dev->mem_start;
unsigned long flags;
int entry, len;
len = skb->len;
if (len < ETH_ZLEN) {
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
len = ETH_ZLEN;
}
dev->stats.tx_bytes += len;
spin_lock_irqsave(&lp->lock, flags);
entry = lp->tx_new;
*lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
*lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len);
/* Now, give the packet to the lance */
*lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) |
(LE_T1_POK | LE_T1_OWN);
lp->tx_new = (entry + 1) & TX_RING_MOD_MASK;
if (TX_BUFFS_AVAIL <= 0)
netif_stop_queue(dev);
/* Kick the lance: transmit now */
writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD);
spin_unlock_irqrestore(&lp->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
struct netdev_hw_addr *ha;
u32 crc;
/* set all multicast bits */
if (dev->flags & IFF_ALLMULTI) {
*lib_ptr(ib, filter[0], lp->type) = 0xffff;
*lib_ptr(ib, filter[1], lp->type) = 0xffff;
*lib_ptr(ib, filter[2], lp->type) = 0xffff;
*lib_ptr(ib, filter[3], lp->type) = 0xffff;
return;
}
/* clear the multicast filter */
*lib_ptr(ib, filter[0], lp->type) = 0;
*lib_ptr(ib, filter[1], lp->type) = 0;
*lib_ptr(ib, filter[2], lp->type) = 0;
*lib_ptr(ib, filter[3], lp->type) = 0;
/* Add addresses */
netdev_for_each_mc_addr(ha, dev) {
crc = ether_crc_le(ETH_ALEN, ha->addr);
crc = crc >> 26;
*lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
}
}
static void lance_set_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
volatile struct lance_regs *ll = lp->ll;
if (!netif_running(dev))
return;
if (lp->tx_old != lp->tx_new) {
mod_timer(&lp->multicast_timer, jiffies + 4 * HZ/100);
netif_wake_queue(dev);
return;
}
netif_stop_queue(dev);
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_STOP);
lance_init_ring(dev);
if (dev->flags & IFF_PROMISC) {
*lib_ptr(ib, mode, lp->type) |= LE_MO_PROM;
} else {
*lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM;
lance_load_multicast(dev);
}
load_csrs(lp);
init_restart_lance(lp);
netif_wake_queue(dev);
}
static void lance_set_multicast_retry(unsigned long _opaque)
{
struct net_device *dev = (struct net_device *) _opaque;
lance_set_multicast(dev);
}
static const struct net_device_ops lance_netdev_ops = {
.ndo_open = lance_open,
.ndo_stop = lance_close,
.ndo_start_xmit = lance_start_xmit,
.ndo_tx_timeout = lance_tx_timeout,
.ndo_set_rx_mode = lance_set_multicast,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
static int __devinit dec_lance_probe(struct device *bdev, const int type)
{
static unsigned version_printed;
static const char fmt[] = "declance%d";
char name[10];
struct net_device *dev;
struct lance_private *lp;
volatile struct lance_regs *ll;
resource_size_t start = 0, len = 0;
int i, ret;
unsigned long esar_base;
unsigned char *esar;
if (dec_lance_debug && version_printed++ == 0)
printk(version);
if (bdev)
snprintf(name, sizeof(name), "%s", dev_name(bdev));
else {
i = 0;
dev = root_lance_dev;
while (dev) {
i++;
lp = netdev_priv(dev);
dev = lp->next;
}
snprintf(name, sizeof(name), fmt, i);
}
dev = alloc_etherdev(sizeof(struct lance_private));
if (!dev) {
ret = -ENOMEM;
goto err_out;
}
/*
* alloc_etherdev ensures the data structures used by the LANCE
* are aligned.
*/
lp = netdev_priv(dev);
spin_lock_init(&lp->lock);
lp->type = type;
switch (type) {
case ASIC_LANCE:
dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
/* buffer space for the on-board LANCE shared memory */
/*
* FIXME: ugly hack!
*/
dev->mem_start = CKSEG1ADDR(0x00020000);
dev->mem_end = dev->mem_start + 0x00020000;
dev->irq = dec_interrupt[DEC_IRQ_LANCE];
esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR);
/* Workaround crash with booting KN04 2.1k from Disk */
memset((void *)dev->mem_start, 0,
dev->mem_end - dev->mem_start);
/*
* setup the pointer arrays, this sucks [tm] :-(
*/
for (i = 0; i < RX_RING_SIZE; i++) {
lp->rx_buf_ptr_cpu[i] =
(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
2 * i * RX_BUFF_SIZE);
lp->rx_buf_ptr_lnc[i] =
(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
}
for (i = 0; i < TX_RING_SIZE; i++) {
lp->tx_buf_ptr_cpu[i] =
(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
2 * RX_RING_SIZE * RX_BUFF_SIZE +
2 * i * TX_BUFF_SIZE);
lp->tx_buf_ptr_lnc[i] =
(BUF_OFFSET_LNC +
RX_RING_SIZE * RX_BUFF_SIZE +
i * TX_BUFF_SIZE);
}
/* Setup I/O ASIC LANCE DMA. */
lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR];
ioasic_write(IO_REG_LANCE_DMA_P,
CPHYSADDR(dev->mem_start) << 3);
break;
#ifdef CONFIG_TC
case PMAD_LANCE:
dev_set_drvdata(bdev, dev);
start = to_tc_dev(bdev)->resource.start;
len = to_tc_dev(bdev)->resource.end - start + 1;
if (!request_mem_region(start, len, dev_name(bdev))) {
printk(KERN_ERR
"%s: Unable to reserve MMIO resource\n",
dev_name(bdev));
ret = -EBUSY;
goto err_out_dev;
}
dev->mem_start = CKSEG1ADDR(start);
dev->mem_end = dev->mem_start + 0x100000;
dev->base_addr = dev->mem_start + 0x100000;
dev->irq = to_tc_dev(bdev)->interrupt;
esar_base = dev->mem_start + 0x1c0002;
lp->dma_irq = -1;
for (i = 0; i < RX_RING_SIZE; i++) {
lp->rx_buf_ptr_cpu[i] =
(char *)(dev->mem_start + BUF_OFFSET_CPU +
i * RX_BUFF_SIZE);
lp->rx_buf_ptr_lnc[i] =
(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
}
for (i = 0; i < TX_RING_SIZE; i++) {
lp->tx_buf_ptr_cpu[i] =
(char *)(dev->mem_start + BUF_OFFSET_CPU +
RX_RING_SIZE * RX_BUFF_SIZE +
i * TX_BUFF_SIZE);
lp->tx_buf_ptr_lnc[i] =
(BUF_OFFSET_LNC +
RX_RING_SIZE * RX_BUFF_SIZE +
i * TX_BUFF_SIZE);
}
break;
#endif
case PMAX_LANCE:
dev->irq = dec_interrupt[DEC_IRQ_LANCE];
dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
dev->mem_end = dev->mem_start + KN01_SLOT_SIZE;
esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
lp->dma_irq = -1;
/*
* setup the pointer arrays, this sucks [tm] :-(
*/
for (i = 0; i < RX_RING_SIZE; i++) {
lp->rx_buf_ptr_cpu[i] =
(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
2 * i * RX_BUFF_SIZE);
lp->rx_buf_ptr_lnc[i] =
(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
}
for (i = 0; i < TX_RING_SIZE; i++) {
lp->tx_buf_ptr_cpu[i] =
(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
2 * RX_RING_SIZE * RX_BUFF_SIZE +
2 * i * TX_BUFF_SIZE);
lp->tx_buf_ptr_lnc[i] =
(BUF_OFFSET_LNC +
RX_RING_SIZE * RX_BUFF_SIZE +
i * TX_BUFF_SIZE);
}
break;
default:
printk(KERN_ERR "%s: declance_init called with unknown type\n",
name);
ret = -ENODEV;
goto err_out_dev;
}
ll = (struct lance_regs *) dev->base_addr;
esar = (unsigned char *) esar_base;
/* prom checks */
/* First, check for test pattern */
if (esar[0x60] != 0xff && esar[0x64] != 0x00 &&
esar[0x68] != 0x55 && esar[0x6c] != 0xaa) {
printk(KERN_ERR
"%s: Ethernet station address prom not found!\n",
name);
ret = -ENODEV;
goto err_out_resource;
}
/* Check the prom contents */
for (i = 0; i < 8; i++) {
if (esar[i * 4] != esar[0x3c - i * 4] &&
esar[i * 4] != esar[0x40 + i * 4] &&
esar[0x3c - i * 4] != esar[0x40 + i * 4]) {
printk(KERN_ERR "%s: Something is wrong with the "
"ethernet station address prom!\n", name);
ret = -ENODEV;
goto err_out_resource;
}
}
/* Copy the ethernet address to the device structure, later to the
* lance initialization block so the lance gets it every time it's
* (re)initialized.
*/
switch (type) {
case ASIC_LANCE:
printk("%s: IOASIC onboard LANCE", name);
break;
case PMAD_LANCE:
printk("%s: PMAD-AA", name);
break;
case PMAX_LANCE:
printk("%s: PMAX onboard LANCE", name);
break;
}
for (i = 0; i < 6; i++)
dev->dev_addr[i] = esar[i * 4];
printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
dev->netdev_ops = &lance_netdev_ops;
dev->watchdog_timeo = 5*HZ;
/* lp->ll is the location of the registers for lance card */
lp->ll = ll;
/* busmaster_regval (CSR3) should be zero according to the PMAD-AA
* specification.
*/
lp->busmaster_regval = 0;
dev->dma = 0;
/* We cannot sleep if the chip is busy during a
* multicast list update event, because such events
* can occur from interrupts (ex. IPv6). So we
* use a timer to try again later when necessary. -DaveM
*/
init_timer(&lp->multicast_timer);
lp->multicast_timer.data = (unsigned long) dev;
lp->multicast_timer.function = lance_set_multicast_retry;
ret = register_netdev(dev);
if (ret) {
printk(KERN_ERR
"%s: Unable to register netdev, aborting.\n", name);
goto err_out_resource;
}
if (!bdev) {
lp->next = root_lance_dev;
root_lance_dev = dev;
}
printk("%s: registered as %s.\n", name, dev->name);
return 0;
err_out_resource:
if (bdev)
release_mem_region(start, len);
err_out_dev:
free_netdev(dev);
err_out:
return ret;
}
static void __exit dec_lance_remove(struct device *bdev)
{
struct net_device *dev = dev_get_drvdata(bdev);
resource_size_t start, len;
unregister_netdev(dev);
start = to_tc_dev(bdev)->resource.start;
len = to_tc_dev(bdev)->resource.end - start + 1;
release_mem_region(start, len);
free_netdev(dev);
}
/* Find all the lance cards on the system and initialize them */
static int __init dec_lance_platform_probe(void)
{
int count = 0;
if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
if (dec_lance_probe(NULL, ASIC_LANCE) >= 0)
count++;
} else if (!TURBOCHANNEL) {
if (dec_lance_probe(NULL, PMAX_LANCE) >= 0)
count++;
}
}
return (count > 0) ? 0 : -ENODEV;
}
static void __exit dec_lance_platform_remove(void)
{
while (root_lance_dev) {
struct net_device *dev = root_lance_dev;
struct lance_private *lp = netdev_priv(dev);
unregister_netdev(dev);
root_lance_dev = lp->next;
free_netdev(dev);
}
}
#ifdef CONFIG_TC
static int __devinit dec_lance_tc_probe(struct device *dev);
static int __exit dec_lance_tc_remove(struct device *dev);
static const struct tc_device_id dec_lance_tc_table[] = {
{ "DEC ", "PMAD-AA " },
{ }
};
MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);
static struct tc_driver dec_lance_tc_driver = {
.id_table = dec_lance_tc_table,
.driver = {
.name = "declance",
.bus = &tc_bus_type,
.probe = dec_lance_tc_probe,
.remove = __exit_p(dec_lance_tc_remove),
},
};
static int __devinit dec_lance_tc_probe(struct device *dev)
{
int status = dec_lance_probe(dev, PMAD_LANCE);
if (!status)
get_device(dev);
return status;
}
static int __exit dec_lance_tc_remove(struct device *dev)
{
put_device(dev);
dec_lance_remove(dev);
return 0;
}
#endif
static int __init dec_lance_init(void)
{
int status;
status = tc_register_driver(&dec_lance_tc_driver);
if (!status)
dec_lance_platform_probe();
return status;
}
static void __exit dec_lance_exit(void)
{
dec_lance_platform_remove();
tc_unregister_driver(&dec_lance_tc_driver);
}
module_init(dec_lance_init);
module_exit(dec_lance_exit);
| gpl-2.0 |
KiWiX-s2/KiWiX-s2 | arch/tile/lib/memcpy_tile64.c | 7410 | 8973 | /*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/fixmap.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>
#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
/* Defined in memcpy.S */
extern unsigned long __memcpy_asm(void *to, const void *from, unsigned long n);
extern unsigned long __copy_to_user_inatomic_asm(
void __user *to, const void *from, unsigned long n);
extern unsigned long __copy_from_user_inatomic_asm(
void *to, const void __user *from, unsigned long n);
extern unsigned long __copy_from_user_zeroing_asm(
void *to, const void __user *from, unsigned long n);
typedef unsigned long (*memcpy_t)(void *, const void *, unsigned long);
/* Size above which to consider TLB games for performance */
#define LARGE_COPY_CUTOFF 2048
/* Communicate to the simulator what we are trying to do. */
#define sim_allow_multiple_caching(b) \
__insn_mtspr(SPR_SIM_CONTROL, \
SIM_CONTROL_ALLOW_MULTIPLE_CACHING | ((b) << _SIM_CONTROL_OPERATOR_BITS))
/*
* Copy memory by briefly enabling incoherent cacheline-at-a-time mode.
*
* We set up our own source and destination PTEs that we fully control.
* This is the only way to guarantee that we don't race with another
* thread that is modifying the PTE; we can't afford to try the
* copy_{to,from}_user() technique of catching the interrupt, since
* we must run with interrupts disabled to avoid the risk of some
* other code seeing the incoherent data in our cache. (Recall that
* our cache is indexed by PA, so even if the other code doesn't use
* our kmap_atomic virtual addresses, they'll still hit in cache using
* the normal VAs that aren't supposed to hit in cache.)
*/
static void memcpy_multicache(void *dest, const void *source,
pte_t dst_pte, pte_t src_pte, int len)
{
int idx;
unsigned long flags, newsrc, newdst;
pmd_t *pmdp;
pte_t *ptep;
int type0, type1;
int cpu = get_cpu();
/*
* Disable interrupts so that we don't recurse into memcpy()
* in an interrupt handler, nor accidentally reference
* the PA of the source from an interrupt routine. Also
* notify the simulator that we're playing games so we don't
* generate spurious coherency warnings.
*/
local_irq_save(flags);
sim_allow_multiple_caching(1);
/* Set up the new dest mapping */
type0 = kmap_atomic_idx_push();
idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
ptep = pte_offset_kernel(pmdp, newdst);
if (pte_val(*ptep) != pte_val(dst_pte)) {
set_pte(ptep, dst_pte);
local_flush_tlb_page(NULL, newdst, PAGE_SIZE);
}
/* Set up the new source mapping */
type1 = kmap_atomic_idx_push();
idx += (type0 - type1);
src_pte = hv_pte_set_nc(src_pte);
src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */
newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
pmdp = pmd_offset(pud_offset(pgd_offset_k(newsrc), newsrc), newsrc);
ptep = pte_offset_kernel(pmdp, newsrc);
__set_pte(ptep, src_pte); /* set_pte() would be confused by this */
local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);
/* Actually move the data. */
__memcpy_asm((void *)newdst, (const void *)newsrc, len);
/*
* Remap the source as locally-cached and not OLOC'ed so that
* we can inval without also invaling the remote cpu's cache.
* This also avoids known errata with inv'ing cacheable oloc data.
*/
src_pte = hv_pte_set_mode(src_pte, HV_PTE_MODE_CACHE_NO_L3);
src_pte = hv_pte_set_writable(src_pte); /* need write access for inv */
__set_pte(ptep, src_pte); /* set_pte() would be confused by this */
local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);
/*
* Do the actual invalidation, covering the full L2 cache line
* at the end since __memcpy_asm() is somewhat aggressive.
*/
__inv_buffer((void *)newsrc, len);
/*
* We're done: notify the simulator that all is back to normal,
* and re-enable interrupts and pre-emption.
*/
kmap_atomic_idx_pop();
kmap_atomic_idx_pop();
sim_allow_multiple_caching(0);
local_irq_restore(flags);
put_cpu();
}
/*
* Identify large copies from remotely-cached memory, and copy them
* via memcpy_multicache() if they look good, otherwise fall back
* to the particular kind of copying passed as the memcpy_t function.
*/
static unsigned long fast_copy(void *dest, const void *source, int len,
memcpy_t func)
{
/*
* Check if it's big enough to bother with. We may end up doing a
* small copy via TLB manipulation if we're near a page boundary,
* but presumably we'll make it up when we hit the second page.
*/
while (len >= LARGE_COPY_CUTOFF) {
int copy_size, bytes_left_on_page;
pte_t *src_ptep, *dst_ptep;
pte_t src_pte, dst_pte;
struct page *src_page, *dst_page;
/* Is the source page oloc'ed to a remote cpu? */
retry_source:
src_ptep = virt_to_pte(current->mm, (unsigned long)source);
if (src_ptep == NULL)
break;
src_pte = *src_ptep;
if (!hv_pte_get_present(src_pte) ||
!hv_pte_get_readable(src_pte) ||
hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3)
break;
if (get_remote_cache_cpu(src_pte) == smp_processor_id())
break;
src_page = pfn_to_page(hv_pte_get_pfn(src_pte));
get_page(src_page);
if (pte_val(src_pte) != pte_val(*src_ptep)) {
put_page(src_page);
goto retry_source;
}
if (pte_huge(src_pte)) {
/* Adjust the PTE to correspond to a small page */
int pfn = hv_pte_get_pfn(src_pte);
pfn += (((unsigned long)source & (HPAGE_SIZE-1))
>> PAGE_SHIFT);
src_pte = pfn_pte(pfn, src_pte);
src_pte = pte_mksmall(src_pte);
}
/* Is the destination page writable? */
retry_dest:
dst_ptep = virt_to_pte(current->mm, (unsigned long)dest);
if (dst_ptep == NULL) {
put_page(src_page);
break;
}
dst_pte = *dst_ptep;
if (!hv_pte_get_present(dst_pte) ||
!hv_pte_get_writable(dst_pte)) {
put_page(src_page);
break;
}
dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte));
if (dst_page == src_page) {
/*
* Source and dest are on the same page; this
* potentially exposes us to incoherence if any
* part of src and dest overlap on a cache line.
* Just give up rather than trying to be precise.
*/
put_page(src_page);
break;
}
get_page(dst_page);
if (pte_val(dst_pte) != pte_val(*dst_ptep)) {
put_page(dst_page);
goto retry_dest;
}
if (pte_huge(dst_pte)) {
/* Adjust the PTE to correspond to a small page */
int pfn = hv_pte_get_pfn(dst_pte);
pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
>> PAGE_SHIFT);
dst_pte = pfn_pte(pfn, dst_pte);
dst_pte = pte_mksmall(dst_pte);
}
/* All looks good: create a cachable PTE and copy from it */
copy_size = len;
bytes_left_on_page =
PAGE_SIZE - (((int)source) & (PAGE_SIZE-1));
if (copy_size > bytes_left_on_page)
copy_size = bytes_left_on_page;
bytes_left_on_page =
PAGE_SIZE - (((int)dest) & (PAGE_SIZE-1));
if (copy_size > bytes_left_on_page)
copy_size = bytes_left_on_page;
memcpy_multicache(dest, source, dst_pte, src_pte, copy_size);
/* Release the pages */
put_page(dst_page);
put_page(src_page);
/* Continue on the next page */
dest += copy_size;
source += copy_size;
len -= copy_size;
}
return func(dest, source, len);
}
void *memcpy(void *to, const void *from, __kernel_size_t n)
{
if (n < LARGE_COPY_CUTOFF)
return (void *)__memcpy_asm(to, from, n);
else
return (void *)fast_copy(to, from, n, __memcpy_asm);
}
unsigned long __copy_to_user_inatomic(void __user *to, const void *from,
unsigned long n)
{
if (n < LARGE_COPY_CUTOFF)
return __copy_to_user_inatomic_asm(to, from, n);
else
return fast_copy(to, from, n, __copy_to_user_inatomic_asm);
}
unsigned long __copy_from_user_inatomic(void *to, const void __user *from,
unsigned long n)
{
if (n < LARGE_COPY_CUTOFF)
return __copy_from_user_inatomic_asm(to, from, n);
else
return fast_copy(to, from, n, __copy_from_user_inatomic_asm);
}
unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
unsigned long n)
{
if (n < LARGE_COPY_CUTOFF)
return __copy_from_user_zeroing_asm(to, from, n);
else
return fast_copy(to, from, n, __copy_from_user_zeroing_asm);
}
#endif /* !CHIP_HAS_COHERENT_LOCAL_CACHE() */
| gpl-2.0 |
sdotter/GPE-6.0.0 | drivers/base/power/trace.c | 8690 | 7120 | /*
* drivers/base/power/trace.c
*
* Copyright (C) 2006 Linus Torvalds
*
* Trace facility for suspend/resume problems, when none of the
* devices may be working.
*/
#include <linux/resume-trace.h>
#include <linux/export.h>
#include <linux/rtc.h>
#include <asm/rtc.h>
#include "power.h"
/*
* Horrid, horrid, horrid.
*
* It turns out that the _only_ piece of hardware that actually
* keeps its value across a hard boot (and, more importantly, the
* POST init sequence) is literally the realtime clock.
*
* Never mind that an RTC chip has 114 bytes (and often a whole
* other bank of an additional 128 bytes) of nice SRAM that is
* _designed_ to keep data - the POST will clear it. So we literally
* can just use the few bytes of actual time data, which means that
* we're really limited.
*
* It means, for example, that we can't use the seconds at all
* (since the time between the hang and the boot might be more
* than a minute), and we'd better not depend on the low bits of
* the minutes either.
*
* There are the wday fields etc, but I wouldn't guarantee those
* are dependable either. And if the date isn't valid, either the
* hw or POST will do strange things.
*
* So we're left with:
* - year: 0-99
* - month: 0-11
* - day-of-month: 1-28
* - hour: 0-23
 * - min: (0-20)*3
*
* Giving us a total range of 0-16128000 (0xf61800), ie less
* than 24 bits of actual data we can save across reboots.
*
* And if your box can't boot in less than three minutes,
* you're screwed.
*
* Now, almost 24 bits of data is pitifully small, so we need
* to be pretty dense if we want to use it for anything nice.
* What we do is that instead of saving off nice readable info,
* we save off _hashes_ of information that we can hopefully
* regenerate after the reboot.
*
* In particular, this means that we might be unlucky, and hit
* a case where we have a hash collision, and we end up not
* being able to tell for certain exactly which case happened.
* But that's hopefully unlikely.
*
* What we do is to take the bits we can fit, and split them
* into three parts (16*997*1009 = 16095568), and use the values
* for:
* - 0-15: user-settable
* - 0-996: file + line number
* - 0-1008: device
*/
#define USERHASH (16)
#define FILEHASH (997)
#define DEVHASH (1009)
#define DEVSEED (7919)
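/*
 * Editor's sketch (not part of the original file): how the three hash
 * components are packed into the sub-24-bit value that fits in the RTC
 * fields, and how they are recovered again.  16 * 997 * 1009 = 16095568
 * possible values, which stays inside the 0xf61800 range described above.
 * This mirrors set_magic_time() and late_resume_init() below.
 */
static inline unsigned int trace_pack_sketch(unsigned int user,
					     unsigned int file,
					     unsigned int device)
{
	return user + USERHASH * (file + FILEHASH * device);
}

static inline void trace_unpack_sketch(unsigned int n, unsigned int *user,
				       unsigned int *file, unsigned int *device)
{
	*user = n % USERHASH;
	n /= USERHASH;
	*file = n % FILEHASH;
	n /= FILEHASH;
	*device = n;	/* effectively n % DEVHASH */
}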
static unsigned int dev_hash_value;
static int set_magic_time(unsigned int user, unsigned int file, unsigned int device)
{
unsigned int n = user + USERHASH*(file + FILEHASH*device);
// June 7th, 2006
static struct rtc_time time = {
.tm_sec = 0,
.tm_min = 0,
.tm_hour = 0,
.tm_mday = 7,
.tm_mon = 5, // June - counting from zero
.tm_year = 106,
.tm_wday = 3,
.tm_yday = 160,
.tm_isdst = 1
};
time.tm_year = (n % 100);
n /= 100;
time.tm_mon = (n % 12);
n /= 12;
time.tm_mday = (n % 28) + 1;
n /= 28;
time.tm_hour = (n % 24);
n /= 24;
time.tm_min = (n % 20) * 3;
n /= 20;
set_rtc_time(&time);
return n ? -1 : 0;
}
static unsigned int read_magic_time(void)
{
struct rtc_time time;
unsigned int val;
get_rtc_time(&time);
pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
time.tm_hour, time.tm_min, time.tm_sec,
time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
val = time.tm_year; /* 100 years */
if (val > 100)
val -= 100;
val += time.tm_mon * 100; /* 12 months */
val += (time.tm_mday-1) * 100 * 12; /* 28 month-days */
val += time.tm_hour * 100 * 12 * 28; /* 24 hours */
val += (time.tm_min / 3) * 100 * 12 * 28 * 24; /* 20 3-minute intervals */
return val;
}
/*
* This is just the sdbm hash function with a user-supplied
* seed and final size parameter.
*/
static unsigned int hash_string(unsigned int seed, const char *data, unsigned int mod)
{
unsigned char c;
while ((c = *data++) != 0) {
seed = (seed << 16) + (seed << 6) - seed + c;
}
return seed % mod;
}
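/*
 * Editor's note (illustrative, not in the original): the per-character step
 * (seed << 16) + (seed << 6) - seed is simply seed * 65599, the classic
 * sdbm multiplier, i.e. the loop above is equivalent to
 *
 *	seed = seed * 65599 + c;
 */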
void set_trace_device(struct device *dev)
{
dev_hash_value = hash_string(DEVSEED, dev_name(dev), DEVHASH);
}
EXPORT_SYMBOL(set_trace_device);
/*
* We could just take the "tracedata" index into the .tracedata
* section instead. Generating a hash of the data gives us a
* chance to work across kernel versions, and perhaps more
* importantly it also gives us valid/invalid check (ie we will
* likely not give totally bogus reports - if the hash matches,
* it's not any guarantee, but it's a high _likelihood_ that
* the match is valid).
*/
void generate_resume_trace(const void *tracedata, unsigned int user)
{
unsigned short lineno = *(unsigned short *)tracedata;
const char *file = *(const char **)(tracedata + 2);
unsigned int user_hash_value, file_hash_value;
user_hash_value = user % USERHASH;
file_hash_value = hash_string(lineno, file, FILEHASH);
set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
}
EXPORT_SYMBOL(generate_resume_trace);
extern char __tracedata_start, __tracedata_end;
static int show_file_hash(unsigned int value)
{
int match;
char *tracedata;
match = 0;
for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ;
tracedata += 2 + sizeof(unsigned long)) {
unsigned short lineno = *(unsigned short *)tracedata;
const char *file = *(const char **)(tracedata + 2);
unsigned int hash = hash_string(lineno, file, FILEHASH);
if (hash != value)
continue;
pr_info(" hash matches %s:%u\n", file, lineno);
match++;
}
return match;
}
static int show_dev_hash(unsigned int value)
{
int match = 0;
struct list_head *entry;
device_pm_lock();
entry = dpm_list.prev;
while (entry != &dpm_list) {
struct device * dev = to_device(entry);
unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH);
if (hash == value) {
dev_info(dev, "hash matches\n");
match++;
}
entry = entry->prev;
}
device_pm_unlock();
return match;
}
static unsigned int hash_value_early_read;
int show_trace_dev_match(char *buf, size_t size)
{
unsigned int value = hash_value_early_read / (USERHASH * FILEHASH);
int ret = 0;
struct list_head *entry;
/*
* It's possible that multiple devices will match the hash and we can't
* tell which is the culprit, so it's best to output them all.
*/
device_pm_lock();
entry = dpm_list.prev;
while (size && entry != &dpm_list) {
struct device *dev = to_device(entry);
unsigned int hash = hash_string(DEVSEED, dev_name(dev),
DEVHASH);
if (hash == value) {
int len = snprintf(buf, size, "%s\n",
dev_driver_string(dev));
if (len > size)
len = size;
buf += len;
ret += len;
size -= len;
}
entry = entry->prev;
}
device_pm_unlock();
return ret;
}
static int early_resume_init(void)
{
hash_value_early_read = read_magic_time();
return 0;
}
static int late_resume_init(void)
{
unsigned int val = hash_value_early_read;
unsigned int user, file, dev;
user = val % USERHASH;
val = val / USERHASH;
file = val % FILEHASH;
val = val / FILEHASH;
dev = val /* % DEVHASH */;
pr_info(" Magic number: %d:%d:%d\n", user, file, dev);
show_file_hash(file);
show_dev_hash(dev);
return 0;
}
core_initcall(early_resume_init);
late_initcall(late_resume_init);
| gpl-2.0 |
12thmantec/linux-3.5 | arch/arm/mach-pxa/clock.c | 9202 | 1532 | /*
 * linux/arch/arm/mach-pxa/clock.c
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clkdev.h>
#include "clock.h"
static DEFINE_SPINLOCK(clocks_lock);
int clk_enable(struct clk *clk)
{
unsigned long flags;
spin_lock_irqsave(&clocks_lock, flags);
if (clk->enabled++ == 0)
clk->ops->enable(clk);
spin_unlock_irqrestore(&clocks_lock, flags);
if (clk->delay)
udelay(clk->delay);
return 0;
}
EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
unsigned long flags;
WARN_ON(clk->enabled == 0);
spin_lock_irqsave(&clocks_lock, flags);
if (--clk->enabled == 0)
clk->ops->disable(clk);
spin_unlock_irqrestore(&clocks_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
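/*
 * Editor's usage sketch (not part of the original file): clk_enable() and
 * clk_disable() are refcounted, so the hardware is only touched on the
 * first enable and the last disable.  "my_clk" is a hypothetical clock
 * obtained elsewhere (e.g. via clk_get()).
 */
static inline int example_clk_use_sketch(struct clk *my_clk)
{
	int ret = clk_enable(my_clk);	/* first caller turns the clock on */
	if (ret)
		return ret;
	/* ... talk to the peripheral ... */
	clk_disable(my_clk);		/* last caller turns the clock off */
	return 0;
}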
unsigned long clk_get_rate(struct clk *clk)
{
unsigned long rate;
rate = clk->rate;
if (clk->ops->getrate)
rate = clk->ops->getrate(clk);
return rate;
}
EXPORT_SYMBOL(clk_get_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
unsigned long flags;
int ret = -EINVAL;
if (clk->ops->setrate) {
spin_lock_irqsave(&clocks_lock, flags);
ret = clk->ops->setrate(clk, rate);
spin_unlock_irqrestore(&clocks_lock, flags);
}
return ret;
}
EXPORT_SYMBOL(clk_set_rate);
void clk_dummy_enable(struct clk *clk)
{
}
void clk_dummy_disable(struct clk *clk)
{
}
const struct clkops clk_dummy_ops = {
.enable = clk_dummy_enable,
.disable = clk_dummy_disable,
};
struct clk clk_dummy = {
.ops = &clk_dummy_ops,
};
| gpl-2.0 |
CyanogenMod/android_kernel_htc_msm8994 | drivers/video/msm/mdss/mdp3_ppp.c | 243 | 39399 | /* Copyright (c) 2007, 2013-2015 The Linux Foundation. All rights reserved.
* Copyright (C) 2007 Google Incorporated
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/file.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/sync.h>
#include <linux/sw_sync.h>
#include "linux/proc_fs.h"
#include <linux/delay.h>
#include "mdss_fb.h"
#include "mdp3_ppp.h"
#include "mdp3_hwio.h"
#include "mdp3.h"
#include "mdss_debug.h"
#define MDP_IS_IMGTYPE_BAD(x) ((x) >= MDP_IMGTYPE_LIMIT)
#define MDP_RELEASE_BW_TIMEOUT 50
#define MDP_PPP_MAX_BPP 4
#define MDP_PPP_DYNAMIC_FACTOR 3
#define MDP_PPP_MAX_READ_WRITE 3
#define ENABLE_SOLID_FILL 0x2
#define DISABLE_SOLID_FILL 0x0
struct ppp_resource ppp_res;
static const bool valid_fmt[MDP_IMGTYPE_LIMIT] = {
[MDP_RGB_565] = true,
[MDP_BGR_565] = true,
[MDP_RGB_888] = true,
[MDP_BGR_888] = true,
[MDP_BGRA_8888] = true,
[MDP_RGBA_8888] = true,
[MDP_ARGB_8888] = true,
[MDP_XRGB_8888] = true,
[MDP_RGBX_8888] = true,
[MDP_Y_CRCB_H2V2] = true,
[MDP_Y_CBCR_H2V2] = true,
[MDP_Y_CBCR_H2V2_ADRENO] = true,
[MDP_Y_CBCR_H2V2_VENUS] = true,
[MDP_YCRYCB_H2V1] = true,
[MDP_Y_CBCR_H2V1] = true,
[MDP_Y_CRCB_H2V1] = true,
[MDP_BGRX_8888] = true,
};
#define MAX_LIST_WINDOW 16
#define MDP3_PPP_MAX_LIST_REQ 8
struct blit_req_list {
int count;
struct mdp_blit_req req_list[MAX_LIST_WINDOW];
struct mdp3_img_data src_data[MAX_LIST_WINDOW];
struct mdp3_img_data dst_data[MAX_LIST_WINDOW];
struct sync_fence *acq_fen[MDP_MAX_FENCE_FD];
u32 acq_fen_cnt;
int cur_rel_fen_fd;
struct sync_pt *cur_rel_sync_pt;
struct sync_fence *cur_rel_fence;
struct sync_fence *last_rel_fence;
};
struct blit_req_queue {
struct blit_req_list req[MDP3_PPP_MAX_LIST_REQ];
int count;
int push_idx;
int pop_idx;
};
struct ppp_status {
bool wait_for_pop;
struct completion ppp_comp;
struct completion pop_q_comp;
struct mutex req_mutex; /* Protect request queue */
struct mutex config_ppp_mutex; /* Only one client configure register */
struct msm_fb_data_type *mfd;
struct work_struct blit_work;
struct blit_req_queue req_q;
struct sw_sync_timeline *timeline;
int timeline_value;
struct timer_list free_bw_timer;
struct work_struct free_bw_work;
bool bw_update;
bool bw_on;
u32 mdp_clk;
};
static struct ppp_status *ppp_stat;
int ppp_get_bpp(uint32_t format, uint32_t fb_format)
{
int bpp = -EINVAL;
if (format == MDP_FB_FORMAT)
format = fb_format;
bpp = ppp_bpp(format);
if (bpp <= 0)
pr_err("%s incorrect format %d\n", __func__, format);
return bpp;
}
int mdp3_ppp_get_img(struct mdp_img *img, struct mdp_blit_req *req,
struct mdp3_img_data *data)
{
struct msmfb_data fb_data;
uint32_t stride;
int bpp = ppp_bpp(img->format);
if (bpp <= 0) {
pr_err("%s incorrect format %d\n", __func__, img->format);
return -EINVAL;
}
fb_data.flags = img->priv;
fb_data.memory_id = img->memory_id;
fb_data.offset = 0;
stride = img->width * bpp;
data->padding = 16 * stride;
return mdp3_get_img(&fb_data, data, MDP3_CLIENT_PPP);
}
/* Check format */
int mdp3_ppp_verify_fmt(struct mdp_blit_req *req)
{
if (MDP_IS_IMGTYPE_BAD(req->src.format) ||
MDP_IS_IMGTYPE_BAD(req->dst.format)) {
pr_err("%s: Color format out of range\n", __func__);
return -EINVAL;
}
if (!valid_fmt[req->src.format] ||
!valid_fmt[req->dst.format]) {
pr_err("%s: Color format not supported\n", __func__);
return -EINVAL;
}
return 0;
}
/* Check resolution */
int mdp3_ppp_verify_res(struct mdp_blit_req *req)
{
if ((req->src.width == 0) || (req->src.height == 0) ||
(req->src_rect.w == 0) || (req->src_rect.h == 0) ||
(req->dst.width == 0) || (req->dst.height == 0) ||
(req->dst_rect.w == 0) || (req->dst_rect.h == 0)) {
pr_err("%s: Height/width can't be 0\n", __func__);
return -EINVAL;
}
if (((req->src_rect.x + req->src_rect.w) > req->src.width) ||
((req->src_rect.y + req->src_rect.h) > req->src.height)) {
pr_err("%s: src roi larger than boundary\n", __func__);
return -EINVAL;
}
if (((req->dst_rect.x + req->dst_rect.w) > req->dst.width) ||
((req->dst_rect.y + req->dst_rect.h) > req->dst.height)) {
pr_err("%s: dst roi larger than boundary\n", __func__);
return -EINVAL;
}
return 0;
}
/* scaling range check */
int mdp3_ppp_verify_scale(struct mdp_blit_req *req)
{
u32 src_width, src_height, dst_width, dst_height;
src_width = req->src_rect.w;
src_height = req->src_rect.h;
if (req->flags & MDP_ROT_90) {
dst_width = req->dst_rect.h;
dst_height = req->dst_rect.w;
} else {
dst_width = req->dst_rect.w;
dst_height = req->dst_rect.h;
}
switch (req->dst.format) {
case MDP_Y_CRCB_H2V2:
case MDP_Y_CBCR_H2V2:
src_width = (src_width / 2) * 2;
src_height = (src_height / 2) * 2;
dst_width = (dst_width / 2) * 2;
dst_height = (dst_height / 2) * 2;
break;
case MDP_Y_CRCB_H2V1:
case MDP_Y_CBCR_H2V1:
case MDP_YCRYCB_H2V1:
src_width = (src_width / 2) * 2;
dst_width = (dst_width / 2) * 2;
break;
default:
break;
}
if (((MDP_SCALE_Q_FACTOR * dst_width) / src_width >
MDP_MAX_X_SCALE_FACTOR)
|| ((MDP_SCALE_Q_FACTOR * dst_width) / src_width <
MDP_MIN_X_SCALE_FACTOR)) {
pr_err("%s: x req scale factor beyond capability\n", __func__);
return -EINVAL;
}
if (((MDP_SCALE_Q_FACTOR * dst_height) / src_height >
MDP_MAX_Y_SCALE_FACTOR)
|| ((MDP_SCALE_Q_FACTOR * dst_height) / src_height <
MDP_MIN_Y_SCALE_FACTOR)) {
pr_err("%s: y req scale factor beyond capability\n", __func__);
return -EINVAL;
}
return 0;
}
/* operation check */
int mdp3_ppp_verify_op(struct mdp_blit_req *req)
{
/*
* MDP_DEINTERLACE & MDP_SHARPENING Flags are not valid for MDP3
* so using them together for MDP_SMART_BLIT.
*/
if ((req->flags & MDP_SMART_BLIT) == MDP_SMART_BLIT)
return 0;
if (req->flags & MDP_DEINTERLACE) {
pr_err("\n%s(): deinterlace not supported", __func__);
return -EINVAL;
}
if (req->flags & MDP_SHARPENING) {
pr_err("\n%s(): sharpening not supported", __func__);
return -EINVAL;
}
return 0;
}
int mdp3_ppp_verify_req(struct mdp_blit_req *req)
{
int rc;
if (req == NULL) {
pr_err("%s: req == null\n", __func__);
return -EINVAL;
}
rc = mdp3_ppp_verify_fmt(req);
rc |= mdp3_ppp_verify_res(req);
rc |= mdp3_ppp_verify_scale(req);
rc |= mdp3_ppp_verify_op(req);
return rc;
}
int mdp3_ppp_pipe_wait(void)
{
int ret = 1;
/*
* wait 200 ms for ppp operation to complete before declaring
* the MDP hung
*/
ret = wait_for_completion_timeout(
&ppp_stat->ppp_comp, msecs_to_jiffies(200));
if (!ret)
pr_err("%s: Timed out waiting for the MDP.\n",
__func__);
return ret;
}
uint32_t mdp3_calc_tpval(struct ppp_img_desc *img, uint32_t old_tp)
{
uint32_t tpVal;
uint8_t plane_tp;
tpVal = 0;
if ((img->color_fmt == MDP_RGB_565)
|| (img->color_fmt == MDP_BGR_565)) {
/* transparent color conversion into 24 bpp */
plane_tp = (uint8_t) ((old_tp & 0xF800) >> 11);
tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 16;
plane_tp = (uint8_t) (old_tp & 0x1F);
tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 8;
plane_tp = (uint8_t) ((old_tp & 0x7E0) >> 5);
tpVal |= ((plane_tp << 2) | ((plane_tp & 0x30) >> 4));
} else {
/* 24bit RGB to RBG conversion */
tpVal = (old_tp & 0xFF00) >> 8;
tpVal |= (old_tp & 0xFF) << 8;
tpVal |= (old_tp & 0xFF0000);
}
return tpVal;
}
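/*
 * Editor's note (illustrative, not in the original): the RGB565 expansion
 * above replicates the top bits of each 5/6-bit component into the low
 * bits of the 8-bit result, e.g. for a 5-bit value v:
 *
 *	v8 = (v << 3) | (v >> 2);	so 0x1f -> 0xff and 0x00 -> 0x00
 *
 * and for a 6-bit value v:  v8 = (v << 2) | (v >> 4).
 */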
static void mdp3_ppp_intr_handler(int type, void *arg)
{
complete(&ppp_stat->ppp_comp);
}
static int mdp3_ppp_callback_setup(void)
{
int rc;
struct mdp3_intr_cb ppp_done_cb = {
.cb = mdp3_ppp_intr_handler,
.data = NULL,
};
rc = mdp3_set_intr_callback(MDP3_PPP_DONE, &ppp_done_cb);
return rc;
}
void mdp3_ppp_kickoff(void)
{
init_completion(&ppp_stat->ppp_comp);
mdp3_irq_enable(MDP3_PPP_DONE);
ppp_enable();
ATRACE_BEGIN("mdp3_wait_for_ppp_comp");
mdp3_ppp_pipe_wait();
ATRACE_END("mdp3_wait_for_ppp_comp");
mdp3_irq_disable(MDP3_PPP_DONE);
}
u32 mdp3_clk_calc(struct msm_fb_data_type *mfd, struct blit_req_list *lreq)
{
struct mdss_panel_info *panel_info = mfd->panel_info;
int i, lcount = 0;
struct mdp_blit_req *req;
u32 total_pixel;
u32 mdp_clk_rate = MDP_CORE_CLK_RATE_SVS;
total_pixel = panel_info->xres * panel_info->yres;
if (total_pixel > SVS_MAX_PIXEL)
return MDP_CORE_CLK_RATE_MAX;
lcount = lreq->count;
for (i = 0; i < lcount; i++) {
req = &(lreq->req_list[i]);
if (req->src_rect.h != req->dst_rect.h ||
req->src_rect.w != req->dst_rect.w) {
mdp_clk_rate = MDP_CORE_CLK_RATE_MAX;
break;
}
}
return mdp_clk_rate;
}
struct bpp_info {
int bpp_num;
int bpp_den;
int bpp_pln;
};
int mdp3_get_bpp_info(int format, struct bpp_info *bpp)
{
int rc = 0;
switch (format) {
case MDP_RGB_565:
case MDP_BGR_565:
bpp->bpp_num = 2;
bpp->bpp_den = 1;
bpp->bpp_pln = 2;
break;
case MDP_RGB_888:
case MDP_BGR_888:
bpp->bpp_num = 3;
bpp->bpp_den = 1;
bpp->bpp_pln = 3;
break;
case MDP_BGRA_8888:
case MDP_RGBA_8888:
case MDP_ARGB_8888:
case MDP_XRGB_8888:
case MDP_RGBX_8888:
case MDP_BGRX_8888:
bpp->bpp_num = 4;
bpp->bpp_den = 1;
bpp->bpp_pln = 4;
break;
case MDP_Y_CRCB_H2V2:
case MDP_Y_CBCR_H2V2:
case MDP_Y_CBCR_H2V2_ADRENO:
case MDP_Y_CBCR_H2V2_VENUS:
bpp->bpp_num = 3;
bpp->bpp_den = 2;
bpp->bpp_pln = 1;
break;
case MDP_Y_CBCR_H2V1:
case MDP_Y_CRCB_H2V1:
bpp->bpp_num = 2;
bpp->bpp_den = 1;
bpp->bpp_pln = 1;
break;
case MDP_YCRYCB_H2V1:
bpp->bpp_num = 2;
bpp->bpp_den = 1;
bpp->bpp_pln = 2;
break;
default:
rc = -EINVAL;
}
return rc;
}
u64 mdp3_adjust_scale_factor(struct mdp_blit_req *req, u32 bw_req, int bpp)
{
int src_h, src_w;
int dst_h, dst_w;
src_h = req->src_rect.h;
src_w = req->src_rect.w;
dst_h = req->dst_rect.h;
dst_w = req->dst_rect.w;
if ((!(req->flags & MDP_ROT_90) && src_h == dst_h && src_w == dst_w) ||
((req->flags & MDP_ROT_90) && src_h == dst_w && src_w == dst_h))
return bw_req;
bw_req = (bw_req + (bw_req * dst_h) / (4 * src_h));
bw_req = (bw_req + (bw_req * dst_w) / (4 * src_w) +
(bw_req * dst_w) / (bpp * src_w));
return bw_req;
}
int mdp3_calc_ppp_res(struct msm_fb_data_type *mfd, struct blit_req_list *lreq)
{
struct mdss_panel_info *panel_info = mfd->panel_info;
int i, lcount = 0;
struct mdp_blit_req *req;
struct bpp_info bpp;
u32 src_read_bw = 0;
u32 bg_read_bw = 0;
u32 dst_write_bw = 0;
u64 honest_ppp_ab = 0;
u32 fps = 0;
int smart_blit_fg_indx = -1;
u32 smart_blit_bg_read_bw = 0;
ATRACE_BEGIN(__func__);
lcount = lreq->count;
if (lcount == 0) {
pr_err("Blit with request count 0, continue to recover!!!\n");
ATRACE_END(__func__);
return 0;
}
if (lreq->req_list[0].flags & MDP_SOLID_FILL) {
/* Do not update BW for solid fill */
ATRACE_END(__func__);
return 0;
}
for (i = 0; i < lcount; i++) {
req = &(lreq->req_list[i]);
if (req->fps > 0 && req->fps <= panel_info->mipi.frame_rate) {
if (fps == 0)
fps = req->fps;
else
fps = panel_info->mipi.frame_rate;
}
mdp3_get_bpp_info(req->src.format, &bpp);
if ((bpp.bpp_pln == 1 || req->src.format == MDP_YCRYCB_H2V1) &&
req->src_rect.w >= 1280 && req->src_rect.h >= 720) {
/* Above 720p only 30fps video playback is supported */
fps = 30;
} else {
/**
* Set FPS to mipi rate as currently there is
* no way to get this
*/
fps = panel_info->mipi.frame_rate;
}
if (lreq->req_list[i].flags & MDP_SMART_BLIT) {
/*
* Flag for smart blit FG layer index
* If blit request at index "n" has
* MDP_SMART_BLIT flag set then it will be used as BG
* layer in smart blit and request at index "n+1"
* will be used as FG layer
*/
smart_blit_fg_indx = i + 1;
bg_read_bw = req->src_rect.w * req->src_rect.h *
bpp.bpp_num / bpp.bpp_den;
bg_read_bw = mdp3_adjust_scale_factor(req,
bg_read_bw, bpp.bpp_pln);
/* Cache read BW of smart blit BG layer */
smart_blit_bg_read_bw = bg_read_bw;
} else {
src_read_bw = req->src_rect.w * req->src_rect.h *
bpp.bpp_num / bpp.bpp_den;
src_read_bw = mdp3_adjust_scale_factor(req,
src_read_bw, bpp.bpp_pln);
mdp3_get_bpp_info(req->dst.format, &bpp);
if (smart_blit_fg_indx == i) {
bg_read_bw = smart_blit_bg_read_bw;
smart_blit_fg_indx = -1;
} else {
if ((req->transp_mask != MDP_TRANSP_NOP) ||
(req->alpha < MDP_ALPHA_NOP) ||
(req->src.format == MDP_ARGB_8888) ||
(req->src.format == MDP_BGRA_8888) ||
(req->src.format == MDP_RGBA_8888)) {
bg_read_bw = req->dst_rect.w * req->dst_rect.h *
bpp.bpp_num / bpp.bpp_den;
bg_read_bw = mdp3_adjust_scale_factor(req,
bg_read_bw, bpp.bpp_pln);
} else {
bg_read_bw = 0;
}
}
dst_write_bw = req->dst_rect.w * req->dst_rect.h *
bpp.bpp_num / bpp.bpp_den;
honest_ppp_ab += (src_read_bw + bg_read_bw + dst_write_bw);
}
}
if (fps != 0)
honest_ppp_ab = honest_ppp_ab * fps;
else
honest_ppp_ab = honest_ppp_ab * panel_info->mipi.frame_rate;
if (honest_ppp_ab != ppp_res.next_ab) {
pr_debug("bandwidth vote update for ppp: ab = %llx\n",
honest_ppp_ab);
ppp_res.next_ab = honest_ppp_ab;
ppp_res.next_ib = honest_ppp_ab;
ppp_stat->bw_update = true;
ATRACE_INT("mdp3_ppp_bus_quota", honest_ppp_ab);
}
ppp_res.clk_rate = mdp3_clk_calc(mfd, lreq);
ATRACE_INT("mdp3_ppp_clk_rate", ppp_res.clk_rate);
ATRACE_END(__func__);
return 0;
}
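/*
* Worked example (illustrative): a full-screen 720p RGB565 copy with no
* blending, assuming a 60 Hz panel, contributes
* src_read_bw = dst_write_bw = 1280 * 720 * 2 = 1843200 bytes per frame and
* bg_read_bw = 0, so honest_ppp_ab = 2 * 1843200 * 60 = 221184000 bytes/s.
*/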
int mdp3_ppp_turnon(struct msm_fb_data_type *mfd, int on_off)
{
uint64_t ab = 0, ib = 0;
int rate = 0;
int rc;
if (on_off) {
rate = ppp_res.clk_rate;
ab = ppp_res.next_ab;
ib = ppp_res.next_ib;
}
mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, rate, MDP3_CLIENT_PPP);
rc = mdp3_res_update(on_off, 0, MDP3_CLIENT_PPP);
if (rc < 0) {
pr_err("%s: mdp3_clk_enable failed\n", __func__);
return rc;
}
rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP, ab, ib);
if (rc < 0) {
mdp3_res_update(!on_off, 0, MDP3_CLIENT_PPP);
pr_err("%s: scale_set_quota failed\n", __func__);
return rc;
}
ppp_stat->bw_on = on_off;
ppp_stat->mdp_clk = MDP_CORE_CLK_RATE_SVS;
ppp_stat->bw_update = false;
return 0;
}
void mdp3_start_ppp(struct ppp_blit_op *blit_op)
{
/* Wait for the pipe to clear */
if (MDP3_REG_READ(MDP3_REG_DISPLAY_STATUS) &
MDP3_PPP_ACTIVE) {
pr_err("ppp core is hung up on previous request\n");
return;
}
config_ppp_op_mode(blit_op);
if (blit_op->solid_fill) {
MDP3_REG_WRITE(0x10138, 0x10000000);
MDP3_REG_WRITE(0x1014c, 0xffffffff);
MDP3_REG_WRITE(0x101b8, 0);
MDP3_REG_WRITE(0x101bc, 0);
MDP3_REG_WRITE(0x1013c, 0);
MDP3_REG_WRITE(0x10140, 0);
MDP3_REG_WRITE(0x10144, 0);
MDP3_REG_WRITE(0x10148, 0);
MDP3_REG_WRITE(MDP3_TFETCH_FILL_COLOR,
blit_op->solid_fill_color);
MDP3_REG_WRITE(MDP3_TFETCH_SOLID_FILL,
ENABLE_SOLID_FILL);
} else {
MDP3_REG_WRITE(MDP3_TFETCH_SOLID_FILL,
DISABLE_SOLID_FILL);
}
/* Skip PPP kickoff for SMART_BLIT BG layer */
if (blit_op->mdp_op & MDPOP_SMART_BLIT)
pr_debug("Skip mdp3_ppp_kickoff\n");
else
mdp3_ppp_kickoff();
}
static int solid_fill_workaround(struct mdp_blit_req *req,
struct ppp_blit_op *blit_op)
{
/* Make width 2 when there is a solid fill of width 1, and make
sure width does not become zero while trying to avoid odd width */
if (blit_op->dst.roi.width == 1) {
if (req->dst_rect.x + 2 > req->dst.width) {
pr_err("%s: Unable to handle solid fill of width 1",
__func__);
return -EINVAL;
}
blit_op->dst.roi.width = 2;
}
if (blit_op->src.roi.width == 1) {
if (req->src_rect.x + 2 > req->src.width) {
pr_err("%s: Unable to handle solid fill of width 1",
__func__);
return -EINVAL;
}
blit_op->src.roi.width = 2;
}
/* Avoid odd width, as it could hang ppp during solid fill */
blit_op->dst.roi.width = (blit_op->dst.roi.width / 2) * 2;
blit_op->src.roi.width = (blit_op->src.roi.width / 2) * 2;
/* Avoid RGBA format, as it could hang ppp during solid fill */
if (blit_op->src.color_fmt == MDP_RGBA_8888)
blit_op->src.color_fmt = MDP_RGBX_8888;
if (blit_op->dst.color_fmt == MDP_RGBA_8888)
blit_op->dst.color_fmt = MDP_RGBX_8888;
return 0;
}
static int mdp3_ppp_process_req(struct ppp_blit_op *blit_op,
struct mdp_blit_req *req, struct mdp3_img_data *src_data,
struct mdp3_img_data *dst_data)
{
unsigned long srcp0_start, srcp0_len, dst_start, dst_len;
uint32_t dst_width, dst_height;
int ret = 0;
srcp0_start = (unsigned long) src_data->addr;
srcp0_len = (unsigned long) src_data->len;
dst_start = (unsigned long) dst_data->addr;
dst_len = (unsigned long) dst_data->len;
blit_op->dst.prop.width = req->dst.width;
blit_op->dst.prop.height = req->dst.height;
blit_op->dst.color_fmt = req->dst.format;
blit_op->dst.p0 = (void *) dst_start;
blit_op->dst.p0 += req->dst.offset;
blit_op->dst.roi.x = req->dst_rect.x;
blit_op->dst.roi.y = req->dst_rect.y;
blit_op->dst.roi.width = req->dst_rect.w;
blit_op->dst.roi.height = req->dst_rect.h;
blit_op->src.roi.x = req->src_rect.x;
blit_op->src.roi.y = req->src_rect.y;
blit_op->src.roi.width = req->src_rect.w;
blit_op->src.roi.height = req->src_rect.h;
blit_op->src.prop.width = req->src.width;
blit_op->src.prop.height = req->src.height;
blit_op->src.color_fmt = req->src.format;
blit_op->src.p0 = (void *) (srcp0_start + req->src.offset);
if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_ADRENO)
blit_op->src.p1 =
(void *) ((uint32_t) blit_op->src.p0 +
ALIGN((ALIGN(req->src.width, 32) *
ALIGN(req->src.height, 32)), 4096));
else if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_VENUS)
blit_op->src.p1 =
(void *) ((uint32_t) blit_op->src.p0 +
ALIGN((ALIGN(req->src.width, 128) *
ALIGN(req->src.height, 32)), 4096));
else
blit_op->src.p1 = (void *) ((uint32_t) blit_op->src.p0 +
req->src.width * req->src.height);
if (req->flags & MDP_IS_FG)
blit_op->mdp_op |= MDPOP_LAYER_IS_FG;
/* blending check */
if (req->transp_mask != MDP_TRANSP_NOP) {
blit_op->mdp_op |= MDPOP_TRANSP;
blit_op->blend.trans_color =
mdp3_calc_tpval(&blit_op->src, req->transp_mask);
} else {
blit_op->blend.trans_color = 0;
}
req->alpha &= 0xff;
if (req->alpha < MDP_ALPHA_NOP) {
blit_op->mdp_op |= MDPOP_ALPHAB;
blit_op->blend.const_alpha = req->alpha;
} else {
blit_op->blend.const_alpha = 0xff;
}
/* rotation check */
if (req->flags & MDP_FLIP_LR)
blit_op->mdp_op |= MDPOP_LR;
if (req->flags & MDP_FLIP_UD)
blit_op->mdp_op |= MDPOP_UD;
if (req->flags & MDP_ROT_90)
blit_op->mdp_op |= MDPOP_ROT90;
if (req->flags & MDP_DITHER)
blit_op->mdp_op |= MDPOP_DITHER;
if (req->flags & MDP_BLEND_FG_PREMULT)
blit_op->mdp_op |= MDPOP_FG_PM_ALPHA;
/* scale check */
if (req->flags & MDP_ROT_90) {
dst_width = req->dst_rect.h;
dst_height = req->dst_rect.w;
} else {
dst_width = req->dst_rect.w;
dst_height = req->dst_rect.h;
}
if ((blit_op->src.roi.width != dst_width) ||
(blit_op->src.roi.height != dst_height))
blit_op->mdp_op |= MDPOP_ASCALE;
if (req->flags & MDP_BLUR)
blit_op->mdp_op |= MDPOP_ASCALE | MDPOP_BLUR;
if (req->flags & MDP_SOLID_FILL) {
ret = solid_fill_workaround(req, blit_op);
if (ret)
return ret;
blit_op->solid_fill_color = (req->const_color.g & 0xFF)|
(req->const_color.r & 0xFF) << 8 |
(req->const_color.b & 0xFF) << 16 |
(req->const_color.alpha & 0xFF) << 24;
blit_op->solid_fill = true;
} else {
blit_op->solid_fill = false;
}
if (req->flags & MDP_SMART_BLIT)
blit_op->mdp_op |= MDPOP_SMART_BLIT;
return ret;
}
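/*
* Illustrative note (not in the original source): the solid fill colour
* above is packed with green in the low byte, then red, blue and alpha,
* so opaque mid-grey (r = g = b = 0x80, alpha = 0xff) is written to the
* hardware as 0xff808080.
*/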
static void mdp3_ppp_tile_workaround(struct ppp_blit_op *blit_op,
struct mdp_blit_req *req)
{
int dst_h, src_w, i;
uint32_t mdp_op = blit_op->mdp_op;
void *src_p0 = blit_op->src.p0;
void *src_p1 = blit_op->src.p1;
void *dst_p0 = blit_op->dst.p0;
src_w = req->src_rect.w;
dst_h = blit_op->dst.roi.height;
/* bg tile fetching HW workaround */
for (i = 0; i < (req->dst_rect.h / 16); i++) {
/* this tile size */
blit_op->dst.roi.height = 16;
blit_op->src.roi.width =
(16 * req->src_rect.w) / req->dst_rect.h;
/* if it's out of scale range... */
if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR)
blit_op->src.roi.width =
(MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
MDP_MAX_X_SCALE_FACTOR;
else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR)
blit_op->src.roi.width =
(MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
MDP_MIN_X_SCALE_FACTOR;
mdp3_start_ppp(blit_op);
/* next tile location */
blit_op->dst.roi.y += 16;
blit_op->src.roi.x += blit_op->src.roi.width;
/* this is for a remainder update */
dst_h -= 16;
src_w -= blit_op->src.roi.width;
/* restore parameters that may have been overwritten */
blit_op->mdp_op = mdp_op;
blit_op->src.p0 = src_p0;
blit_op->src.p1 = src_p1;
blit_op->dst.p0 = dst_p0;
}
if ((dst_h < 0) || (src_w < 0))
pr_err("msm_fb: mdp_blt_ex() unexpected result! line:%d\n",
__LINE__);
/* remainder update */
if ((dst_h > 0) && (src_w > 0)) {
u32 tmp_v;
blit_op->dst.roi.height = dst_h;
blit_op->src.roi.width = src_w;
if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR) {
tmp_v =
(MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
MDP_MAX_X_SCALE_FACTOR +
((MDP_SCALE_Q_FACTOR *
blit_op->dst.roi.height) %
MDP_MAX_X_SCALE_FACTOR ? 1 : 0);
/* move x location as roi width gets bigger */
blit_op->src.roi.x -= tmp_v - blit_op->src.roi.width;
blit_op->src.roi.width = tmp_v;
} else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR) {
tmp_v =
(MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
MDP_MIN_X_SCALE_FACTOR +
((MDP_SCALE_Q_FACTOR *
blit_op->dst.roi.height) %
MDP_MIN_X_SCALE_FACTOR ? 1 : 0);
/*
* we don't move x location for continuity of
* source image
*/
blit_op->src.roi.width = tmp_v;
}
mdp3_start_ppp(blit_op);
}
}
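/*
* Worked example (illustrative): for dst_rect.h = 40 and src_rect.w = 100
* the tile loop above runs twice with dst.roi.height = 16 and
* src.roi.width = (16 * 100) / 40 = 40 per tile, leaving a remainder of
* dst_h = 8 and src_w = 20 for the final mdp3_start_ppp() call (the scale
* range clamping is ignored in this example).
*/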
static int mdp3_ppp_blit(struct msm_fb_data_type *mfd,
struct mdp_blit_req *req, struct mdp3_img_data *src_data,
struct mdp3_img_data *dst_data)
{
struct ppp_blit_op blit_op;
int ret = 0;
memset(&blit_op, 0, sizeof(blit_op));
if (req->dst.format == MDP_FB_FORMAT)
req->dst.format = mfd->fb_imgType;
if (req->src.format == MDP_FB_FORMAT)
req->src.format = mfd->fb_imgType;
if (mdp3_ppp_verify_req(req)) {
pr_err("%s: invalid image!\n", __func__);
return -EINVAL;
}
ret = mdp3_ppp_process_req(&blit_op, req, src_data, dst_data);
if (ret) {
pr_err("%s: Failed to process the blit request", __func__);
return ret;
}
if (((blit_op.mdp_op & (MDPOP_TRANSP | MDPOP_ALPHAB)) ||
(req->src.format == MDP_ARGB_8888) ||
(req->src.format == MDP_BGRA_8888) ||
(req->src.format == MDP_RGBA_8888)) &&
(blit_op.mdp_op & MDPOP_ROT90) && (req->dst_rect.w <= 16)) {
mdp3_ppp_tile_workaround(&blit_op, req);
} else {
mdp3_start_ppp(&blit_op);
}
return 0;
}
static int mdp3_ppp_blit_workaround(struct msm_fb_data_type *mfd,
struct mdp_blit_req *req, unsigned int remainder,
struct mdp3_img_data *src_data,
struct mdp3_img_data *dst_data)
{
int ret;
struct mdp_blit_req splitreq;
int s_x_0, s_x_1, s_w_0, s_w_1, s_y_0, s_y_1, s_h_0, s_h_1;
int d_x_0, d_x_1, d_w_0, d_w_1, d_y_0, d_y_1, d_h_0, d_h_1;
/* make new request as provided by user */
splitreq = *req;
/* break dest roi at width */
d_y_0 = d_y_1 = req->dst_rect.y;
d_h_0 = d_h_1 = req->dst_rect.h;
d_x_0 = req->dst_rect.x;
if (remainder == 14 || remainder == 6)
d_w_1 = req->dst_rect.w / 2;
else
d_w_1 = (req->dst_rect.w - 1) / 2 - 1;
d_w_0 = req->dst_rect.w - d_w_1;
d_x_1 = d_x_0 + d_w_0;
/* blit first region */
if (((splitreq.flags & 0x07) == 0x07) ||
((splitreq.flags & 0x07) == 0x05) ||
((splitreq.flags & 0x07) == 0x02) ||
((splitreq.flags & 0x07) == 0x0)) {
if (splitreq.flags & MDP_ROT_90) {
s_x_0 = s_x_1 = req->src_rect.x;
s_w_0 = s_w_1 = req->src_rect.w;
s_y_0 = req->src_rect.y;
s_h_1 = (req->src_rect.h * d_w_1) /
req->dst_rect.w;
s_h_0 = req->src_rect.h - s_h_1;
s_y_1 = s_y_0 + s_h_0;
if (d_w_1 >= 8 * s_h_1) {
s_h_1++;
s_y_1--;
}
} else {
s_y_0 = s_y_1 = req->src_rect.y;
s_h_0 = s_h_1 = req->src_rect.h;
s_x_0 = req->src_rect.x;
s_w_1 = (req->src_rect.w * d_w_1) /
req->dst_rect.w;
s_w_0 = req->src_rect.w - s_w_1;
s_x_1 = s_x_0 + s_w_0;
if (d_w_1 >= 8 * s_w_1) {
s_w_1++;
s_x_1--;
}
}
splitreq.src_rect.h = s_h_0;
splitreq.src_rect.y = s_y_0;
splitreq.dst_rect.h = d_h_0;
splitreq.dst_rect.y = d_y_0;
splitreq.src_rect.x = s_x_0;
splitreq.src_rect.w = s_w_0;
splitreq.dst_rect.x = d_x_0;
splitreq.dst_rect.w = d_w_0;
} else {
if (splitreq.flags & MDP_ROT_90) {
s_x_0 = s_x_1 = req->src_rect.x;
s_w_0 = s_w_1 = req->src_rect.w;
s_y_0 = req->src_rect.y;
s_h_1 = (req->src_rect.h * d_w_0) /
req->dst_rect.w;
s_h_0 = req->src_rect.h - s_h_1;
s_y_1 = s_y_0 + s_h_0;
if (d_w_0 >= 8 * s_h_1) {
s_h_1++;
s_y_1--;
}
} else {
s_y_0 = s_y_1 = req->src_rect.y;
s_h_0 = s_h_1 = req->src_rect.h;
s_x_0 = req->src_rect.x;
s_w_1 = (req->src_rect.w * d_w_0) /
req->dst_rect.w;
s_w_0 = req->src_rect.w - s_w_1;
s_x_1 = s_x_0 + s_w_0;
if (d_w_0 >= 8 * s_w_1) {
s_w_1++;
s_x_1--;
}
}
splitreq.src_rect.h = s_h_0;
splitreq.src_rect.y = s_y_0;
splitreq.dst_rect.h = d_h_1;
splitreq.dst_rect.y = d_y_1;
splitreq.src_rect.x = s_x_0;
splitreq.src_rect.w = s_w_0;
splitreq.dst_rect.x = d_x_1;
splitreq.dst_rect.w = d_w_1;
}
/* No need to split in height */
ret = mdp3_ppp_blit(mfd, &splitreq, src_data, dst_data);
if (ret)
return ret;
/* blit second region */
if (((splitreq.flags & 0x07) == 0x07) ||
((splitreq.flags & 0x07) == 0x05) ||
((splitreq.flags & 0x07) == 0x02) ||
((splitreq.flags & 0x07) == 0x0)) {
splitreq.src_rect.h = s_h_1;
splitreq.src_rect.y = s_y_1;
splitreq.dst_rect.h = d_h_1;
splitreq.dst_rect.y = d_y_1;
splitreq.src_rect.x = s_x_1;
splitreq.src_rect.w = s_w_1;
splitreq.dst_rect.x = d_x_1;
splitreq.dst_rect.w = d_w_1;
} else {
splitreq.src_rect.h = s_h_1;
splitreq.src_rect.y = s_y_1;
splitreq.dst_rect.h = d_h_0;
splitreq.dst_rect.y = d_y_0;
splitreq.src_rect.x = s_x_1;
splitreq.src_rect.w = s_w_1;
splitreq.dst_rect.x = d_x_0;
splitreq.dst_rect.w = d_w_0;
}
/* No need to split in height ... just width */
return mdp3_ppp_blit(mfd, &splitreq, src_data, dst_data);
}
int mdp3_ppp_start_blit(struct msm_fb_data_type *mfd,
struct mdp_blit_req *req,
struct mdp3_img_data *src_data,
struct mdp3_img_data *dst_data)
{
int ret;
unsigned int remainder = 0, is_bpp_4 = 0;
if (unlikely(req->src_rect.h == 0 || req->src_rect.w == 0)) {
pr_err("mdp_ppp: src img of zero size!\n");
return -EINVAL;
}
if (unlikely(req->dst_rect.h == 0 || req->dst_rect.w == 0))
return 0;
/* MDP width split workaround */
remainder = (req->dst_rect.w) % 16;
ret = ppp_get_bpp(req->dst.format, mfd->fb_imgType);
if (ret <= 0) {
pr_err("mdp_ppp: incorrect bpp!\n");
return -EINVAL;
}
is_bpp_4 = (ret == 4) ? 1 : 0;
if ((is_bpp_4 && (remainder == 6 || remainder == 14)) &&
!(req->flags & MDP_SOLID_FILL))
ret = mdp3_ppp_blit_workaround(mfd, req, remainder,
src_data, dst_data);
else
ret = mdp3_ppp_blit(mfd, req, src_data, dst_data);
return ret;
}
void mdp3_ppp_wait_for_fence(struct blit_req_list *req)
{
int i, ret = 0;
ATRACE_BEGIN(__func__);
/* buf sync */
for (i = 0; i < req->acq_fen_cnt; i++) {
ret = sync_fence_wait(req->acq_fen[i],
WAIT_FENCE_FINAL_TIMEOUT);
if (ret < 0) {
pr_err("%s: sync_fence_wait failed! ret = %x\n",
__func__, ret);
break;
}
sync_fence_put(req->acq_fen[i]);
}
ATRACE_END(__func__);
if (ret < 0) {
while (i < req->acq_fen_cnt) {
sync_fence_put(req->acq_fen[i]);
i++;
}
}
req->acq_fen_cnt = 0;
}
void mdp3_ppp_signal_timeline(struct blit_req_list *req)
{
sw_sync_timeline_inc(ppp_stat->timeline, 1);
req->last_rel_fence = req->cur_rel_fence;
req->cur_rel_fence = 0;
}
static void mdp3_ppp_deinit_buf_sync(struct blit_req_list *req)
{
int i;
put_unused_fd(req->cur_rel_fen_fd);
sync_fence_put(req->cur_rel_fence);
req->cur_rel_fence = NULL;
req->cur_rel_fen_fd = 0;
ppp_stat->timeline_value--;
for (i = 0; i < req->acq_fen_cnt; i++)
sync_fence_put(req->acq_fen[i]);
req->acq_fen_cnt = 0;
}
static int mdp3_ppp_handle_buf_sync(struct blit_req_list *req,
struct mdp_buf_sync *buf_sync)
{
int i, fence_cnt = 0, ret = 0;
int acq_fen_fd[MDP_MAX_FENCE_FD];
struct sync_fence *fence;
if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
(ppp_stat->timeline == NULL))
return -EINVAL;
if (buf_sync->acq_fen_fd_cnt)
ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
buf_sync->acq_fen_fd_cnt * sizeof(int));
if (ret) {
pr_err("%s: copy_from_user failed\n", __func__);
return ret;
}
for (i = 0; i < buf_sync->acq_fen_fd_cnt; i++) {
fence = sync_fence_fdget(acq_fen_fd[i]);
if (fence == NULL) {
pr_info("%s: null fence! i=%d fd=%d\n", __func__, i,
acq_fen_fd[i]);
ret = -EINVAL;
break;
}
req->acq_fen[i] = fence;
}
fence_cnt = i;
if (ret)
goto buf_sync_err_1;
req->acq_fen_cnt = fence_cnt;
if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT)
mdp3_ppp_wait_for_fence(req);
req->cur_rel_sync_pt = sw_sync_pt_create(ppp_stat->timeline,
ppp_stat->timeline_value++);
if (req->cur_rel_sync_pt == NULL) {
pr_err("%s: cannot create sync point\n", __func__);
ret = -ENOMEM;
goto buf_sync_err_2;
}
/* create fence */
req->cur_rel_fence = sync_fence_create("ppp-fence",
req->cur_rel_sync_pt);
if (req->cur_rel_fence == NULL) {
sync_pt_free(req->cur_rel_sync_pt);
req->cur_rel_sync_pt = NULL;
pr_err("%s: cannot create fence\n", __func__);
ret = -ENOMEM;
goto buf_sync_err_2;
}
/* create fd */
return ret;
buf_sync_err_2:
ppp_stat->timeline_value--;
buf_sync_err_1:
for (i = 0; i < fence_cnt; i++)
sync_fence_put(req->acq_fen[i]);
req->acq_fen_cnt = 0;
return ret;
}
void mdp3_ppp_req_push(struct blit_req_queue *req_q, struct blit_req_list *req)
{
int idx = req_q->push_idx;
req_q->req[idx] = *req;
req_q->count++;
req_q->push_idx = (req_q->push_idx + 1) % MDP3_PPP_MAX_LIST_REQ;
}
struct blit_req_list *mdp3_ppp_next_req(struct blit_req_queue *req_q)
{
struct blit_req_list *req;
if (req_q->count == 0)
return NULL;
req = &req_q->req[req_q->pop_idx];
return req;
}
void mdp3_ppp_req_pop(struct blit_req_queue *req_q)
{
req_q->count--;
req_q->pop_idx = (req_q->pop_idx + 1) % MDP3_PPP_MAX_LIST_REQ;
}
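/*
* Illustrative note: the request queue is a plain ring buffer; push_idx and
* pop_idx both wrap modulo MDP3_PPP_MAX_LIST_REQ while count tracks
* occupancy, so mdp3_ppp_parse_req() blocks once count reaches the maximum
* and resumes when the blit worker pops an entry.
*/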
void mdp3_free_fw_timer_func(unsigned long arg)
{
schedule_work(&ppp_stat->free_bw_work);
}
static void mdp3_free_bw_wq_handler(struct work_struct *work)
{
struct msm_fb_data_type *mfd = ppp_stat->mfd;
mutex_lock(&ppp_stat->config_ppp_mutex);
if (ppp_stat->bw_on) {
mdp3_ppp_turnon(mfd, 0);
}
mutex_unlock(&ppp_stat->config_ppp_mutex);
}
static bool is_blit_optimization_possible(struct blit_req_list *req, int indx)
{
int next = indx + 1;
bool status = false;
if (!(mdp3_res->smart_blit_en)) {
pr_debug("Smart BLIT disabled from sysfs\n");
return status;
}
if (next < req->count) {
/*
* Check userspace Smart BLIT Flag for current and next request
* Flag for smart blit FG layer index If blit request at index "n" has
* MDP_SMART_BLIT flag set then it will be used as BG layer in smart blit
* and request at index "n+1" will be used as FG layer
*/
if ((req->req_list[indx].flags & MDP_SMART_BLIT) &&
(!(req->req_list[next].flags & MDP_SMART_BLIT)))
status = true;
/*
* Enable SMART blit between request 0(BG) & request 1(FG) when
* destination ROI of BG and FG layer are same,
* No scaling on BG layer
* No rotation on BG Layer.
* BG Layer color format is RGB
*/
else if ((indx == 0) && (!(req->req_list[indx].flags &
(MDP_ROT_90 | MDP_FLIP_UD | MDP_FLIP_LR))) &&
(check_if_rgb(req->req_list[indx].src.format)) &&
(req->req_list[indx].dst_rect.x == req->req_list[next].dst_rect.x) &&
(req->req_list[indx].dst_rect.y == req->req_list[next].dst_rect.y) &&
(req->req_list[indx].dst_rect.w == req->req_list[next].dst_rect.w) &&
(req->req_list[indx].dst_rect.h == req->req_list[next].dst_rect.h) &&
(req->req_list[indx].dst_rect.w == req->req_list[indx].src_rect.w) &&
(req->req_list[indx].dst_rect.h == req->req_list[indx].src_rect.h)) {
status = true;
req->req_list[indx].flags |= MDP_SMART_BLIT;
}
}
if (status)
pr_debug("Optimize Blit for Layer: %d Req Count %d\n", indx, req->count) ;
return status;
}
static void mdp3_ppp_blit_wq_handler(struct work_struct *work)
{
struct msm_fb_data_type *mfd = ppp_stat->mfd;
struct blit_req_list *req;
int i, rc = 0;
bool smart_blit = false;
int smart_blit_fg_index = -1;
mutex_lock(&ppp_stat->config_ppp_mutex);
req = mdp3_ppp_next_req(&ppp_stat->req_q);
if (!req) {
mutex_unlock(&ppp_stat->config_ppp_mutex);
return;
}
if (!ppp_stat->bw_on) {
rc = mdp3_ppp_turnon(mfd, 1);
if (rc < 0) {
mutex_unlock(&ppp_stat->config_ppp_mutex);
pr_err("%s: Enable ppp resources failed\n", __func__);
return;
}
}
while (req) {
mdp3_ppp_wait_for_fence(req);
mdp3_calc_ppp_res(mfd, req);
if (ppp_res.clk_rate != ppp_stat->mdp_clk) {
ppp_stat->mdp_clk = ppp_res.clk_rate;
mdp3_clk_set_rate(MDP3_CLK_MDP_SRC,
ppp_stat->mdp_clk, MDP3_CLIENT_PPP);
}
if (ppp_stat->bw_update) {
rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP,
ppp_res.next_ab, ppp_res.next_ib);
if (rc < 0) {
pr_err("%s: bw set quota failed\n", __func__);
return;
}
ppp_stat->bw_update = false;
}
ATRACE_BEGIN("mpd3_ppp_start");
for (i = 0; i < req->count; i++) {
smart_blit = is_blit_optimization_possible(req, i);
if (smart_blit)
/* Blit request index of FG layer in smart blit */
smart_blit_fg_index = i + 1;
if (!(req->req_list[i].flags & MDP_NO_BLIT)) {
/* Do the actual blit. */
if (!rc) {
rc = mdp3_ppp_start_blit(mfd,
&(req->req_list[i]),
&req->src_data[i],
&req->dst_data[i]);
}
/* Unmap blit source buffer */
if (smart_blit == false)
mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP);
if (smart_blit_fg_index == i) {
/* Unmap smart blit background buffer */
mdp3_put_img(&req->src_data[i - 1], MDP3_CLIENT_PPP);
smart_blit_fg_index = -1;
}
mdp3_put_img(&req->dst_data[i], MDP3_CLIENT_PPP);
smart_blit = false;
}
}
ATRACE_END("mdp3_ppp_start");
/* Signal to release fence */
mutex_lock(&ppp_stat->req_mutex);
mdp3_ppp_signal_timeline(req);
mdp3_ppp_req_pop(&ppp_stat->req_q);
req = mdp3_ppp_next_req(&ppp_stat->req_q);
if (ppp_stat->wait_for_pop)
complete(&ppp_stat->pop_q_comp);
mutex_unlock(&ppp_stat->req_mutex);
}
mod_timer(&ppp_stat->free_bw_timer, jiffies +
msecs_to_jiffies(MDP_RELEASE_BW_TIMEOUT));
mutex_unlock(&ppp_stat->config_ppp_mutex);
}
int mdp3_ppp_parse_req(void __user *p,
struct mdp_async_blit_req_list *req_list_header,
int async)
{
struct blit_req_list *req;
struct blit_req_queue *req_q = &ppp_stat->req_q;
struct sync_fence *fence = NULL;
int count, rc, idx, i;
count = req_list_header->count;
mutex_lock(&ppp_stat->req_mutex);
while (req_q->count >= MDP3_PPP_MAX_LIST_REQ) {
ppp_stat->wait_for_pop = true;
mutex_unlock(&ppp_stat->req_mutex);
rc = wait_for_completion_timeout(
&ppp_stat->pop_q_comp, 5 * HZ);
if (rc == 0) {
/* This will only occur if there is a serious problem */
pr_err("%s: timeout exiting queuing request\n",
__func__);
return -EBUSY;
}
mutex_lock(&ppp_stat->req_mutex);
ppp_stat->wait_for_pop = false;
}
idx = req_q->push_idx;
req = &req_q->req[idx];
if (copy_from_user(&req->req_list, p,
sizeof(struct mdp_blit_req) * count)) {
mutex_unlock(&ppp_stat->req_mutex);
return -EFAULT;
}
rc = mdp3_ppp_handle_buf_sync(req, &req_list_header->sync);
if (rc < 0) {
pr_err("%s: Failed create sync point\n", __func__);
mutex_unlock(&ppp_stat->req_mutex);
return rc;
}
req->count = count;
/* We need to grab ion handle while running in client thread */
for (i = 0; i < count; i++) {
rc = mdp3_ppp_get_img(&req->req_list[i].src,
&req->req_list[i], &req->src_data[i]);
if (rc < 0 || req->src_data[i].len == 0) {
pr_err("mdp_ppp: couldn't retrieve src img from mem\n");
goto parse_err_1;
}
rc = mdp3_ppp_get_img(&req->req_list[i].dst,
&req->req_list[i], &req->dst_data[i]);
if (rc < 0 || req->dst_data[i].len == 0) {
mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP);
pr_err("mdp_ppp: couldn't retrieve dest img from mem\n");
goto parse_err_1;
}
}
if (async) {
req->cur_rel_fen_fd = get_unused_fd_flags(0);
if (req->cur_rel_fen_fd < 0) {
pr_err("%s: get_unused_fd_flags failed\n", __func__);
rc = -ENOMEM;
goto parse_err_1;
}
sync_fence_install(req->cur_rel_fence, req->cur_rel_fen_fd);
rc = copy_to_user(req_list_header->sync.rel_fen_fd,
&req->cur_rel_fen_fd, sizeof(int));
if (rc) {
pr_err("%s:copy_to_user failed\n", __func__);
goto parse_err_2;
}
} else {
fence = req->cur_rel_fence;
}
mdp3_ppp_req_push(req_q, req);
mutex_unlock(&ppp_stat->req_mutex);
schedule_work(&ppp_stat->blit_work);
if (!async) {
/* wait for release fence */
rc = sync_fence_wait(fence,
5 * MSEC_PER_SEC);
if (rc < 0)
pr_err("%s: sync blit! rc = %x\n", __func__, rc);
sync_fence_put(fence);
fence = NULL;
}
return 0;
parse_err_2:
put_unused_fd(req->cur_rel_fen_fd);
parse_err_1:
for (i--; i >= 0; i--) {
mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP);
mdp3_put_img(&req->dst_data[i], MDP3_CLIENT_PPP);
}
mdp3_ppp_deinit_buf_sync(req);
mutex_unlock(&ppp_stat->req_mutex);
return rc;
}
int mdp3_ppp_res_init(struct msm_fb_data_type *mfd)
{
const char timeline_name[] = "mdp3_ppp";
ppp_stat = kzalloc(sizeof(struct ppp_status), GFP_KERNEL);
if (!ppp_stat) {
pr_err("%s: kzalloc failed\n", __func__);
return -ENOMEM;
}
/* Setup sync_pt timeline for ppp */
ppp_stat->timeline = sw_sync_timeline_create(timeline_name);
if (ppp_stat->timeline == NULL) {
pr_err("%s: cannot create time line\n", __func__);
return -ENOMEM;
} else {
ppp_stat->timeline_value = 1;
}
INIT_WORK(&ppp_stat->blit_work, mdp3_ppp_blit_wq_handler);
INIT_WORK(&ppp_stat->free_bw_work, mdp3_free_bw_wq_handler);
init_completion(&ppp_stat->pop_q_comp);
mutex_init(&ppp_stat->req_mutex);
mutex_init(&ppp_stat->config_ppp_mutex);
init_timer(&ppp_stat->free_bw_timer);
ppp_stat->free_bw_timer.function = mdp3_free_fw_timer_func;
ppp_stat->free_bw_timer.data = 0;
ppp_stat->mfd = mfd;
mdp3_ppp_callback_setup();
return 0;
}
| gpl-2.0 |
hwoarang/linux | drivers/iio/adc/mcp3422.c | 243 | 9878 | /*
* mcp3422.c - driver for the Microchip mcp3422/3/4 chip family
*
* Copyright (C) 2013, Angelo Compagnucci
* Author: Angelo Compagnucci <angelo.compagnucci@gmail.com>
*
* Datasheet: http://ww1.microchip.com/downloads/en/devicedoc/22088b.pdf
*
* This driver exports the value of analog input voltage to sysfs, the
* voltage unit is nV.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
/* Masks */
#define MCP3422_CHANNEL_MASK 0x60
#define MCP3422_PGA_MASK 0x03
#define MCP3422_SRATE_MASK 0x0C
#define MCP3422_SRATE_240 0x0
#define MCP3422_SRATE_60 0x1
#define MCP3422_SRATE_15 0x2
#define MCP3422_SRATE_3 0x3
#define MCP3422_PGA_1 0
#define MCP3422_PGA_2 1
#define MCP3422_PGA_4 2
#define MCP3422_PGA_8 3
#define MCP3422_CONT_SAMPLING 0x10
#define MCP3422_CHANNEL(config) (((config) & MCP3422_CHANNEL_MASK) >> 5)
#define MCP3422_PGA(config) ((config) & MCP3422_PGA_MASK)
#define MCP3422_SAMPLE_RATE(config) (((config) & MCP3422_SRATE_MASK) >> 2)
#define MCP3422_CHANNEL_VALUE(value) (((value) << 5) & MCP3422_CHANNEL_MASK)
#define MCP3422_PGA_VALUE(value) ((value) & MCP3422_PGA_MASK)
#define MCP3422_SAMPLE_RATE_VALUE(value) ((value << 2) & MCP3422_SRATE_MASK)
#define MCP3422_CHAN(_index) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = _index, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
| BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
}
/* LSB is in nV to eliminate floating point */
static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625};
/*
* scales calculated as:
* rates_to_lsb[sample_rate] / (1 << pga);
* pga index 0..3 selects a gain of 1, 2, 4 or 8
*/
static const int mcp3422_scales[4][4] = {
{ 1000000, 500000, 250000, 125000 },
{ 250000, 125000, 62500, 31250 },
{ 62500, 31250, 15625, 7812 },
{ 15625, 7812, 3906, 1953 } };
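/*
* Worked example (illustrative): at 15 SPS the LSB is 62500 nV, so with the
* PGA at x4 the scale becomes 62500 / 4 = 15625 nV per count, i.e.
* mcp3422_scales[MCP3422_SRATE_15][MCP3422_PGA_4].
*/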
/* Constant msleep times for data acquisitions */
static const int mcp3422_read_times[4] = {
[MCP3422_SRATE_240] = 1000 / 240,
[MCP3422_SRATE_60] = 1000 / 60,
[MCP3422_SRATE_15] = 1000 / 15,
[MCP3422_SRATE_3] = 1000 / 3 };
/* sample rates to integer conversion table */
static const int mcp3422_sample_rates[4] = {
[MCP3422_SRATE_240] = 240,
[MCP3422_SRATE_60] = 60,
[MCP3422_SRATE_15] = 15,
[MCP3422_SRATE_3] = 3 };
/* sample rates to sign extension table */
static const int mcp3422_sign_extend[4] = {
[MCP3422_SRATE_240] = 11,
[MCP3422_SRATE_60] = 13,
[MCP3422_SRATE_15] = 15,
[MCP3422_SRATE_3] = 17 };
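/*
* Example (illustrative): in 240 SPS / 12-bit mode the sign bit is bit 11,
* so sign_extend32(0x800, mcp3422_sign_extend[MCP3422_SRATE_240]) in
* mcp3422_read() yields -2048, the most negative 12-bit code.
*/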
/* Client data (each client gets its own) */
struct mcp3422 {
struct i2c_client *i2c;
u8 config;
u8 pga[4];
struct mutex lock;
};
static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig)
{
int ret;
mutex_lock(&adc->lock);
ret = i2c_master_send(adc->i2c, &newconfig, 1);
if (ret > 0) {
adc->config = newconfig;
ret = 0;
}
mutex_unlock(&adc->lock);
return ret;
}
static int mcp3422_read(struct mcp3422 *adc, int *value, u8 *config)
{
int ret = 0;
u8 sample_rate = MCP3422_SAMPLE_RATE(adc->config);
u8 buf[4] = {0, 0, 0, 0};
u32 temp;
if (sample_rate == MCP3422_SRATE_3) {
ret = i2c_master_recv(adc->i2c, buf, 4);
temp = buf[0] << 16 | buf[1] << 8 | buf[2];
*config = buf[3];
} else {
ret = i2c_master_recv(adc->i2c, buf, 3);
temp = buf[0] << 8 | buf[1];
*config = buf[2];
}
*value = sign_extend32(temp, mcp3422_sign_extend[sample_rate]);
return ret;
}
static int mcp3422_read_channel(struct mcp3422 *adc,
struct iio_chan_spec const *channel, int *value)
{
int ret;
u8 config;
u8 req_channel = channel->channel;
if (req_channel != MCP3422_CHANNEL(adc->config)) {
config = adc->config;
config &= ~MCP3422_CHANNEL_MASK;
config |= MCP3422_CHANNEL_VALUE(req_channel);
config &= ~MCP3422_PGA_MASK;
config |= MCP3422_PGA_VALUE(adc->pga[req_channel]);
ret = mcp3422_update_config(adc, config);
if (ret < 0)
return ret;
msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]);
}
return mcp3422_read(adc, value, &config);
}
static int mcp3422_read_raw(struct iio_dev *iio,
struct iio_chan_spec const *channel, int *val1,
int *val2, long mask)
{
struct mcp3422 *adc = iio_priv(iio);
int err;
u8 sample_rate = MCP3422_SAMPLE_RATE(adc->config);
u8 pga = MCP3422_PGA(adc->config);
switch (mask) {
case IIO_CHAN_INFO_RAW:
err = mcp3422_read_channel(adc, channel, val1);
if (err < 0)
return -EINVAL;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val1 = 0;
*val2 = mcp3422_scales[sample_rate][pga];
return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_SAMP_FREQ:
*val1 = mcp3422_sample_rates[MCP3422_SAMPLE_RATE(adc->config)];
return IIO_VAL_INT;
default:
break;
}
return -EINVAL;
}
static int mcp3422_write_raw(struct iio_dev *iio,
struct iio_chan_spec const *channel, int val1,
int val2, long mask)
{
struct mcp3422 *adc = iio_priv(iio);
u8 temp;
u8 config = adc->config;
u8 req_channel = channel->channel;
u8 sample_rate = MCP3422_SAMPLE_RATE(config);
u8 i;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
if (val1 != 0)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(mcp3422_scales[0]); i++) {
if (val2 == mcp3422_scales[sample_rate][i]) {
adc->pga[req_channel] = i;
config &= ~MCP3422_CHANNEL_MASK;
config |= MCP3422_CHANNEL_VALUE(req_channel);
config &= ~MCP3422_PGA_MASK;
config |= MCP3422_PGA_VALUE(adc->pga[req_channel]);
return mcp3422_update_config(adc, config);
}
}
return -EINVAL;
case IIO_CHAN_INFO_SAMP_FREQ:
switch (val1) {
case 240:
temp = MCP3422_SRATE_240;
break;
case 60:
temp = MCP3422_SRATE_60;
break;
case 15:
temp = MCP3422_SRATE_15;
break;
case 3:
temp = MCP3422_SRATE_3;
break;
default:
return -EINVAL;
}
config &= ~MCP3422_CHANNEL_MASK;
config |= MCP3422_CHANNEL_VALUE(req_channel);
config &= ~MCP3422_SRATE_MASK;
config |= MCP3422_SAMPLE_RATE_VALUE(temp);
return mcp3422_update_config(adc, config);
default:
break;
}
return -EINVAL;
}
static int mcp3422_write_raw_get_fmt(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, long mask)
{
switch (mask) {
case IIO_CHAN_INFO_SCALE:
return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_SAMP_FREQ:
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
}
static ssize_t mcp3422_show_scales(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mcp3422 *adc = iio_priv(dev_to_iio_dev(dev));
u8 sample_rate = MCP3422_SAMPLE_RATE(adc->config);
return sprintf(buf, "0.%09u 0.%09u 0.%09u 0.%09u\n",
mcp3422_scales[sample_rate][0],
mcp3422_scales[sample_rate][1],
mcp3422_scales[sample_rate][2],
mcp3422_scales[sample_rate][3]);
}
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("240 60 15 3");
static IIO_DEVICE_ATTR(in_voltage_scale_available, S_IRUGO,
mcp3422_show_scales, NULL, 0);
static struct attribute *mcp3422_attributes[] = {
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
NULL,
};
static const struct attribute_group mcp3422_attribute_group = {
.attrs = mcp3422_attributes,
};
static const struct iio_chan_spec mcp3422_channels[] = {
MCP3422_CHAN(0),
MCP3422_CHAN(1),
};
static const struct iio_chan_spec mcp3424_channels[] = {
MCP3422_CHAN(0),
MCP3422_CHAN(1),
MCP3422_CHAN(2),
MCP3422_CHAN(3),
};
static const struct iio_info mcp3422_info = {
.read_raw = mcp3422_read_raw,
.write_raw = mcp3422_write_raw,
.write_raw_get_fmt = mcp3422_write_raw_get_fmt,
.attrs = &mcp3422_attribute_group,
.driver_module = THIS_MODULE,
};
static int mcp3422_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct iio_dev *indio_dev;
struct mcp3422 *adc;
int err;
u8 config;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*adc));
if (!indio_dev)
return -ENOMEM;
adc = iio_priv(indio_dev);
adc->i2c = client;
mutex_init(&adc->lock);
indio_dev->dev.parent = &client->dev;
indio_dev->name = dev_name(&client->dev);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp3422_info;
switch ((unsigned int)(id->driver_data)) {
case 2:
case 3:
indio_dev->channels = mcp3422_channels;
indio_dev->num_channels = ARRAY_SIZE(mcp3422_channels);
break;
case 4:
indio_dev->channels = mcp3424_channels;
indio_dev->num_channels = ARRAY_SIZE(mcp3424_channels);
break;
}
/* meaningful default configuration */
config = (MCP3422_CONT_SAMPLING
| MCP3422_CHANNEL_VALUE(1)
| MCP3422_PGA_VALUE(MCP3422_PGA_1)
| MCP3422_SAMPLE_RATE_VALUE(MCP3422_SRATE_240));
mcp3422_update_config(adc, config);
err = devm_iio_device_register(&client->dev, indio_dev);
if (err < 0)
return err;
i2c_set_clientdata(client, indio_dev);
return 0;
}
static const struct i2c_device_id mcp3422_id[] = {
{ "mcp3422", 2 },
{ "mcp3423", 3 },
{ "mcp3424", 4 },
{ }
};
MODULE_DEVICE_TABLE(i2c, mcp3422_id);
#ifdef CONFIG_OF
static const struct of_device_id mcp3422_of_match[] = {
{ .compatible = "mcp3422" },
{ }
};
MODULE_DEVICE_TABLE(of, mcp3422_of_match);
#endif
static struct i2c_driver mcp3422_driver = {
.driver = {
.name = "mcp3422",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(mcp3422_of_match),
},
.probe = mcp3422_probe,
.id_table = mcp3422_id,
};
module_i2c_driver(mcp3422_driver);
MODULE_AUTHOR("Angelo Compagnucci <angelo.compagnucci@gmail.com>");
MODULE_DESCRIPTION("Microchip mcp3422/3/4 driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
dan82840/Netgear-RBR50 | git_home/linux.git/drivers/media/usb/gspca/vicam.c | 2035 | 10326 | /*
* gspca ViCam subdriver
*
* Copyright (C) 2011 Hans de Goede <hdegoede@redhat.com>
*
* Based on the usbvideo vicam driver, which is:
*
* Copyright (c) 2002 Joe Burks (jburks@wavicle.org),
* Chris Cheney (chris.cheney@gmail.com),
* Pavel Machek (pavel@ucw.cz),
* John Tyner (jtyner@cs.ucr.edu),
* Monroe Williams (monroe@pobox.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "vicam"
#define HEADER_SIZE 64
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/ihex.h>
#include "gspca.h"
#define VICAM_FIRMWARE "vicam/firmware.fw"
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("GSPCA ViCam USB Camera Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(VICAM_FIRMWARE);
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
struct work_struct work_struct;
struct workqueue_struct *work_thread;
};
/* The vicam sensor has a resolution of 512 x 244, with I believe square
pixels, but this is forced to a 4:3 ratio by optics. So it has
non square pixels :( */
static struct v4l2_pix_format vicam_mode[] = {
{ 256, 122, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE,
.bytesperline = 256,
.sizeimage = 256 * 122,
.colorspace = V4L2_COLORSPACE_SRGB,},
/* 2 modes with somewhat more square pixels */
{ 256, 200, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE,
.bytesperline = 256,
.sizeimage = 256 * 200,
.colorspace = V4L2_COLORSPACE_SRGB,},
{ 256, 240, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE,
.bytesperline = 256,
.sizeimage = 256 * 240,
.colorspace = V4L2_COLORSPACE_SRGB,},
#if 0 /* This mode has extremely non square pixels, testing use only */
{ 512, 122, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE,
.bytesperline = 512,
.sizeimage = 512 * 122,
.colorspace = V4L2_COLORSPACE_SRGB,},
#endif
{ 512, 244, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE,
.bytesperline = 512,
.sizeimage = 512 * 244,
.colorspace = V4L2_COLORSPACE_SRGB,},
};
static int vicam_control_msg(struct gspca_dev *gspca_dev, u8 request,
u16 value, u16 index, u8 *data, u16 len)
{
int ret;
ret = usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
request,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, data, len, 1000);
if (ret < 0)
pr_err("control msg req %02X error %d\n", request, ret);
return ret;
}
static int vicam_set_camera_power(struct gspca_dev *gspca_dev, int state)
{
int ret;
ret = vicam_control_msg(gspca_dev, 0x50, state, 0, NULL, 0);
if (ret < 0)
return ret;
if (state)
ret = vicam_control_msg(gspca_dev, 0x55, 1, 0, NULL, 0);
return ret;
}
/*
* request and read a block of data
*/
static int vicam_read_frame(struct gspca_dev *gspca_dev, u8 *data, int size)
{
int ret, unscaled_height, act_len = 0;
u8 *req_data = gspca_dev->usb_buf;
s32 expo = v4l2_ctrl_g_ctrl(gspca_dev->exposure);
s32 gain = v4l2_ctrl_g_ctrl(gspca_dev->gain);
memset(req_data, 0, 16);
req_data[0] = gain;
if (gspca_dev->pixfmt.width == 256)
req_data[1] |= 0x01; /* low nibble x-scale */
if (gspca_dev->pixfmt.height <= 122) {
req_data[1] |= 0x10; /* high nibble y-scale */
unscaled_height = gspca_dev->pixfmt.height * 2;
} else
unscaled_height = gspca_dev->pixfmt.height;
req_data[2] = 0x90; /* unknown, does not seem to do anything */
if (unscaled_height <= 200)
req_data[3] = 0x06; /* vend? */
else if (unscaled_height <= 242) /* Yes 242 not 240 */
req_data[3] = 0x07; /* vend? */
else /* Up to 244 lines with req_data[3] == 0x08 */
req_data[3] = 0x08; /* vend? */
if (expo < 256) {
/* Frame rate maxed out, use partial frame expo time */
req_data[4] = 255 - expo;
req_data[5] = 0x00;
req_data[6] = 0x00;
req_data[7] = 0x01;
} else {
/* Modify frame rate */
req_data[4] = 0x00;
req_data[5] = 0x00;
req_data[6] = expo & 0xFF;
req_data[7] = expo >> 8;
}
req_data[8] = ((244 - unscaled_height) / 2) & ~0x01; /* vstart */
/* bytes 9-15 do not seem to affect exposure or image quality */
mutex_lock(&gspca_dev->usb_lock);
ret = vicam_control_msg(gspca_dev, 0x51, 0x80, 0, req_data, 16);
mutex_unlock(&gspca_dev->usb_lock);
if (ret < 0)
return ret;
ret = usb_bulk_msg(gspca_dev->dev,
usb_rcvbulkpipe(gspca_dev->dev, 0x81),
data, size, &act_len, 10000);
/* successful, it returns 0, otherwise negative */
if (ret < 0 || act_len != size) {
pr_err("bulk read fail (%d) len %d/%d\n",
ret, act_len, size);
return -EIO;
}
return 0;
}
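/*
* Example (illustrative): with the driver's default exposure control value
* of 256 the "modify frame rate" branch above is taken and req_data[6]/[7]
* carry 0x00/0x01; a shorter exposure such as 100 takes the partial-frame
* branch with req_data[4] = 255 - 100 = 155.
*/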
/*
* This function is called as a workqueue function and runs whenever the camera
* is streaming data. Because it is a workqueue function it is allowed to sleep
* so we can use synchronous USB calls. To avoid possible collisions with other
* threads attempting to use gspca_dev->usb_buf we take the usb_lock when
* performing USB operations using it. In practice we don't really need this
* as the cameras controls are only written from the workqueue.
*/
static void vicam_dostream(struct work_struct *work)
{
struct sd *sd = container_of(work, struct sd, work_struct);
struct gspca_dev *gspca_dev = &sd->gspca_dev;
int ret, frame_sz;
u8 *buffer;
frame_sz = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].sizeimage +
HEADER_SIZE;
buffer = kmalloc(frame_sz, GFP_KERNEL | GFP_DMA);
if (!buffer) {
pr_err("Couldn't allocate USB buffer\n");
goto exit;
}
while (gspca_dev->present && gspca_dev->streaming) {
#ifdef CONFIG_PM
if (gspca_dev->frozen)
break;
#endif
ret = vicam_read_frame(gspca_dev, buffer, frame_sz);
if (ret < 0)
break;
/* Note the frame header contents seem to be completely
constant, they do not change with either image, or
settings. So we simply discard it. The frames have
a very similar 64 byte footer, which we don't even
bother reading from the cam */
gspca_frame_add(gspca_dev, FIRST_PACKET,
buffer + HEADER_SIZE,
frame_sz - HEADER_SIZE);
gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
}
exit:
kfree(buffer);
}
/* This function is called at probe time just before sd_init */
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
struct cam *cam = &gspca_dev->cam;
struct sd *sd = (struct sd *)gspca_dev;
/* We don't use the buffer gspca allocates so make it small. */
cam->bulk = 1;
cam->bulk_size = 64;
cam->cam_mode = vicam_mode;
cam->nmodes = ARRAY_SIZE(vicam_mode);
INIT_WORK(&sd->work_struct, vicam_dostream);
return 0;
}
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
int ret;
const struct ihex_binrec *rec;
const struct firmware *uninitialized_var(fw);
u8 *firmware_buf;
ret = request_ihex_firmware(&fw, VICAM_FIRMWARE,
&gspca_dev->dev->dev);
if (ret) {
pr_err("Failed to load \"vicam/firmware.fw\": %d\n", ret);
return ret;
}
firmware_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!firmware_buf) {
ret = -ENOMEM;
goto exit;
}
for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) {
memcpy(firmware_buf, rec->data, be16_to_cpu(rec->len));
ret = vicam_control_msg(gspca_dev, 0xff, 0, 0, firmware_buf,
be16_to_cpu(rec->len));
if (ret < 0)
break;
}
kfree(firmware_buf);
exit:
release_firmware(fw);
return ret;
}
/* Set up for getting frames. */
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *)gspca_dev;
int ret;
ret = vicam_set_camera_power(gspca_dev, 1);
if (ret < 0)
return ret;
/* Start the workqueue function to do the streaming */
sd->work_thread = create_singlethread_workqueue(MODULE_NAME);
queue_work(sd->work_thread, &sd->work_struct);
return 0;
}
/* called on streamoff with alt==0 and on disconnect */
/* the usb_lock is held at entry - restore on exit */
static void sd_stop0(struct gspca_dev *gspca_dev)
{
struct sd *dev = (struct sd *)gspca_dev;
/* wait for the work queue to terminate */
mutex_unlock(&gspca_dev->usb_lock);
/* This waits for vicam_dostream to finish */
destroy_workqueue(dev->work_thread);
dev->work_thread = NULL;
mutex_lock(&gspca_dev->usb_lock);
if (gspca_dev->present)
vicam_set_camera_power(gspca_dev, 0);
}
static int sd_init_controls(struct gspca_dev *gspca_dev)
{
struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
gspca_dev->vdev.ctrl_handler = hdl;
v4l2_ctrl_handler_init(hdl, 2);
gspca_dev->exposure = v4l2_ctrl_new_std(hdl, NULL,
V4L2_CID_EXPOSURE, 0, 2047, 1, 256);
gspca_dev->gain = v4l2_ctrl_new_std(hdl, NULL,
V4L2_CID_GAIN, 0, 255, 1, 200);
if (hdl->error) {
pr_err("Could not initialize controls\n");
return hdl->error;
}
return 0;
}
/* Table of supported USB devices */
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x04c1, 0x009d)},
{USB_DEVICE(0x0602, 0x1001)},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.config = sd_config,
.init = sd_init,
.init_controls = sd_init_controls,
.start = sd_start,
.stop0 = sd_stop0,
};
/* -- device connect -- */
static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id,
&sd_desc,
sizeof(struct sd),
THIS_MODULE);
}
static struct usb_driver sd_driver = {
.name = MODULE_NAME,
.id_table = device_table,
.probe = sd_probe,
.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
.reset_resume = gspca_resume,
#endif
};
module_usb_driver(sd_driver);
| gpl-2.0 |
Altaf-Mahdi/android_kernel_oneplus_msm8994 | fs/afs/inode.c | 2291 | 12554 | /*
* Copyright (c) 2002 Red Hat, Inc. All rights reserved.
*
* This software may be freely redistributed under the terms of the
* GNU General Public License.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Authors: David Woodhouse <dwmw2@infradead.org>
* David Howells <dhowells@redhat.com>
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include "internal.h"
struct afs_iget_data {
struct afs_fid fid;
struct afs_volume *volume; /* volume on which resides */
};
/*
* map the AFS file status to the inode member variables
*/
static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
{
struct inode *inode = AFS_VNODE_TO_I(vnode);
_debug("FS: ft=%d lk=%d sz=%llu ver=%Lu mod=%hu",
vnode->status.type,
vnode->status.nlink,
(unsigned long long) vnode->status.size,
vnode->status.data_version,
vnode->status.mode);
switch (vnode->status.type) {
case AFS_FTYPE_FILE:
inode->i_mode = S_IFREG | vnode->status.mode;
inode->i_op = &afs_file_inode_operations;
inode->i_fop = &afs_file_operations;
break;
case AFS_FTYPE_DIR:
inode->i_mode = S_IFDIR | vnode->status.mode;
inode->i_op = &afs_dir_inode_operations;
inode->i_fop = &afs_dir_file_operations;
break;
case AFS_FTYPE_SYMLINK:
inode->i_mode = S_IFLNK | vnode->status.mode;
inode->i_op = &page_symlink_inode_operations;
break;
default:
printk("kAFS: AFS vnode with undefined type\n");
return -EBADMSG;
}
#ifdef CONFIG_AFS_FSCACHE
if (vnode->status.size != inode->i_size)
fscache_attr_changed(vnode->cache);
#endif
set_nlink(inode, vnode->status.nlink);
inode->i_uid = vnode->status.owner;
inode->i_gid = GLOBAL_ROOT_GID;
inode->i_size = vnode->status.size;
inode->i_ctime.tv_sec = vnode->status.mtime_server;
inode->i_ctime.tv_nsec = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime;
inode->i_blocks = 0;
inode->i_generation = vnode->fid.unique;
inode->i_version = vnode->status.data_version;
inode->i_mapping->a_ops = &afs_fs_aops;
/* check to see whether a symbolic link is really a mountpoint */
if (vnode->status.type == AFS_FTYPE_SYMLINK) {
afs_mntpt_check_symlink(vnode, key);
if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) {
inode->i_mode = S_IFDIR | vnode->status.mode;
inode->i_op = &afs_mntpt_inode_operations;
inode->i_fop = &afs_mntpt_file_operations;
}
}
return 0;
}
/*
* iget5() comparator
*/
static int afs_iget5_test(struct inode *inode, void *opaque)
{
struct afs_iget_data *data = opaque;
return inode->i_ino == data->fid.vnode &&
inode->i_generation == data->fid.unique;
}
/*
* iget5() comparator for inode created by autocell operations
*
* These pseudo inodes don't match anything.
*/
static int afs_iget5_autocell_test(struct inode *inode, void *opaque)
{
return 0;
}
/*
* iget5() inode initialiser
*/
static int afs_iget5_set(struct inode *inode, void *opaque)
{
struct afs_iget_data *data = opaque;
struct afs_vnode *vnode = AFS_FS_I(inode);
inode->i_ino = data->fid.vnode;
inode->i_generation = data->fid.unique;
vnode->fid = data->fid;
vnode->volume = data->volume;
return 0;
}
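/*
* Illustrative note: these callbacks are handed to iget5_locked(), e.g.
* iget5_locked(sb, fid->vnode, afs_iget5_test, afs_iget5_set, &data), so two
* AFS files that reuse the same vnode number but differ in the "unique"
* generation field get distinct in-core inodes.
*/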
/*
* inode retrieval for autocell
*/
struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
int namesz, struct key *key)
{
struct afs_iget_data data;
struct afs_super_info *as;
struct afs_vnode *vnode;
struct super_block *sb;
struct inode *inode;
static atomic_t afs_autocell_ino;
_enter("{%x:%u},%*.*s,",
AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
namesz, namesz, dev_name ?: "");
sb = dir->i_sb;
as = sb->s_fs_info;
data.volume = as->volume;
data.fid.vid = as->volume->vid;
data.fid.unique = 0;
data.fid.vnode = 0;
inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
afs_iget5_autocell_test, afs_iget5_set,
&data);
if (!inode) {
_leave(" = -ENOMEM");
return ERR_PTR(-ENOMEM);
}
_debug("GOT INODE %p { ino=%lu, vl=%x, vn=%x, u=%x }",
inode, inode->i_ino, data.fid.vid, data.fid.vnode,
data.fid.unique);
vnode = AFS_FS_I(inode);
/* there shouldn't be an existing inode */
BUG_ON(!(inode->i_state & I_NEW));
inode->i_size = 0;
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
inode->i_op = &afs_autocell_inode_operations;
set_nlink(inode, 2);
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
inode->i_ctime.tv_sec = get_seconds();
inode->i_ctime.tv_nsec = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime;
inode->i_blocks = 0;
inode->i_version = 0;
inode->i_generation = 0;
set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
inode->i_flags |= S_AUTOMOUNT | S_NOATIME;
unlock_new_inode(inode);
_leave(" = %p", inode);
return inode;
}
/*
* inode retrieval
*/
struct inode *afs_iget(struct super_block *sb, struct key *key,
struct afs_fid *fid, struct afs_file_status *status,
struct afs_callback *cb)
{
struct afs_iget_data data = { .fid = *fid };
struct afs_super_info *as;
struct afs_vnode *vnode;
struct inode *inode;
int ret;
_enter(",{%x:%u.%u},,", fid->vid, fid->vnode, fid->unique);
as = sb->s_fs_info;
data.volume = as->volume;
inode = iget5_locked(sb, fid->vnode, afs_iget5_test, afs_iget5_set,
&data);
if (!inode) {
_leave(" = -ENOMEM");
return ERR_PTR(-ENOMEM);
}
_debug("GOT INODE %p { vl=%x vn=%x, u=%x }",
inode, fid->vid, fid->vnode, fid->unique);
vnode = AFS_FS_I(inode);
/* deal with an existing inode */
if (!(inode->i_state & I_NEW)) {
_leave(" = %p", inode);
return inode;
}
if (!status) {
/* it's a remotely extant inode */
set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
ret = afs_vnode_fetch_status(vnode, NULL, key);
if (ret < 0)
goto bad_inode;
} else {
/* it's an inode we just created */
memcpy(&vnode->status, status, sizeof(vnode->status));
if (!cb) {
/* it's a symlink we just created (the fileserver
* didn't give us a callback) */
vnode->cb_version = 0;
vnode->cb_expiry = 0;
vnode->cb_type = 0;
vnode->cb_expires = get_seconds();
} else {
vnode->cb_version = cb->version;
vnode->cb_expiry = cb->expiry;
vnode->cb_type = cb->type;
vnode->cb_expires = vnode->cb_expiry + get_seconds();
}
}
/* set up caching before mapping the status, as map-status reads the
* first page of symlinks to see if they're really mountpoints */
inode->i_size = vnode->status.size;
#ifdef CONFIG_AFS_FSCACHE
vnode->cache = fscache_acquire_cookie(vnode->volume->cache,
&afs_vnode_cache_index_def,
vnode);
#endif
ret = afs_inode_map_status(vnode, key);
if (ret < 0)
goto bad_inode;
/* success */
clear_bit(AFS_VNODE_UNSET, &vnode->flags);
inode->i_flags |= S_NOATIME;
unlock_new_inode(inode);
_leave(" = %p [CB { v=%u t=%u }]", inode, vnode->cb_version, vnode->cb_type);
return inode;
/* failure */
bad_inode:
#ifdef CONFIG_AFS_FSCACHE
fscache_relinquish_cookie(vnode->cache, 0);
vnode->cache = NULL;
#endif
iget_failed(inode);
_leave(" = %d [bad]", ret);
return ERR_PTR(ret);
}
/*
* mark the data attached to an inode as obsolete due to a write on the server
* - might also want to ditch all the outstanding writes and dirty pages
*/
void afs_zap_data(struct afs_vnode *vnode)
{
_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
/* nuke all the non-dirty pages that aren't locked, mapped or being
* written back in a regular file and completely discard the pages in a
* directory or symlink */
if (S_ISREG(vnode->vfs_inode.i_mode))
invalidate_remote_inode(&vnode->vfs_inode);
else
invalidate_inode_pages2(vnode->vfs_inode.i_mapping);
}
/*
* validate a vnode/inode
* - there are several things we need to check
* - parent dir data changes (rm, rmdir, rename, mkdir, create, link,
* symlink)
* - parent dir metadata changed (security changes)
* - dentry data changed (write, truncate)
* - dentry metadata changed (security changes)
*/
int afs_validate(struct afs_vnode *vnode, struct key *key)
{
int ret;
_enter("{v={%x:%u} fl=%lx},%x",
vnode->fid.vid, vnode->fid.vnode, vnode->flags,
key_serial(key));
if (vnode->cb_promised &&
!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
!test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
if (vnode->cb_expires < get_seconds() + 10) {
_debug("callback expired");
set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
} else {
goto valid;
}
}
if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
goto valid;
mutex_lock(&vnode->validate_lock);
/* if the promise has expired, we need to check the server again to get
* a new promise - note that if the (parent) directory's metadata was
* changed then the security may be different and we may no longer have
* access */
if (!vnode->cb_promised ||
test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
_debug("not promised");
ret = afs_vnode_fetch_status(vnode, NULL, key);
if (ret < 0)
goto error_unlock;
_debug("new promise [fl=%lx]", vnode->flags);
}
if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
_debug("file already deleted");
ret = -ESTALE;
goto error_unlock;
}
/* if the vnode's data version number changed then its contents are
* different */
if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
afs_zap_data(vnode);
clear_bit(AFS_VNODE_MODIFIED, &vnode->flags);
mutex_unlock(&vnode->validate_lock);
valid:
_leave(" = 0");
return 0;
error_unlock:
mutex_unlock(&vnode->validate_lock);
_leave(" = %d", ret);
return ret;
}
/*
* read the attributes of an inode
*/
int afs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct inode *inode;
inode = dentry->d_inode;
_enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
generic_fillattr(inode, stat);
return 0;
}
/*
* discard an AFS inode
*/
int afs_drop_inode(struct inode *inode)
{
_enter("");
if (test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(inode)->flags))
return generic_delete_inode(inode);
else
return generic_drop_inode(inode);
}
/*
* clear an AFS inode
*/
void afs_evict_inode(struct inode *inode)
{
struct afs_permits *permits;
struct afs_vnode *vnode;
vnode = AFS_FS_I(inode);
_enter("{%x:%u.%d} v=%u x=%u t=%u }",
vnode->fid.vid,
vnode->fid.vnode,
vnode->fid.unique,
vnode->cb_version,
vnode->cb_expiry,
vnode->cb_type);
_debug("CLEAR INODE %p", inode);
ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
truncate_inode_pages(&inode->i_data, 0);
clear_inode(inode);
afs_give_up_callback(vnode);
if (vnode->server) {
spin_lock(&vnode->server->fs_lock);
rb_erase(&vnode->server_rb, &vnode->server->fs_vnodes);
spin_unlock(&vnode->server->fs_lock);
afs_put_server(vnode->server);
vnode->server = NULL;
}
ASSERT(list_empty(&vnode->writebacks));
ASSERT(!vnode->cb_promised);
#ifdef CONFIG_AFS_FSCACHE
fscache_relinquish_cookie(vnode->cache, 0);
vnode->cache = NULL;
#endif
mutex_lock(&vnode->permits_lock);
permits = vnode->permits;
rcu_assign_pointer(vnode->permits, NULL);
mutex_unlock(&vnode->permits_lock);
if (permits)
call_rcu(&permits->rcu, afs_zap_permits);
_leave("");
}
/*
* set the attributes of an inode
*/
int afs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
struct key *key;
int ret;
_enter("{%x:%u},{n=%s},%x",
vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name,
attr->ia_valid);
if (!(attr->ia_valid & (ATTR_SIZE | ATTR_MODE | ATTR_UID | ATTR_GID |
ATTR_MTIME))) {
_leave(" = 0 [unsupported]");
return 0;
}
/* flush any dirty data outstanding on a regular file */
if (S_ISREG(vnode->vfs_inode.i_mode)) {
filemap_write_and_wait(vnode->vfs_inode.i_mapping);
afs_writeback_all(vnode);
}
if (attr->ia_valid & ATTR_FILE) {
key = attr->ia_file->private_data;
} else {
key = afs_request_key(vnode->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
goto error;
}
}
ret = afs_vnode_setattr(vnode, key, attr);
if (!(attr->ia_valid & ATTR_FILE))
key_put(key);
error:
_leave(" = %d", ret);
return ret;
}
| gpl-2.0 |
rex-xxx/mt6572_x201 | kernel/lib/kobject.c | 3059 | 23758 | /*
* kobject.c - library routines for handling generic kernel objects
*
* Copyright (c) 2002-2003 Patrick Mochel <mochel@osdl.org>
* Copyright (c) 2006-2007 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2006-2007 Novell Inc.
*
* This file is released under the GPLv2.
*
*
* Please see the file Documentation/kobject.txt for critical information
* about using the kobject interface.
*/
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/stat.h>
#include <linux/slab.h>
/*
* populate_dir - populate directory with attributes.
* @kobj: object we're working on.
*
* Most subsystems have a set of default attributes that are associated
* with an object that registers with them. This is a helper called during
* object registration that loops through the default attributes of the
* subsystem and creates attributes files for them in sysfs.
*/
static int populate_dir(struct kobject *kobj)
{
struct kobj_type *t = get_ktype(kobj);
struct attribute *attr;
int error = 0;
int i;
if (t && t->default_attrs) {
for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) {
error = sysfs_create_file(kobj, attr);
if (error)
break;
}
}
return error;
}
static int create_dir(struct kobject *kobj)
{
int error = 0;
if (kobject_name(kobj)) {
error = sysfs_create_dir(kobj);
if (!error) {
error = populate_dir(kobj);
if (error)
sysfs_remove_dir(kobj);
}
}
return error;
}
static int get_kobj_path_length(struct kobject *kobj)
{
int length = 1;
struct kobject *parent = kobj;
/* walk up the ancestors until we hit the one pointing to the
* root.
* Add 1 to strlen for leading '/' of each level.
*/
do {
if (kobject_name(parent) == NULL)
return 0;
length += strlen(kobject_name(parent)) + 1;
parent = parent->parent;
} while (parent);
return length;
}
static void fill_kobj_path(struct kobject *kobj, char *path, int length)
{
struct kobject *parent;
--length;
for (parent = kobj; parent; parent = parent->parent) {
int cur = strlen(kobject_name(parent));
/* back up enough to print this name with '/' */
length -= cur;
strncpy(path + length, kobject_name(parent), cur);
*(path + --length) = '/';
}
pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
kobj, __func__, path);
}
/**
* kobject_get_path - generate and return the path associated with a given kobj and kset pair.
*
* @kobj: kobject in question, with which to build the path
* @gfp_mask: the allocation type used to allocate the path
*
* The result must be freed by the caller with kfree().
*/
char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
{
char *path;
int len;
len = get_kobj_path_length(kobj);
if (len == 0)
return NULL;
path = kzalloc(len, gfp_mask);
if (!path)
return NULL;
fill_kobj_path(kobj, path, len);
return path;
}
EXPORT_SYMBOL_GPL(kobject_get_path);
/* add the kobject to its kset's list */
static void kobj_kset_join(struct kobject *kobj)
{
if (!kobj->kset)
return;
kset_get(kobj->kset);
spin_lock(&kobj->kset->list_lock);
list_add_tail(&kobj->entry, &kobj->kset->list);
spin_unlock(&kobj->kset->list_lock);
}
/* remove the kobject from its kset's list */
static void kobj_kset_leave(struct kobject *kobj)
{
if (!kobj->kset)
return;
spin_lock(&kobj->kset->list_lock);
list_del_init(&kobj->entry);
spin_unlock(&kobj->kset->list_lock);
kset_put(kobj->kset);
}
static void kobject_init_internal(struct kobject *kobj)
{
if (!kobj)
return;
kref_init(&kobj->kref);
INIT_LIST_HEAD(&kobj->entry);
kobj->state_in_sysfs = 0;
kobj->state_add_uevent_sent = 0;
kobj->state_remove_uevent_sent = 0;
kobj->state_initialized = 1;
}
static int kobject_add_internal(struct kobject *kobj)
{
int error = 0;
struct kobject *parent;
if (!kobj)
return -ENOENT;
if (!kobj->name || !kobj->name[0]) {
WARN(1, "kobject: (%p): attempted to be registered with empty "
"name!\n", kobj);
return -EINVAL;
}
parent = kobject_get(kobj->parent);
/* join kset if set, use it as parent if we do not already have one */
if (kobj->kset) {
if (!parent)
parent = kobject_get(&kobj->kset->kobj);
kobj_kset_join(kobj);
kobj->parent = parent;
}
pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n",
kobject_name(kobj), kobj, __func__,
parent ? kobject_name(parent) : "<NULL>",
kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>");
error = create_dir(kobj);
if (error) {
kobj_kset_leave(kobj);
kobject_put(parent);
kobj->parent = NULL;
/* be noisy on error issues */
if (error == -EEXIST)
WARN(1, "%s failed for %s with "
"-EEXIST, don't try to register things with "
"the same name in the same directory.\n",
__func__, kobject_name(kobj));
else
WARN(1, "%s failed for %s (error: %d parent: %s)\n",
__func__, kobject_name(kobj), error,
parent ? kobject_name(parent) : "'none'");
} else
kobj->state_in_sysfs = 1;
return error;
}
/**
* kobject_set_name_vargs - Set the name of a kobject
* @kobj: struct kobject to set the name of
* @fmt: format string used to build the name
* @vargs: vargs to format the string.
*/
int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
va_list vargs)
{
const char *old_name = kobj->name;
char *s;
if (kobj->name && !fmt)
return 0;
kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
if (!kobj->name)
return -ENOMEM;
/* ewww... some of these buggers have '/' in the name ... */
while ((s = strchr(kobj->name, '/')))
s[0] = '!';
kfree(old_name);
return 0;
}
/**
* kobject_set_name - Set the name of a kobject
* @kobj: struct kobject to set the name of
* @fmt: format string used to build the name
*
* This sets the name of the kobject. If you have already added the
* kobject to the system, you must call kobject_rename() in order to
* change the name of the kobject.
*/
int kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
va_list vargs;
int retval;
va_start(vargs, fmt);
retval = kobject_set_name_vargs(kobj, fmt, vargs);
va_end(vargs);
return retval;
}
EXPORT_SYMBOL(kobject_set_name);
/**
* kobject_init - initialize a kobject structure
* @kobj: pointer to the kobject to initialize
* @ktype: pointer to the ktype for this kobject.
*
* This function will properly initialize a kobject such that it can then
* be passed to the kobject_add() call.
*
* After this function is called, the kobject MUST be cleaned up by a call
* to kobject_put(), not by a call to kfree directly to ensure that all of
* the memory is cleaned up properly.
*/
void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
{
char *err_str;
if (!kobj) {
err_str = "invalid kobject pointer!";
goto error;
}
if (!ktype) {
err_str = "must have a ktype to be initialized properly!\n";
goto error;
}
if (kobj->state_initialized) {
/* do not error out as sometimes we can recover */
printk(KERN_ERR "kobject (%p): tried to init an initialized "
"object, something is seriously wrong.\n", kobj);
dump_stack();
}
kobject_init_internal(kobj);
kobj->ktype = ktype;
return;
error:
printk(KERN_ERR "kobject (%p): %s\n", kobj, err_str);
dump_stack();
}
EXPORT_SYMBOL(kobject_init);
static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
const char *fmt, va_list vargs)
{
int retval;
retval = kobject_set_name_vargs(kobj, fmt, vargs);
if (retval) {
printk(KERN_ERR "kobject: can not set name properly!\n");
return retval;
}
kobj->parent = parent;
return kobject_add_internal(kobj);
}
/**
* kobject_add - the main kobject add function
* @kobj: the kobject to add
* @parent: pointer to the parent of the kobject.
* @fmt: format to name the kobject with.
*
* The kobject name is set and added to the kobject hierarchy in this
* function.
*
* If @parent is set, then the parent of the @kobj will be set to it.
* If @parent is NULL, then the parent of the @kobj will be set to the
* kobject associated with the kset assigned to this kobject. If no kset
* is assigned to the kobject, then the kobject will be located in the
* root of the sysfs tree.
*
* If this function returns an error, kobject_put() must be called to
* properly clean up the memory associated with the object.
* Under no instance should the kobject that is passed to this function
* be directly freed with a call to kfree(), that can leak memory.
*
* Note, no "add" uevent will be created with this call, the caller should set
* up all of the necessary sysfs files for the object and then call
* kobject_uevent() with the KOBJ_ADD action to ensure that
* userspace is properly notified of this kobject's creation.
*/
int kobject_add(struct kobject *kobj, struct kobject *parent,
const char *fmt, ...)
{
va_list args;
int retval;
if (!kobj)
return -EINVAL;
if (!kobj->state_initialized) {
printk(KERN_ERR "kobject '%s' (%p): tried to add an "
"uninitialized object, something is seriously wrong.\n",
kobject_name(kobj), kobj);
dump_stack();
return -EINVAL;
}
va_start(args, fmt);
retval = kobject_add_varg(kobj, parent, fmt, args);
va_end(args);
return retval;
}
EXPORT_SYMBOL(kobject_add);
/**
* kobject_init_and_add - initialize a kobject structure and add it to the kobject hierarchy
* @kobj: pointer to the kobject to initialize
* @ktype: pointer to the ktype for this kobject.
* @parent: pointer to the parent of this kobject.
* @fmt: the name of the kobject.
*
* This function combines the call to kobject_init() and
* kobject_add(). The same type of error handling after a call to
* kobject_add() and kobject lifetime rules are the same here.
*/
int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
struct kobject *parent, const char *fmt, ...)
{
va_list args;
int retval;
kobject_init(kobj, ktype);
va_start(args, fmt);
retval = kobject_add_varg(kobj, parent, fmt, args);
va_end(args);
return retval;
}
EXPORT_SYMBOL_GPL(kobject_init_and_add);
/**
* kobject_rename - change the name of an object
* @kobj: object in question.
* @new_name: object's new name
*
* It is the responsibility of the caller to provide mutual
* exclusion between two different calls of kobject_rename
* on the same kobject and to ensure that new_name is valid and
* won't conflict with other kobjects.
*/
int kobject_rename(struct kobject *kobj, const char *new_name)
{
int error = 0;
const char *devpath = NULL;
const char *dup_name = NULL, *name;
char *devpath_string = NULL;
char *envp[2];
kobj = kobject_get(kobj);
if (!kobj)
return -EINVAL;
if (!kobj->parent)
return -EINVAL;
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
error = -ENOMEM;
goto out;
}
devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL);
if (!devpath_string) {
error = -ENOMEM;
goto out;
}
sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
envp[0] = devpath_string;
envp[1] = NULL;
name = dup_name = kstrdup(new_name, GFP_KERNEL);
if (!name) {
error = -ENOMEM;
goto out;
}
error = sysfs_rename_dir(kobj, new_name);
if (error)
goto out;
/* Install the new kobject name */
dup_name = kobj->name;
kobj->name = name;
/* This function is mostly/only used for network interfaces.
* Some hotplug packages track interfaces by name and
* therefore want to know when the name is changed by the user. */
kobject_uevent_env(kobj, KOBJ_MOVE, envp);
out:
kfree(dup_name);
kfree(devpath_string);
kfree(devpath);
kobject_put(kobj);
return error;
}
EXPORT_SYMBOL_GPL(kobject_rename);
/**
* kobject_move - move object to another parent
* @kobj: object in question.
* @new_parent: object's new parent (can be NULL)
*/
int kobject_move(struct kobject *kobj, struct kobject *new_parent)
{
int error;
struct kobject *old_parent;
const char *devpath = NULL;
char *devpath_string = NULL;
char *envp[2];
kobj = kobject_get(kobj);
if (!kobj)
return -EINVAL;
new_parent = kobject_get(new_parent);
if (!new_parent) {
if (kobj->kset)
new_parent = kobject_get(&kobj->kset->kobj);
}
/* old object path */
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
error = -ENOMEM;
goto out;
}
devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL);
if (!devpath_string) {
error = -ENOMEM;
goto out;
}
sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
envp[0] = devpath_string;
envp[1] = NULL;
error = sysfs_move_dir(kobj, new_parent);
if (error)
goto out;
old_parent = kobj->parent;
kobj->parent = new_parent;
new_parent = NULL;
kobject_put(old_parent);
kobject_uevent_env(kobj, KOBJ_MOVE, envp);
out:
kobject_put(new_parent);
kobject_put(kobj);
kfree(devpath_string);
kfree(devpath);
return error;
}
/**
* kobject_del - unlink kobject from hierarchy.
* @kobj: object.
*/
void kobject_del(struct kobject *kobj)
{
if (!kobj)
return;
sysfs_remove_dir(kobj);
kobj->state_in_sysfs = 0;
kobj_kset_leave(kobj);
kobject_put(kobj->parent);
kobj->parent = NULL;
}
/**
* kobject_get - increment refcount for object.
* @kobj: object.
*/
struct kobject *kobject_get(struct kobject *kobj)
{
if (kobj)
kref_get(&kobj->kref);
return kobj;
}
/*
* kobject_cleanup - free kobject resources.
* @kobj: object to cleanup
*/
static void kobject_cleanup(struct kobject *kobj)
{
struct kobj_type *t = get_ktype(kobj);
const char *name = kobj->name;
pr_debug("kobject: '%s' (%p): %s\n",
kobject_name(kobj), kobj, __func__);
if (t && !t->release)
pr_debug("kobject: '%s' (%p): does not have a release() "
"function, it is broken and must be fixed.\n",
kobject_name(kobj), kobj);
/* send "remove" if the caller did not do it but sent "add" */
if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
pr_debug("kobject: '%s' (%p): auto cleanup 'remove' event\n",
kobject_name(kobj), kobj);
kobject_uevent(kobj, KOBJ_REMOVE);
}
/* remove from sysfs if the caller did not do it */
if (kobj->state_in_sysfs) {
pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n",
kobject_name(kobj), kobj);
kobject_del(kobj);
}
if (t && t->release) {
pr_debug("kobject: '%s' (%p): calling ktype release\n",
kobject_name(kobj), kobj);
t->release(kobj);
}
/* free name if we allocated it */
if (name) {
pr_debug("kobject: '%s': free name\n", name);
kfree(name);
}
}
static void kobject_release(struct kref *kref)
{
kobject_cleanup(container_of(kref, struct kobject, kref));
}
/**
* kobject_put - decrement refcount for object.
* @kobj: object.
*
* Decrement the refcount, and if 0, call kobject_cleanup().
*/
void kobject_put(struct kobject *kobj)
{
if (kobj) {
if (!kobj->state_initialized)
WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
"initialized, yet kobject_put() is being "
"called.\n", kobject_name(kobj), kobj);
kref_put(&kobj->kref, kobject_release);
}
}
static void dynamic_kobj_release(struct kobject *kobj)
{
pr_debug("kobject: (%p): %s\n", kobj, __func__);
kfree(kobj);
}
static struct kobj_type dynamic_kobj_ktype = {
.release = dynamic_kobj_release,
.sysfs_ops = &kobj_sysfs_ops,
};
/**
* kobject_create - create a struct kobject dynamically
*
* This function creates a kobject structure dynamically and sets it up
* to be a "dynamic" kobject with a default release function set up.
*
* If the kobject was not able to be created, NULL will be returned.
* The kobject structure returned from here must be cleaned up with a
* call to kobject_put() and not kfree(), as kobject_init() has
* already been called on this structure.
*/
struct kobject *kobject_create(void)
{
struct kobject *kobj;
kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
if (!kobj)
return NULL;
kobject_init(kobj, &dynamic_kobj_ktype);
return kobj;
}
/**
* kobject_create_and_add - create a struct kobject dynamically and register it with sysfs
*
* @name: the name for the kobject
* @parent: the parent kobject of this kobject, if any.
*
* This function creates a kobject structure dynamically and registers it
* with sysfs. When you are finished with this structure, call
* kobject_put() and the structure will be dynamically freed when
* it is no longer being used.
*
* If the kobject was not able to be created, NULL will be returned.
*/
struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
{
struct kobject *kobj;
int retval;
kobj = kobject_create();
if (!kobj)
return NULL;
retval = kobject_add(kobj, parent, "%s", name);
if (retval) {
printk(KERN_WARNING "%s: kobject_add error: %d\n",
__func__, retval);
kobject_put(kobj);
kobj = NULL;
}
return kobj;
}
EXPORT_SYMBOL_GPL(kobject_create_and_add);
/**
* kset_init - initialize a kset for use
* @k: kset
*/
void kset_init(struct kset *k)
{
kobject_init_internal(&k->kobj);
INIT_LIST_HEAD(&k->list);
spin_lock_init(&k->list_lock);
}
/* default kobject attribute operations */
static ssize_t kobj_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct kobj_attribute *kattr;
ssize_t ret = -EIO;
kattr = container_of(attr, struct kobj_attribute, attr);
if (kattr->show)
ret = kattr->show(kobj, kattr, buf);
return ret;
}
static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct kobj_attribute *kattr;
ssize_t ret = -EIO;
kattr = container_of(attr, struct kobj_attribute, attr);
if (kattr->store)
ret = kattr->store(kobj, kattr, buf, count);
return ret;
}
const struct sysfs_ops kobj_sysfs_ops = {
.show = kobj_attr_show,
.store = kobj_attr_store,
};
/**
* kset_register - initialize and add a kset.
* @k: kset.
*/
int kset_register(struct kset *k)
{
int err;
if (!k)
return -EINVAL;
kset_init(k);
err = kobject_add_internal(&k->kobj);
if (err)
return err;
kobject_uevent(&k->kobj, KOBJ_ADD);
return 0;
}
/**
* kset_unregister - remove a kset.
* @k: kset.
*/
void kset_unregister(struct kset *k)
{
if (!k)
return;
kobject_put(&k->kobj);
}
/**
* kset_find_obj - search for object in kset.
* @kset: kset we're looking in.
* @name: object's name.
*
* Lock the kset via @kset->list_lock and iterate over @kset->list,
* looking for a matching kobject. If a matching object is found,
* take a reference and return the object.
*/
struct kobject *kset_find_obj(struct kset *kset, const char *name)
{
struct kobject *k;
struct kobject *ret = NULL;
spin_lock(&kset->list_lock);
list_for_each_entry(k, &kset->list, entry) {
if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
ret = kobject_get(k);
break;
}
}
spin_unlock(&kset->list_lock);
return ret;
}
static void kset_release(struct kobject *kobj)
{
struct kset *kset = container_of(kobj, struct kset, kobj);
pr_debug("kobject: '%s' (%p): %s\n",
kobject_name(kobj), kobj, __func__);
kfree(kset);
}
static struct kobj_type kset_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = kset_release,
};
/**
* kset_create - create a struct kset dynamically
*
* @name: the name for the kset
* @uevent_ops: a struct kset_uevent_ops for the kset
* @parent_kobj: the parent kobject of this kset, if any.
*
* This function creates a kset structure dynamically. This structure can
* then be registered with the system and show up in sysfs with a call to
* kset_register(). When you are finished with this structure, if
* kset_register() has been called, call kset_unregister() and the
* structure will be dynamically freed when it is no longer being used.
*
* If the kset was not able to be created, NULL will be returned.
*/
static struct kset *kset_create(const char *name,
const struct kset_uevent_ops *uevent_ops,
struct kobject *parent_kobj)
{
struct kset *kset;
int retval;
kset = kzalloc(sizeof(*kset), GFP_KERNEL);
if (!kset)
return NULL;
retval = kobject_set_name(&kset->kobj, name);
if (retval) {
kfree(kset);
return NULL;
}
kset->uevent_ops = uevent_ops;
kset->kobj.parent = parent_kobj;
/*
* The kobject of this kset will have a type of kset_ktype and belong to
* no kset itself. That way we can properly free it when it is
* finished being used.
*/
kset->kobj.ktype = &kset_ktype;
kset->kobj.kset = NULL;
return kset;
}
/**
* kset_create_and_add - create a struct kset dynamically and add it to sysfs
*
* @name: the name for the kset
* @uevent_ops: a struct kset_uevent_ops for the kset
* @parent_kobj: the parent kobject of this kset, if any.
*
* This function creates a kset structure dynamically and registers it
* with sysfs. When you are finished with this structure, call
* kset_unregister() and the structure will be dynamically freed when it
* is no longer being used.
*
* If the kset was not able to be created, NULL will be returned.
*/
struct kset *kset_create_and_add(const char *name,
const struct kset_uevent_ops *uevent_ops,
struct kobject *parent_kobj)
{
struct kset *kset;
int error;
kset = kset_create(name, uevent_ops, parent_kobj);
if (!kset)
return NULL;
error = kset_register(kset);
if (error) {
kfree(kset);
return NULL;
}
return kset;
}
EXPORT_SYMBOL_GPL(kset_create_and_add);
static DEFINE_SPINLOCK(kobj_ns_type_lock);
static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
{
enum kobj_ns_type type = ops->type;
int error;
spin_lock(&kobj_ns_type_lock);
error = -EINVAL;
if (type >= KOBJ_NS_TYPES)
goto out;
error = -EINVAL;
if (type <= KOBJ_NS_TYPE_NONE)
goto out;
error = -EBUSY;
if (kobj_ns_ops_tbl[type])
goto out;
error = 0;
kobj_ns_ops_tbl[type] = ops;
out:
spin_unlock(&kobj_ns_type_lock);
return error;
}
int kobj_ns_type_registered(enum kobj_ns_type type)
{
int registered = 0;
spin_lock(&kobj_ns_type_lock);
if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES))
registered = kobj_ns_ops_tbl[type] != NULL;
spin_unlock(&kobj_ns_type_lock);
return registered;
}
const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent)
{
const struct kobj_ns_type_operations *ops = NULL;
if (parent && parent->ktype->child_ns_type)
ops = parent->ktype->child_ns_type(parent);
return ops;
}
const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
{
return kobj_child_ns_ops(kobj->parent);
}
void *kobj_ns_grab_current(enum kobj_ns_type type)
{
void *ns = NULL;
spin_lock(&kobj_ns_type_lock);
if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
kobj_ns_ops_tbl[type])
ns = kobj_ns_ops_tbl[type]->grab_current_ns();
spin_unlock(&kobj_ns_type_lock);
return ns;
}
const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
{
const void *ns = NULL;
spin_lock(&kobj_ns_type_lock);
if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
kobj_ns_ops_tbl[type])
ns = kobj_ns_ops_tbl[type]->netlink_ns(sk);
spin_unlock(&kobj_ns_type_lock);
return ns;
}
const void *kobj_ns_initial(enum kobj_ns_type type)
{
const void *ns = NULL;
spin_lock(&kobj_ns_type_lock);
if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
kobj_ns_ops_tbl[type])
ns = kobj_ns_ops_tbl[type]->initial_ns();
spin_unlock(&kobj_ns_type_lock);
return ns;
}
void kobj_ns_drop(enum kobj_ns_type type, void *ns)
{
spin_lock(&kobj_ns_type_lock);
if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
kobj_ns_ops_tbl[type] && kobj_ns_ops_tbl[type]->drop_ns)
kobj_ns_ops_tbl[type]->drop_ns(ns);
spin_unlock(&kobj_ns_type_lock);
}
EXPORT_SYMBOL(kobject_get);
EXPORT_SYMBOL(kobject_put);
EXPORT_SYMBOL(kobject_del);
EXPORT_SYMBOL(kset_register);
EXPORT_SYMBOL(kset_unregister);
| gpl-2.0 |
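For context, the kobject_create_and_add()/sysfs helpers above are normally driven roughly as follows. This is a hedged sketch in the spirit of samples/kobject in the kernel tree; the directory name, attribute name and helper names are invented for illustration.
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/sysfs.h>

static int example_value;
static struct kobject *example_kobj;

static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", example_value);
}

static ssize_t value_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	if (sscanf(buf, "%d", &example_value) != 1)
		return -EINVAL;
	return count;
}

static struct kobj_attribute value_attr =
	__ATTR(value, 0644, value_show, value_store);

static int __init example_init(void)
{
	int error;

	/* creates /sys/kernel/example; kernel_kobj is the /sys/kernel parent */
	example_kobj = kobject_create_and_add("example", kernel_kobj);
	if (!example_kobj)
		return -ENOMEM;

	error = sysfs_create_file(example_kobj, &value_attr.attr);
	if (error)
		kobject_put(example_kobj);	/* never kfree() a live kobject */
	return error;
}

static void __exit example_exit(void)
{
	kobject_put(example_kobj);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");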
schqiushui/kernel_kk442_sense_dlx | arch/tile/kernel/compat.c | 4595 | 3564 | /*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/* Adjust unistd.h to provide 32-bit numbers and functions. */
#define __SYSCALL_COMPAT
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/uaccess.h>
#include <linux/signal.h>
#include <asm/syscalls.h>
/*
* Syscalls that take 64-bit numbers traditionally take them in 32-bit
* "high" and "low" value parts on 32-bit architectures.
* In principle, one could imagine passing some register arguments as
* fully 64-bit on TILE-Gx in 32-bit mode, but it seems easier to
* adapt the usual convention.
*/
long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high)
{
return sys_truncate(filename, ((loff_t)high << 32) | low);
}
long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high)
{
return sys_ftruncate(fd, ((loff_t)high << 32) | low);
}
long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
u32 dummy, u32 low, u32 high)
{
return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low);
}
long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
u32 dummy, u32 low, u32 high)
{
return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low);
}
long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len)
{
return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len);
}
long compat_sys_sync_file_range2(int fd, unsigned int flags,
u32 offset_lo, u32 offset_hi,
u32 nbytes_lo, u32 nbytes_hi)
{
return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo,
((loff_t)nbytes_hi << 32) | nbytes_lo,
flags);
}
long compat_sys_fallocate(int fd, int mode,
u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi)
{
return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo,
((loff_t)len_hi << 32) | len_lo);
}
long compat_sys_sched_rr_get_interval(compat_pid_t pid,
struct compat_timespec __user *interval)
{
struct timespec t;
int ret;
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_sched_rr_get_interval(pid,
(struct timespec __force __user *)&t);
set_fs(old_fs);
if (put_compat_timespec(&t, interval))
return -EFAULT;
return ret;
}
/* Provide the compat syscall number to call mapping. */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
/* See comments in sys.c */
#define compat_sys_fadvise64_64 sys32_fadvise64_64
#define compat_sys_readahead sys32_readahead
/* Call the trampolines to manage pt_regs where necessary. */
#define compat_sys_execve _compat_sys_execve
#define compat_sys_sigaltstack _compat_sys_sigaltstack
#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
#define sys_clone _sys_clone
/*
* Note that we can't include <linux/unistd.h> here since the header
* guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
*/
void *compat_sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls-1] = sys_ni_syscall,
#include <asm/unistd.h>
};
| gpl-2.0 |
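To see the low/high register convention in isolation, here is a small stand-alone (user-space) illustration of the 64-bit reassembly the compat wrappers above perform; the function name is made up.
#include <stdint.h>
#include <stdio.h>

/* Reassemble a 64-bit offset from the "low"/"high" halves that a 32-bit
 * caller passes in separate registers, as the wrappers above do. */
static int64_t join64(uint32_t low, uint32_t high)
{
	return ((int64_t)high << 32) | low;
}

int main(void)
{
	/* 0x100000200 split into high = 1, low = 0x200 */
	printf("%lld\n", (long long)join64(0x200u, 1u));	/* 4294967808 */
	return 0;
}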
zarboz/android_kernel_htc_dlx | virt/drivers/sh/intc/virq.c | 4851 | 6220 | /*
* Support for virtual IRQ subgroups.
*
* Copyright (C) 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#define pr_fmt(fmt) "intc: " fmt
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include "internals.h"
static struct intc_map_entry intc_irq_xlate[INTC_NR_IRQS];
struct intc_virq_list {
unsigned int irq;
struct intc_virq_list *next;
};
#define for_each_virq(entry, head) \
for (entry = head; entry; entry = entry->next)
/*
* Tags for the radix tree
*/
#define INTC_TAG_VIRQ_NEEDS_ALLOC 0
void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d)
{
unsigned long flags;
raw_spin_lock_irqsave(&intc_big_lock, flags);
intc_irq_xlate[irq].enum_id = id;
intc_irq_xlate[irq].desc = d;
raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}
struct intc_map_entry *intc_irq_xlate_get(unsigned int irq)
{
return intc_irq_xlate + irq;
}
int intc_irq_lookup(const char *chipname, intc_enum enum_id)
{
struct intc_map_entry *ptr;
struct intc_desc_int *d;
int irq = -1;
list_for_each_entry(d, &intc_list, list) {
int tagged;
if (strcmp(d->chip.name, chipname) != 0)
continue;
/*
* Catch early lookups for subgroup VIRQs that have not
* yet been allocated an IRQ. This already includes a
* fast-path out if the tree is untagged, so there is no
* need to explicitly test the root tree.
*/
tagged = radix_tree_tag_get(&d->tree, enum_id,
INTC_TAG_VIRQ_NEEDS_ALLOC);
if (unlikely(tagged))
break;
ptr = radix_tree_lookup(&d->tree, enum_id);
if (ptr) {
irq = ptr - intc_irq_xlate;
break;
}
}
return irq;
}
EXPORT_SYMBOL_GPL(intc_irq_lookup);
static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
{
struct intc_virq_list **last, *entry;
struct irq_data *data = irq_get_irq_data(irq);
/* scan for duplicates */
last = (struct intc_virq_list **)&data->handler_data;
for_each_virq(entry, data->handler_data) {
if (entry->irq == virq)
return 0;
last = &entry->next;
}
entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
if (!entry) {
pr_err("can't allocate VIRQ mapping for %d\n", virq);
return -ENOMEM;
}
entry->irq = virq;
*last = entry;
return 0;
}
static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
{
struct irq_data *data = irq_get_irq_data(irq);
struct irq_chip *chip = irq_data_get_irq_chip(data);
struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data);
struct intc_desc_int *d = get_intc_desc(irq);
chip->irq_mask_ack(data);
for_each_virq(entry, vlist) {
unsigned long addr, handle;
handle = (unsigned long)irq_get_handler_data(entry->irq);
addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
generic_handle_irq(entry->irq);
}
chip->irq_unmask(data);
}
static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
struct intc_desc_int *d,
unsigned int index)
{
unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;
return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
0, 1, (subgroup->reg_width - 1) - index);
}
static void __init intc_subgroup_init_one(struct intc_desc *desc,
struct intc_desc_int *d,
struct intc_subgroup *subgroup)
{
struct intc_map_entry *mapped;
unsigned int pirq;
unsigned long flags;
int i;
mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
if (!mapped) {
WARN_ON(1);
return;
}
pirq = mapped - intc_irq_xlate;
raw_spin_lock_irqsave(&d->lock, flags);
for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
struct intc_subgroup_entry *entry;
int err;
if (!subgroup->enum_ids[i])
continue;
entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
if (!entry)
break;
entry->pirq = pirq;
entry->enum_id = subgroup->enum_ids[i];
entry->handle = intc_subgroup_data(subgroup, d, i);
err = radix_tree_insert(&d->tree, entry->enum_id, entry);
if (unlikely(err < 0))
break;
radix_tree_tag_set(&d->tree, entry->enum_id,
INTC_TAG_VIRQ_NEEDS_ALLOC);
}
raw_spin_unlock_irqrestore(&d->lock, flags);
}
void __init intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d)
{
int i;
if (!desc->hw.subgroups)
return;
for (i = 0; i < desc->hw.nr_subgroups; i++)
intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
}
static void __init intc_subgroup_map(struct intc_desc_int *d)
{
struct intc_subgroup_entry *entries[32];
unsigned long flags;
unsigned int nr_found;
int i;
raw_spin_lock_irqsave(&d->lock, flags);
restart:
nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
(void ***)entries, 0, ARRAY_SIZE(entries),
INTC_TAG_VIRQ_NEEDS_ALLOC);
for (i = 0; i < nr_found; i++) {
struct intc_subgroup_entry *entry;
int irq;
entry = radix_tree_deref_slot((void **)entries[i]);
if (unlikely(!entry))
continue;
if (radix_tree_deref_retry(entry))
goto restart;
irq = create_irq();
if (unlikely(irq < 0)) {
pr_err("no more free IRQs, bailing..\n");
break;
}
pr_info("Setting up a chained VIRQ from %d -> %d\n",
irq, entry->pirq);
intc_irq_xlate_set(irq, entry->enum_id, d);
irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq),
handle_simple_irq, "virq");
irq_set_chip_data(irq, irq_get_chip_data(entry->pirq));
irq_set_handler_data(irq, (void *)entry->handle);
/*
* Set the virtual IRQ as non-threadable.
*/
irq_set_nothread(irq);
irq_set_chained_handler(entry->pirq, intc_virq_handler);
add_virq_to_pirq(entry->pirq, irq);
radix_tree_tag_clear(&d->tree, entry->enum_id,
INTC_TAG_VIRQ_NEEDS_ALLOC);
radix_tree_replace_slot((void **)entries[i],
&intc_irq_xlate[irq]);
}
raw_spin_unlock_irqrestore(&d->lock, flags);
}
void __init intc_finalize(void)
{
struct intc_desc_int *d;
list_for_each_entry(d, &intc_list, list)
if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
intc_subgroup_map(d);
}
| gpl-2.0 |
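The subgroup mapping above uses radix-tree tags as a "work still pending" marker: entries are inserted and tagged at init time, then swept with a gang lookup once IRQs can be allocated. A reduced sketch of that tag-and-sweep pattern follows; the entry type, tag number and the absence of locking are simplifications assumed here.
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/radix-tree.h>

#define EX_TAG_NEEDS_SETUP	0

struct ex_entry {
	unsigned long id;
	/* ... payload ... */
};

static RADIX_TREE(ex_tree, GFP_KERNEL);

/* Insert an entry and mark it as still needing a second setup pass. */
static int ex_queue(struct ex_entry *e)
{
	int err = radix_tree_insert(&ex_tree, e->id, e);

	if (!err)
		radix_tree_tag_set(&ex_tree, e->id, EX_TAG_NEEDS_SETUP);
	return err;
}

/* Later: sweep everything still carrying the tag (no locking shown). */
static void ex_sweep(void)
{
	struct ex_entry *batch[16];
	unsigned int i, n;

	n = radix_tree_gang_lookup_tag(&ex_tree, (void **)batch, 0,
				       ARRAY_SIZE(batch),
				       EX_TAG_NEEDS_SETUP);
	for (i = 0; i < n; i++) {
		/* ... allocate/wire up whatever batch[i] describes ... */
		radix_tree_tag_clear(&ex_tree, batch[i]->id,
				     EX_TAG_NEEDS_SETUP);
	}
}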
taozhijiang/linux | scripts/dtc/libfdt/fdt_empty_tree.c | 6131 | 2903 | /*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2012 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
int fdt_create_empty_tree(void *buf, int bufsize)
{
int err;
err = fdt_create(buf, bufsize);
if (err)
return err;
err = fdt_finish_reservemap(buf);
if (err)
return err;
err = fdt_begin_node(buf, "");
if (err)
return err;
err = fdt_end_node(buf);
if (err)
return err;
err = fdt_finish(buf);
if (err)
return err;
return fdt_open_into(buf, buf, bufsize);
}
| gpl-2.0 |
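fdt_create_empty_tree() leaves a writable blob containing only the root node; a typical host-side follow-up with libfdt might look like the sketch below. The buffer size, node and property names are arbitrary examples.
#include <stdio.h>
#include <libfdt.h>

int main(void)
{
	static char blob[4096];
	int err, node;

	err = fdt_create_empty_tree(blob, sizeof(blob));
	if (err)
		return 1;

	/* add: /chosen { bootargs = "console=ttyS0"; }; */
	node = fdt_add_subnode(blob, 0, "chosen");
	if (node < 0)
		return 1;

	err = fdt_setprop_string(blob, node, "bootargs", "console=ttyS0");
	if (err)
		return 1;

	printf("blob now uses %u bytes\n", (unsigned int)fdt_totalsize(blob));
	return 0;
}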
TeamBliss-Devices/android_kernel_asus_grouper | arch/microblaze/lib/memset.c | 7667 | 2427 | /*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2007 John Williams
*
* Reasonably optimised generic C-code for memset on Microblaze
* This is generic C code to do efficient, alignment-aware memset.
*
* It is based on demo code originally Copyright 2001 by Intel Corp, taken from
* http://www.embedded.com/showArticle.jhtml?articleID=19205567
*
* Attempts were made, unsuccessfully, to contact the original
* author of this code (Michael Morrow, Intel). Below is the original
* copyright notice.
*
* This software has been developed by Intel Corporation.
* Intel specifically disclaims all warranties, express or
* implied, and all liability, including consequential and
* other indirect damages, for the use of this program, including
* liability for infringement of any proprietary rights,
* and including the warranties of merchantability and fitness
* for a particular purpose. Intel does not assume any
* responsibility for any errors which may appear in this program
* nor any responsibility to update it.
*/
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/string.h>
#ifdef __HAVE_ARCH_MEMSET
#ifndef CONFIG_OPT_LIB_FUNCTION
void *memset(void *v_src, int c, __kernel_size_t n)
{
char *src = v_src;
/* Truncate c to 8 bits */
c = (c & 0xFF);
/* Simple, byte-oriented memset. */
while (n--)
*src++ = c;
return v_src;
}
#else /* CONFIG_OPT_LIB_FUNCTION */
void *memset(void *v_src, int c, __kernel_size_t n)
{
char *src = v_src;
uint32_t *i_src;
uint32_t w32 = 0;
/* Truncate c to 8 bits */
c = (c & 0xFF);
if (unlikely(c)) {
/* Make a repeating word out of it */
w32 = c;
w32 |= w32 << 8;
w32 |= w32 << 16;
}
if (likely(n >= 4)) {
/* Align the destination to a word boundary */
/* This is done in an endian independent manner */
switch ((unsigned) src & 3) {
case 1:
*src++ = c;
--n;
case 2:
*src++ = c;
--n;
case 3:
*src++ = c;
--n;
}
i_src = (void *)src;
/* Do as many full-word copies as we can */
for (; n >= 4; n -= 4)
*i_src++ = w32;
src = (void *)i_src;
}
/* Simple, byte oriented memset or the rest of count. */
while (n--)
*src++ = c;
return v_src;
}
#endif /* CONFIG_OPT_LIB_FUNCTION */
EXPORT_SYMBOL(memset);
#endif /* __HAVE_ARCH_MEMSET */
| gpl-2.0 |
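The CONFIG_OPT_LIB_FUNCTION path above gets its speed from filling a word at a time; the byte-replication step it performs before the word loop can be demonstrated on its own in ordinary user-space C.
#include <stdint.h>
#include <stdio.h>

/* Replicate one byte into a 32-bit fill pattern, as the optimised
 * memset variant above does before its word-at-a-time loop. */
static uint32_t repeat_byte(int c)
{
	uint32_t w = c & 0xFF;

	w |= w << 8;	/* 0x000000AB -> 0x0000ABAB */
	w |= w << 16;	/* 0x0000ABAB -> 0xABABABAB */
	return w;
}

int main(void)
{
	printf("0x%08X\n", repeat_byte(0xAB));	/* prints 0xABABABAB */
	return 0;
}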
deatharrow/lge-kernel-e400 | drivers/mtd/maps/tsunami_flash.c | 8179 | 2494 | /*
* tsunami_flash.c
*
* flash chip on alpha ds10...
*/
#include <asm/io.h>
#include <asm/core_tsunami.h>
#include <linux/init.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#define FLASH_ENABLE_PORT 0x00C00001
#define FLASH_ENABLE_BYTE 0x01
#define FLASH_DISABLE_BYTE 0x00
#define MAX_TIG_FLASH_SIZE (12*1024*1024)
static inline map_word tsunami_flash_read8(struct map_info *map, unsigned long offset)
{
map_word val;
val.x[0] = tsunami_tig_readb(offset);
return val;
}
static void tsunami_flash_write8(struct map_info *map, map_word value, unsigned long offset)
{
tsunami_tig_writeb(value.x[0], offset);
}
static void tsunami_flash_copy_from(
struct map_info *map, void *addr, unsigned long offset, ssize_t len)
{
unsigned char *dest;
dest = addr;
while(len && (offset < MAX_TIG_FLASH_SIZE)) {
*dest = tsunami_tig_readb(offset);
offset++;
dest++;
len--;
}
}
static void tsunami_flash_copy_to(
struct map_info *map, unsigned long offset,
const void *addr, ssize_t len)
{
const unsigned char *src;
src = addr;
while(len && (offset < MAX_TIG_FLASH_SIZE)) {
tsunami_tig_writeb(*src, offset);
offset++;
src++;
len--;
}
}
/*
* Deliberately don't provide operations wider than 8 bits. I don't
* have them and it scares me to think how you could mess up if
* you tried to use them. Buswidth is set correctly so I'm safe.
*/
static struct map_info tsunami_flash_map = {
.name = "flash chip on the Tsunami TIG bus",
.size = MAX_TIG_FLASH_SIZE,
.phys = NO_XIP,
.bankwidth = 1,
.read = tsunami_flash_read8,
.copy_from = tsunami_flash_copy_from,
.write = tsunami_flash_write8,
.copy_to = tsunami_flash_copy_to,
};
static struct mtd_info *tsunami_flash_mtd;
static void __exit cleanup_tsunami_flash(void)
{
struct mtd_info *mtd;
mtd = tsunami_flash_mtd;
if (mtd) {
mtd_device_unregister(mtd);
map_destroy(mtd);
}
tsunami_flash_mtd = 0;
}
static int __init init_tsunami_flash(void)
{
static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
char **type;
tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT);
tsunami_flash_mtd = 0;
type = rom_probe_types;
for(; !tsunami_flash_mtd && *type; type++) {
tsunami_flash_mtd = do_map_probe(*type, &tsunami_flash_map);
}
if (tsunami_flash_mtd) {
tsunami_flash_mtd->owner = THIS_MODULE;
mtd_device_register(tsunami_flash_mtd, NULL, 0);
return 0;
}
return -ENXIO;
}
module_init(init_tsunami_flash);
module_exit(cleanup_tsunami_flash);
| gpl-2.0 |
XuQiufeng/kernel_common | net/ipv4/tcp_veno.c | 8947 | 5820 | /*
* TCP Veno congestion control
*
* This is based on the congestion detection/avoidance scheme described in
* C. P. Fu, S. C. Liew.
* "TCP Veno: TCP Enhancement for Transmission over Wireless Access Networks."
* IEEE Journal on Selected Areas in Communication,
* Feb. 2003.
* See http://www.ie.cuhk.edu.hk/fileadmin/staff_upload/soung/Journal/J3.pdf
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>
/* Default values of the Veno variables, in fixed-point representation
* with V_PARAM_SHIFT bits to the right of the binary point.
*/
#define V_PARAM_SHIFT 1
static const int beta = 3 << V_PARAM_SHIFT;
/* Veno variables */
struct veno {
u8 doing_veno_now; /* if true, do veno for this rtt */
u16 cntrtt; /* # of rtts measured within last rtt */
u32 minrtt; /* min of rtts measured within last rtt (in usec) */
u32 basertt; /* the min of all Veno rtt measurements seen (in usec) */
u32 inc; /* decide whether to increase cwnd */
u32 diff; /* calculate the diff rate */
};
/* There are several situations when we must "re-start" Veno:
*
* o when a connection is established
* o after an RTO
* o after fast recovery
* o when we send a packet and there is no outstanding
* unacknowledged data (restarting an idle connection)
*
*/
static inline void veno_enable(struct sock *sk)
{
struct veno *veno = inet_csk_ca(sk);
/* turn on Veno */
veno->doing_veno_now = 1;
veno->minrtt = 0x7fffffff;
}
static inline void veno_disable(struct sock *sk)
{
struct veno *veno = inet_csk_ca(sk);
/* turn off Veno */
veno->doing_veno_now = 0;
}
static void tcp_veno_init(struct sock *sk)
{
struct veno *veno = inet_csk_ca(sk);
veno->basertt = 0x7fffffff;
veno->inc = 1;
veno_enable(sk);
}
/* Do rtt sampling needed for Veno. */
static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
struct veno *veno = inet_csk_ca(sk);
u32 vrtt;
if (rtt_us < 0)
return;
/* Never allow zero rtt or baseRTT */
vrtt = rtt_us + 1;
/* Filter to find propagation delay: */
if (vrtt < veno->basertt)
veno->basertt = vrtt;
/* Find the min rtt during the last rtt to find
* the current prop. delay + queuing delay:
*/
veno->minrtt = min(veno->minrtt, vrtt);
veno->cntrtt++;
}
static void tcp_veno_state(struct sock *sk, u8 ca_state)
{
if (ca_state == TCP_CA_Open)
veno_enable(sk);
else
veno_disable(sk);
}
/*
* If the connection is idle and we are restarting,
* then we don't want to do any Veno calculations
* until we get fresh rtt samples. So when we
* restart, we reset our Veno state to a clean
* state. After we get acks for this flight of
* packets, _then_ we can make Veno calculations
* again.
*/
static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START)
tcp_veno_init(sk);
}
static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
struct tcp_sock *tp = tcp_sk(sk);
struct veno *veno = inet_csk_ca(sk);
if (!veno->doing_veno_now) {
tcp_reno_cong_avoid(sk, ack, in_flight);
return;
}
/* limited by applications */
if (!tcp_is_cwnd_limited(sk, in_flight))
return;
/* We do the Veno calculations only if we got enough rtt samples */
if (veno->cntrtt <= 2) {
/* We don't have enough rtt samples to do the Veno
* calculation, so we'll behave like Reno.
*/
tcp_reno_cong_avoid(sk, ack, in_flight);
} else {
u64 target_cwnd;
u32 rtt;
/* We have enough rtt samples, so, using the Veno
* algorithm, we determine the state of the network.
*/
rtt = veno->minrtt;
target_cwnd = (tp->snd_cwnd * veno->basertt);
target_cwnd <<= V_PARAM_SHIFT;
do_div(target_cwnd, rtt);
veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
if (tp->snd_cwnd <= tp->snd_ssthresh) {
/* Slow start. */
tcp_slow_start(tp);
} else {
/* Congestion avoidance. */
if (veno->diff < beta) {
/* In the "non-congestive state", increase cwnd
* every rtt.
*/
tcp_cong_avoid_ai(tp, tp->snd_cwnd);
} else {
/* In the "congestive state", increase cwnd
* every other rtt.
*/
if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
if (veno->inc &&
tp->snd_cwnd < tp->snd_cwnd_clamp) {
tp->snd_cwnd++;
veno->inc = 0;
} else
veno->inc = 1;
tp->snd_cwnd_cnt = 0;
} else
tp->snd_cwnd_cnt++;
}
}
if (tp->snd_cwnd < 2)
tp->snd_cwnd = 2;
else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
tp->snd_cwnd = tp->snd_cwnd_clamp;
}
/* Wipe the slate clean for the next rtt. */
/* veno->cntrtt = 0; */
veno->minrtt = 0x7fffffff;
}
/* Veno MD phase */
static u32 tcp_veno_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct veno *veno = inet_csk_ca(sk);
if (veno->diff < beta)
/* in "non-congestive state", cut cwnd by 1/5 */
return max(tp->snd_cwnd * 4 / 5, 2U);
else
/* in "congestive state", cut cwnd by 1/2 */
return max(tp->snd_cwnd >> 1U, 2U);
}
static struct tcp_congestion_ops tcp_veno __read_mostly = {
.flags = TCP_CONG_RTT_STAMP,
.init = tcp_veno_init,
.ssthresh = tcp_veno_ssthresh,
.cong_avoid = tcp_veno_cong_avoid,
.pkts_acked = tcp_veno_pkts_acked,
.set_state = tcp_veno_state,
.cwnd_event = tcp_veno_cwnd_event,
.owner = THIS_MODULE,
.name = "veno",
};
static int __init tcp_veno_register(void)
{
BUILD_BUG_ON(sizeof(struct veno) > ICSK_CA_PRIV_SIZE);
tcp_register_congestion_control(&tcp_veno);
return 0;
}
static void __exit tcp_veno_unregister(void)
{
tcp_unregister_congestion_control(&tcp_veno);
}
module_init(tcp_veno_register);
module_exit(tcp_veno_unregister);
MODULE_AUTHOR("Bin Zhou, Cheng Peng Fu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Veno");
| gpl-2.0 |
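Veno's key quantity is diff, a fixed-point (V_PARAM_SHIFT) estimate of how many segments are queued in the network: diff = cwnd - cwnd*basertt/rtt. A small user-space rework of that arithmetic with illustrative numbers follows; the sample RTT and cwnd values are invented.
#include <stdint.h>
#include <stdio.h>

#define V_PARAM_SHIFT 1
static const int beta = 3 << V_PARAM_SHIFT;

/* Estimated backlog in the same fixed point as the kernel code:
 * diff = cwnd - cwnd * basertt / rtt, all shifted by V_PARAM_SHIFT. */
static uint64_t veno_diff(uint32_t cwnd, uint32_t basertt, uint32_t rtt)
{
	uint64_t target_cwnd = ((uint64_t)cwnd * basertt) << V_PARAM_SHIFT;

	target_cwnd /= rtt;	/* the kernel uses do_div() here */
	return ((uint64_t)cwnd << V_PARAM_SHIFT) - target_cwnd;
}

int main(void)
{
	/* cwnd = 20 segments, basertt = 100 ms, minimum rtt this window = 120 ms:
	 * about 20 * (1 - 100/120) ~= 3.3 queued segments, i.e. diff = 7 in
	 * 1-bit fixed point, which is >= beta (6), so this round trip is
	 * treated as congestive and cwnd grows only every other rtt. */
	uint64_t diff = veno_diff(20, 100, 120);

	printf("diff=%llu beta=%d congestive=%d\n",
	       (unsigned long long)diff, beta, diff >= (uint64_t)beta);
	return 0;
}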
v-yadli/YadliKernel | drivers/isdn/sc/interrupt.c | 9203 | 6631 | /* $Id: interrupt.c,v 1.4.8.3 2001/09/23 22:24:59 kai Exp $
*
* Copyright (C) 1996 SpellCaster Telecommunications Inc.
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* For more information, please contact gpl-info@spellcast.com or write:
*
* SpellCaster Telecommunications Inc.
* 5621 Finch Avenue East, Unit #3
* Scarborough, Ontario Canada
* M1B 2T9
* +1 (416) 297-8565
* +1 (416) 297-6433 Facsimile
*/
#include "includes.h"
#include "hardware.h"
#include "message.h"
#include "card.h"
#include <linux/interrupt.h>
/*
*
*/
irqreturn_t interrupt_handler(int dummy, void *card_inst)
{
RspMessage rcvmsg;
int channel;
int card = (int)(unsigned long) card_inst;
if (!IS_VALID_CARD(card)) {
pr_debug("Invalid param: %d is not a valid card id\n", card);
return IRQ_NONE;
}
pr_debug("%s: Entered Interrupt handler\n",
sc_adapter[card]->devicename);
/*
* Pull all of the waiting messages off the response queue
*/
while (!receivemessage(card, &rcvmsg)) {
/*
* Push the message to the adapter structure for
* send_and_receive to snoop
*/
if (sc_adapter[card]->want_async_messages)
memcpy(&(sc_adapter[card]->async_msg),
&rcvmsg, sizeof(RspMessage));
channel = (unsigned int) rcvmsg.phy_link_no;
/*
* Trap Invalid request messages
*/
if (IS_CM_MESSAGE(rcvmsg, 0, 0, Invalid)) {
pr_debug("%s: Invalid request Message, rsp_status = %d\n",
sc_adapter[card]->devicename,
rcvmsg.rsp_status);
break;
}
/*
* Check for a linkRead message
*/
if (IS_CE_MESSAGE(rcvmsg, Lnk, 1, Read))
{
pr_debug("%s: Received packet 0x%x bytes long at 0x%lx\n",
sc_adapter[card]->devicename,
rcvmsg.msg_data.response.msg_len,
rcvmsg.msg_data.response.buff_offset);
rcvpkt(card, &rcvmsg);
continue;
}
/*
* Handle a write acknowledgement
*/
if (IS_CE_MESSAGE(rcvmsg, Lnk, 1, Write)) {
pr_debug("%s: Packet Send ACK on channel %d\n",
sc_adapter[card]->devicename,
rcvmsg.phy_link_no);
sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].free_sendbufs++;
continue;
}
/*
* Handle a connection message
*/
if (IS_CE_MESSAGE(rcvmsg, Phy, 1, Connect))
{
unsigned int callid;
setup_parm setup;
pr_debug("%s: Connect message: line %d: status %d: cause 0x%x\n",
sc_adapter[card]->devicename,
rcvmsg.phy_link_no,
rcvmsg.rsp_status,
rcvmsg.msg_data.byte_array[2]);
memcpy(&callid, rcvmsg.msg_data.byte_array, sizeof(int));
if (callid >= 0x8000 && callid <= 0xFFFF)
{
pr_debug("%s: Got Dial-Out Rsp\n",
sc_adapter[card]->devicename);
indicate_status(card, ISDN_STAT_DCONN,
(unsigned long)rcvmsg.phy_link_no - 1, NULL);
}
else if (callid >= 0x0000 && callid <= 0x7FFF)
{
int len;
pr_debug("%s: Got Incoming Call\n",
sc_adapter[card]->devicename);
len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
sizeof(setup.phone));
if (len >= sizeof(setup.phone))
continue;
len = strlcpy(setup.eazmsn,
sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
sizeof(setup.eazmsn));
if (len >= sizeof(setup.eazmsn))
continue;
setup.si1 = 7;
setup.si2 = 0;
setup.plan = 0;
setup.screen = 0;
indicate_status(card, ISDN_STAT_ICALL, (unsigned long)rcvmsg.phy_link_no - 1, (char *)&setup);
indicate_status(card, ISDN_STAT_DCONN, (unsigned long)rcvmsg.phy_link_no - 1, NULL);
}
continue;
}
/*
* Handle a disconnection message
*/
if (IS_CE_MESSAGE(rcvmsg, Phy, 1, Disconnect))
{
pr_debug("%s: disconnect message: line %d: status %d: cause 0x%x\n",
sc_adapter[card]->devicename,
rcvmsg.phy_link_no,
rcvmsg.rsp_status,
rcvmsg.msg_data.byte_array[2]);
indicate_status(card, ISDN_STAT_BHUP, (unsigned long)rcvmsg.phy_link_no - 1, NULL);
indicate_status(card, ISDN_STAT_DHUP, (unsigned long)rcvmsg.phy_link_no - 1, NULL);
continue;
}
/*
* Handle a startProc engine up message
*/
if (IS_CM_MESSAGE(rcvmsg, 5, 0, MiscEngineUp)) {
pr_debug("%s: Received EngineUp message\n",
sc_adapter[card]->devicename);
sc_adapter[card]->EngineUp = 1;
sendmessage(card, CEPID, ceReqTypeCall, ceReqClass0, ceReqCallGetMyNumber, 1, 0, NULL);
sendmessage(card, CEPID, ceReqTypeCall, ceReqClass0, ceReqCallGetMyNumber, 2, 0, NULL);
init_timer(&sc_adapter[card]->stat_timer);
sc_adapter[card]->stat_timer.function = check_phystat;
sc_adapter[card]->stat_timer.data = card;
sc_adapter[card]->stat_timer.expires = jiffies + CHECKSTAT_TIME;
add_timer(&sc_adapter[card]->stat_timer);
continue;
}
/*
* Start proc response
*/
if (IS_CM_MESSAGE(rcvmsg, 2, 0, StartProc)) {
pr_debug("%s: StartProc Response Status %d\n",
sc_adapter[card]->devicename,
rcvmsg.rsp_status);
continue;
}
/*
* Handle a GetMyNumber Rsp
*/
if (IS_CE_MESSAGE(rcvmsg, Call, 0, GetMyNumber)) {
strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
rcvmsg.msg_data.byte_array,
sizeof(rcvmsg.msg_data.byte_array));
continue;
}
/*
* PhyStatus response
*/
if (IS_CE_MESSAGE(rcvmsg, Phy, 2, Status)) {
unsigned int b1stat, b2stat;
/*
* Convert the message data to the adapter->phystat code
*/
b1stat = (unsigned int) rcvmsg.msg_data.byte_array[0];
b2stat = (unsigned int) rcvmsg.msg_data.byte_array[1];
sc_adapter[card]->nphystat = (b2stat >> 8) | b1stat; /* endian?? */
pr_debug("%s: PhyStat is 0x%2x\n",
sc_adapter[card]->devicename,
sc_adapter[card]->nphystat);
continue;
}
/*
* Handle a GetFrameFormat
*/
if (IS_CE_MESSAGE(rcvmsg, Call, 0, GetFrameFormat)) {
if (rcvmsg.msg_data.byte_array[0] != HDLC_PROTO) {
unsigned int proto = HDLC_PROTO;
/*
* Set board format to HDLC if it wasn't already
*/
pr_debug("%s: current frame format: 0x%x, will change to HDLC\n",
sc_adapter[card]->devicename,
rcvmsg.msg_data.byte_array[0]);
sendmessage(card, CEPID, ceReqTypeCall,
ceReqClass0,
ceReqCallSetFrameFormat,
(unsigned char)channel + 1,
1, &proto);
}
continue;
}
/*
* Hmm...
*/
pr_debug("%s: Received unhandled message (%d,%d,%d) link %d\n",
sc_adapter[card]->devicename,
rcvmsg.type, rcvmsg.class, rcvmsg.code,
rcvmsg.phy_link_no);
} /* while */
pr_debug("%s: Exiting Interrupt Handler\n",
sc_adapter[card]->devicename);
return IRQ_HANDLED;
}
| gpl-2.0 |
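The connect handler above uses the return value of strlcpy() (the full source length) to detect and reject truncated copies instead of silently passing a clipped phone number upward. The pattern in isolation looks like the helper below; the helper name is hypothetical.
#include <linux/errno.h>
#include <linux/string.h>

/* strlcpy() returns the length of the source string, so a return value
 * greater than or equal to the destination size means the copy would
 * have been truncated - exactly the check the handler above applies. */
static int copy_number_checked(char *dst, size_t dst_len, const char *src)
{
	if (strlcpy(dst, src, dst_len) >= dst_len)
		return -EINVAL;
	return 0;
}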
leolas/ANDROID-KERNEL-QX1 | net/rds/rdma.c | 11251 | 23017 | /*
* Copyright (c) 2007 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
#include "rds.h"
/*
* XXX
* - build with sparse
* - should we limit the size of a mr region? let transport return failure?
* - should we detect duplicate keys on a socket? hmm.
* - an rdma is an mlock, apply rlimit?
*/
/*
* get the number of pages by looking at the page indices that the start and
* end addresses fall in.
*
* Returns 0 if the vec is invalid. It is invalid if the number of bytes
* causes the address to wrap or overflows an unsigned int. This comes
* from being stored in the 'length' member of 'struct scatterlist'.
*/
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
if ((vec->addr + vec->bytes <= vec->addr) ||
(vec->bytes > (u64)UINT_MAX))
return 0;
return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
(vec->addr >> PAGE_SHIFT);
}
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
struct rds_mr *insert)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct rds_mr *mr;
while (*p) {
parent = *p;
mr = rb_entry(parent, struct rds_mr, r_rb_node);
if (key < mr->r_key)
p = &(*p)->rb_left;
else if (key > mr->r_key)
p = &(*p)->rb_right;
else
return mr;
}
if (insert) {
rb_link_node(&insert->r_rb_node, parent, p);
rb_insert_color(&insert->r_rb_node, root);
atomic_inc(&insert->r_refcount);
}
return NULL;
}
/*
* Destroy the transport-specific part of a MR.
*/
static void rds_destroy_mr(struct rds_mr *mr)
{
struct rds_sock *rs = mr->r_sock;
void *trans_private = NULL;
unsigned long flags;
rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
mr->r_key, atomic_read(&mr->r_refcount));
if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
return;
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
if (!RB_EMPTY_NODE(&mr->r_rb_node))
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
trans_private = mr->r_trans_private;
mr->r_trans_private = NULL;
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (trans_private)
mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}
void __rds_put_mr_final(struct rds_mr *mr)
{
rds_destroy_mr(mr);
kfree(mr);
}
/*
* By the time this is called we can't have any more ioctls called on
* the socket so we don't need to worry about racing with others.
*/
void rds_rdma_drop_keys(struct rds_sock *rs)
{
struct rds_mr *mr;
struct rb_node *node;
unsigned long flags;
/* Release any MRs associated with this socket */
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
while ((node = rb_first(&rs->rs_rdma_keys))) {
mr = container_of(node, struct rds_mr, r_rb_node);
if (mr->r_trans == rs->rs_transport)
mr->r_invalidate = 0;
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
rds_destroy_mr(mr);
rds_mr_put(mr);
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
}
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (rs->rs_transport && rs->rs_transport->flush_mrs)
rs->rs_transport->flush_mrs();
}
/*
* Helper function to pin user pages.
*/
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
struct page **pages, int write)
{
int ret;
ret = get_user_pages_fast(user_addr, nr_pages, write, pages);
if (ret >= 0 && ret < nr_pages) {
while (ret--)
put_page(pages[ret]);
ret = -EFAULT;
}
return ret;
}
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
u64 *cookie_ret, struct rds_mr **mr_ret)
{
struct rds_mr *mr = NULL, *found;
unsigned int nr_pages;
struct page **pages = NULL;
struct scatterlist *sg;
void *trans_private;
unsigned long flags;
rds_rdma_cookie_t cookie;
unsigned int nents;
long i;
int ret;
if (rs->rs_bound_addr == 0) {
ret = -ENOTCONN; /* XXX not a great errno */
goto out;
}
if (!rs->rs_transport->get_mr) {
ret = -EOPNOTSUPP;
goto out;
}
nr_pages = rds_pages_in_vec(&args->vec);
if (nr_pages == 0) {
ret = -EINVAL;
goto out;
}
rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
args->vec.addr, args->vec.bytes, nr_pages);
/* XXX clamp nr_pages to limit the size of this alloc? */
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto out;
}
mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
if (!mr) {
ret = -ENOMEM;
goto out;
}
atomic_set(&mr->r_refcount, 1);
RB_CLEAR_NODE(&mr->r_rb_node);
mr->r_trans = rs->rs_transport;
mr->r_sock = rs;
if (args->flags & RDS_RDMA_USE_ONCE)
mr->r_use_once = 1;
if (args->flags & RDS_RDMA_INVALIDATE)
mr->r_invalidate = 1;
if (args->flags & RDS_RDMA_READWRITE)
mr->r_write = 1;
/*
* Pin the pages that make up the user buffer and transfer the page
* pointers to the mr's sg array. We check to see if we've mapped
* the whole region after transferring the partial page references
* to the sg array so that we can have one page ref cleanup path.
*
* For now we have no flag that tells us whether the mapping is
* r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
* the zero page.
*/
ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
if (ret < 0)
goto out;
nents = ret;
sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
if (!sg) {
ret = -ENOMEM;
goto out;
}
WARN_ON(!nents);
sg_init_table(sg, nents);
/* Stick all pages into the scatterlist */
for (i = 0; i < nents; i++)
sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
rdsdebug("RDS: trans_private nents is %u\n", nents);
/* Obtain a transport specific MR. If this succeeds, the
* s/g list is now owned by the MR.
* Note that dma_map() implies that pending writes are
* flushed to RAM, so no dma_sync is needed here. */
trans_private = rs->rs_transport->get_mr(sg, nents, rs,
&mr->r_key);
if (IS_ERR(trans_private)) {
for (i = 0; i < nents; i++)
put_page(sg_page(&sg[i]));
kfree(sg);
ret = PTR_ERR(trans_private);
goto out;
}
mr->r_trans_private = trans_private;
rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
mr->r_key, (void *)(unsigned long) args->cookie_addr);
/* The user may pass us an unaligned address, but we can only
* map page aligned regions. So we keep the offset, and build
* a 64bit cookie containing <R_Key, offset> and pass that
* around. */
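/* Illustrative sketch (assumed layout -- rds.h defines the authoritative
 * helpers): the cookie is expected to pack the two values roughly as
 *
 *	cookie = (u64)page_offset << 32 | r_key;
 *
 * with rds_rdma_cookie_key() and rds_rdma_cookie_offset() undoing the
 * packing on the receive side. */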
cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
if (cookie_ret)
*cookie_ret = cookie;
if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
ret = -EFAULT;
goto out;
}
/* Inserting the new MR into the rbtree bumps its
* reference count. */
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
BUG_ON(found && found != mr);
rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
if (mr_ret) {
atomic_inc(&mr->r_refcount);
*mr_ret = mr;
}
ret = 0;
out:
kfree(pages);
if (mr)
rds_mr_put(mr);
return ret;
}
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
struct rds_get_mr_args args;
if (optlen != sizeof(struct rds_get_mr_args))
return -EINVAL;
if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
sizeof(struct rds_get_mr_args)))
return -EFAULT;
return __rds_rdma_map(rs, &args, NULL, NULL);
}
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
struct rds_get_mr_for_dest_args args;
struct rds_get_mr_args new_args;
if (optlen != sizeof(struct rds_get_mr_for_dest_args))
return -EINVAL;
if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
sizeof(struct rds_get_mr_for_dest_args)))
return -EFAULT;
/*
* Initially, just behave like get_mr().
* TODO: Implement get_mr as wrapper around this
* and deprecate it.
*/
new_args.vec = args.vec;
new_args.cookie_addr = args.cookie_addr;
new_args.flags = args.flags;
return __rds_rdma_map(rs, &new_args, NULL, NULL);
}
/*
* Free the MR indicated by the given R_Key
*/
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
struct rds_free_mr_args args;
struct rds_mr *mr;
unsigned long flags;
if (optlen != sizeof(struct rds_free_mr_args))
return -EINVAL;
if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
sizeof(struct rds_free_mr_args)))
return -EFAULT;
/* Special case - a null cookie means flush all unused MRs */
if (args.cookie == 0) {
if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
return -EINVAL;
rs->rs_transport->flush_mrs();
return 0;
}
/* Look up the MR given its R_key and remove it from the rbtree
* so nobody else finds it.
* This should also prevent races with rds_rdma_unuse.
*/
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
if (mr) {
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
if (args.flags & RDS_RDMA_INVALIDATE)
mr->r_invalidate = 1;
}
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (!mr)
return -EINVAL;
/*
* call rds_destroy_mr() ourselves so that we're sure it's done by the time
* we return. If we let rds_mr_put() do it, it might not happen until
* someone else drops their ref.
*/
rds_destroy_mr(mr);
rds_mr_put(mr);
return 0;
}
/*
* This is called when we receive an extension header that
* tells us this MR was used. It allows us to implement
* use_once semantics
*/
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
struct rds_mr *mr;
unsigned long flags;
int zot_me = 0;
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
if (!mr) {
printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
return;
}
if (mr->r_use_once || force) {
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
zot_me = 1;
}
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
/* May have to issue a dma_sync on this memory region.
* Note we could avoid this if the operation was an RDMA READ,
* but at this point we can't tell. */
if (mr->r_trans->sync_mr)
mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
/* If the MR was marked as invalidate, this will
* trigger an async flush. */
if (zot_me)
rds_destroy_mr(mr);
rds_mr_put(mr);
}
void rds_rdma_free_op(struct rm_rdma_op *ro)
{
unsigned int i;
for (i = 0; i < ro->op_nents; i++) {
struct page *page = sg_page(&ro->op_sg[i]);
/* Mark page dirty if it was possibly modified, which
* is the case for an RDMA_READ, which copies from remote
* to local memory */
if (!ro->op_write) {
BUG_ON(irqs_disabled());
set_page_dirty(page);
}
put_page(page);
}
kfree(ro->op_notifier);
ro->op_notifier = NULL;
ro->op_active = 0;
}
void rds_atomic_free_op(struct rm_atomic_op *ao)
{
struct page *page = sg_page(ao->op_sg);
/* Mark page dirty if it was possibly modified, which
* is the case for an RDMA_READ, which copies from remote
* to local memory */
set_page_dirty(page);
put_page(page);
kfree(ao->op_notifier);
ao->op_notifier = NULL;
ao->op_active = 0;
}
/*
* Count the number of pages needed to describe an incoming iovec array.
*/
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
int tot_pages = 0;
unsigned int nr_pages;
unsigned int i;
/* figure out the number of pages in the vector */
for (i = 0; i < nr_iovecs; i++) {
nr_pages = rds_pages_in_vec(&iov[i]);
if (nr_pages == 0)
return -EINVAL;
tot_pages += nr_pages;
/*
* nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
* so tot_pages cannot overflow without first going negative.
*/
if (tot_pages < 0)
return -EINVAL;
}
return tot_pages;
}
int rds_rdma_extra_size(struct rds_rdma_args *args)
{
struct rds_iovec vec;
struct rds_iovec __user *local_vec;
int tot_pages = 0;
unsigned int nr_pages;
unsigned int i;
local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
/* figure out the number of pages in the vector */
for (i = 0; i < args->nr_local; i++) {
if (copy_from_user(&vec, &local_vec[i],
sizeof(struct rds_iovec)))
return -EFAULT;
nr_pages = rds_pages_in_vec(&vec);
if (nr_pages == 0)
return -EINVAL;
tot_pages += nr_pages;
/*
* nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
* so tot_pages cannot overflow without first going negative.
*/
if (tot_pages < 0)
return -EINVAL;
}
return tot_pages * sizeof(struct scatterlist);
}
/*
* The application asks for an RDMA transfer.
* Extract all arguments and set up the rdma_op
*/
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
struct rds_rdma_args *args;
struct rm_rdma_op *op = &rm->rdma;
int nr_pages;
unsigned int nr_bytes;
struct page **pages = NULL;
struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
int iov_size;
unsigned int i, j;
int ret = 0;
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
|| rm->rdma.op_active)
return -EINVAL;
args = CMSG_DATA(cmsg);
if (rs->rs_bound_addr == 0) {
ret = -ENOTCONN; /* XXX not a great errno */
goto out;
}
if (args->nr_local > UIO_MAXIOV) {
ret = -EMSGSIZE;
goto out;
}
/* Check whether to allocate the iovec area */
iov_size = args->nr_local * sizeof(struct rds_iovec);
if (args->nr_local > UIO_FASTIOV) {
iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
if (!iovs) {
ret = -ENOMEM;
goto out;
}
}
if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
ret = -EFAULT;
goto out;
}
nr_pages = rds_rdma_pages(iovs, args->nr_local);
if (nr_pages < 0) {
ret = -EINVAL;
goto out;
}
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto out;
}
op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
op->op_active = 1;
op->op_recverr = rs->rs_recverr;
WARN_ON(!nr_pages);
op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
if (!op->op_sg) {
ret = -ENOMEM;
goto out;
}
if (op->op_notify || op->op_recverr) {
/* We allocate an uninitialized notifier here, because
* we don't want to do that in the completion handler. We
* would have to use GFP_ATOMIC there, and don't want to deal
* with failed allocations.
*/
op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
if (!op->op_notifier) {
ret = -ENOMEM;
goto out;
}
op->op_notifier->n_user_token = args->user_token;
op->op_notifier->n_status = RDS_RDMA_SUCCESS;
}
/* The cookie contains the R_Key of the remote memory region, and
* optionally an offset into it. This is how we implement RDMA into
* unaligned memory.
* When setting up the RDMA, we need to add that offset to the
* destination address (which is really an offset into the MR)
* FIXME: We may want to move this into ib_rdma.c
*/
op->op_rkey = rds_rdma_cookie_key(args->cookie);
op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
nr_bytes = 0;
rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
(unsigned long long)args->nr_local,
(unsigned long long)args->remote_vec.addr,
op->op_rkey);
for (i = 0; i < args->nr_local; i++) {
struct rds_iovec *iov = &iovs[i];
/* no need to re-check here; rds_rdma_pages() already verified nr is nonzero */
unsigned int nr = rds_pages_in_vec(iov);
rs->rs_user_addr = iov->addr;
rs->rs_user_bytes = iov->bytes;
/* If it's a WRITE operation, we want to pin the pages for reading.
* If it's a READ operation, we need to pin the pages for writing.
*/
ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
if (ret < 0)
goto out;
rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
nr_bytes, nr, iov->bytes, iov->addr);
nr_bytes += iov->bytes;
for (j = 0; j < nr; j++) {
unsigned int offset = iov->addr & ~PAGE_MASK;
struct scatterlist *sg;
sg = &op->op_sg[op->op_nents + j];
sg_set_page(sg, pages[j],
min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
offset);
rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
sg->offset, sg->length, iov->addr, iov->bytes);
iov->addr += sg->length;
iov->bytes -= sg->length;
}
op->op_nents += nr;
}
if (nr_bytes > args->remote_vec.bytes) {
rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
nr_bytes,
(unsigned int) args->remote_vec.bytes);
ret = -EINVAL;
goto out;
}
op->op_bytes = nr_bytes;
out:
if (iovs != iovstack)
sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
kfree(pages);
if (ret)
rds_rdma_free_op(op);
else
rds_stats_inc(s_send_rdma);
return ret;
}
/*
* The application wants us to pass an RDMA destination (aka MR)
* to the remote peer.
*/
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
unsigned long flags;
struct rds_mr *mr;
u32 r_key;
int err = 0;
if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
rm->m_rdma_cookie != 0)
return -EINVAL;
memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));
/* We are reusing a previously mapped MR here. Most likely, the
* application has written to the buffer, so we need to explicitly
* flush those writes to RAM. Otherwise the HCA may not see them
* when doing a DMA from that buffer.
*/
r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
if (!mr)
err = -EINVAL; /* invalid r_key */
else
atomic_inc(&mr->r_refcount);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (mr) {
mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
rm->rdma.op_rdma_mr = mr;
}
return err;
}
/*
* The application passes us an address range it wants to enable RDMA
* to/from. We map the area, and save the <R_Key,offset> pair
* in rm->m_rdma_cookie. This causes it to be sent along to the peer
* in an extension header.
*/
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
rm->m_rdma_cookie != 0)
return -EINVAL;
return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
}
/*
* Fill in rds_message for an atomic request.
*/
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
struct page *page = NULL;
struct rds_atomic_args *args;
int ret = 0;
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
|| rm->atomic.op_active)
return -EINVAL;
args = CMSG_DATA(cmsg);
/* Nonmasked & masked cmsg ops converted to masked hw ops */
switch (cmsg->cmsg_type) {
case RDS_CMSG_ATOMIC_FADD:
rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
rm->atomic.op_m_fadd.add = args->fadd.add;
rm->atomic.op_m_fadd.nocarry_mask = 0;
break;
case RDS_CMSG_MASKED_ATOMIC_FADD:
rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
rm->atomic.op_m_fadd.add = args->m_fadd.add;
rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
break;
case RDS_CMSG_ATOMIC_CSWP:
rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
rm->atomic.op_m_cswp.compare = args->cswp.compare;
rm->atomic.op_m_cswp.swap = args->cswp.swap;
rm->atomic.op_m_cswp.compare_mask = ~0;
rm->atomic.op_m_cswp.swap_mask = ~0;
break;
case RDS_CMSG_MASKED_ATOMIC_CSWP:
rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
break;
default:
BUG(); /* should never happen */
}
rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
rm->atomic.op_active = 1;
rm->atomic.op_recverr = rs->rs_recverr;
rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
if (!rm->atomic.op_sg) {
ret = -ENOMEM;
goto err;
}
/* verify the local address is 8-byte aligned */
if (args->local_addr & 0x7) {
ret = -EFAULT;
goto err;
}
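/* Assumption behind the check above: the transport's atomic op acts on a
 * naturally aligned u64, and an unaligned local_addr could also straddle a
 * page boundary, which the single-page pin below would not cover. */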
ret = rds_pin_pages(args->local_addr, 1, &page, 1);
if (ret != 1)
goto err;
ret = 0;
sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
if (rm->atomic.op_notify || rm->atomic.op_recverr) {
/* We allocate an uninitialized notifier here, because
* we don't want to do that in the completion handler. We
* would have to use GFP_ATOMIC there, and don't want to deal
* with failed allocations.
*/
rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
if (!rm->atomic.op_notifier) {
ret = -ENOMEM;
goto err;
}
rm->atomic.op_notifier->n_user_token = args->user_token;
rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
}
rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);
return ret;
err:
if (page)
put_page(page);
kfree(rm->atomic.op_notifier);
return ret;
}
| gpl-2.0 |
Fusion-Devices/android_kernel_lge_msm8994 | net/rds/rdma.c | 11251 | 23017 | /*
* Copyright (c) 2007 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
#include "rds.h"
/*
* XXX
* - build with sparse
* - should we limit the size of a mr region? let transport return failure?
* - should we detect duplicate keys on a socket? hmm.
* - an rdma is an mlock, apply rlimit?
*/
/*
* get the number of pages by looking at the page indices that the start and
* end addresses fall in.
*
* Returns 0 if the vec is invalid. It is invalid if the number of bytes
* causes the address to wrap or overflows an unsigned int. This comes
* from being stored in the 'length' member of 'struct scatterlist'.
*/
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
if ((vec->addr + vec->bytes <= vec->addr) ||
(vec->bytes > (u64)UINT_MAX))
return 0;
return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
(vec->addr >> PAGE_SHIFT);
}
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
struct rds_mr *insert)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct rds_mr *mr;
while (*p) {
parent = *p;
mr = rb_entry(parent, struct rds_mr, r_rb_node);
if (key < mr->r_key)
p = &(*p)->rb_left;
else if (key > mr->r_key)
p = &(*p)->rb_right;
else
return mr;
}
if (insert) {
rb_link_node(&insert->r_rb_node, parent, p);
rb_insert_color(&insert->r_rb_node, root);
atomic_inc(&insert->r_refcount);
}
return NULL;
}
/*
* Destroy the transport-specific part of a MR.
*/
static void rds_destroy_mr(struct rds_mr *mr)
{
struct rds_sock *rs = mr->r_sock;
void *trans_private = NULL;
unsigned long flags;
rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
mr->r_key, atomic_read(&mr->r_refcount));
if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
return;
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
if (!RB_EMPTY_NODE(&mr->r_rb_node))
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
trans_private = mr->r_trans_private;
mr->r_trans_private = NULL;
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (trans_private)
mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}
void __rds_put_mr_final(struct rds_mr *mr)
{
rds_destroy_mr(mr);
kfree(mr);
}
/*
* By the time this is called we can't have any more ioctls called on
* the socket so we don't need to worry about racing with others.
*/
void rds_rdma_drop_keys(struct rds_sock *rs)
{
struct rds_mr *mr;
struct rb_node *node;
unsigned long flags;
/* Release any MRs associated with this socket */
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
while ((node = rb_first(&rs->rs_rdma_keys))) {
mr = container_of(node, struct rds_mr, r_rb_node);
if (mr->r_trans == rs->rs_transport)
mr->r_invalidate = 0;
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
rds_destroy_mr(mr);
rds_mr_put(mr);
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
}
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (rs->rs_transport && rs->rs_transport->flush_mrs)
rs->rs_transport->flush_mrs();
}
/*
* Helper function to pin user pages.
*/
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
struct page **pages, int write)
{
int ret;
ret = get_user_pages_fast(user_addr, nr_pages, write, pages);
if (ret >= 0 && ret < nr_pages) {
while (ret--)
put_page(pages[ret]);
ret = -EFAULT;
}
return ret;
}
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
u64 *cookie_ret, struct rds_mr **mr_ret)
{
struct rds_mr *mr = NULL, *found;
unsigned int nr_pages;
struct page **pages = NULL;
struct scatterlist *sg;
void *trans_private;
unsigned long flags;
rds_rdma_cookie_t cookie;
unsigned int nents;
long i;
int ret;
if (rs->rs_bound_addr == 0) {
ret = -ENOTCONN; /* XXX not a great errno */
goto out;
}
if (!rs->rs_transport->get_mr) {
ret = -EOPNOTSUPP;
goto out;
}
nr_pages = rds_pages_in_vec(&args->vec);
if (nr_pages == 0) {
ret = -EINVAL;
goto out;
}
rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
args->vec.addr, args->vec.bytes, nr_pages);
/* XXX clamp nr_pages to limit the size of this alloc? */
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto out;
}
mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
if (!mr) {
ret = -ENOMEM;
goto out;
}
atomic_set(&mr->r_refcount, 1);
RB_CLEAR_NODE(&mr->r_rb_node);
mr->r_trans = rs->rs_transport;
mr->r_sock = rs;
if (args->flags & RDS_RDMA_USE_ONCE)
mr->r_use_once = 1;
if (args->flags & RDS_RDMA_INVALIDATE)
mr->r_invalidate = 1;
if (args->flags & RDS_RDMA_READWRITE)
mr->r_write = 1;
/*
* Pin the pages that make up the user buffer and transfer the page
* pointers to the mr's sg array. We check to see if we've mapped
* the whole region after transferring the partial page references
* to the sg array so that we can have one page ref cleanup path.
*
* For now we have no flag that tells us whether the mapping is
* r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
* the zero page.
*/
ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
if (ret < 0)
goto out;
nents = ret;
sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
if (!sg) {
ret = -ENOMEM;
goto out;
}
WARN_ON(!nents);
sg_init_table(sg, nents);
/* Stick all pages into the scatterlist */
for (i = 0 ; i < nents; i++)
sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
rdsdebug("RDS: trans_private nents is %u\n", nents);
/* Obtain a transport specific MR. If this succeeds, the
* s/g list is now owned by the MR.
* Note that dma_map() implies that pending writes are
* flushed to RAM, so no dma_sync is needed here. */
trans_private = rs->rs_transport->get_mr(sg, nents, rs,
&mr->r_key);
if (IS_ERR(trans_private)) {
for (i = 0 ; i < nents; i++)
put_page(sg_page(&sg[i]));
kfree(sg);
ret = PTR_ERR(trans_private);
goto out;
}
mr->r_trans_private = trans_private;
rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
mr->r_key, (void *)(unsigned long) args->cookie_addr);
/* The user may pass us an unaligned address, but we can only
* map page aligned regions. So we keep the offset, and build
* a 64bit cookie containing <R_Key, offset> and pass that
* around. */
cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
if (cookie_ret)
*cookie_ret = cookie;
if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
ret = -EFAULT;
goto out;
}
/* Inserting the new MR into the rbtree bumps its
* reference count. */
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
BUG_ON(found && found != mr);
rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
if (mr_ret) {
atomic_inc(&mr->r_refcount);
*mr_ret = mr;
}
ret = 0;
out:
kfree(pages);
if (mr)
rds_mr_put(mr);
return ret;
}
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
struct rds_get_mr_args args;
if (optlen != sizeof(struct rds_get_mr_args))
return -EINVAL;
if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
sizeof(struct rds_get_mr_args)))
return -EFAULT;
return __rds_rdma_map(rs, &args, NULL, NULL);
}
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
struct rds_get_mr_for_dest_args args;
struct rds_get_mr_args new_args;
if (optlen != sizeof(struct rds_get_mr_for_dest_args))
return -EINVAL;
if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
sizeof(struct rds_get_mr_for_dest_args)))
return -EFAULT;
/*
* Initially, just behave like get_mr().
* TODO: Implement get_mr as wrapper around this
* and deprecate it.
*/
new_args.vec = args.vec;
new_args.cookie_addr = args.cookie_addr;
new_args.flags = args.flags;
return __rds_rdma_map(rs, &new_args, NULL, NULL);
}
/*
* Free the MR indicated by the given R_Key
*/
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
struct rds_free_mr_args args;
struct rds_mr *mr;
unsigned long flags;
if (optlen != sizeof(struct rds_free_mr_args))
return -EINVAL;
if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
sizeof(struct rds_free_mr_args)))
return -EFAULT;
/* Special case - a null cookie means flush all unused MRs */
if (args.cookie == 0) {
if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
return -EINVAL;
rs->rs_transport->flush_mrs();
return 0;
}
/* Look up the MR given its R_key and remove it from the rbtree
* so nobody else finds it.
* This should also prevent races with rds_rdma_unuse.
*/
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
if (mr) {
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
if (args.flags & RDS_RDMA_INVALIDATE)
mr->r_invalidate = 1;
}
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (!mr)
return -EINVAL;
/*
* call rds_destroy_mr() ourselves so that we're sure it's done by the time
* we return. If we let rds_mr_put() do it it might not happen until
* someone else drops their ref.
*/
rds_destroy_mr(mr);
rds_mr_put(mr);
return 0;
}
/*
* This is called when we receive an extension header that
* tells us this MR was used. It allows us to implement
* use_once semantics
*/
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
struct rds_mr *mr;
unsigned long flags;
int zot_me = 0;
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
if (!mr) {
printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
return;
}
if (mr->r_use_once || force) {
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
zot_me = 1;
}
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
/* May have to issue a dma_sync on this memory region.
* Note we could avoid this if the operation was a RDMA READ,
* but at this point we can't tell. */
if (mr->r_trans->sync_mr)
mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
/* If the MR was marked as invalidate, this will
* trigger an async flush. */
if (zot_me)
rds_destroy_mr(mr);
rds_mr_put(mr);
}
void rds_rdma_free_op(struct rm_rdma_op *ro)
{
unsigned int i;
for (i = 0; i < ro->op_nents; i++) {
struct page *page = sg_page(&ro->op_sg[i]);
/* Mark page dirty if it was possibly modified, which
* is the case for a RDMA_READ which copies from remote
* to local memory */
if (!ro->op_write) {
BUG_ON(irqs_disabled());
set_page_dirty(page);
}
put_page(page);
}
kfree(ro->op_notifier);
ro->op_notifier = NULL;
ro->op_active = 0;
}
void rds_atomic_free_op(struct rm_atomic_op *ao)
{
struct page *page = sg_page(ao->op_sg);
/* Mark page dirty if it was possibly modified, which
* is the case for a RDMA_READ which copies from remote
* to local memory */
set_page_dirty(page);
put_page(page);
kfree(ao->op_notifier);
ao->op_notifier = NULL;
ao->op_active = 0;
}
/*
* Count the number of pages needed to describe an incoming iovec array.
*/
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
int tot_pages = 0;
unsigned int nr_pages;
unsigned int i;
/* figure out the number of pages in the vector */
for (i = 0; i < nr_iovecs; i++) {
nr_pages = rds_pages_in_vec(&iov[i]);
if (nr_pages == 0)
return -EINVAL;
tot_pages += nr_pages;
/*
* nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
* so tot_pages cannot overflow without first going negative.
*/
if (tot_pages < 0)
return -EINVAL;
}
return tot_pages;
}
int rds_rdma_extra_size(struct rds_rdma_args *args)
{
struct rds_iovec vec;
struct rds_iovec __user *local_vec;
int tot_pages = 0;
unsigned int nr_pages;
unsigned int i;
local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
/* figure out the number of pages in the vector */
for (i = 0; i < args->nr_local; i++) {
if (copy_from_user(&vec, &local_vec[i],
sizeof(struct rds_iovec)))
return -EFAULT;
nr_pages = rds_pages_in_vec(&vec);
if (nr_pages == 0)
return -EINVAL;
tot_pages += nr_pages;
/*
* nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
* so tot_pages cannot overflow without first going negative.
*/
if (tot_pages < 0)
return -EINVAL;
}
return tot_pages * sizeof(struct scatterlist);
}
/*
* The application asks for a RDMA transfer.
* Extract all arguments and set up the rdma_op
*/
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
struct rds_rdma_args *args;
struct rm_rdma_op *op = &rm->rdma;
int nr_pages;
unsigned int nr_bytes;
struct page **pages = NULL;
struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
int iov_size;
unsigned int i, j;
int ret = 0;
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
|| rm->rdma.op_active)
return -EINVAL;
args = CMSG_DATA(cmsg);
if (rs->rs_bound_addr == 0) {
ret = -ENOTCONN; /* XXX not a great errno */
goto out;
}
if (args->nr_local > UIO_MAXIOV) {
ret = -EMSGSIZE;
goto out;
}
/* Check whether to allocate the iovec area */
iov_size = args->nr_local * sizeof(struct rds_iovec);
if (args->nr_local > UIO_FASTIOV) {
iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
if (!iovs) {
ret = -ENOMEM;
goto out;
}
}
if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
ret = -EFAULT;
goto out;
}
nr_pages = rds_rdma_pages(iovs, args->nr_local);
if (nr_pages < 0) {
ret = -EINVAL;
goto out;
}
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto out;
}
op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
op->op_active = 1;
op->op_recverr = rs->rs_recverr;
WARN_ON(!nr_pages);
op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
if (!op->op_sg) {
ret = -ENOMEM;
goto out;
}
if (op->op_notify || op->op_recverr) {
/* We allocate an uninitialized notifier here, because
* we don't want to do that in the completion handler. We
* would have to use GFP_ATOMIC there, and don't want to deal
* with failed allocations.
*/
op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
if (!op->op_notifier) {
ret = -ENOMEM;
goto out;
}
op->op_notifier->n_user_token = args->user_token;
op->op_notifier->n_status = RDS_RDMA_SUCCESS;
}
/* The cookie contains the R_Key of the remote memory region, and
* optionally an offset into it. This is how we implement RDMA into
* unaligned memory.
* When setting up the RDMA, we need to add that offset to the
* destination address (which is really an offset into the MR)
* FIXME: We may want to move this into ib_rdma.c
*/
op->op_rkey = rds_rdma_cookie_key(args->cookie);
op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
nr_bytes = 0;
rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
(unsigned long long)args->nr_local,
(unsigned long long)args->remote_vec.addr,
op->op_rkey);
for (i = 0; i < args->nr_local; i++) {
struct rds_iovec *iov = &iovs[i];
/* don't need to check, rds_rdma_pages() verified nr will be +nonzero */
unsigned int nr = rds_pages_in_vec(iov);
rs->rs_user_addr = iov->addr;
rs->rs_user_bytes = iov->bytes;
/* If it's a WRITE operation, we want to pin the pages for reading.
* If it's a READ operation, we need to pin the pages for writing.
*/
ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
if (ret < 0)
goto out;
rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
nr_bytes, nr, iov->bytes, iov->addr);
nr_bytes += iov->bytes;
for (j = 0; j < nr; j++) {
unsigned int offset = iov->addr & ~PAGE_MASK;
struct scatterlist *sg;
sg = &op->op_sg[op->op_nents + j];
sg_set_page(sg, pages[j],
min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
offset);
rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
sg->offset, sg->length, iov->addr, iov->bytes);
iov->addr += sg->length;
iov->bytes -= sg->length;
}
op->op_nents += nr;
}
if (nr_bytes > args->remote_vec.bytes) {
rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
nr_bytes,
(unsigned int) args->remote_vec.bytes);
ret = -EINVAL;
goto out;
}
op->op_bytes = nr_bytes;
out:
if (iovs != iovstack)
sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
kfree(pages);
if (ret)
rds_rdma_free_op(op);
else
rds_stats_inc(s_send_rdma);
return ret;
}
/*
* The application wants us to pass an RDMA destination (aka MR)
* to the remote
*/
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
unsigned long flags;
struct rds_mr *mr;
u32 r_key;
int err = 0;
if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
rm->m_rdma_cookie != 0)
return -EINVAL;
memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));
/* We are reusing a previously mapped MR here. Most likely, the
* application has written to the buffer, so we need to explicitly
* flush those writes to RAM. Otherwise the HCA may not see them
* when doing a DMA from that buffer.
*/
r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
if (!mr)
err = -EINVAL; /* invalid r_key */
else
atomic_inc(&mr->r_refcount);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (mr) {
mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
rm->rdma.op_rdma_mr = mr;
}
return err;
}
/*
* The application passes us an address range it wants to enable RDMA
* to/from. We map the area, and save the <R_Key,offset> pair
* in rm->m_rdma_cookie. This causes it to be sent along to the peer
* in an extension header.
*/
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
rm->m_rdma_cookie != 0)
return -EINVAL;
return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
}
/*
* Fill in rds_message for an atomic request.
*/
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
struct page *page = NULL;
struct rds_atomic_args *args;
int ret = 0;
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
|| rm->atomic.op_active)
return -EINVAL;
args = CMSG_DATA(cmsg);
/* Nonmasked & masked cmsg ops converted to masked hw ops */
switch (cmsg->cmsg_type) {
case RDS_CMSG_ATOMIC_FADD:
rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
rm->atomic.op_m_fadd.add = args->fadd.add;
rm->atomic.op_m_fadd.nocarry_mask = 0;
break;
case RDS_CMSG_MASKED_ATOMIC_FADD:
rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
rm->atomic.op_m_fadd.add = args->m_fadd.add;
rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
break;
case RDS_CMSG_ATOMIC_CSWP:
rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
rm->atomic.op_m_cswp.compare = args->cswp.compare;
rm->atomic.op_m_cswp.swap = args->cswp.swap;
rm->atomic.op_m_cswp.compare_mask = ~0;
rm->atomic.op_m_cswp.swap_mask = ~0;
break;
case RDS_CMSG_MASKED_ATOMIC_CSWP:
rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
break;
default:
BUG(); /* should never happen */
}
rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
rm->atomic.op_active = 1;
rm->atomic.op_recverr = rs->rs_recverr;
rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
if (!rm->atomic.op_sg) {
ret = -ENOMEM;
goto err;
}
/* verify 8 byte-aligned */
if (args->local_addr & 0x7) {
ret = -EFAULT;
goto err;
}
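/* rds_pin_pages() returns the number of pages pinned; a single page
* backs the 8-byte atomic target, so anything other than 1 is treated
* as an error and ret is cleared before continuing.
*/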
ret = rds_pin_pages(args->local_addr, 1, &page, 1);
if (ret != 1)
goto err;
ret = 0;
sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
if (rm->atomic.op_notify || rm->atomic.op_recverr) {
/* We allocate an uninitialized notifier here, because
* we don't want to do that in the completion handler. We
* would have to use GFP_ATOMIC there, and don't want to deal
* with failed allocations.
*/
rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
if (!rm->atomic.op_notifier) {
ret = -ENOMEM;
goto err;
}
rm->atomic.op_notifier->n_user_token = args->user_token;
rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
}
rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);
return ret;
err:
if (page)
put_page(page);
kfree(rm->atomic.op_notifier);
return ret;
}
| gpl-2.0 |
henrix/beagle-linux | arch/blackfin/kernel/perf_event.c | 244 | 11722 | /*
* Blackfin performance counters
*
* Copyright 2011 Analog Devices Inc.
*
* Ripped from SuperH version:
*
* Copyright (C) 2009 Paul Mundt
*
* Heavily based on the x86 and PowerPC implementations.
*
* x86:
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
*
* ppc:
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <asm/bfin_pfmon.h>
/*
* We have two counters, and each counter can support an event type.
* The 'o' is PFCNTx=1 and 's' is PFCNTx=0
*
* 0x04 o pc invariant branches
* 0x06 o mispredicted branches
* 0x09 o predicted branches taken
* 0x0B o EXCPT insn
* 0x0C o CSYNC/SSYNC insn
* 0x0D o Insns committed
* 0x0E o Interrupts taken
* 0x0F o Misaligned address exceptions
* 0x80 o Code memory fetches stalled due to DMA
* 0x83 o 64bit insn fetches delivered
* 0x9A o data cache fills (bank a)
* 0x9B o data cache fills (bank b)
* 0x9C o data cache lines evicted (bank a)
* 0x9D o data cache lines evicted (bank b)
* 0x9E o data cache high priority fills
* 0x9F o data cache low priority fills
* 0x00 s loop 0 iterations
* 0x01 s loop 1 iterations
* 0x0A s CSYNC/SSYNC stalls
* 0x10 s DAG read/after write hazards
* 0x13 s RAW data hazards
* 0x81 s code TAG stalls
* 0x82 s code fill stalls
* 0x90 s processor to memory stalls
* 0x91 s data memory stalls not hidden by 0x90
* 0x92 s data store buffer full stalls
* 0x93 s data memory write buffer full stalls due to high->low priority
* 0x95 s data memory fill buffer stalls
* 0x96 s data TAG collision stalls
* 0x97 s data collision stalls
* 0x98 s data stalls
* 0x99 s data stalls sent to processor
*/
static const int event_map[] = {
/* use CYCLES cpu register */
[PERF_COUNT_HW_CPU_CYCLES] = -1,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x0D,
[PERF_COUNT_HW_CACHE_REFERENCES] = -1,
[PERF_COUNT_HW_CACHE_MISSES] = 0x83,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x09,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x06,
[PERF_COUNT_HW_BUS_CYCLES] = -1,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
static const int cache_events[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[C(L1D)] = { /* Data bank A */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS) ] = 0x9A,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS) ] = 0,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS) ] = 0,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS) ] = 0x83,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS) ] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS) ] = 0,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS) ] = -1,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS) ] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS) ] = -1,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS) ] = -1,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS) ] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS) ] = -1,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS) ] = -1,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS) ] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS) ] = -1,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS) ] = -1,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS) ] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS) ] = -1,
},
},
};
const char *perf_pmu_name(void)
{
return "bfin";
}
EXPORT_SYMBOL(perf_pmu_name);
int perf_num_counters(void)
{
return ARRAY_SIZE(event_map);
}
EXPORT_SYMBOL(perf_num_counters);
static u64 bfin_pfmon_read(int idx)
{
return bfin_read32(PFCNTR0 + (idx * 4));
}
static void bfin_pfmon_disable(struct hw_perf_event *hwc, int idx)
{
bfin_write_PFCTL(bfin_read_PFCTL() & ~PFCEN(idx, PFCEN_MASK));
}
static void bfin_pfmon_enable(struct hw_perf_event *hwc, int idx)
{
u32 val, mask;
val = PFPWR;
if (idx) {
mask = ~(PFCNT1 | PFMON1 | PFCEN1 | PEMUSW1);
/* The packed config is for event0, so shift it to event1 slots */
val |= (hwc->config << (PFMON1_P - PFMON0_P));
val |= (hwc->config & PFCNT0) << (PFCNT1_P - PFCNT0_P);
bfin_write_PFCNTR1(0);
} else {
mask = ~(PFCNT0 | PFMON0 | PFCEN0 | PEMUSW0);
val |= hwc->config;
bfin_write_PFCNTR0(0);
}
bfin_write_PFCTL((bfin_read_PFCTL() & mask) | val);
}
static void bfin_pfmon_disable_all(void)
{
bfin_write_PFCTL(bfin_read_PFCTL() & ~PFPWR);
}
static void bfin_pfmon_enable_all(void)
{
bfin_write_PFCTL(bfin_read_PFCTL() | PFPWR);
}
struct cpu_hw_events {
struct perf_event *events[MAX_HWEVENTS];
unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
static int hw_perf_cache_event(int config, int *evp)
{
unsigned long type, op, result;
int ev;
/* unpack config */
type = config & 0xff;
op = (config >> 8) & 0xff;
result = (config >> 16) & 0xff;
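/* Example: config = C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
* unpacks to the L1D read-miss slot, which maps to hardware event 0x9A
* ("data cache fills (bank a)") in cache_events[][][] above.
*/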
if (type >= PERF_COUNT_HW_CACHE_MAX ||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ev = cache_events[type][op][result];
if (ev == 0)
return -EOPNOTSUPP;
if (ev == -1)
return -EINVAL;
*evp = ev;
return 0;
}
static void bfin_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
u64 prev_raw_count, new_raw_count;
s64 delta;
int shift = 0;
/*
* Depending on the counter configuration, they may or may not
* be chained, in which case the previous counter value can be
* updated underneath us if the lower-half overflows.
*
* Our tactic to handle this is to first atomically read and
* exchange a new raw count - then add that new-prev delta
* count to the generic counter atomically.
*
* As there is no interrupt associated with the overflow events,
* this is the simplest approach for maintaining consistency.
*/
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = bfin_pfmon_read(idx);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
/*
* Now we have the new raw value and have updated the prev
* timestamp already. We can now calculate the elapsed delta
* (counter-)time and add that to the generic counter.
*
* Careful, not all hw sign-extends above the physical width
* of the count.
*/
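/* shift is fixed at 0 in this driver; the (x << shift) >> shift
* pattern is kept from the generic perf template, where a non-zero
* shift sign-extends counters narrower than 64 bits.
*/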
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
local64_add(delta, &event->count);
}
static void bfin_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (!(event->hw.state & PERF_HES_STOPPED)) {
bfin_pfmon_disable(hwc, idx);
cpuc->events[idx] = NULL;
event->hw.state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
bfin_perf_event_update(event, &event->hw, idx);
event->hw.state |= PERF_HES_UPTODATE;
}
}
static void bfin_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (WARN_ON_ONCE(idx == -1))
return;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
cpuc->events[idx] = event;
event->hw.state = 0;
bfin_pfmon_enable(hwc, idx);
}
static void bfin_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
bfin_pmu_stop(event, PERF_EF_UPDATE);
__clear_bit(event->hw.idx, cpuc->used_mask);
perf_event_update_userpage(event);
}
static int bfin_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
int ret = -EAGAIN;
perf_pmu_disable(event->pmu);
if (__test_and_set_bit(idx, cpuc->used_mask)) {
idx = find_first_zero_bit(cpuc->used_mask, MAX_HWEVENTS);
if (idx == MAX_HWEVENTS)
goto out;
__set_bit(idx, cpuc->used_mask);
hwc->idx = idx;
}
bfin_pfmon_disable(hwc, idx);
event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (flags & PERF_EF_START)
bfin_pmu_start(event, PERF_EF_RELOAD);
perf_event_update_userpage(event);
ret = 0;
out:
perf_pmu_enable(event->pmu);
return ret;
}
static void bfin_pmu_read(struct perf_event *event)
{
bfin_perf_event_update(event, &event->hw, event->hw.idx);
}
static int bfin_pmu_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
int config = -1;
int ret;
if (attr->exclude_hv || attr->exclude_idle)
return -EPERM;
ret = 0;
switch (attr->type) {
case PERF_TYPE_RAW:
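/* The low bits of the raw config carry the hardware event number;
* bit 0x100 appears to select the PFCNTx group (PFCNT is cleared
* when it is set). The value is packed for the event0 field layout
* and shifted into the event1 slots by bfin_pfmon_enable() if needed.
*/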
config = PFMON(0, attr->config & PFMON_MASK) |
PFCNT(0, !(attr->config & 0x100));
break;
case PERF_TYPE_HW_CACHE:
ret = hw_perf_cache_event(attr->config, &config);
break;
case PERF_TYPE_HARDWARE:
if (attr->config >= ARRAY_SIZE(event_map))
return -EINVAL;
config = event_map[attr->config];
break;
}
if (config == -1)
return -EINVAL;
if (!attr->exclude_kernel)
config |= PFCEN(0, PFCEN_ENABLE_SUPV);
if (!attr->exclude_user)
config |= PFCEN(0, PFCEN_ENABLE_USER);
hwc->config |= config;
return ret;
}
static void bfin_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
struct hw_perf_event *hwc;
int i;
for (i = 0; i < MAX_HWEVENTS; ++i) {
event = cpuc->events[i];
if (!event)
continue;
hwc = &event->hw;
bfin_pfmon_enable(hwc, hwc->idx);
}
bfin_pfmon_enable_all();
}
static void bfin_pmu_disable(struct pmu *pmu)
{
bfin_pfmon_disable_all();
}
static struct pmu pmu = {
.pmu_enable = bfin_pmu_enable,
.pmu_disable = bfin_pmu_disable,
.event_init = bfin_pmu_event_init,
.add = bfin_pmu_add,
.del = bfin_pmu_del,
.start = bfin_pmu_start,
.stop = bfin_pmu_stop,
.read = bfin_pmu_read,
};
static void bfin_pmu_setup(int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
memset(cpuhw, 0, sizeof(struct cpu_hw_events));
}
static int
bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
bfin_write_PFCTL(0);
bfin_pmu_setup(cpu);
break;
default:
break;
}
return NOTIFY_OK;
}
static int __init bfin_pmu_init(void)
{
int ret;
/*
* All of the on-chip counters are "limited", in that they have
* no interrupts, and are therefore unable to do sampling without
* further work and timer assistance.
*/
pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
if (!ret)
perf_cpu_notifier(bfin_pmu_notifier);
return ret;
}
early_initcall(bfin_pmu_init);
| gpl-2.0 |
simone201/Talon-SH-Vibrant | drivers/net/ll_temac_main.c | 756 | 28467 | /*
* Driver for Xilinx TEMAC Ethernet device
*
* Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
* Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
*
* This is a driver for the Xilinx ll_temac ipcore which is often used
* in the Virtex and Spartan series of chips.
*
* Notes:
* - The ll_temac hardware uses indirect access for many of the TEMAC
* registers, including the MDIO bus. However, indirect access to MDIO
* registers takes considerably more clock cycles than to TEMAC registers.
* MDIO accesses are long, so threads doing them should probably sleep
* rather than busywait. However, since only one indirect access can be
* in progress at any given time, that means that *all* indirect accesses
* could end up sleeping (to wait for an MDIO access to complete).
* Fortunately none of the indirect accesses are on the 'hot' path for tx
* or rx, so this should be okay.
*
* TODO:
* - Factor out locallink DMA code into separate driver
* - Fix multicast assignment.
* - Fix support for hardware checksumming.
* - Testing. Lots and lots of testing.
*
*/
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h> /* needed for sizeof(tcphdr) */
#include <linux/udp.h> /* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include "ll_temac.h"
#define TX_BD_NUM 64
#define RX_BD_NUM 128
/* ---------------------------------------------------------------------
* Low level register access functions
*/
u32 temac_ior(struct temac_local *lp, int offset)
{
return in_be32((u32 *)(lp->regs + offset));
}
void temac_iow(struct temac_local *lp, int offset, u32 value)
{
out_be32((u32 *) (lp->regs + offset), value);
}
int temac_indirect_busywait(struct temac_local *lp)
{
long end = jiffies + 2;
while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
if (end - jiffies <= 0) {
WARN_ON(1);
return -ETIMEDOUT;
}
msleep(1);
}
return 0;
}
/**
* temac_indirect_in32
*
* lp->indirect_mutex must be held when calling this function
*/
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
u32 val;
if (temac_indirect_busywait(lp))
return -ETIMEDOUT;
temac_iow(lp, XTE_CTL0_OFFSET, reg);
if (temac_indirect_busywait(lp))
return -ETIMEDOUT;
val = temac_ior(lp, XTE_LSW0_OFFSET);
return val;
}
/**
* temac_indirect_out32
*
* lp->indirect_mutex must be held when calling this function
*/
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
if (temac_indirect_busywait(lp))
return;
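/* Indirect writes are a two-step sequence: latch the data word in
* LSW0 first, then writing CTL0 with the write-enable bit set
* triggers the actual register update.
*/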
temac_iow(lp, XTE_LSW0_OFFSET, value);
temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
}
/**
* temac_dma_in32 - Memory mapped DMA read, this function expects a
* register input that is based on DCR word addresses which
* are then converted to memory mapped byte addresses
*/
static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
}
/**
* temac_dma_out32 - Memory mapped DMA write, this function expects a
* register input that is based on DCR word addresses which
* are then converted to memory mapped byte addresses
*/
static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
}
/* DMA register access functions can be DCR based or memory mapped.
* The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
* memory mapped.
*/
#ifdef CONFIG_PPC_DCR
/**
* temac_dma_dcr_in - DCR based DMA read
*/
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
return dcr_read(lp->sdma_dcrs, reg);
}
/**
* temac_dma_dcr_out - DCR based DMA write
*/
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
dcr_write(lp->sdma_dcrs, reg, value);
}
/**
* temac_dcr_setup - If the DMA is DCR based, then setup the address and
* I/O functions
*/
static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
struct device_node *np)
{
unsigned int dcrs;
/* setup the dcr address mapping if it's in the device tree */
dcrs = dcr_resource_start(np, 0);
if (dcrs != 0) {
lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
lp->dma_in = temac_dma_dcr_in;
lp->dma_out = temac_dma_dcr_out;
dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
return 0;
}
/* no DCR in the device tree, indicate a failure */
return -1;
}
#else
/*
* temac_dcr_setup - This is a stub for when DCR is not supported,
* such as with MicroBlaze
*/
static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
struct device_node *np)
{
return -1;
}
#endif
/**
* temac_dma_bd_init - Setup buffer descriptor rings
*/
static int temac_dma_bd_init(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct sk_buff *skb;
int i;
lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p, GFP_KERNEL);
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM,
&lp->rx_bd_p, GFP_KERNEL);
memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
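/* Link the TX descriptors into a ring: each descriptor's 'next' holds
* the DMA address of the following descriptor, wrapping back to the
* first one at TX_BD_NUM.
*/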
for (i = 0; i < TX_BD_NUM; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
}
memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
for (i = 0; i < RX_BD_NUM; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
if (!skb) {
dev_err(&ndev->dev, "alloc_skb error %d\n", i);
return -1;
}
lp->rx_skb[i] = skb;
/* returns physical address of skb->data */
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
skb->data,
XTE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
}
lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
CHNL_CTRL_IRQ_EN |
CHNL_CTRL_IRQ_DLY_EN |
CHNL_CTRL_IRQ_COAL_EN);
/* 0x10220483 */
/* 0x00100483 */
lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
CHNL_CTRL_IRQ_EN |
CHNL_CTRL_IRQ_DLY_EN |
CHNL_CTRL_IRQ_COAL_EN |
CHNL_CTRL_IRQ_IOE);
/* 0xff010283 */
lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
lp->dma_out(lp, RX_TAILDESC_PTR,
lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
return 0;
}
/* ---------------------------------------------------------------------
* net_device_ops
*/
static int temac_set_mac_address(struct net_device *ndev, void *address)
{
struct temac_local *lp = netdev_priv(ndev);
if (address)
memcpy(ndev->dev_addr, address, ETH_ALEN);
if (!is_valid_ether_addr(ndev->dev_addr))
random_ether_addr(ndev->dev_addr);
/* set up the unicast MAC address filter and set its MAC address */
mutex_lock(&lp->indirect_mutex);
temac_indirect_out32(lp, XTE_UAW0_OFFSET,
(ndev->dev_addr[0]) |
(ndev->dev_addr[1] << 8) |
(ndev->dev_addr[2] << 16) |
(ndev->dev_addr[3] << 24));
/* There are reserved bits in EUAW1,
* so don't disturb them; set MAC bits [47:32] in EUAW1 */
temac_indirect_out32(lp, XTE_UAW1_OFFSET,
(ndev->dev_addr[4] & 0x000000ff) |
(ndev->dev_addr[5] << 8));
mutex_unlock(&lp->indirect_mutex);
return 0;
}
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
struct sockaddr *addr = p;
return temac_set_mac_address(ndev, addr->sa_data);
}
static void temac_set_multicast_list(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
u32 multi_addr_msw, multi_addr_lsw, val;
int i;
mutex_lock(&lp->indirect_mutex);
if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
/*
* We must make the kernel realise we had to move
* into promisc mode or we start an all-out war on
* the cable. If it was a promisc request the
* flag is already set. If not we assert it.
*/
ndev->flags |= IFF_PROMISC;
temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
} else if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
i = 0;
netdev_for_each_mc_addr(ha, ndev) {
if (i >= MULTICAST_CAM_TABLE_NUM)
break;
multi_addr_msw = ((ha->addr[3] << 24) |
(ha->addr[2] << 16) |
(ha->addr[1] << 8) |
(ha->addr[0]));
temac_indirect_out32(lp, XTE_MAW0_OFFSET,
multi_addr_msw);
multi_addr_lsw = ((ha->addr[5] << 8) |
(ha->addr[4]) | (i << 16));
temac_indirect_out32(lp, XTE_MAW1_OFFSET,
multi_addr_lsw);
i++;
}
} else {
val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
temac_indirect_out32(lp, XTE_AFM_OFFSET,
val & ~XTE_AFM_EPPRM_MASK);
temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
mutex_unlock(&lp->indirect_mutex);
}
struct temac_option {
int flg;
u32 opt;
u32 reg;
u32 m_or;
u32 m_and;
} temac_options[] = {
/* Turn on jumbo packet support for both Rx and Tx */
{
.opt = XTE_OPTION_JUMBO,
.reg = XTE_TXC_OFFSET,
.m_or = XTE_TXC_TXJMBO_MASK,
},
{
.opt = XTE_OPTION_JUMBO,
.reg = XTE_RXC1_OFFSET,
.m_or = XTE_RXC1_RXJMBO_MASK,
},
/* Turn on VLAN packet support for both Rx and Tx */
{
.opt = XTE_OPTION_VLAN,
.reg = XTE_TXC_OFFSET,
.m_or = XTE_TXC_TXVLAN_MASK,
},
{
.opt = XTE_OPTION_VLAN,
.reg = XTE_RXC1_OFFSET,
.m_or = XTE_RXC1_RXVLAN_MASK,
},
/* Turn on FCS stripping on receive packets */
{
.opt = XTE_OPTION_FCS_STRIP,
.reg = XTE_RXC1_OFFSET,
.m_or = XTE_RXC1_RXFCS_MASK,
},
/* Turn on FCS insertion on transmit packets */
{
.opt = XTE_OPTION_FCS_INSERT,
.reg = XTE_TXC_OFFSET,
.m_or = XTE_TXC_TXFCS_MASK,
},
/* Turn on length/type field checking on receive packets */
{
.opt = XTE_OPTION_LENTYPE_ERR,
.reg = XTE_RXC1_OFFSET,
.m_or = XTE_RXC1_RXLT_MASK,
},
/* Turn on flow control */
{
.opt = XTE_OPTION_FLOW_CONTROL,
.reg = XTE_FCC_OFFSET,
.m_or = XTE_FCC_RXFLO_MASK,
},
/* Turn on flow control */
{
.opt = XTE_OPTION_FLOW_CONTROL,
.reg = XTE_FCC_OFFSET,
.m_or = XTE_FCC_TXFLO_MASK,
},
/* Turn on promiscuous frame filtering (all frames are received ) */
{
.opt = XTE_OPTION_PROMISC,
.reg = XTE_AFM_OFFSET,
.m_or = XTE_AFM_EPPRM_MASK,
},
/* Enable transmitter if not already enabled */
{
.opt = XTE_OPTION_TXEN,
.reg = XTE_TXC_OFFSET,
.m_or = XTE_TXC_TXEN_MASK,
},
/* Enable receiver? */
{
.opt = XTE_OPTION_RXEN,
.reg = XTE_RXC1_OFFSET,
.m_or = XTE_RXC1_RXEN_MASK,
},
{}
};
/**
* temac_setoptions
*/
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
struct temac_local *lp = netdev_priv(ndev);
struct temac_option *tp = &temac_options[0];
int reg;
mutex_lock(&lp->indirect_mutex);
while (tp->opt) {
reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
if (options & tp->opt)
reg |= tp->m_or;
temac_indirect_out32(lp, tp->reg, reg);
tp++;
}
lp->options |= options;
mutex_unlock(&lp->indirect_mutex);
return 0;
}
/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
u32 timeout;
u32 val;
/* Perform a software reset */
/* 0x300 host enable bit ? */
/* reset PHY through control register ?:1 */
dev_dbg(&ndev->dev, "%s()\n", __func__);
mutex_lock(&lp->indirect_mutex);
/* Reset the receiver and wait for it to finish reset */
temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
timeout = 1000;
while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
"temac_device_reset RX reset timeout!!\n");
break;
}
}
/* Reset the transmitter and wait for it to finish reset */
temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
timeout = 1000;
while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
"temac_device_reset TX reset timeout!!\n");
break;
}
}
/* Disable the receiver */
val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
/* Reset Local Link (DMA) */
lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
timeout = 1000;
while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
"temac_device_reset DMA reset timeout!!\n");
break;
}
}
lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
temac_dma_bd_init(ndev);
temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
mutex_unlock(&lp->indirect_mutex);
/* Sync default options with HW
* but leave receiver and transmitter disabled. */
temac_setoptions(ndev,
lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
temac_set_mac_address(ndev, NULL);
/* Set address filter table */
temac_set_multicast_list(ndev);
if (temac_setoptions(ndev, lp->options))
dev_err(&ndev->dev, "Error setting TEMAC options\n");
/* Init Driver variable */
ndev->trans_start = jiffies; /* prevent tx timeout */
}
void temac_adjust_link(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct phy_device *phy = lp->phy_dev;
u32 mii_speed;
int link_state;
/* hash together the state values to decide if something has changed */
link_state = phy->speed | (phy->duplex << 1) | phy->link;
mutex_lock(&lp->indirect_mutex);
if (lp->last_link != link_state) {
mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
switch (phy->speed) {
case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
}
/* Write new speed setting out to TEMAC */
temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
lp->last_link = link_state;
phy_print_status(phy);
}
mutex_unlock(&lp->indirect_mutex);
}
static void temac_start_xmit_done(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct cdmac_bd *cur_p;
unsigned int stat = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
stat = cur_p->app0;
while (stat & STS_CTRL_APP0_CMPLT) {
dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
DMA_TO_DEVICE);
if (cur_p->app4)
dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app3 = 0;
cur_p->app4 = 0;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += cur_p->len;
lp->tx_bd_ci++;
if (lp->tx_bd_ci >= TX_BD_NUM)
lp->tx_bd_ci = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
stat = cur_p->app0;
}
netif_wake_queue(ndev);
}
static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
struct cdmac_bd *cur_p;
int tail;
tail = lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[tail];
do {
if (cur_p->app0)
return NETDEV_TX_BUSY;
tail++;
if (tail >= TX_BD_NUM)
tail = 0;
cur_p = &lp->tx_bd_v[tail];
num_frag--;
} while (num_frag >= 0);
return 0;
}
static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct cdmac_bd *cur_p;
dma_addr_t start_p, tail_p;
int ii;
unsigned long num_frag;
skb_frag_t *frag;
num_frag = skb_shinfo(skb)->nr_frags;
frag = &skb_shinfo(skb)->frags[0];
start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
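/* Make sure there are enough free descriptors for the head plus all
* fragments; if not, stop the queue and let the stack requeue the skb.
*/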
if (temac_check_tx_bd_space(lp, num_frag)) {
if (!netif_queue_stopped(ndev)) {
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
}
return NETDEV_TX_BUSY;
}
cur_p->app0 = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
unsigned int csum_start_off = skb_transport_offset(skb);
unsigned int csum_index_off = csum_start_off + skb->csum_offset;
cur_p->app0 |= 1; /* TX Checksum Enabled */
cur_p->app1 = (csum_start_off << 16) | csum_index_off;
cur_p->app2 = 0; /* initial checksum seed */
}
cur_p->app0 |= STS_CTRL_APP0_SOP;
cur_p->len = skb_headlen(skb);
cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
DMA_TO_DEVICE);
cur_p->app4 = (unsigned long)skb;
for (ii = 0; ii < num_frag; ii++) {
lp->tx_bd_tail++;
if (lp->tx_bd_tail >= TX_BD_NUM)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
cur_p->phys = dma_map_single(ndev->dev.parent,
(void *)page_address(frag->page) +
frag->page_offset,
frag->size, DMA_TO_DEVICE);
cur_p->len = frag->size;
cur_p->app0 = 0;
frag++;
}
cur_p->app0 |= STS_CTRL_APP0_EOP;
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
lp->tx_bd_tail++;
if (lp->tx_bd_tail >= TX_BD_NUM)
lp->tx_bd_tail = 0;
/* Kick off the transfer */
lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
return NETDEV_TX_OK;
}
static void ll_temac_recv(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct sk_buff *skb, *new_skb;
unsigned int bdstat;
struct cdmac_bd *cur_p;
dma_addr_t tail_p;
int length;
unsigned long flags;
spin_lock_irqsave(&lp->rx_lock, flags);
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
bdstat = cur_p->app0;
while ((bdstat & STS_CTRL_APP0_CMPLT)) {
skb = lp->rx_skb[lp->rx_bd_ci];
length = cur_p->app4 & 0x3FFF;
dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
DMA_FROM_DEVICE);
skb_put(skb, length);
skb->dev = ndev;
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
/* if we're doing rx csum offload, set it up */
if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
(skb->protocol == __constant_htons(ETH_P_IP)) &&
(skb->len > 64)) {
skb->csum = cur_p->app3 & 0xFFFF;
skb->ip_summed = CHECKSUM_COMPLETE;
}
netif_rx(skb);
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += length;
new_skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
if (!new_skb) {
dev_err(&ndev->dev, "no memory for new sk_buff\n");
spin_unlock_irqrestore(&lp->rx_lock, flags);
return;
}
cur_p->app0 = STS_CTRL_APP0_IRQONEND;
cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
XTE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
lp->rx_skb[lp->rx_bd_ci] = new_skb;
lp->rx_bd_ci++;
if (lp->rx_bd_ci >= RX_BD_NUM)
lp->rx_bd_ci = 0;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
bdstat = cur_p->app0;
}
lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
spin_unlock_irqrestore(&lp->rx_lock, flags);
}
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
struct net_device *ndev = _ndev;
struct temac_local *lp = netdev_priv(ndev);
unsigned int status;
status = lp->dma_in(lp, TX_IRQ_REG);
lp->dma_out(lp, TX_IRQ_REG, status);
if (status & (IRQ_COAL | IRQ_DLY))
temac_start_xmit_done(lp->ndev);
if (status & 0x080)
dev_err(&ndev->dev, "DMA error 0x%x\n", status);
return IRQ_HANDLED;
}
static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
struct net_device *ndev = _ndev;
struct temac_local *lp = netdev_priv(ndev);
unsigned int status;
/* Read and clear the status registers */
status = lp->dma_in(lp, RX_IRQ_REG);
lp->dma_out(lp, RX_IRQ_REG, status);
if (status & (IRQ_COAL | IRQ_DLY))
ll_temac_recv(lp->ndev);
return IRQ_HANDLED;
}
static int temac_open(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
int rc;
dev_dbg(&ndev->dev, "temac_open()\n");
if (lp->phy_node) {
lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
temac_adjust_link, 0, 0);
if (!lp->phy_dev) {
dev_err(lp->dev, "of_phy_connect() failed\n");
return -ENODEV;
}
phy_start(lp->phy_dev);
}
rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
if (rc)
goto err_tx_irq;
rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
if (rc)
goto err_rx_irq;
temac_device_reset(ndev);
return 0;
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
if (lp->phy_dev)
phy_disconnect(lp->phy_dev);
lp->phy_dev = NULL;
dev_err(lp->dev, "request_irq() failed\n");
return rc;
}
static int temac_stop(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
dev_dbg(&ndev->dev, "temac_close()\n");
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
if (lp->phy_dev)
phy_disconnect(lp->phy_dev);
lp->phy_dev = NULL;
return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
disable_irq(lp->tx_irq);
disable_irq(lp->rx_irq);
ll_temac_rx_irq(lp->rx_irq, ndev);
ll_temac_tx_irq(lp->tx_irq, ndev);
enable_irq(lp->tx_irq);
enable_irq(lp->rx_irq);
}
#endif
static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
.ndo_start_xmit = temac_start_xmit,
.ndo_set_mac_address = netdev_set_mac_address,
//.ndo_set_multicast_list = temac_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = temac_poll_controller,
#endif
};
/* ---------------------------------------------------------------------
* SYSFS device attributes
*/
static ssize_t temac_show_llink_regs(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct temac_local *lp = netdev_priv(ndev);
int i, len = 0;
for (i = 0; i < 0x11; i++)
len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
(i % 8) == 7 ? "\n" : " ");
len += sprintf(buf + len, "\n");
return len;
}
static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);
static struct attribute *temac_device_attrs[] = {
&dev_attr_llink_regs.attr,
NULL,
};
static const struct attribute_group temac_attr_group = {
.attrs = temac_device_attrs,
};
static int __init
temac_of_probe(struct of_device *op, const struct of_device_id *match)
{
struct device_node *np;
struct temac_local *lp;
struct net_device *ndev;
const void *addr;
__be32 *p;
int size, rc = 0;
/* Init network device structure */
ndev = alloc_etherdev(sizeof(*lp));
if (!ndev) {
dev_err(&op->dev, "could not allocate device.\n");
return -ENOMEM;
}
ether_setup(ndev);
dev_set_drvdata(&op->dev, ndev);
SET_NETDEV_DEV(ndev, &op->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
ndev->netdev_ops = &temac_netdev_ops;
#if 0
ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */
ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */
ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */
ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif
/* setup temac private info structure */
lp = netdev_priv(ndev);
lp->ndev = ndev;
lp->dev = &op->dev;
lp->options = XTE_OPTION_DEFAULTS;
spin_lock_init(&lp->rx_lock);
mutex_init(&lp->indirect_mutex);
/* map device registers */
lp->regs = of_iomap(op->dev.of_node, 0);
if (!lp->regs) {
dev_err(&op->dev, "could not map temac regs.\n");
goto nodev;
}
/* Setup checksum offload, but default to off if not specified */
lp->temac_features = 0;
p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
if (p && be32_to_cpu(*p)) {
lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
}
p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
if (p && be32_to_cpu(*p))
lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
if (!np) {
dev_err(&op->dev, "could not find DMA node\n");
goto err_iounmap;
}
/* Setup the DMA register accesses, could be DCR or memory mapped */
if (temac_dcr_setup(lp, op, np)) {
/* no DCR in the device tree, try non-DCR */
lp->sdma_regs = of_iomap(np, 0);
if (lp->sdma_regs) {
lp->dma_in = temac_dma_in32;
lp->dma_out = temac_dma_out32;
dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
} else {
dev_err(&op->dev, "unable to map DMA registers\n");
goto err_iounmap;
}
}
lp->rx_irq = irq_of_parse_and_map(np, 0);
lp->tx_irq = irq_of_parse_and_map(np, 1);
if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
dev_err(&op->dev, "could not determine irqs\n");
rc = -ENOMEM;
goto err_iounmap_2;
}
of_node_put(np); /* Finished with the DMA node; drop the reference */
/* Retrieve the MAC address */
addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
if ((!addr) || (size != 6)) {
dev_err(&op->dev, "could not find MAC address\n");
rc = -ENODEV;
goto err_iounmap_2;
}
temac_set_mac_address(ndev, (void *)addr);
rc = temac_mdio_setup(lp, op->dev.of_node);
if (rc)
dev_warn(&op->dev, "error registering MDIO bus\n");
lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
if (lp->phy_node)
dev_dbg(lp->dev, "using PHY node %s (%p)\n", lp->phy_node->full_name, lp->phy_node);
/* Add the device attributes */
rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
if (rc) {
dev_err(lp->dev, "Error creating sysfs files\n");
goto err_iounmap_2;
}
rc = register_netdev(lp->ndev);
if (rc) {
dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
goto err_register_ndev;
}
return 0;
err_register_ndev:
sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
err_iounmap_2:
if (lp->sdma_regs)
iounmap(lp->sdma_regs);
err_iounmap:
iounmap(lp->regs);
nodev:
free_netdev(ndev);
ndev = NULL;
return rc;
}
static int __devexit temac_of_remove(struct of_device *op)
{
struct net_device *ndev = dev_get_drvdata(&op->dev);
struct temac_local *lp = netdev_priv(ndev);
temac_mdio_teardown(lp);
unregister_netdev(ndev);
sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
if (lp->phy_node)
of_node_put(lp->phy_node);
lp->phy_node = NULL;
dev_set_drvdata(&op->dev, NULL);
iounmap(lp->regs);
if (lp->sdma_regs)
iounmap(lp->sdma_regs);
free_netdev(ndev);
return 0;
}
static struct of_device_id temac_of_match[] __devinitdata = {
{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
{},
};
MODULE_DEVICE_TABLE(of, temac_of_match);
static struct of_platform_driver temac_of_driver = {
.probe = temac_of_probe,
.remove = __devexit_p(temac_of_remove),
.driver = {
.owner = THIS_MODULE,
.name = "xilinx_temac",
.of_match_table = temac_of_match,
},
};
static int __init temac_init(void)
{
return of_register_platform_driver(&temac_of_driver);
}
module_init(temac_init);
static void __exit temac_exit(void)
{
of_unregister_platform_driver(&temac_of_driver);
}
module_exit(temac_exit);
MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mericon/kernel-msm8660-gb | sound/soc/sh/migor.c | 756 | 5204 | /*
* ALSA SoC driver for Migo-R
*
* Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <asm/clock.h>
#include <cpu/sh7722.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include "../codecs/wm8978.h"
#include "siu.h"
/* Default 8000Hz sampling frequency */
static unsigned long codec_freq = 8000 * 512;
static unsigned int use_count;
/* External clock, sourced from the codec at the SIUMCKB pin */
static unsigned long siumckb_recalc(struct clk *clk)
{
return codec_freq;
}
static struct clk_ops siumckb_clk_ops = {
.recalc = siumckb_recalc,
};
static struct clk siumckb_clk = {
.name = "siumckb_clk",
.id = -1,
.ops = &siumckb_clk_ops,
.rate = 0, /* initialised at run-time */
};
static int migor_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
int ret;
unsigned int rate = params_rate(params);
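/* Clocking: the WM8978 PLL runs from a 13 MHz reference and is asked
* to output 512*fs on OPCLK, which the board routes to the SIUMCKB pin.
* The local siumckb clock object is updated below so clk users see the
* new rate, and the SIU DAI is given half of it as its external clock.
*/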
ret = snd_soc_dai_set_sysclk(codec_dai, WM8978_PLL, 13000000,
SND_SOC_CLOCK_IN);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_clkdiv(codec_dai, WM8978_OPCLKRATE, rate * 512);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_NB_IF |
SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_fmt(rtd->dai->cpu_dai, SND_SOC_DAIFMT_NB_IF |
SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
codec_freq = rate * 512;
/*
* This propagates the parent frequency change to children and
* recalculates the frequency table
*/
clk_set_rate(&siumckb_clk, codec_freq);
dev_dbg(codec_dai->dev, "%s: configure %luHz\n", __func__, codec_freq);
ret = snd_soc_dai_set_sysclk(rtd->dai->cpu_dai, SIU_CLKB_EXT,
codec_freq / 2, SND_SOC_CLOCK_IN);
if (!ret)
use_count++;
return ret;
}
static int migor_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
if (use_count) {
use_count--;
if (!use_count)
snd_soc_dai_set_sysclk(codec_dai, WM8978_PLL, 0,
SND_SOC_CLOCK_IN);
} else {
dev_dbg(codec_dai->dev, "Unbalanced hw_free!\n");
}
return 0;
}
static struct snd_soc_ops migor_dai_ops = {
.hw_params = migor_hw_params,
.hw_free = migor_hw_free,
};
static const struct snd_soc_dapm_widget migor_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
SND_SOC_DAPM_MIC("Onboard Microphone", NULL),
SND_SOC_DAPM_MIC("External Microphone", NULL),
};
static const struct snd_soc_dapm_route audio_map[] = {
/* Headphone output connected to LHP/RHP, enable OUT4 for VMID */
{ "Headphone", NULL, "OUT4 VMID" },
{ "OUT4 VMID", NULL, "LHP" },
{ "OUT4 VMID", NULL, "RHP" },
/* On-board microphone */
{ "RMICN", NULL, "Mic Bias" },
{ "RMICP", NULL, "Mic Bias" },
{ "Mic Bias", NULL, "Onboard Microphone" },
/* External microphone */
{ "LMICN", NULL, "Mic Bias" },
{ "LMICP", NULL, "Mic Bias" },
{ "Mic Bias", NULL, "External Microphone" },
};
static int migor_dai_init(struct snd_soc_codec *codec)
{
snd_soc_dapm_new_controls(codec, migor_dapm_widgets,
ARRAY_SIZE(migor_dapm_widgets));
snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
return 0;
}
/* migor digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link migor_dai = {
.name = "wm8978",
.stream_name = "WM8978",
.cpu_dai = &siu_i2s_dai,
.codec_dai = &wm8978_dai,
.ops = &migor_dai_ops,
.init = migor_dai_init,
};
/* migor audio machine driver */
static struct snd_soc_card snd_soc_migor = {
.name = "Migo-R",
.platform = &siu_platform,
.dai_link = &migor_dai,
.num_links = 1,
};
/* migor audio subsystem */
static struct snd_soc_device migor_snd_devdata = {
.card = &snd_soc_migor,
.codec_dev = &soc_codec_dev_wm8978,
};
static struct platform_device *migor_snd_device;
static int __init migor_init(void)
{
int ret;
ret = clk_register(&siumckb_clk);
if (ret < 0)
return ret;
/* Port number used on this machine: port B */
migor_snd_device = platform_device_alloc("soc-audio", 1);
if (!migor_snd_device) {
ret = -ENOMEM;
goto epdevalloc;
}
platform_set_drvdata(migor_snd_device, &migor_snd_devdata);
migor_snd_devdata.dev = &migor_snd_device->dev;
ret = platform_device_add(migor_snd_device);
if (ret)
goto epdevadd;
return 0;
epdevadd:
platform_device_put(migor_snd_device);
epdevalloc:
clk_unregister(&siumckb_clk);
return ret;
}
static void __exit migor_exit(void)
{
clk_unregister(&siumckb_clk);
platform_device_unregister(migor_snd_device);
}
module_init(migor_init);
module_exit(migor_exit);
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_DESCRIPTION("ALSA SoC Migor");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
jtoppins/net-next | net/bluetooth/bnep/sock.c | 1012 | 5872 | /*
BNEP implementation for Linux Bluetooth stack (BlueZ).
Copyright (C) 2001-2002 Inventel Systemes
Written 2001-2002 by
David Libault <david.libault@inventel.fr>
Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
#include <linux/export.h>
#include <linux/file.h>
#include "bnep.h"
static struct bt_sock_list bnep_sk_list = {
.lock = __RW_LOCK_UNLOCKED(bnep_sk_list.lock)
};
static int bnep_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
BT_DBG("sock %p sk %p", sock, sk);
if (!sk)
return 0;
bt_sock_unlink(&bnep_sk_list, sk);
sock_orphan(sk);
sock_put(sk);
return 0;
}
static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct bnep_connlist_req cl;
struct bnep_connadd_req ca;
struct bnep_conndel_req cd;
struct bnep_conninfo ci;
struct socket *nsock;
void __user *argp = (void __user *)arg;
__u32 supp_feat = BIT(BNEP_SETUP_RESPONSE);
int err;
BT_DBG("cmd %x arg %lx", cmd, arg);
switch (cmd) {
case BNEPCONNADD:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
nsock = sockfd_lookup(ca.sock, &err);
if (!nsock)
return err;
if (nsock->sk->sk_state != BT_CONNECTED) {
sockfd_put(nsock);
return -EBADFD;
}
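/* Ensure the interface name copied from userspace is NUL-terminated
* before it is used as a BNEP device name.
*/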
ca.device[sizeof(ca.device)-1] = 0;
err = bnep_add_connection(&ca, nsock);
if (!err) {
if (copy_to_user(argp, &ca, sizeof(ca)))
err = -EFAULT;
} else
sockfd_put(nsock);
return err;
case BNEPCONNDEL:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
return bnep_del_connection(&cd);
case BNEPGETCONNLIST:
if (copy_from_user(&cl, argp, sizeof(cl)))
return -EFAULT;
if (cl.cnum <= 0)
return -EINVAL;
err = bnep_get_connlist(&cl);
if (!err && copy_to_user(argp, &cl, sizeof(cl)))
return -EFAULT;
return err;
case BNEPGETCONNINFO:
if (copy_from_user(&ci, argp, sizeof(ci)))
return -EFAULT;
err = bnep_get_conninfo(&ci);
if (!err && copy_to_user(argp, &ci, sizeof(ci)))
return -EFAULT;
return err;
case BNEPGETSUPPFEAT:
if (copy_to_user(argp, &supp_feat, sizeof(supp_feat)))
return -EFAULT;
return 0;
default:
return -EINVAL;
}
return 0;
}
#ifdef CONFIG_COMPAT
static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
if (cmd == BNEPGETCONNLIST) {
struct bnep_connlist_req cl;
u32 uci;
int err;
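/* On 32-bit userland struct bnep_connlist_req packs a 32-bit pointer
* after cnum, so rebuild the request with compat_ptr() before calling
* the common helper.
*/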
if (get_user(cl.cnum, (u32 __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
cl.ci = compat_ptr(uci);
if (cl.cnum <= 0)
return -EINVAL;
err = bnep_get_connlist(&cl);
if (!err && put_user(cl.cnum, (u32 __user *) arg))
err = -EFAULT;
return err;
}
return bnep_sock_ioctl(sock, cmd, arg);
}
#endif
static const struct proto_ops bnep_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = bnep_sock_release,
.ioctl = bnep_sock_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = bnep_sock_compat_ioctl,
#endif
.bind = sock_no_bind,
.getname = sock_no_getname,
.sendmsg = sock_no_sendmsg,
.recvmsg = sock_no_recvmsg,
.poll = sock_no_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.mmap = sock_no_mmap
};
static struct proto bnep_proto = {
.name = "BNEP",
.owner = THIS_MODULE,
.obj_size = sizeof(struct bt_sock)
};
static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, kern);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sock->ops = &bnep_sock_ops;
sock->state = SS_UNCONNECTED;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = protocol;
sk->sk_state = BT_OPEN;
bt_sock_link(&bnep_sk_list, sk);
return 0;
}
static const struct net_proto_family bnep_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = bnep_sock_create
};
int __init bnep_sock_init(void)
{
int err;
err = proto_register(&bnep_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops);
if (err < 0) {
BT_ERR("Can't register BNEP socket");
goto error;
}
err = bt_procfs_init(&init_net, "bnep", &bnep_sk_list, NULL);
if (err < 0) {
BT_ERR("Failed to create BNEP proc file");
bt_sock_unregister(BTPROTO_BNEP);
goto error;
}
BT_INFO("BNEP socket layer initialized");
return 0;
error:
proto_unregister(&bnep_proto);
return err;
}
void __exit bnep_sock_cleanup(void)
{
bt_procfs_cleanup(&init_net, "bnep");
bt_sock_unregister(BTPROTO_BNEP);
proto_unregister(&bnep_proto);
}
| gpl-2.0 |
Twisted-Kernel/Sick-Twisted-Unified | net/netfilter/ipvs/ip_vs_ctl.c | 2036 | 97348 | /*
* IPVS An implementation of the IP virtual server support for the
* LINUX operating system. IPVS is now implemented as a module
* over the NetFilter framework. IPVS can be used to build a
* high-performance and highly available server based on a
* cluster of servers.
*
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
* Peter Kese <peter.kese@ijs.si>
* Julian Anastasov <ja@ssi.bg>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes:
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <linux/nsproxy.h>
#include <net/ip.h>
#ifdef CONFIG_IP_VS_IPV6
#include <net/ipv6.h>
#include <net/ip6_route.h>
#endif
#include <net/route.h>
#include <net/sock.h>
#include <net/genetlink.h>
#include <asm/uaccess.h>
#include <net/ip_vs.h>
/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
static DEFINE_MUTEX(__ip_vs_mutex);
/* sysctl variables */
#ifdef CONFIG_IP_VS_DEBUG
static int sysctl_ip_vs_debug_level = 0;
int ip_vs_get_debug_level(void)
{
return sysctl_ip_vs_debug_level;
}
#endif
/* Protos */
static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup);
#ifdef CONFIG_IP_VS_IPV6
/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
static bool __ip_vs_addr_is_local_v6(struct net *net,
const struct in6_addr *addr)
{
struct flowi6 fl6 = {
.daddr = *addr,
};
struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);
bool is_local;
is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK);
dst_release(dst);
return is_local;
}
#endif
#ifdef CONFIG_SYSCTL
/*
* update_defense_level is called from keventd and from sysctl,
* so it needs to protect itself from softirqs
*/
static void update_defense_level(struct netns_ipvs *ipvs)
{
struct sysinfo i;
static int old_secure_tcp = 0;
int availmem;
int nomem;
int to_change = -1;
/* we only count free and buffered memory (in pages) */
si_meminfo(&i);
availmem = i.freeram + i.bufferram;
/* however, in Linux 2.5 the i.bufferram is the total page cache size,
so we need to adjust it */
/* si_swapinfo(&i); */
/* availmem = availmem - (i.totalswap - i.freeswap); */
nomem = (availmem < ipvs->sysctl_amemthresh);
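/* Each defense strategy below follows the same pattern:
* 0 = never, 1 = enable when memory is low (auto-escalates to 2),
* 2 = stay enabled until memory recovers (falls back to 1),
* 3 = always enabled.
*/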
local_bh_disable();
/* drop_entry */
spin_lock(&ipvs->dropentry_lock);
switch (ipvs->sysctl_drop_entry) {
case 0:
atomic_set(&ipvs->dropentry, 0);
break;
case 1:
if (nomem) {
atomic_set(&ipvs->dropentry, 1);
ipvs->sysctl_drop_entry = 2;
} else {
atomic_set(&ipvs->dropentry, 0);
}
break;
case 2:
if (nomem) {
atomic_set(&ipvs->dropentry, 1);
} else {
atomic_set(&ipvs->dropentry, 0);
ipvs->sysctl_drop_entry = 1;
}
break;
case 3:
atomic_set(&ipvs->dropentry, 1);
break;
}
spin_unlock(&ipvs->dropentry_lock);
/* drop_packet */
spin_lock(&ipvs->droppacket_lock);
switch (ipvs->sysctl_drop_packet) {
case 0:
ipvs->drop_rate = 0;
break;
case 1:
if (nomem) {
ipvs->drop_rate = ipvs->drop_counter
= ipvs->sysctl_amemthresh /
(ipvs->sysctl_amemthresh-availmem);
ipvs->sysctl_drop_packet = 2;
} else {
ipvs->drop_rate = 0;
}
break;
case 2:
if (nomem) {
ipvs->drop_rate = ipvs->drop_counter
= ipvs->sysctl_amemthresh /
(ipvs->sysctl_amemthresh-availmem);
} else {
ipvs->drop_rate = 0;
ipvs->sysctl_drop_packet = 1;
}
break;
case 3:
ipvs->drop_rate = ipvs->sysctl_am_droprate;
break;
}
spin_unlock(&ipvs->droppacket_lock);
/* secure_tcp */
spin_lock(&ipvs->securetcp_lock);
switch (ipvs->sysctl_secure_tcp) {
case 0:
if (old_secure_tcp >= 2)
to_change = 0;
break;
case 1:
if (nomem) {
if (old_secure_tcp < 2)
to_change = 1;
ipvs->sysctl_secure_tcp = 2;
} else {
if (old_secure_tcp >= 2)
to_change = 0;
}
break;
case 2:
if (nomem) {
if (old_secure_tcp < 2)
to_change = 1;
} else {
if (old_secure_tcp >= 2)
to_change = 0;
ipvs->sysctl_secure_tcp = 1;
}
break;
case 3:
if (old_secure_tcp < 2)
to_change = 1;
break;
}
old_secure_tcp = ipvs->sysctl_secure_tcp;
if (to_change >= 0)
ip_vs_protocol_timeout_change(ipvs,
ipvs->sysctl_secure_tcp > 1);
spin_unlock(&ipvs->securetcp_lock);
local_bh_enable();
}
/*
* Timer for checking the defense
*/
#define DEFENSE_TIMER_PERIOD 1*HZ
static void defense_work_handler(struct work_struct *work)
{
struct netns_ipvs *ipvs =
container_of(work, struct netns_ipvs, defense_work.work);
update_defense_level(ipvs);
if (atomic_read(&ipvs->dropentry))
ip_vs_random_dropentry(ipvs->net);
schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
}
#endif
int
ip_vs_use_count_inc(void)
{
return try_module_get(THIS_MODULE);
}
void
ip_vs_use_count_dec(void)
{
module_put(THIS_MODULE);
}
/*
* Hash table: for virtual service lookups
*/
#define IP_VS_SVC_TAB_BITS 8
#define IP_VS_SVC_TAB_SIZE (1 << IP_VS_SVC_TAB_BITS)
#define IP_VS_SVC_TAB_MASK (IP_VS_SVC_TAB_SIZE - 1)
/* the service table hashed by <protocol, addr, port> */
static struct hlist_head ip_vs_svc_table[IP_VS_SVC_TAB_SIZE];
/* the service table hashed by fwmark */
static struct hlist_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
/*
* Returns hash value for virtual service
*/
static inline unsigned int
ip_vs_svc_hashkey(struct net *net, int af, unsigned int proto,
const union nf_inet_addr *addr, __be16 port)
{
register unsigned int porth = ntohs(port);
__be32 addr_fold = addr->ip;
__u32 ahash;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
ahash = ntohl(addr_fold);
ahash ^= ((size_t) net >> 8);
return (proto ^ ahash ^ (porth >> IP_VS_SVC_TAB_BITS) ^ porth) &
IP_VS_SVC_TAB_MASK;
}
/*
* Returns hash value of fwmark for virtual service lookup
*/
static inline unsigned int ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
{
return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
}
/*
* Hashes a service in the ip_vs_svc_table by <netns,proto,addr,port>
* or in the ip_vs_svc_fwm_table by fwmark.
* Should be called with locked tables.
*/
static int ip_vs_svc_hash(struct ip_vs_service *svc)
{
unsigned int hash;
if (svc->flags & IP_VS_SVC_F_HASHED) {
pr_err("%s(): request for already hashed, called from %pF\n",
__func__, __builtin_return_address(0));
return 0;
}
if (svc->fwmark == 0) {
/*
* Hash it by <netns,protocol,addr,port> in ip_vs_svc_table
*/
hash = ip_vs_svc_hashkey(svc->net, svc->af, svc->protocol,
&svc->addr, svc->port);
hlist_add_head_rcu(&svc->s_list, &ip_vs_svc_table[hash]);
} else {
/*
* Hash it by fwmark in svc_fwm_table
*/
hash = ip_vs_svc_fwm_hashkey(svc->net, svc->fwmark);
hlist_add_head_rcu(&svc->f_list, &ip_vs_svc_fwm_table[hash]);
}
svc->flags |= IP_VS_SVC_F_HASHED;
/* increase its refcnt because it is referenced by the svc table */
atomic_inc(&svc->refcnt);
return 1;
}
/*
* Unhashes a service from svc_table / svc_fwm_table.
* Should be called with locked tables.
*/
static int ip_vs_svc_unhash(struct ip_vs_service *svc)
{
if (!(svc->flags & IP_VS_SVC_F_HASHED)) {
pr_err("%s(): request for unhash flagged, called from %pF\n",
__func__, __builtin_return_address(0));
return 0;
}
if (svc->fwmark == 0) {
/* Remove it from the svc_table table */
hlist_del_rcu(&svc->s_list);
} else {
/* Remove it from the svc_fwm_table table */
hlist_del_rcu(&svc->f_list);
}
svc->flags &= ~IP_VS_SVC_F_HASHED;
atomic_dec(&svc->refcnt);
return 1;
}
/*
* Get service by {netns, proto,addr,port} in the service table.
*/
static inline struct ip_vs_service *
__ip_vs_service_find(struct net *net, int af, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport)
{
unsigned int hash;
struct ip_vs_service *svc;
/* Check for "full" addressed entries */
hash = ip_vs_svc_hashkey(net, af, protocol, vaddr, vport);
hlist_for_each_entry_rcu(svc, &ip_vs_svc_table[hash], s_list) {
if ((svc->af == af)
&& ip_vs_addr_equal(af, &svc->addr, vaddr)
&& (svc->port == vport)
&& (svc->protocol == protocol)
&& net_eq(svc->net, net)) {
/* HIT */
return svc;
}
}
return NULL;
}
/*
* Get service by {fwmark} in the service table.
*/
static inline struct ip_vs_service *
__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
{
unsigned int hash;
struct ip_vs_service *svc;
/* Check for fwmark addressed entries */
hash = ip_vs_svc_fwm_hashkey(net, fwmark);
hlist_for_each_entry_rcu(svc, &ip_vs_svc_fwm_table[hash], f_list) {
if (svc->fwmark == fwmark && svc->af == af
&& net_eq(svc->net, net)) {
/* HIT */
return svc;
}
}
return NULL;
}
/* Find service, called under RCU lock */
struct ip_vs_service *
ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport)
{
struct ip_vs_service *svc;
struct netns_ipvs *ipvs = net_ipvs(net);
/*
* Check the table hashed by fwmark first
*/
if (fwmark) {
svc = __ip_vs_svc_fwm_find(net, af, fwmark);
if (svc)
goto out;
}
/*
* Check the table hashed by <protocol,addr,port>
* for "full" addressed entries
*/
svc = __ip_vs_service_find(net, af, protocol, vaddr, vport);
if (svc == NULL
&& protocol == IPPROTO_TCP
&& atomic_read(&ipvs->ftpsvc_counter)
&& (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) {
/*
* Check if ftp service entry exists, the packet
* might belong to FTP data connections.
*/
svc = __ip_vs_service_find(net, af, protocol, vaddr, FTPPORT);
}
if (svc == NULL
&& atomic_read(&ipvs->nullsvc_counter)) {
/*
* Check if the catch-all port (port zero) exists
*/
svc = __ip_vs_service_find(net, af, protocol, vaddr, 0);
}
out:
IP_VS_DBG_BUF(9, "lookup service: fwm %u %s %s:%u %s\n",
fwmark, ip_vs_proto_name(protocol),
IP_VS_DBG_ADDR(af, vaddr), ntohs(vport),
svc ? "hit" : "not hit");
return svc;
}
static inline void
__ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
atomic_inc(&svc->refcnt);
dest->svc = svc;
}
static void ip_vs_service_free(struct ip_vs_service *svc)
{
if (svc->stats.cpustats)
free_percpu(svc->stats.cpustats);
kfree(svc);
}
static void
__ip_vs_unbind_svc(struct ip_vs_dest *dest)
{
struct ip_vs_service *svc = dest->svc;
dest->svc = NULL;
if (atomic_dec_and_test(&svc->refcnt)) {
IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port));
ip_vs_service_free(svc);
}
}
/*
* Returns hash value for real service
*/
static inline unsigned int ip_vs_rs_hashkey(int af,
const union nf_inet_addr *addr,
__be16 port)
{
register unsigned int porth = ntohs(port);
__be32 addr_fold = addr->ip;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
return (ntohl(addr_fold)^(porth>>IP_VS_RTAB_BITS)^porth)
& IP_VS_RTAB_MASK;
}
/* Hash ip_vs_dest in rs_table by <proto,addr,port>. */
static void ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
{
unsigned int hash;
if (dest->in_rs_table)
return;
/*
* Hash by proto,addr,port,
* which are the parameters of the real service.
*/
hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port);
hlist_add_head_rcu(&dest->d_list, &ipvs->rs_table[hash]);
dest->in_rs_table = 1;
}
/* Unhash ip_vs_dest from rs_table. */
static void ip_vs_rs_unhash(struct ip_vs_dest *dest)
{
/*
* Remove it from the rs_table table.
*/
if (dest->in_rs_table) {
hlist_del_rcu(&dest->d_list);
dest->in_rs_table = 0;
}
}
/* Check if real service by <proto,addr,port> is present */
bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
const union nf_inet_addr *daddr, __be16 dport)
{
struct netns_ipvs *ipvs = net_ipvs(net);
unsigned int hash;
struct ip_vs_dest *dest;
/* Check for "full" addressed entries */
hash = ip_vs_rs_hashkey(af, daddr, dport);
rcu_read_lock();
hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) {
if (dest->port == dport &&
dest->af == af &&
ip_vs_addr_equal(af, &dest->addr, daddr) &&
(dest->protocol == protocol || dest->vfwmark)) {
/* HIT */
rcu_read_unlock();
return true;
}
}
rcu_read_unlock();
return false;
}
/* Lookup destination by {addr,port} in the given service
* Called under RCU lock.
*/
static struct ip_vs_dest *
ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
__be16 dport)
{
struct ip_vs_dest *dest;
/*
* Find the destination for the given service
*/
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
if ((dest->af == svc->af)
&& ip_vs_addr_equal(svc->af, &dest->addr, daddr)
&& (dest->port == dport)) {
/* HIT */
return dest;
}
}
return NULL;
}
/*
* Find destination by {daddr,dport,vaddr,protocol}
* Created to be used in ip_vs_process_message() in
* the backup synchronization daemon. It finds the
* destination to be bound to the received connection
* on the backup.
* Called under RCU lock, no refcnt is returned.
*/
struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af,
const union nf_inet_addr *daddr,
__be16 dport,
const union nf_inet_addr *vaddr,
__be16 vport, __u16 protocol, __u32 fwmark,
__u32 flags)
{
struct ip_vs_dest *dest;
struct ip_vs_service *svc;
__be16 port = dport;
svc = ip_vs_service_find(net, af, fwmark, protocol, vaddr, vport);
if (!svc)
return NULL;
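/* For fwmark services that do not masquerade, look up with port 0 first;
 * otherwise look up with the real dport first. If the first lookup
 * misses, port ^ dport toggles to the other value, so both 0 and dport
 * are always tried.
 */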
if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ)
port = 0;
dest = ip_vs_lookup_dest(svc, daddr, port);
if (!dest)
dest = ip_vs_lookup_dest(svc, daddr, port ^ dport);
return dest;
}
void ip_vs_dest_dst_rcu_free(struct rcu_head *head)
{
struct ip_vs_dest_dst *dest_dst = container_of(head,
struct ip_vs_dest_dst,
rcu_head);
dst_release(dest_dst->dst_cache);
kfree(dest_dst);
}
/* Release dest_dst and dst_cache for dest in user context */
static void __ip_vs_dst_cache_reset(struct ip_vs_dest *dest)
{
struct ip_vs_dest_dst *old;
old = rcu_dereference_protected(dest->dest_dst, 1);
if (old) {
RCU_INIT_POINTER(dest->dest_dst, NULL);
call_rcu(&old->rcu_head, ip_vs_dest_dst_rcu_free);
}
}
/*
* Lookup dest by {svc,addr,port} in the destination trash.
* The destination trash is used to hold the destinations that are removed
* from the service table but are still referenced by some conn entries.
* The reason for keeping the destination trash is that when a dest is
* temporarily down (taken down either by the administrator or by a
* monitor program), it can be picked back from the trash, the remaining
* connections to it can continue, and its counters remain useful for
* scheduling.
*/
static struct ip_vs_dest *
ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
__be16 dport)
{
struct ip_vs_dest *dest;
struct netns_ipvs *ipvs = net_ipvs(svc->net);
/*
* Find the destination in trash
*/
spin_lock_bh(&ipvs->dest_trash_lock);
list_for_each_entry(dest, &ipvs->dest_trash, t_list) {
IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
"dest->refcnt=%d\n",
dest->vfwmark,
IP_VS_DBG_ADDR(svc->af, &dest->addr),
ntohs(dest->port),
atomic_read(&dest->refcnt));
/* We cannot reuse a dest while it is in its grace period
* because conns can still use dest->svc
*/
if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
continue;
if (dest->af == svc->af &&
ip_vs_addr_equal(svc->af, &dest->addr, daddr) &&
dest->port == dport &&
dest->vfwmark == svc->fwmark &&
dest->protocol == svc->protocol &&
(svc->fwmark ||
(ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) &&
dest->vport == svc->port))) {
/* HIT */
list_del(&dest->t_list);
ip_vs_dest_hold(dest);
goto out;
}
}
dest = NULL;
out:
spin_unlock_bh(&ipvs->dest_trash_lock);
return dest;
}
static void ip_vs_dest_free(struct ip_vs_dest *dest)
{
__ip_vs_dst_cache_reset(dest);
__ip_vs_unbind_svc(dest);
free_percpu(dest->stats.cpustats);
kfree(dest);
}
/*
* Clean up all the destinations in the trash
* Called by the ip_vs_control_cleanup()
*
* When ip_vs_control_cleanup is activated by the ipvs module exit,
* the service tables must have been flushed and all the connections
* are expired, and the refcnt of each destination in the trash must
* be 0, so we simply release them here.
*/
static void ip_vs_trash_cleanup(struct net *net)
{
struct ip_vs_dest *dest, *nxt;
struct netns_ipvs *ipvs = net_ipvs(net);
del_timer_sync(&ipvs->dest_trash_timer);
/* No need to use dest_trash_lock */
list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, t_list) {
list_del(&dest->t_list);
ip_vs_dest_free(dest);
}
}
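/*
 * Copy counters to a user-visible structure. Values are reported
 * relative to the ustats0 snapshot taken at the last zeroing, see
 * ip_vs_zero_stats() below.
 */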
static void
ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
{
#define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->ustats.c - src->ustats0.c
spin_lock_bh(&src->lock);
IP_VS_SHOW_STATS_COUNTER(conns);
IP_VS_SHOW_STATS_COUNTER(inpkts);
IP_VS_SHOW_STATS_COUNTER(outpkts);
IP_VS_SHOW_STATS_COUNTER(inbytes);
IP_VS_SHOW_STATS_COUNTER(outbytes);
ip_vs_read_estimator(dst, src);
spin_unlock_bh(&src->lock);
}
static void
ip_vs_zero_stats(struct ip_vs_stats *stats)
{
spin_lock_bh(&stats->lock);
/* get current counters as zero point, rates are zeroed */
#define IP_VS_ZERO_STATS_COUNTER(c) stats->ustats0.c = stats->ustats.c
IP_VS_ZERO_STATS_COUNTER(conns);
IP_VS_ZERO_STATS_COUNTER(inpkts);
IP_VS_ZERO_STATS_COUNTER(outpkts);
IP_VS_ZERO_STATS_COUNTER(inbytes);
IP_VS_ZERO_STATS_COUNTER(outbytes);
ip_vs_zero_estimator(stats);
spin_unlock_bh(&stats->lock);
}
/*
* Update a destination in the given service
*/
static void
__ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
struct ip_vs_dest_user_kern *udest, int add)
{
struct netns_ipvs *ipvs = net_ipvs(svc->net);
struct ip_vs_scheduler *sched;
int conn_flags;
/* set the weight and the flags */
atomic_set(&dest->weight, udest->weight);
conn_flags = udest->conn_flags & IP_VS_CONN_F_DEST_MASK;
conn_flags |= IP_VS_CONN_F_INACTIVE;
/* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */
if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) {
conn_flags |= IP_VS_CONN_F_NOOUTPUT;
} else {
/*
* Put the real service in rs_table if not present.
* For now only for NAT!
*/
ip_vs_rs_hash(ipvs, dest);
}
atomic_set(&dest->conn_flags, conn_flags);
/* bind the service */
if (!dest->svc) {
__ip_vs_bind_svc(dest, svc);
} else {
if (dest->svc != svc) {
__ip_vs_unbind_svc(dest);
ip_vs_zero_stats(&dest->stats);
__ip_vs_bind_svc(dest, svc);
}
}
/* set the dest status flags */
dest->flags |= IP_VS_DEST_F_AVAILABLE;
if (udest->u_threshold == 0 || udest->u_threshold > dest->u_threshold)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
dest->u_threshold = udest->u_threshold;
dest->l_threshold = udest->l_threshold;
spin_lock_bh(&dest->dst_lock);
__ip_vs_dst_cache_reset(dest);
spin_unlock_bh(&dest->dst_lock);
sched = rcu_dereference_protected(svc->scheduler, 1);
if (add) {
ip_vs_start_estimator(svc->net, &dest->stats);
list_add_rcu(&dest->n_list, &svc->destinations);
svc->num_dests++;
if (sched->add_dest)
sched->add_dest(svc, dest);
} else {
if (sched->upd_dest)
sched->upd_dest(svc, dest);
}
}
/*
* Create a destination for the given service
*/
static int
ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
struct ip_vs_dest **dest_p)
{
struct ip_vs_dest *dest;
unsigned int atype;
EnterFunction(2);
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6) {
atype = ipv6_addr_type(&udest->addr.in6);
if ((!(atype & IPV6_ADDR_UNICAST) ||
atype & IPV6_ADDR_LINKLOCAL) &&
!__ip_vs_addr_is_local_v6(svc->net, &udest->addr.in6))
return -EINVAL;
} else
#endif
{
atype = inet_addr_type(svc->net, udest->addr.ip);
if (atype != RTN_LOCAL && atype != RTN_UNICAST)
return -EINVAL;
}
dest = kzalloc(sizeof(struct ip_vs_dest), GFP_KERNEL);
if (dest == NULL)
return -ENOMEM;
dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
if (!dest->stats.cpustats)
goto err_alloc;
dest->af = svc->af;
dest->protocol = svc->protocol;
dest->vaddr = svc->addr;
dest->vport = svc->port;
dest->vfwmark = svc->fwmark;
ip_vs_addr_copy(svc->af, &dest->addr, &udest->addr);
dest->port = udest->port;
atomic_set(&dest->activeconns, 0);
atomic_set(&dest->inactconns, 0);
atomic_set(&dest->persistconns, 0);
atomic_set(&dest->refcnt, 1);
INIT_HLIST_NODE(&dest->d_list);
spin_lock_init(&dest->dst_lock);
spin_lock_init(&dest->stats.lock);
__ip_vs_update_dest(svc, dest, udest, 1);
*dest_p = dest;
LeaveFunction(2);
return 0;
err_alloc:
kfree(dest);
return -ENOMEM;
}
/*
* Add a destination into an existing service
*/
static int
ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
{
struct ip_vs_dest *dest;
union nf_inet_addr daddr;
__be16 dport = udest->port;
int ret;
EnterFunction(2);
if (udest->weight < 0) {
pr_err("%s(): server weight less than zero\n", __func__);
return -ERANGE;
}
if (udest->l_threshold > udest->u_threshold) {
pr_err("%s(): lower threshold is higher than upper threshold\n",
__func__);
return -ERANGE;
}
ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
/* We use a function that requires the RCU lock */
rcu_read_lock();
dest = ip_vs_lookup_dest(svc, &daddr, dport);
rcu_read_unlock();
if (dest != NULL) {
IP_VS_DBG(1, "%s(): dest already exists\n", __func__);
return -EEXIST;
}
/*
* Check if the dest already exists in the trash and
* is from the same service
*/
dest = ip_vs_trash_get_dest(svc, &daddr, dport);
if (dest != NULL) {
IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, "
"dest->refcnt=%d, service %u/%s:%u\n",
IP_VS_DBG_ADDR(svc->af, &daddr), ntohs(dport),
atomic_read(&dest->refcnt),
dest->vfwmark,
IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
ntohs(dest->vport));
__ip_vs_update_dest(svc, dest, udest, 1);
ret = 0;
} else {
/*
* Allocate and initialize the dest structure
*/
ret = ip_vs_new_dest(svc, udest, &dest);
}
LeaveFunction(2);
return ret;
}
/*
* Edit a destination in the given service
*/
static int
ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
{
struct ip_vs_dest *dest;
union nf_inet_addr daddr;
__be16 dport = udest->port;
EnterFunction(2);
if (udest->weight < 0) {
pr_err("%s(): server weight less than zero\n", __func__);
return -ERANGE;
}
if (udest->l_threshold > udest->u_threshold) {
pr_err("%s(): lower threshold is higher than upper threshold\n",
__func__);
return -ERANGE;
}
ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
/* We use a function that requires the RCU lock */
rcu_read_lock();
dest = ip_vs_lookup_dest(svc, &daddr, dport);
rcu_read_unlock();
if (dest == NULL) {
IP_VS_DBG(1, "%s(): dest doesn't exist\n", __func__);
return -ENOENT;
}
__ip_vs_update_dest(svc, dest, udest, 0);
LeaveFunction(2);
return 0;
}
static void ip_vs_dest_wait_readers(struct rcu_head *head)
{
struct ip_vs_dest *dest = container_of(head, struct ip_vs_dest,
rcu_head);
/* End of grace period after unlinking */
clear_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
}
/*
* Delete a destination (must be already unlinked from the service)
*/
static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest,
bool cleanup)
{
struct netns_ipvs *ipvs = net_ipvs(net);
ip_vs_stop_estimator(net, &dest->stats);
/*
* Remove it from the hash table of real services.
*/
ip_vs_rs_unhash(dest);
if (!cleanup) {
set_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
call_rcu(&dest->rcu_head, ip_vs_dest_wait_readers);
}
spin_lock_bh(&ipvs->dest_trash_lock);
IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n",
IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
atomic_read(&dest->refcnt));
if (list_empty(&ipvs->dest_trash) && !cleanup)
mod_timer(&ipvs->dest_trash_timer,
jiffies + IP_VS_DEST_TRASH_PERIOD);
/* dest lives in trash without reference */
list_add(&dest->t_list, &ipvs->dest_trash);
spin_unlock_bh(&ipvs->dest_trash_lock);
ip_vs_dest_put(dest);
}
/*
* Unlink a destination from the given service
*/
static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
struct ip_vs_dest *dest,
int svcupd)
{
dest->flags &= ~IP_VS_DEST_F_AVAILABLE;
/*
* Remove it from the doubly-linked destination list of the service.
*/
list_del_rcu(&dest->n_list);
svc->num_dests--;
if (svcupd) {
struct ip_vs_scheduler *sched;
sched = rcu_dereference_protected(svc->scheduler, 1);
if (sched->del_dest)
sched->del_dest(svc, dest);
}
}
/*
* Delete a destination server in the given service
*/
static int
ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
{
struct ip_vs_dest *dest;
__be16 dport = udest->port;
EnterFunction(2);
/* We use a function that requires the RCU lock */
rcu_read_lock();
dest = ip_vs_lookup_dest(svc, &udest->addr, dport);
rcu_read_unlock();
if (dest == NULL) {
IP_VS_DBG(1, "%s(): destination not found!\n", __func__);
return -ENOENT;
}
/*
* Unlink dest from the service
*/
__ip_vs_unlink_dest(svc, dest, 1);
/*
* Delete the destination
*/
__ip_vs_del_dest(svc->net, dest, false);
LeaveFunction(2);
return 0;
}
static void ip_vs_dest_trash_expire(unsigned long data)
{
struct net *net = (struct net *) data;
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_dest *dest, *next;
spin_lock(&ipvs->dest_trash_lock);
list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
/* Skip if dest is in grace period */
if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
continue;
if (atomic_read(&dest->refcnt) > 0)
continue;
IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n",
dest->vfwmark,
IP_VS_DBG_ADDR(dest->svc->af, &dest->addr),
ntohs(dest->port));
list_del(&dest->t_list);
ip_vs_dest_free(dest);
}
if (!list_empty(&ipvs->dest_trash))
mod_timer(&ipvs->dest_trash_timer,
jiffies + IP_VS_DEST_TRASH_PERIOD);
spin_unlock(&ipvs->dest_trash_lock);
}
/*
* Add a service into the service hash table
*/
static int
ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
struct ip_vs_service **svc_p)
{
int ret = 0;
struct ip_vs_scheduler *sched = NULL;
struct ip_vs_pe *pe = NULL;
struct ip_vs_service *svc = NULL;
struct netns_ipvs *ipvs = net_ipvs(net);
/* increase the module use count */
ip_vs_use_count_inc();
/* Lookup the scheduler by 'u->sched_name' */
sched = ip_vs_scheduler_get(u->sched_name);
if (sched == NULL) {
pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
ret = -ENOENT;
goto out_err;
}
if (u->pe_name && *u->pe_name) {
pe = ip_vs_pe_getbyname(u->pe_name);
if (pe == NULL) {
pr_info("persistence engine module ip_vs_pe_%s "
"not found\n", u->pe_name);
ret = -ENOENT;
goto out_err;
}
}
#ifdef CONFIG_IP_VS_IPV6
if (u->af == AF_INET6) {
__u32 plen = (__force __u32) u->netmask;
if (plen < 1 || plen > 128) {
ret = -EINVAL;
goto out_err;
}
}
#endif
svc = kzalloc(sizeof(struct ip_vs_service), GFP_KERNEL);
if (svc == NULL) {
IP_VS_DBG(1, "%s(): no memory\n", __func__);
ret = -ENOMEM;
goto out_err;
}
svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
if (!svc->stats.cpustats) {
ret = -ENOMEM;
goto out_err;
}
/* I'm the first user of the service */
atomic_set(&svc->refcnt, 0);
svc->af = u->af;
svc->protocol = u->protocol;
ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
svc->port = u->port;
svc->fwmark = u->fwmark;
svc->flags = u->flags;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
svc->net = net;
INIT_LIST_HEAD(&svc->destinations);
spin_lock_init(&svc->sched_lock);
spin_lock_init(&svc->stats.lock);
/* Bind the scheduler */
ret = ip_vs_bind_scheduler(svc, sched);
if (ret)
goto out_err;
sched = NULL;
/* Bind the ct retriever */
RCU_INIT_POINTER(svc->pe, pe);
pe = NULL;
/* Update the virtual service counters */
if (svc->port == FTPPORT)
atomic_inc(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
atomic_inc(&ipvs->nullsvc_counter);
ip_vs_start_estimator(net, &svc->stats);
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
ipvs->num_services++;
/* Hash the service into the service table */
ip_vs_svc_hash(svc);
*svc_p = svc;
/* Now there is a service - full throttle */
ipvs->enable = 1;
return 0;
out_err:
if (svc != NULL) {
ip_vs_unbind_scheduler(svc, sched);
ip_vs_service_free(svc);
}
ip_vs_scheduler_put(sched);
ip_vs_pe_put(pe);
/* decrease the module use count */
ip_vs_use_count_dec();
return ret;
}
/*
* Edit a service and bind it with a new scheduler
*/
static int
ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
{
struct ip_vs_scheduler *sched, *old_sched;
struct ip_vs_pe *pe = NULL, *old_pe = NULL;
int ret = 0;
/*
* Lookup the scheduler, by 'u->sched_name'
*/
sched = ip_vs_scheduler_get(u->sched_name);
if (sched == NULL) {
pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
return -ENOENT;
}
old_sched = sched;
if (u->pe_name && *u->pe_name) {
pe = ip_vs_pe_getbyname(u->pe_name);
if (pe == NULL) {
pr_info("persistence engine module ip_vs_pe_%s "
"not found\n", u->pe_name);
ret = -ENOENT;
goto out;
}
old_pe = pe;
}
#ifdef CONFIG_IP_VS_IPV6
if (u->af == AF_INET6) {
__u32 plen = (__force __u32) u->netmask;
if (plen < 1 || plen > 128) {
ret = -EINVAL;
goto out;
}
}
#endif
old_sched = rcu_dereference_protected(svc->scheduler, 1);
if (sched != old_sched) {
/* Bind the new scheduler */
ret = ip_vs_bind_scheduler(svc, sched);
if (ret) {
old_sched = sched;
goto out;
}
/* Unbind the old scheduler on success */
ip_vs_unbind_scheduler(svc, old_sched);
}
/*
* Set the flags and timeout value
*/
svc->flags = u->flags | IP_VS_SVC_F_HASHED;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
old_pe = rcu_dereference_protected(svc->pe, 1);
if (pe != old_pe)
rcu_assign_pointer(svc->pe, pe);
out:
ip_vs_scheduler_put(old_sched);
ip_vs_pe_put(old_pe);
return ret;
}
static void ip_vs_service_rcu_free(struct rcu_head *head)
{
struct ip_vs_service *svc;
svc = container_of(head, struct ip_vs_service, rcu_head);
ip_vs_service_free(svc);
}
/*
* Delete a service from the service list
* - The service must be unlinked, unlocked and not referenced!
* - We are called under _bh lock
*/
static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
{
struct ip_vs_dest *dest, *nxt;
struct ip_vs_scheduler *old_sched;
struct ip_vs_pe *old_pe;
struct netns_ipvs *ipvs = net_ipvs(svc->net);
pr_info("%s: enter\n", __func__);
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
ipvs->num_services--;
ip_vs_stop_estimator(svc->net, &svc->stats);
/* Unbind scheduler */
old_sched = rcu_dereference_protected(svc->scheduler, 1);
ip_vs_unbind_scheduler(svc, old_sched);
ip_vs_scheduler_put(old_sched);
/* Unbind persistence engine, keep svc->pe */
old_pe = rcu_dereference_protected(svc->pe, 1);
ip_vs_pe_put(old_pe);
/*
* Unlink the whole destination list
*/
list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
__ip_vs_unlink_dest(svc, dest, 0);
__ip_vs_del_dest(svc->net, dest, cleanup);
}
/*
* Update the virtual service counters
*/
if (svc->port == FTPPORT)
atomic_dec(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
atomic_dec(&ipvs->nullsvc_counter);
/*
* Free the service if nobody refers to it
*/
if (atomic_dec_and_test(&svc->refcnt)) {
IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port));
call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
}
/* decrease the module use count */
ip_vs_use_count_dec();
}
/*
* Unlink a service from list and try to delete it if its refcnt reached 0
*/
static void ip_vs_unlink_service(struct ip_vs_service *svc, bool cleanup)
{
/* Hold svc to avoid double release from dest_trash */
atomic_inc(&svc->refcnt);
/*
* Unhash it from the service table
*/
ip_vs_svc_unhash(svc);
__ip_vs_del_service(svc, cleanup);
}
/*
* Delete a service from the service list
*/
static int ip_vs_del_service(struct ip_vs_service *svc)
{
if (svc == NULL)
return -EEXIST;
ip_vs_unlink_service(svc, false);
return 0;
}
/*
* Flush all the virtual services
*/
static int ip_vs_flush(struct net *net, bool cleanup)
{
int idx;
struct ip_vs_service *svc;
struct hlist_node *n;
/*
* Flush the service table hashed by <netns,protocol,addr,port>
*/
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry_safe(svc, n, &ip_vs_svc_table[idx],
s_list) {
if (net_eq(svc->net, net))
ip_vs_unlink_service(svc, cleanup);
}
}
/*
* Flush the service table hashed by fwmark
*/
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry_safe(svc, n, &ip_vs_svc_fwm_table[idx],
f_list) {
if (net_eq(svc->net, net))
ip_vs_unlink_service(svc, cleanup);
}
}
return 0;
}
/*
* Delete service by {netns} in the service table.
* Called by __ip_vs_cleanup()
*/
void ip_vs_service_net_cleanup(struct net *net)
{
EnterFunction(2);
/* Check for "full" addressed entries */
mutex_lock(&__ip_vs_mutex);
ip_vs_flush(net, true);
mutex_unlock(&__ip_vs_mutex);
LeaveFunction(2);
}
/* Put all references for device (dst_cache) */
static inline void
ip_vs_forget_dev(struct ip_vs_dest *dest, struct net_device *dev)
{
struct ip_vs_dest_dst *dest_dst;
spin_lock_bh(&dest->dst_lock);
dest_dst = rcu_dereference_protected(dest->dest_dst, 1);
if (dest_dst && dest_dst->dst_cache->dev == dev) {
IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n",
dev->name,
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port),
atomic_read(&dest->refcnt));
__ip_vs_dst_cache_reset(dest);
}
spin_unlock_bh(&dest->dst_lock);
}
/* Netdev event receiver
* Currently only NETDEV_DOWN is handled to release refs to cached dsts
*/
static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = ptr;
struct net *net = dev_net(dev);
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_service *svc;
struct ip_vs_dest *dest;
unsigned int idx;
if (event != NETDEV_DOWN || !ipvs)
return NOTIFY_DONE;
IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
EnterFunction(2);
mutex_lock(&__ip_vs_mutex);
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
if (net_eq(svc->net, net)) {
list_for_each_entry(dest, &svc->destinations,
n_list) {
ip_vs_forget_dev(dest, dev);
}
}
}
hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
if (net_eq(svc->net, net)) {
list_for_each_entry(dest, &svc->destinations,
n_list) {
ip_vs_forget_dev(dest, dev);
}
}
}
}
spin_lock_bh(&ipvs->dest_trash_lock);
list_for_each_entry(dest, &ipvs->dest_trash, t_list) {
ip_vs_forget_dev(dest, dev);
}
spin_unlock_bh(&ipvs->dest_trash_lock);
mutex_unlock(&__ip_vs_mutex);
LeaveFunction(2);
return NOTIFY_DONE;
}
/*
* Zero counters in a service or all services
*/
static int ip_vs_zero_service(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest;
list_for_each_entry(dest, &svc->destinations, n_list) {
ip_vs_zero_stats(&dest->stats);
}
ip_vs_zero_stats(&svc->stats);
return 0;
}
static int ip_vs_zero_all(struct net *net)
{
int idx;
struct ip_vs_service *svc;
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
if (net_eq(svc->net, net))
ip_vs_zero_service(svc);
}
}
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
if (net_eq(svc->net, net))
ip_vs_zero_service(svc);
}
}
ip_vs_zero_stats(&net_ipvs(net)->tot_stats);
return 0;
}
#ifdef CONFIG_SYSCTL
static int zero;
static int three = 3;
static int
proc_do_defense_mode(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = current->nsproxy->net_ns;
int *valp = table->data;
int val = *valp;
int rc;
rc = proc_dointvec(table, write, buffer, lenp, ppos);
if (write && (*valp != val)) {
if ((*valp < 0) || (*valp > 3)) {
/* Restore the correct value */
*valp = val;
} else {
update_defense_level(net_ipvs(net));
}
}
return rc;
}
static int
proc_do_sync_threshold(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int val[2];
int rc;
/* backup the value first */
memcpy(val, valp, sizeof(val));
rc = proc_dointvec(table, write, buffer, lenp, ppos);
if (write && (valp[0] < 0 || valp[1] < 0 ||
(valp[0] >= valp[1] && valp[1]))) {
/* Restore the correct value */
memcpy(valp, val, sizeof(val));
}
return rc;
}
static int
proc_do_sync_mode(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int val = *valp;
int rc;
rc = proc_dointvec(table, write, buffer, lenp, ppos);
if (write && (*valp != val)) {
if ((*valp < 0) || (*valp > 1)) {
/* Restore the correct value */
*valp = val;
}
}
return rc;
}
static int
proc_do_sync_ports(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int val = *valp;
int rc;
rc = proc_dointvec(table, write, buffer, lenp, ppos);
if (write && (*valp != val)) {
if (*valp < 1 || !is_power_of_2(*valp)) {
/* Restore the correct value */
*valp = val;
}
}
return rc;
}
/*
* IPVS sysctl table (under the /proc/sys/net/ipv4/vs/)
* Do not change the order or insert new entries without
* aligning with the netns init in ip_vs_control_net_init()
*/
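/*
 * Note: the per-netns copies of this table are expected to fill in the
 * .data pointers by table index during ip_vs_control_net_init(), which
 * is why the entry order must stay in sync with that function.
 */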
static struct ctl_table vs_vars[] = {
{
.procname = "amemthresh",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "am_droprate",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "drop_entry",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
{
.procname = "drop_packet",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
#ifdef CONFIG_IP_VS_NFCT
{
.procname = "conntrack",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
#endif
{
.procname = "secure_tcp",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
{
.procname = "snat_reroute",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
{
.procname = "sync_version",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_do_sync_mode,
},
{
.procname = "sync_ports",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_do_sync_ports,
},
{
.procname = "sync_qlen_max",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "sync_sock_size",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "cache_bypass",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "expire_nodest_conn",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "expire_quiescent_template",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "sync_threshold",
.maxlen =
sizeof(((struct netns_ipvs *)0)->sysctl_sync_threshold),
.mode = 0644,
.proc_handler = proc_do_sync_threshold,
},
{
.procname = "sync_refresh_period",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "sync_retries",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &three,
},
{
.procname = "nat_icmp_send",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "pmtu_disc",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "backup_only",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#ifdef CONFIG_IP_VS_DEBUG
{
.procname = "debug_level",
.data = &sysctl_ip_vs_debug_level,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
#if 0
{
.procname = "timeout_established",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_synsent",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_SENT],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_synrecv",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_RECV],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_finwait",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_FIN_WAIT],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_timewait",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_TIME_WAIT],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_close",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_closewait",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE_WAIT],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_lastack",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_LAST_ACK],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_listen",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_LISTEN],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_synack",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYNACK],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_udp",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_UDP],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_icmp",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_ICMP],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
#endif
{ }
};
#endif
#ifdef CONFIG_PROC_FS
struct ip_vs_iter {
struct seq_net_private p; /* Do not move this, netns depends upon it */
struct hlist_head *table;
int bucket;
};
/*
* Write the contents of the VS rule table to a PROCfs file.
* (It is kept just for backward compatibility)
*/
static inline const char *ip_vs_fwd_name(unsigned int flags)
{
switch (flags & IP_VS_CONN_F_FWD_MASK) {
case IP_VS_CONN_F_LOCALNODE:
return "Local";
case IP_VS_CONN_F_TUNNEL:
return "Tunnel";
case IP_VS_CONN_F_DROUTE:
return "Route";
default:
return "Masq";
}
}
/* Get the Nth entry in the two lists */
static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
{
struct net *net = seq_file_net(seq);
struct ip_vs_iter *iter = seq->private;
int idx;
struct ip_vs_service *svc;
/* look in hash by protocol */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry_rcu(svc, &ip_vs_svc_table[idx], s_list) {
if (net_eq(svc->net, net) && pos-- == 0) {
iter->table = ip_vs_svc_table;
iter->bucket = idx;
return svc;
}
}
}
/* keep looking in fwmark */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry_rcu(svc, &ip_vs_svc_fwm_table[idx],
f_list) {
if (net_eq(svc->net, net) && pos-- == 0) {
iter->table = ip_vs_svc_fwm_table;
iter->bucket = idx;
return svc;
}
}
}
return NULL;
}
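/*
 * seq_file iteration: the start callback returns SEQ_START_TOKEN for
 * position 0 so that show() can emit the header lines, and positions
 * greater than zero map to the (pos - 1)th service found above.
 */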
static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
rcu_read_lock();
return *pos ? ip_vs_info_array(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct hlist_node *e;
struct ip_vs_iter *iter;
struct ip_vs_service *svc;
++*pos;
if (v == SEQ_START_TOKEN)
return ip_vs_info_array(seq,0);
svc = v;
iter = seq->private;
if (iter->table == ip_vs_svc_table) {
/* next service in table hashed by protocol */
e = rcu_dereference(hlist_next_rcu(&svc->s_list));
if (e)
return hlist_entry(e, struct ip_vs_service, s_list);
while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
hlist_for_each_entry_rcu(svc,
&ip_vs_svc_table[iter->bucket],
s_list) {
return svc;
}
}
iter->table = ip_vs_svc_fwm_table;
iter->bucket = -1;
goto scan_fwmark;
}
/* next service in hashed by fwmark */
e = rcu_dereference(hlist_next_rcu(&svc->f_list));
if (e)
return hlist_entry(e, struct ip_vs_service, f_list);
scan_fwmark:
while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
hlist_for_each_entry_rcu(svc,
&ip_vs_svc_fwm_table[iter->bucket],
f_list)
return svc;
}
return NULL;
}
static void ip_vs_info_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_printf(seq,
"IP Virtual Server version %d.%d.%d (size=%d)\n",
NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
seq_puts(seq,
"Prot LocalAddress:Port Scheduler Flags\n");
seq_puts(seq,
" -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n");
} else {
const struct ip_vs_service *svc = v;
const struct ip_vs_iter *iter = seq->private;
const struct ip_vs_dest *dest;
struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
if (iter->table == ip_vs_svc_table) {
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6)
seq_printf(seq, "%s [%pI6]:%04X %s ",
ip_vs_proto_name(svc->protocol),
&svc->addr.in6,
ntohs(svc->port),
sched->name);
else
#endif
seq_printf(seq, "%s %08X:%04X %s %s ",
ip_vs_proto_name(svc->protocol),
ntohl(svc->addr.ip),
ntohs(svc->port),
sched->name,
(svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
} else {
seq_printf(seq, "FWM %08X %s %s",
svc->fwmark, sched->name,
(svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
}
if (svc->flags & IP_VS_SVC_F_PERSISTENT)
seq_printf(seq, "persistent %d %08X\n",
svc->timeout,
ntohl(svc->netmask));
else
seq_putc(seq, '\n');
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
#ifdef CONFIG_IP_VS_IPV6
if (dest->af == AF_INET6)
seq_printf(seq,
" -> [%pI6]:%04X"
" %-7s %-6d %-10d %-10d\n",
&dest->addr.in6,
ntohs(dest->port),
ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
atomic_read(&dest->weight),
atomic_read(&dest->activeconns),
atomic_read(&dest->inactconns));
else
#endif
seq_printf(seq,
" -> %08X:%04X "
"%-7s %-6d %-10d %-10d\n",
ntohl(dest->addr.ip),
ntohs(dest->port),
ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
atomic_read(&dest->weight),
atomic_read(&dest->activeconns),
atomic_read(&dest->inactconns));
}
}
return 0;
}
static const struct seq_operations ip_vs_info_seq_ops = {
.start = ip_vs_info_seq_start,
.next = ip_vs_info_seq_next,
.stop = ip_vs_info_seq_stop,
.show = ip_vs_info_seq_show,
};
static int ip_vs_info_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ip_vs_info_seq_ops,
sizeof(struct ip_vs_iter));
}
static const struct file_operations ip_vs_info_fops = {
.owner = THIS_MODULE,
.open = ip_vs_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int ip_vs_stats_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_single_net(seq);
struct ip_vs_stats_user show;
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Total Incoming Outgoing Incoming Outgoing\n");
seq_printf(seq,
" Conns Packets Packets Bytes Bytes\n");
ip_vs_copy_stats(&show, &net_ipvs(net)->tot_stats);
seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", show.conns,
show.inpkts, show.outpkts,
(unsigned long long) show.inbytes,
(unsigned long long) show.outbytes);
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
seq_printf(seq, "%8X %8X %8X %16X %16X\n",
show.cps, show.inpps, show.outpps,
show.inbps, show.outbps);
return 0;
}
static int ip_vs_stats_seq_open(struct inode *inode, struct file *file)
{
return single_open_net(inode, file, ip_vs_stats_show);
}
static const struct file_operations ip_vs_stats_fops = {
.owner = THIS_MODULE,
.open = ip_vs_stats_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release_net,
};
static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_single_net(seq);
struct ip_vs_stats *tot_stats = &net_ipvs(net)->tot_stats;
struct ip_vs_cpu_stats __percpu *cpustats = tot_stats->cpustats;
struct ip_vs_stats_user rates;
int i;
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Total Incoming Outgoing Incoming Outgoing\n");
seq_printf(seq,
"CPU Conns Packets Packets Bytes Bytes\n");
for_each_possible_cpu(i) {
struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i);
unsigned int start;
__u64 inbytes, outbytes;
do {
start = u64_stats_fetch_begin_bh(&u->syncp);
inbytes = u->ustats.inbytes;
outbytes = u->ustats.outbytes;
} while (u64_stats_fetch_retry_bh(&u->syncp, start));
seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
i, u->ustats.conns, u->ustats.inpkts,
u->ustats.outpkts, (__u64)inbytes,
(__u64)outbytes);
}
spin_lock_bh(&tot_stats->lock);
seq_printf(seq, " ~ %8X %8X %8X %16LX %16LX\n\n",
tot_stats->ustats.conns, tot_stats->ustats.inpkts,
tot_stats->ustats.outpkts,
(unsigned long long) tot_stats->ustats.inbytes,
(unsigned long long) tot_stats->ustats.outbytes);
ip_vs_read_estimator(&rates, tot_stats);
spin_unlock_bh(&tot_stats->lock);
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
seq_printf(seq, " %8X %8X %8X %16X %16X\n",
rates.cps,
rates.inpps,
rates.outpps,
rates.inbps,
rates.outbps);
return 0;
}
static int ip_vs_stats_percpu_seq_open(struct inode *inode, struct file *file)
{
return single_open_net(inode, file, ip_vs_stats_percpu_show);
}
static const struct file_operations ip_vs_stats_percpu_fops = {
.owner = THIS_MODULE,
.open = ip_vs_stats_percpu_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release_net,
};
#endif
/*
* Set timeout values for tcp tcpfin udp in the timeout_table.
*/
static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u)
{
#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
struct ip_vs_proto_data *pd;
#endif
IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n",
u->tcp_timeout,
u->tcp_fin_timeout,
u->udp_timeout);
#ifdef CONFIG_IP_VS_PROTO_TCP
if (u->tcp_timeout) {
pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
= u->tcp_timeout * HZ;
}
if (u->tcp_fin_timeout) {
pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]
= u->tcp_fin_timeout * HZ;
}
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
if (u->udp_timeout) {
pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
pd->timeout_table[IP_VS_UDP_S_NORMAL]
= u->udp_timeout * HZ;
}
#endif
return 0;
}
#define SET_CMDID(cmd) (cmd - IP_VS_BASE_CTL)
#define SERVICE_ARG_LEN (sizeof(struct ip_vs_service_user))
#define SVCDEST_ARG_LEN (sizeof(struct ip_vs_service_user) + \
sizeof(struct ip_vs_dest_user))
#define TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user))
#define DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user))
#define MAX_ARG_LEN SVCDEST_ARG_LEN
static const unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = {
[SET_CMDID(IP_VS_SO_SET_ADD)] = SERVICE_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_EDIT)] = SERVICE_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_DEL)] = SERVICE_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_FLUSH)] = 0,
[SET_CMDID(IP_VS_SO_SET_ADDDEST)] = SVCDEST_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_DELDEST)] = SVCDEST_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_EDITDEST)] = SVCDEST_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_TIMEOUT)] = TIMEOUT_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_STARTDAEMON)] = DAEMON_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_STOPDAEMON)] = DAEMON_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN,
};
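/* do_ip_vs_set_ctl() requires the user-supplied length to match the
 * entry above exactly for the given command.
 */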
static void ip_vs_copy_usvc_compat(struct ip_vs_service_user_kern *usvc,
struct ip_vs_service_user *usvc_compat)
{
memset(usvc, 0, sizeof(*usvc));
usvc->af = AF_INET;
usvc->protocol = usvc_compat->protocol;
usvc->addr.ip = usvc_compat->addr;
usvc->port = usvc_compat->port;
usvc->fwmark = usvc_compat->fwmark;
/* Deep copy of sched_name is not needed here */
usvc->sched_name = usvc_compat->sched_name;
usvc->flags = usvc_compat->flags;
usvc->timeout = usvc_compat->timeout;
usvc->netmask = usvc_compat->netmask;
}
static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
struct ip_vs_dest_user *udest_compat)
{
memset(udest, 0, sizeof(*udest));
udest->addr.ip = udest_compat->addr;
udest->port = udest_compat->port;
udest->conn_flags = udest_compat->conn_flags;
udest->weight = udest_compat->weight;
udest->u_threshold = udest_compat->u_threshold;
udest->l_threshold = udest_compat->l_threshold;
}
static int
do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
struct net *net = sock_net(sk);
int ret;
unsigned char arg[MAX_ARG_LEN];
struct ip_vs_service_user *usvc_compat;
struct ip_vs_service_user_kern usvc;
struct ip_vs_service *svc;
struct ip_vs_dest_user *udest_compat;
struct ip_vs_dest_user_kern udest;
struct netns_ipvs *ipvs = net_ipvs(net);
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX)
return -EINVAL;
if (len < 0 || len > MAX_ARG_LEN)
return -EINVAL;
if (len != set_arglen[SET_CMDID(cmd)]) {
pr_err("set_ctl: len %u != %u\n",
len, set_arglen[SET_CMDID(cmd)]);
return -EINVAL;
}
if (copy_from_user(arg, user, len) != 0)
return -EFAULT;
/* increase the module use count */
ip_vs_use_count_inc();
/* Handle daemons since they have another lock */
if (cmd == IP_VS_SO_SET_STARTDAEMON ||
cmd == IP_VS_SO_SET_STOPDAEMON) {
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
if (mutex_lock_interruptible(&ipvs->sync_mutex)) {
ret = -ERESTARTSYS;
goto out_dec;
}
if (cmd == IP_VS_SO_SET_STARTDAEMON)
ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
dm->syncid);
else
ret = stop_sync_thread(net, dm->state);
mutex_unlock(&ipvs->sync_mutex);
goto out_dec;
}
if (mutex_lock_interruptible(&__ip_vs_mutex)) {
ret = -ERESTARTSYS;
goto out_dec;
}
if (cmd == IP_VS_SO_SET_FLUSH) {
/* Flush the virtual service */
ret = ip_vs_flush(net, false);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_TIMEOUT) {
/* Set timeout values for (tcp tcpfin udp) */
ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
goto out_unlock;
}
usvc_compat = (struct ip_vs_service_user *)arg;
udest_compat = (struct ip_vs_dest_user *)(usvc_compat + 1);
/* We only use the new structs internally, so copy userspace compat
* structs to extended internal versions */
ip_vs_copy_usvc_compat(&usvc, usvc_compat);
ip_vs_copy_udest_compat(&udest, udest_compat);
if (cmd == IP_VS_SO_SET_ZERO) {
/* if no service address is set, zero counters in all */
if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
ret = ip_vs_zero_all(net);
goto out_unlock;
}
}
/* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
usvc.protocol != IPPROTO_SCTP) {
pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
usvc.protocol, &usvc.addr.ip,
ntohs(usvc.port), usvc.sched_name);
ret = -EFAULT;
goto out_unlock;
}
/* Lookup the exact service by <protocol, addr, port> or fwmark */
rcu_read_lock();
if (usvc.fwmark == 0)
svc = __ip_vs_service_find(net, usvc.af, usvc.protocol,
&usvc.addr, usvc.port);
else
svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark);
rcu_read_unlock();
if (cmd != IP_VS_SO_SET_ADD
&& (svc == NULL || svc->protocol != usvc.protocol)) {
ret = -ESRCH;
goto out_unlock;
}
switch (cmd) {
case IP_VS_SO_SET_ADD:
if (svc != NULL)
ret = -EEXIST;
else
ret = ip_vs_add_service(net, &usvc, &svc);
break;
case IP_VS_SO_SET_EDIT:
ret = ip_vs_edit_service(svc, &usvc);
break;
case IP_VS_SO_SET_DEL:
ret = ip_vs_del_service(svc);
if (!ret)
goto out_unlock;
break;
case IP_VS_SO_SET_ZERO:
ret = ip_vs_zero_service(svc);
break;
case IP_VS_SO_SET_ADDDEST:
ret = ip_vs_add_dest(svc, &udest);
break;
case IP_VS_SO_SET_EDITDEST:
ret = ip_vs_edit_dest(svc, &udest);
break;
case IP_VS_SO_SET_DELDEST:
ret = ip_vs_del_dest(svc, &udest);
break;
default:
ret = -EINVAL;
}
out_unlock:
mutex_unlock(&__ip_vs_mutex);
out_dec:
/* decrease the module use count */
ip_vs_use_count_dec();
return ret;
}
static void
ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
{
struct ip_vs_scheduler *sched;
sched = rcu_dereference_protected(src->scheduler, 1);
dst->protocol = src->protocol;
dst->addr = src->addr.ip;
dst->port = src->port;
dst->fwmark = src->fwmark;
strlcpy(dst->sched_name, sched->name, sizeof(dst->sched_name));
dst->flags = src->flags;
dst->timeout = src->timeout / HZ;
dst->netmask = src->netmask;
dst->num_dests = src->num_dests;
ip_vs_copy_stats(&dst->stats, &src->stats);
}
static inline int
__ip_vs_get_service_entries(struct net *net,
const struct ip_vs_get_services *get,
struct ip_vs_get_services __user *uptr)
{
int idx, count=0;
struct ip_vs_service *svc;
struct ip_vs_service_entry entry;
int ret = 0;
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
/* Only expose IPv4 entries to old interface */
if (svc->af != AF_INET || !net_eq(svc->net, net))
continue;
if (count >= get->num_services)
goto out;
memset(&entry, 0, sizeof(entry));
ip_vs_copy_service(&entry, svc);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
ret = -EFAULT;
goto out;
}
count++;
}
}
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
/* Only expose IPv4 entries to old interface */
if (svc->af != AF_INET || !net_eq(svc->net, net))
continue;
if (count >= get->num_services)
goto out;
memset(&entry, 0, sizeof(entry));
ip_vs_copy_service(&entry, svc);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
ret = -EFAULT;
goto out;
}
count++;
}
}
out:
return ret;
}
static inline int
__ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
struct ip_vs_get_dests __user *uptr)
{
struct ip_vs_service *svc;
union nf_inet_addr addr = { .ip = get->addr };
int ret = 0;
rcu_read_lock();
if (get->fwmark)
svc = __ip_vs_svc_fwm_find(net, AF_INET, get->fwmark);
else
svc = __ip_vs_service_find(net, AF_INET, get->protocol, &addr,
get->port);
rcu_read_unlock();
if (svc) {
int count = 0;
struct ip_vs_dest *dest;
struct ip_vs_dest_entry entry;
memset(&entry, 0, sizeof(entry));
list_for_each_entry(dest, &svc->destinations, n_list) {
if (count >= get->num_dests)
break;
entry.addr = dest->addr.ip;
entry.port = dest->port;
entry.conn_flags = atomic_read(&dest->conn_flags);
entry.weight = atomic_read(&dest->weight);
entry.u_threshold = dest->u_threshold;
entry.l_threshold = dest->l_threshold;
entry.activeconns = atomic_read(&dest->activeconns);
entry.inactconns = atomic_read(&dest->inactconns);
entry.persistconns = atomic_read(&dest->persistconns);
ip_vs_copy_stats(&entry.stats, &dest->stats);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
ret = -EFAULT;
break;
}
count++;
}
} else
ret = -ESRCH;
return ret;
}
static inline void
__ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
{
#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
struct ip_vs_proto_data *pd;
#endif
memset(u, 0, sizeof (*u));
#ifdef CONFIG_IP_VS_PROTO_TCP
pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
u->udp_timeout =
pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
#endif
}
#define GET_CMDID(cmd) (cmd - IP_VS_BASE_CTL)
#define GET_INFO_ARG_LEN (sizeof(struct ip_vs_getinfo))
#define GET_SERVICES_ARG_LEN (sizeof(struct ip_vs_get_services))
#define GET_SERVICE_ARG_LEN (sizeof(struct ip_vs_service_entry))
#define GET_DESTS_ARG_LEN (sizeof(struct ip_vs_get_dests))
#define GET_TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user))
#define GET_DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user) * 2)
static const unsigned char get_arglen[GET_CMDID(IP_VS_SO_GET_MAX)+1] = {
[GET_CMDID(IP_VS_SO_GET_VERSION)] = 64,
[GET_CMDID(IP_VS_SO_GET_INFO)] = GET_INFO_ARG_LEN,
[GET_CMDID(IP_VS_SO_GET_SERVICES)] = GET_SERVICES_ARG_LEN,
[GET_CMDID(IP_VS_SO_GET_SERVICE)] = GET_SERVICE_ARG_LEN,
[GET_CMDID(IP_VS_SO_GET_DESTS)] = GET_DESTS_ARG_LEN,
[GET_CMDID(IP_VS_SO_GET_TIMEOUT)] = GET_TIMEOUT_ARG_LEN,
[GET_CMDID(IP_VS_SO_GET_DAEMON)] = GET_DAEMON_ARG_LEN,
};
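/* Unlike the set path, do_ip_vs_get_ctl() only requires the supplied
 * length to be at least this large; the variable-sized replies are
 * re-checked per command below.
 */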
static int
do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
unsigned char arg[128];
int ret = 0;
unsigned int copylen;
struct net *net = sock_net(sk);
struct netns_ipvs *ipvs = net_ipvs(net);
BUG_ON(!net);
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
return -EINVAL;
if (*len < get_arglen[GET_CMDID(cmd)]) {
pr_err("get_ctl: len %u < %u\n",
*len, get_arglen[GET_CMDID(cmd)]);
return -EINVAL;
}
copylen = get_arglen[GET_CMDID(cmd)];
if (copylen > 128)
return -EINVAL;
if (copy_from_user(arg, user, copylen) != 0)
return -EFAULT;
/*
* Handle daemons first since they have their own locking
*/
if (cmd == IP_VS_SO_GET_DAEMON) {
struct ip_vs_daemon_user d[2];
memset(&d, 0, sizeof(d));
if (mutex_lock_interruptible(&ipvs->sync_mutex))
return -ERESTARTSYS;
if (ipvs->sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
sizeof(d[0].mcast_ifn));
d[0].syncid = ipvs->master_syncid;
}
if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
sizeof(d[1].mcast_ifn));
d[1].syncid = ipvs->backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
ret = -EFAULT;
mutex_unlock(&ipvs->sync_mutex);
return ret;
}
if (mutex_lock_interruptible(&__ip_vs_mutex))
return -ERESTARTSYS;
switch (cmd) {
case IP_VS_SO_GET_VERSION:
{
char buf[64];
sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
ret = -EFAULT;
goto out;
}
*len = strlen(buf)+1;
}
break;
case IP_VS_SO_GET_INFO:
{
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
info.size = ip_vs_conn_tab_size;
info.num_services = ipvs->num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
}
break;
case IP_VS_SO_GET_SERVICES:
{
struct ip_vs_get_services *get;
int size;
get = (struct ip_vs_get_services *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_service_entry) * get->num_services;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_service_entries(net, get, user);
}
break;
case IP_VS_SO_GET_SERVICE:
{
struct ip_vs_service_entry *entry;
struct ip_vs_service *svc;
union nf_inet_addr addr;
entry = (struct ip_vs_service_entry *)arg;
addr.ip = entry->addr;
rcu_read_lock();
if (entry->fwmark)
svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark);
else
svc = __ip_vs_service_find(net, AF_INET,
entry->protocol, &addr,
entry->port);
rcu_read_unlock();
if (svc) {
ip_vs_copy_service(entry, svc);
if (copy_to_user(user, entry, sizeof(*entry)) != 0)
ret = -EFAULT;
} else
ret = -ESRCH;
}
break;
case IP_VS_SO_GET_DESTS:
{
struct ip_vs_get_dests *get;
int size;
get = (struct ip_vs_get_dests *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_dest_entry) * get->num_dests;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_dest_entries(net, get, user);
}
break;
case IP_VS_SO_GET_TIMEOUT:
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(net, &t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
}
break;
default:
ret = -EINVAL;
}
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
static struct nf_sockopt_ops ip_vs_sockopts = {
.pf = PF_INET,
.set_optmin = IP_VS_BASE_CTL,
.set_optmax = IP_VS_SO_SET_MAX+1,
.set = do_ip_vs_set_ctl,
.get_optmin = IP_VS_BASE_CTL,
.get_optmax = IP_VS_SO_GET_MAX+1,
.get = do_ip_vs_get_ctl,
.owner = THIS_MODULE,
};
/*
* Generic Netlink interface
*/
/* IPVS genetlink family */
static struct genl_family ip_vs_genl_family = {
.id = GENL_ID_GENERATE,
.hdrsize = 0,
.name = IPVS_GENL_NAME,
.version = IPVS_GENL_VERSION,
.maxattr = IPVS_CMD_MAX,
.netnsok = true, /* Allow ipvsadm to work on netns */
};
/* Policy used for first-level command attributes */
static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
[IPVS_CMD_ATTR_SERVICE] = { .type = NLA_NESTED },
[IPVS_CMD_ATTR_DEST] = { .type = NLA_NESTED },
[IPVS_CMD_ATTR_DAEMON] = { .type = NLA_NESTED },
[IPVS_CMD_ATTR_TIMEOUT_TCP] = { .type = NLA_U32 },
[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN] = { .type = NLA_U32 },
[IPVS_CMD_ATTR_TIMEOUT_UDP] = { .type = NLA_U32 },
};
/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DAEMON */
static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
[IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 },
[IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
.len = IP_VS_IFNAME_MAXLEN },
[IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
};
/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */
static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
[IPVS_SVC_ATTR_AF] = { .type = NLA_U16 },
[IPVS_SVC_ATTR_PROTOCOL] = { .type = NLA_U16 },
[IPVS_SVC_ATTR_ADDR] = { .type = NLA_BINARY,
.len = sizeof(union nf_inet_addr) },
[IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 },
[IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 },
[IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING,
.len = IP_VS_SCHEDNAME_MAXLEN },
[IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING,
.len = IP_VS_PENAME_MAXLEN },
[IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,
.len = sizeof(struct ip_vs_flags) },
[IPVS_SVC_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPVS_SVC_ATTR_NETMASK] = { .type = NLA_U32 },
[IPVS_SVC_ATTR_STATS] = { .type = NLA_NESTED },
};
/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DEST */
static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
[IPVS_DEST_ATTR_ADDR] = { .type = NLA_BINARY,
.len = sizeof(union nf_inet_addr) },
[IPVS_DEST_ATTR_PORT] = { .type = NLA_U16 },
[IPVS_DEST_ATTR_FWD_METHOD] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_WEIGHT] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_U_THRESH] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_L_THRESH] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_ACTIVE_CONNS] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_INACT_CONNS] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_PERSIST_CONNS] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_STATS] = { .type = NLA_NESTED },
};
static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
struct ip_vs_stats *stats)
{
struct ip_vs_stats_user ustats;
struct nlattr *nl_stats = nla_nest_start(skb, container_type);
if (!nl_stats)
return -EMSGSIZE;
ip_vs_copy_stats(&ustats, stats);
if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) ||
nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) ||
nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) ||
nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) ||
nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) ||
nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps))
goto nla_put_failure;
nla_nest_end(skb, nl_stats);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_stats);
return -EMSGSIZE;
}
static int ip_vs_genl_fill_service(struct sk_buff *skb,
struct ip_vs_service *svc)
{
struct ip_vs_scheduler *sched;
struct ip_vs_pe *pe;
struct nlattr *nl_service;
struct ip_vs_flags flags = { .flags = svc->flags,
.mask = ~0 };
nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
if (!nl_service)
return -EMSGSIZE;
if (nla_put_u16(skb, IPVS_SVC_ATTR_AF, svc->af))
goto nla_put_failure;
if (svc->fwmark) {
if (nla_put_u32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark))
goto nla_put_failure;
} else {
if (nla_put_u16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol) ||
nla_put(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr) ||
nla_put_be16(skb, IPVS_SVC_ATTR_PORT, svc->port))
goto nla_put_failure;
}
sched = rcu_dereference_protected(svc->scheduler, 1);
pe = rcu_dereference_protected(svc->pe, 1);
if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched->name) ||
(pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) ||
nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
nla_put_be32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask))
goto nla_put_failure;
if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
goto nla_put_failure;
nla_nest_end(skb, nl_service);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_service);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_service(struct sk_buff *skb,
struct ip_vs_service *svc,
struct netlink_callback *cb)
{
void *hdr;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&ip_vs_genl_family, NLM_F_MULTI,
IPVS_CMD_NEW_SERVICE);
if (!hdr)
return -EMSGSIZE;
if (ip_vs_genl_fill_service(skb, svc) < 0)
goto nla_put_failure;
return genlmsg_end(skb, hdr);
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_services(struct sk_buff *skb,
struct netlink_callback *cb)
{
int idx = 0, i;
int start = cb->args[0];
struct ip_vs_service *svc;
struct net *net = skb_sknet(skb);
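/* cb->args[0] carries the number of services already emitted by earlier
* invocations of this dump callback, so those entries are skipped below */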
mutex_lock(&__ip_vs_mutex);
for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
hlist_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
if (++idx <= start || !net_eq(svc->net, net))
continue;
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
idx--;
goto nla_put_failure;
}
}
}
for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
if (++idx <= start || !net_eq(svc->net, net))
continue;
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
idx--;
goto nla_put_failure;
}
}
}
nla_put_failure:
mutex_unlock(&__ip_vs_mutex);
cb->args[0] = idx;
return skb->len;
}
static int ip_vs_genl_parse_service(struct net *net,
struct ip_vs_service_user_kern *usvc,
struct nlattr *nla, int full_entry,
struct ip_vs_service **ret_svc)
{
struct nlattr *attrs[IPVS_SVC_ATTR_MAX + 1];
struct nlattr *nla_af, *nla_port, *nla_fwmark, *nla_protocol, *nla_addr;
struct ip_vs_service *svc;
/* Parse mandatory identifying service fields first */
if (nla == NULL ||
nla_parse_nested(attrs, IPVS_SVC_ATTR_MAX, nla, ip_vs_svc_policy))
return -EINVAL;
nla_af = attrs[IPVS_SVC_ATTR_AF];
nla_protocol = attrs[IPVS_SVC_ATTR_PROTOCOL];
nla_addr = attrs[IPVS_SVC_ATTR_ADDR];
nla_port = attrs[IPVS_SVC_ATTR_PORT];
nla_fwmark = attrs[IPVS_SVC_ATTR_FWMARK];
if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
return -EINVAL;
memset(usvc, 0, sizeof(*usvc));
usvc->af = nla_get_u16(nla_af);
#ifdef CONFIG_IP_VS_IPV6
if (usvc->af != AF_INET && usvc->af != AF_INET6)
#else
if (usvc->af != AF_INET)
#endif
return -EAFNOSUPPORT;
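/* A fwmark-based service carries no protocol/addr/port of its own,
* so the protocol defaults to TCP */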
if (nla_fwmark) {
usvc->protocol = IPPROTO_TCP;
usvc->fwmark = nla_get_u32(nla_fwmark);
} else {
usvc->protocol = nla_get_u16(nla_protocol);
nla_memcpy(&usvc->addr, nla_addr, sizeof(usvc->addr));
usvc->port = nla_get_be16(nla_port);
usvc->fwmark = 0;
}
rcu_read_lock();
if (usvc->fwmark)
svc = __ip_vs_svc_fwm_find(net, usvc->af, usvc->fwmark);
else
svc = __ip_vs_service_find(net, usvc->af, usvc->protocol,
&usvc->addr, usvc->port);
rcu_read_unlock();
*ret_svc = svc;
/* If a full entry was requested, check for the additional fields */
if (full_entry) {
struct nlattr *nla_sched, *nla_flags, *nla_pe, *nla_timeout,
*nla_netmask;
struct ip_vs_flags flags;
nla_sched = attrs[IPVS_SVC_ATTR_SCHED_NAME];
nla_pe = attrs[IPVS_SVC_ATTR_PE_NAME];
nla_flags = attrs[IPVS_SVC_ATTR_FLAGS];
nla_timeout = attrs[IPVS_SVC_ATTR_TIMEOUT];
nla_netmask = attrs[IPVS_SVC_ATTR_NETMASK];
if (!(nla_sched && nla_flags && nla_timeout && nla_netmask))
return -EINVAL;
nla_memcpy(&flags, nla_flags, sizeof(flags));
/* prefill flags from service if it already exists */
if (svc)
usvc->flags = svc->flags;
/* set new flags from userland */
usvc->flags = (usvc->flags & ~flags.mask) |
(flags.flags & flags.mask);
usvc->sched_name = nla_data(nla_sched);
usvc->pe_name = nla_pe ? nla_data(nla_pe) : NULL;
usvc->timeout = nla_get_u32(nla_timeout);
usvc->netmask = nla_get_be32(nla_netmask);
}
return 0;
}
static struct ip_vs_service *ip_vs_genl_find_service(struct net *net,
struct nlattr *nla)
{
struct ip_vs_service_user_kern usvc;
struct ip_vs_service *svc;
int ret;
ret = ip_vs_genl_parse_service(net, &usvc, nla, 0, &svc);
return ret ? ERR_PTR(ret) : svc;
}
static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
{
struct nlattr *nl_dest;
nl_dest = nla_nest_start(skb, IPVS_CMD_ATTR_DEST);
if (!nl_dest)
return -EMSGSIZE;
if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
(atomic_read(&dest->conn_flags) &
IP_VS_CONN_F_FWD_MASK)) ||
nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
atomic_read(&dest->weight)) ||
nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) ||
nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) ||
nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
atomic_read(&dest->activeconns)) ||
nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS,
atomic_read(&dest->inactconns)) ||
nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
atomic_read(&dest->persistconns)))
goto nla_put_failure;
if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
goto nla_put_failure;
nla_nest_end(skb, nl_dest);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_dest);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest,
struct netlink_callback *cb)
{
void *hdr;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&ip_vs_genl_family, NLM_F_MULTI,
IPVS_CMD_NEW_DEST);
if (!hdr)
return -EMSGSIZE;
if (ip_vs_genl_fill_dest(skb, dest) < 0)
goto nla_put_failure;
return genlmsg_end(skb, hdr);
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_dests(struct sk_buff *skb,
struct netlink_callback *cb)
{
int idx = 0;
int start = cb->args[0];
struct ip_vs_service *svc;
struct ip_vs_dest *dest;
struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
struct net *net = skb_sknet(skb);
mutex_lock(&__ip_vs_mutex);
/* Try to find the service for which to dump destinations */
if (nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs,
IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy))
goto out_err;
svc = ip_vs_genl_find_service(net, attrs[IPVS_CMD_ATTR_SERVICE]);
if (IS_ERR(svc) || svc == NULL)
goto out_err;
/* Dump the destinations */
list_for_each_entry(dest, &svc->destinations, n_list) {
if (++idx <= start)
continue;
if (ip_vs_genl_dump_dest(skb, dest, cb) < 0) {
idx--;
goto nla_put_failure;
}
}
nla_put_failure:
cb->args[0] = idx;
out_err:
mutex_unlock(&__ip_vs_mutex);
return skb->len;
}
static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
struct nlattr *nla, int full_entry)
{
struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1];
struct nlattr *nla_addr, *nla_port;
/* Parse mandatory identifying destination fields first */
if (nla == NULL ||
nla_parse_nested(attrs, IPVS_DEST_ATTR_MAX, nla, ip_vs_dest_policy))
return -EINVAL;
nla_addr = attrs[IPVS_DEST_ATTR_ADDR];
nla_port = attrs[IPVS_DEST_ATTR_PORT];
if (!(nla_addr && nla_port))
return -EINVAL;
memset(udest, 0, sizeof(*udest));
nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
udest->port = nla_get_be16(nla_port);
/* If a full entry was requested, check for the additional fields */
if (full_entry) {
struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh,
*nla_l_thresh;
nla_fwd = attrs[IPVS_DEST_ATTR_FWD_METHOD];
nla_weight = attrs[IPVS_DEST_ATTR_WEIGHT];
nla_u_thresh = attrs[IPVS_DEST_ATTR_U_THRESH];
nla_l_thresh = attrs[IPVS_DEST_ATTR_L_THRESH];
if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh))
return -EINVAL;
udest->conn_flags = nla_get_u32(nla_fwd)
& IP_VS_CONN_F_FWD_MASK;
udest->weight = nla_get_u32(nla_weight);
udest->u_threshold = nla_get_u32(nla_u_thresh);
udest->l_threshold = nla_get_u32(nla_l_thresh);
}
return 0;
}
static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __u32 state,
const char *mcast_ifn, __u32 syncid)
{
struct nlattr *nl_daemon;
nl_daemon = nla_nest_start(skb, IPVS_CMD_ATTR_DAEMON);
if (!nl_daemon)
return -EMSGSIZE;
if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) ||
nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn) ||
nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid))
goto nla_put_failure;
nla_nest_end(skb, nl_daemon);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_daemon);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __u32 state,
const char *mcast_ifn, __u32 syncid,
struct netlink_callback *cb)
{
void *hdr;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&ip_vs_genl_family, NLM_F_MULTI,
IPVS_CMD_NEW_DAEMON);
if (!hdr)
return -EMSGSIZE;
if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid))
goto nla_put_failure;
return genlmsg_end(skb, hdr);
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct net *net = skb_sknet(skb);
struct netns_ipvs *ipvs = net_ipvs(net);
mutex_lock(&ipvs->sync_mutex);
if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
ipvs->master_mcast_ifn,
ipvs->master_syncid, cb) < 0)
goto nla_put_failure;
cb->args[0] = 1;
}
if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
ipvs->backup_mcast_ifn,
ipvs->backup_syncid, cb) < 0)
goto nla_put_failure;
cb->args[1] = 1;
}
nla_put_failure:
mutex_unlock(&ipvs->sync_mutex);
return skb->len;
}
static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
{
if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
return -EINVAL;
return start_sync_thread(net,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
}
static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs)
{
if (!attrs[IPVS_DAEMON_ATTR_STATE])
return -EINVAL;
return stop_sync_thread(net,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
}
static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(net, &t);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN])
t.tcp_fin_timeout =
nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
return ip_vs_set_timeout(net, &t);
}
static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
{
int ret = 0, cmd;
struct net *net;
struct netns_ipvs *ipvs;
net = skb_sknet(skb);
ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) {
struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
mutex_lock(&ipvs->sync_mutex);
if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
info->attrs[IPVS_CMD_ATTR_DAEMON],
ip_vs_daemon_policy)) {
ret = -EINVAL;
goto out;
}
if (cmd == IPVS_CMD_NEW_DAEMON)
ret = ip_vs_genl_new_daemon(net, daemon_attrs);
else
ret = ip_vs_genl_del_daemon(net, daemon_attrs);
out:
mutex_unlock(&ipvs->sync_mutex);
}
return ret;
}
static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
{
struct ip_vs_service *svc = NULL;
struct ip_vs_service_user_kern usvc;
struct ip_vs_dest_user_kern udest;
int ret = 0, cmd;
int need_full_svc = 0, need_full_dest = 0;
struct net *net;
net = skb_sknet(skb);
cmd = info->genlhdr->cmd;
mutex_lock(&__ip_vs_mutex);
if (cmd == IPVS_CMD_FLUSH) {
ret = ip_vs_flush(net, false);
goto out;
} else if (cmd == IPVS_CMD_SET_CONFIG) {
ret = ip_vs_genl_set_config(net, info->attrs);
goto out;
} else if (cmd == IPVS_CMD_ZERO &&
!info->attrs[IPVS_CMD_ATTR_SERVICE]) {
ret = ip_vs_zero_all(net);
goto out;
}
/* All following commands require a service argument, so check if we
* received a valid one. We need a full service specification when
* adding / editing a service. Only identifying members otherwise. */
if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
need_full_svc = 1;
ret = ip_vs_genl_parse_service(net, &usvc,
info->attrs[IPVS_CMD_ATTR_SERVICE],
need_full_svc, &svc);
if (ret)
goto out;
/* Unless we're adding a new service, the service must already exist */
if ((cmd != IPVS_CMD_NEW_SERVICE) && (svc == NULL)) {
ret = -ESRCH;
goto out;
}
/* Destination commands require a valid destination argument. For
* adding / editing a destination, we need a full destination
* specification. */
if (cmd == IPVS_CMD_NEW_DEST || cmd == IPVS_CMD_SET_DEST ||
cmd == IPVS_CMD_DEL_DEST) {
if (cmd != IPVS_CMD_DEL_DEST)
need_full_dest = 1;
ret = ip_vs_genl_parse_dest(&udest,
info->attrs[IPVS_CMD_ATTR_DEST],
need_full_dest);
if (ret)
goto out;
}
switch (cmd) {
case IPVS_CMD_NEW_SERVICE:
if (svc == NULL)
ret = ip_vs_add_service(net, &usvc, &svc);
else
ret = -EEXIST;
break;
case IPVS_CMD_SET_SERVICE:
ret = ip_vs_edit_service(svc, &usvc);
break;
case IPVS_CMD_DEL_SERVICE:
ret = ip_vs_del_service(svc);
/* do not use svc, it can be freed */
break;
case IPVS_CMD_NEW_DEST:
ret = ip_vs_add_dest(svc, &udest);
break;
case IPVS_CMD_SET_DEST:
ret = ip_vs_edit_dest(svc, &udest);
break;
case IPVS_CMD_DEL_DEST:
ret = ip_vs_del_dest(svc, &udest);
break;
case IPVS_CMD_ZERO:
ret = ip_vs_zero_service(svc);
break;
default:
ret = -EINVAL;
}
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
void *reply;
int ret, cmd, reply_cmd;
struct net *net;
net = skb_sknet(skb);
cmd = info->genlhdr->cmd;
if (cmd == IPVS_CMD_GET_SERVICE)
reply_cmd = IPVS_CMD_NEW_SERVICE;
else if (cmd == IPVS_CMD_GET_INFO)
reply_cmd = IPVS_CMD_SET_INFO;
else if (cmd == IPVS_CMD_GET_CONFIG)
reply_cmd = IPVS_CMD_SET_CONFIG;
else {
pr_err("unknown Generic Netlink command\n");
return -EINVAL;
}
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
mutex_lock(&__ip_vs_mutex);
reply = genlmsg_put_reply(msg, info, &ip_vs_genl_family, 0, reply_cmd);
if (reply == NULL)
goto nla_put_failure;
switch (cmd) {
case IPVS_CMD_GET_SERVICE:
{
struct ip_vs_service *svc;
svc = ip_vs_genl_find_service(net,
info->attrs[IPVS_CMD_ATTR_SERVICE]);
if (IS_ERR(svc)) {
ret = PTR_ERR(svc);
goto out_err;
} else if (svc) {
ret = ip_vs_genl_fill_service(msg, svc);
if (ret)
goto nla_put_failure;
} else {
ret = -ESRCH;
goto out_err;
}
break;
}
case IPVS_CMD_GET_CONFIG:
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(net, &t);
#ifdef CONFIG_IP_VS_PROTO_TCP
if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP,
t.tcp_timeout) ||
nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
t.tcp_fin_timeout))
goto nla_put_failure;
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout))
goto nla_put_failure;
#endif
break;
}
case IPVS_CMD_GET_INFO:
if (nla_put_u32(msg, IPVS_INFO_ATTR_VERSION,
IP_VS_VERSION_CODE) ||
nla_put_u32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
ip_vs_conn_tab_size))
goto nla_put_failure;
break;
}
genlmsg_end(msg, reply);
ret = genlmsg_reply(msg, info);
goto out;
nla_put_failure:
pr_err("not enough space in Netlink message\n");
ret = -EMSGSIZE;
out_err:
nlmsg_free(msg);
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
{
.cmd = IPVS_CMD_NEW_SERVICE,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_SET_SERVICE,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_DEL_SERVICE,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_GET_SERVICE,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_get_cmd,
.dumpit = ip_vs_genl_dump_services,
.policy = ip_vs_cmd_policy,
},
{
.cmd = IPVS_CMD_NEW_DEST,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_SET_DEST,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_DEL_DEST,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_GET_DEST,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.dumpit = ip_vs_genl_dump_dests,
},
{
.cmd = IPVS_CMD_NEW_DAEMON,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_daemon,
},
{
.cmd = IPVS_CMD_DEL_DAEMON,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_daemon,
},
{
.cmd = IPVS_CMD_GET_DAEMON,
.flags = GENL_ADMIN_PERM,
.dumpit = ip_vs_genl_dump_daemons,
},
{
.cmd = IPVS_CMD_SET_CONFIG,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_GET_CONFIG,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_get_cmd,
},
{
.cmd = IPVS_CMD_GET_INFO,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_get_cmd,
},
{
.cmd = IPVS_CMD_ZERO,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_FLUSH,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_set_cmd,
},
};
static int __init ip_vs_genl_register(void)
{
return genl_register_family_with_ops(&ip_vs_genl_family,
ip_vs_genl_ops, ARRAY_SIZE(ip_vs_genl_ops));
}
static void ip_vs_genl_unregister(void)
{
genl_unregister_family(&ip_vs_genl_family);
}
/* End of Generic Netlink interface definitions */
/*
* per netns init/exit functions.
*/
#ifdef CONFIG_SYSCTL
static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
{
int idx;
struct netns_ipvs *ipvs = net_ipvs(net);
struct ctl_table *tbl;
atomic_set(&ipvs->dropentry, 0);
spin_lock_init(&ipvs->dropentry_lock);
spin_lock_init(&ipvs->droppacket_lock);
spin_lock_init(&ipvs->securetcp_lock);
if (!net_eq(net, &init_net)) {
tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
if (tbl == NULL)
return -ENOMEM;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns)
tbl[0].procname = NULL;
} else
tbl = vs_vars;
/* Initialize sysctl defaults */
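/* The tbl[idx++].data assignments below must stay in the same order
* as the entries of vs_vars[] */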
idx = 0;
ipvs->sysctl_amemthresh = 1024;
tbl[idx++].data = &ipvs->sysctl_amemthresh;
ipvs->sysctl_am_droprate = 10;
tbl[idx++].data = &ipvs->sysctl_am_droprate;
tbl[idx++].data = &ipvs->sysctl_drop_entry;
tbl[idx++].data = &ipvs->sysctl_drop_packet;
#ifdef CONFIG_IP_VS_NFCT
tbl[idx++].data = &ipvs->sysctl_conntrack;
#endif
tbl[idx++].data = &ipvs->sysctl_secure_tcp;
ipvs->sysctl_snat_reroute = 1;
tbl[idx++].data = &ipvs->sysctl_snat_reroute;
ipvs->sysctl_sync_ver = 1;
tbl[idx++].data = &ipvs->sysctl_sync_ver;
ipvs->sysctl_sync_ports = 1;
tbl[idx++].data = &ipvs->sysctl_sync_ports;
ipvs->sysctl_sync_qlen_max = nr_free_buffer_pages() / 32;
tbl[idx++].data = &ipvs->sysctl_sync_qlen_max;
ipvs->sysctl_sync_sock_size = 0;
tbl[idx++].data = &ipvs->sysctl_sync_sock_size;
tbl[idx++].data = &ipvs->sysctl_cache_bypass;
tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD;
ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
tbl[idx].data = &ipvs->sysctl_sync_threshold;
tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3);
tbl[idx++].data = &ipvs->sysctl_sync_retries;
tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
ipvs->sysctl_pmtu_disc = 1;
tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
tbl[idx++].data = &ipvs->sysctl_backup_only;
ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
if (ipvs->sysctl_hdr == NULL) {
if (!net_eq(net, &init_net))
kfree(tbl);
return -ENOMEM;
}
ip_vs_start_estimator(net, &ipvs->tot_stats);
ipvs->sysctl_tbl = tbl;
/* Schedule defense work */
INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
return 0;
}
static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
cancel_delayed_work_sync(&ipvs->defense_work);
cancel_work_sync(&ipvs->defense_work.work);
unregister_net_sysctl_table(ipvs->sysctl_hdr);
}
#else
static int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
#endif
static struct notifier_block ip_vs_dst_notifier = {
.notifier_call = ip_vs_dst_event,
};
int __net_init ip_vs_control_net_init(struct net *net)
{
int idx;
struct netns_ipvs *ipvs = net_ipvs(net);
/* Initialize rs_table */
for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
INIT_HLIST_HEAD(&ipvs->rs_table[idx]);
INIT_LIST_HEAD(&ipvs->dest_trash);
spin_lock_init(&ipvs->dest_trash_lock);
setup_timer(&ipvs->dest_trash_timer, ip_vs_dest_trash_expire,
(unsigned long) net);
atomic_set(&ipvs->ftpsvc_counter, 0);
atomic_set(&ipvs->nullsvc_counter, 0);
/* procfs stats */
ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
if (!ipvs->tot_stats.cpustats)
return -ENOMEM;
spin_lock_init(&ipvs->tot_stats.lock);
proc_create("ip_vs", 0, net->proc_net, &ip_vs_info_fops);
proc_create("ip_vs_stats", 0, net->proc_net, &ip_vs_stats_fops);
proc_create("ip_vs_stats_percpu", 0, net->proc_net,
&ip_vs_stats_percpu_fops);
if (ip_vs_control_net_init_sysctl(net))
goto err;
return 0;
err:
free_percpu(ipvs->tot_stats.cpustats);
return -ENOMEM;
}
void __net_exit ip_vs_control_net_cleanup(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
/* Some dests can be in a grace period even before cleanup; we have to
* defer ip_vs_trash_cleanup until ip_vs_dest_wait_readers is called.
*/
rcu_barrier();
ip_vs_trash_cleanup(net);
ip_vs_stop_estimator(net, &ipvs->tot_stats);
ip_vs_control_net_cleanup_sysctl(net);
remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
remove_proc_entry("ip_vs_stats", net->proc_net);
remove_proc_entry("ip_vs", net->proc_net);
free_percpu(ipvs->tot_stats.cpustats);
}
int __init ip_vs_register_nl_ioctl(void)
{
int ret;
ret = nf_register_sockopt(&ip_vs_sockopts);
if (ret) {
pr_err("cannot register sockopt.\n");
goto err_sock;
}
ret = ip_vs_genl_register();
if (ret) {
pr_err("cannot register Generic Netlink interface.\n");
goto err_genl;
}
return 0;
err_genl:
nf_unregister_sockopt(&ip_vs_sockopts);
err_sock:
return ret;
}
void ip_vs_unregister_nl_ioctl(void)
{
ip_vs_genl_unregister();
nf_unregister_sockopt(&ip_vs_sockopts);
}
int __init ip_vs_control_init(void)
{
int idx;
int ret;
EnterFunction(2);
/* Initialize svc_table, ip_vs_svc_fwm_table */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
INIT_HLIST_HEAD(&ip_vs_svc_table[idx]);
INIT_HLIST_HEAD(&ip_vs_svc_fwm_table[idx]);
}
smp_wmb(); /* Do we really need it now ? */
ret = register_netdevice_notifier(&ip_vs_dst_notifier);
if (ret < 0)
return ret;
LeaveFunction(2);
return 0;
}
void ip_vs_control_cleanup(void)
{
EnterFunction(2);
unregister_netdevice_notifier(&ip_vs_dst_notifier);
LeaveFunction(2);
}
| gpl-2.0 |
daeiron/kenzo_caf_kernel | drivers/gpu/drm/qxl/qxl_ioctl.c | 2036 | 12041 | /*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include "qxl_drv.h"
#include "qxl_object.h"
/*
* TODO: allocating a new gem (in qxl_bo) for each request.
* This is wasteful since bo's are page aligned.
*/
static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_alloc *qxl_alloc = data;
int ret;
struct qxl_bo *qobj;
uint32_t handle;
u32 domain = QXL_GEM_DOMAIN_VRAM;
if (qxl_alloc->size == 0) {
DRM_ERROR("invalid size %d\n", qxl_alloc->size);
return -EINVAL;
}
ret = qxl_gem_object_create_with_handle(qdev, file_priv,
domain,
qxl_alloc->size,
NULL,
&qobj, &handle);
if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n",
__func__, ret);
return -ENOMEM;
}
qxl_alloc->handle = handle;
return 0;
}
static int qxl_map_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_map *qxl_map = data;
return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
&qxl_map->offset);
}
/*
* dst must be validated, i.e. whole bo on vram/surfaces ram (right now all bo's
* are on vram).
* *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
*/
static void
apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
struct qxl_bo *src, uint64_t src_off)
{
void *reloc_page;
reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
*(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
src, src_off);
qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
}
static void
apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
struct qxl_bo *src)
{
uint32_t id = 0;
void *reloc_page;
if (src && !src->is_primary)
id = src->surface_id;
reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
*(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
}
/* return holding the reference to this object */
static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
struct drm_file *file_priv, uint64_t handle,
struct qxl_reloc_list *reloc_list)
{
struct drm_gem_object *gobj;
struct qxl_bo *qobj;
int ret;
gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
if (!gobj) {
DRM_ERROR("bad bo handle %lld\n", handle);
return NULL;
}
qobj = gem_to_qxl_bo(gobj);
ret = qxl_bo_list_add(reloc_list, qobj);
if (ret)
return NULL;
return qobj;
}
/*
* Usage of execbuffer:
* Relocations need to take into account the full QXLDrawable size.
* However, the command as passed from user space must *not* contain the initial
* QXLReleaseInfo struct (first XXX bytes)
*/
static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_execbuffer *execbuffer = data;
struct drm_qxl_command user_cmd;
int cmd_num;
struct qxl_bo *reloc_src_bo;
struct qxl_bo *reloc_dst_bo;
struct drm_qxl_reloc reloc;
void *fb_cmd;
int i, ret;
struct qxl_reloc_list reloc_list;
int unwritten;
uint32_t reloc_dst_offset;
INIT_LIST_HEAD(&reloc_list.bos);
for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
struct qxl_release *release;
struct qxl_bo *cmd_bo;
int release_type;
struct drm_qxl_command *commands =
(struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
sizeof(user_cmd)))
return -EFAULT;
switch (user_cmd.type) {
case QXL_CMD_DRAW:
release_type = QXL_RELEASE_DRAWABLE;
break;
case QXL_CMD_SURFACE:
case QXL_CMD_CURSOR:
default:
DRM_DEBUG("Only draw commands in execbuffers\n");
return -EINVAL;
break;
}
if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
return -EINVAL;
if (!access_ok(VERIFY_READ,
(void *)(unsigned long)user_cmd.command,
user_cmd.command_size))
return -EFAULT;
ret = qxl_alloc_release_reserved(qdev,
sizeof(union qxl_release_info) +
user_cmd.command_size,
release_type,
&release,
&cmd_bo);
if (ret)
return ret;
/* TODO copy slow path code from i915 */
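/* Map the page holding this release and copy the user command in
* right after the qxl_release_info header */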
fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
if (unwritten) {
DRM_ERROR("got unwritten %d\n", unwritten);
qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
return -EFAULT;
}
for (i = 0 ; i < user_cmd.relocs_num; ++i) {
if (DRM_COPY_FROM_USER(&reloc,
&((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i],
sizeof(reloc))) {
qxl_bo_list_unreserve(&reloc_list, true);
qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
return -EFAULT;
}
/* add the bos to the list of bos to validate -
need to validate first then process relocs? */
if (reloc.dst_handle) {
reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
reloc.dst_handle, &reloc_list);
if (!reloc_dst_bo) {
qxl_bo_list_unreserve(&reloc_list, true);
qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
return -EINVAL;
}
reloc_dst_offset = 0;
} else {
reloc_dst_bo = cmd_bo;
reloc_dst_offset = release->release_offset;
}
/* reserve and validate the reloc dst bo */
if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
reloc_src_bo =
qxlhw_handle_to_bo(qdev, file_priv,
reloc.src_handle, &reloc_list);
if (!reloc_src_bo) {
if (reloc_dst_bo != cmd_bo)
drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
qxl_bo_list_unreserve(&reloc_list, true);
qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
return -EINVAL;
}
} else
reloc_src_bo = NULL;
if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
reloc_src_bo, reloc.src_offset);
} else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
} else {
DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
return -EINVAL;
}
if (reloc_src_bo && reloc_src_bo != cmd_bo) {
qxl_release_add_res(qdev, release, reloc_src_bo);
drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
}
if (reloc_dst_bo != cmd_bo)
drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
}
qxl_fence_releaseable(qdev, release);
ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
if (ret == -ERESTARTSYS) {
qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
qxl_bo_list_unreserve(&reloc_list, true);
return ret;
}
qxl_release_unreserve(qdev, release);
}
qxl_bo_list_unreserve(&reloc_list, 0);
return 0;
}
static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_update_area *update_area = data;
struct qxl_rect area = {.left = update_area->left,
.top = update_area->top,
.right = update_area->right,
.bottom = update_area->bottom};
int ret;
struct drm_gem_object *gobj = NULL;
struct qxl_bo *qobj = NULL;
if (update_area->left >= update_area->right ||
update_area->top >= update_area->bottom)
return -EINVAL;
gobj = drm_gem_object_lookup(dev, file, update_area->handle);
if (gobj == NULL)
return -ENOENT;
qobj = gem_to_qxl_bo(gobj);
ret = qxl_bo_reserve(qobj, false);
if (ret)
goto out;
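/* If the bo is not pinned, re-apply its placement and validate it
* through TTM before issuing the update */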
if (!qobj->pin_count) {
qxl_ttm_placement_from_domain(qobj, qobj->type);
ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
true, false);
if (unlikely(ret))
goto out;
}
ret = qxl_bo_check_id(qdev, qobj);
if (ret)
goto out2;
if (!qobj->surface_id)
DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
ret = qxl_io_update_area(qdev, qobj, &area);
out2:
qxl_bo_unreserve(qobj);
out:
drm_gem_object_unreference_unlocked(gobj);
return ret;
}
static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_getparam *param = data;
switch (param->param) {
case QXL_PARAM_NUM_SURFACES:
param->value = qdev->rom->n_surfaces;
break;
case QXL_PARAM_MAX_RELOCS:
param->value = QXL_MAX_RES;
break;
default:
return -EINVAL;
}
return 0;
}
static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_clientcap *param = data;
int byte, idx;
byte = param->index / 8;
idx = param->index % 8;
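/* Client capabilities are only exposed by revision 4+ devices;
* the ROM bitmap checked below covers 58 bytes */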
if (qdev->pdev->revision < 4)
return -ENOSYS;
if (byte >= 58)
return -ENOSYS;
if (qdev->rom->client_capabilities[byte] & (1 << idx))
return 0;
return -ENOSYS;
}
static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_alloc_surf *param = data;
struct qxl_bo *qobj;
int handle;
int ret;
int size, actual_stride;
struct qxl_surface surf;
/* work out the size, then allocate a bo with a handle */
actual_stride = param->stride < 0 ? -param->stride : param->stride;
size = actual_stride * param->height + actual_stride;
surf.format = param->format;
surf.width = param->width;
surf.height = param->height;
surf.stride = param->stride;
surf.data = 0;
ret = qxl_gem_object_create_with_handle(qdev, file,
QXL_GEM_DOMAIN_SURFACE,
size,
&surf,
&qobj, &handle);
if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n",
__func__, ret);
return -ENOMEM;
} else
param->handle = handle;
return ret;
}
struct drm_ioctl_desc qxl_ioctls[] = {
DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
DRM_AUTH|DRM_UNLOCKED),
};
int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
| gpl-2.0 |
javifo/nameless_kernel_samsung_smdk4412 | arch/arm/mach-at91/board-eb9200.c | 2292 | 3368 | /*
* linux/arch/arm/mach-at91/board-eb9200.c
*
* Copyright (C) 2005 SAN People, adapted for ATEB9200 from Embest
* by Andrew Patrikalakis
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device.h>
#include <mach/hardware.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/board.h>
#include <mach/gpio.h>
#include "generic.h"
static void __init eb9200_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91rm9200_initialize(18432000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART1 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
at91_register_uart(AT91RM9200_ID_US1, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
| ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
| ATMEL_UART_RI);
/* USART2 on ttyS2. (Rx, Tx) - IRDA */
at91_register_uart(AT91RM9200_ID_US2, 2, 0);
/* set serial console to ttyS0 (ie, DBGU) */
at91_set_serial_console(0);
}
static void __init eb9200_init_irq(void)
{
at91rm9200_init_interrupts(NULL);
}
static struct at91_eth_data __initdata eb9200_eth_data = {
.phy_irq_pin = AT91_PIN_PC4,
.is_rmii = 1,
};
static struct at91_usbh_data __initdata eb9200_usbh_data = {
.ports = 2,
};
static struct at91_udc_data __initdata eb9200_udc_data = {
.vbus_pin = AT91_PIN_PD4,
.pullup_pin = AT91_PIN_PD5,
};
static struct at91_cf_data __initdata eb9200_cf_data = {
.det_pin = AT91_PIN_PB0,
.rst_pin = AT91_PIN_PC5,
// .irq_pin = ... not connected
// .vcc_pin = ... always powered
};
static struct at91_mmc_data __initdata eb9200_mmc_data = {
.slot_b = 0,
.wire4 = 1,
};
static struct i2c_board_info __initdata eb9200_i2c_devices[] = {
{
I2C_BOARD_INFO("24c512", 0x50),
},
};
static void __init eb9200_board_init(void)
{
/* Serial */
at91_add_device_serial();
/* Ethernet */
at91_add_device_eth(&eb9200_eth_data);
/* USB Host */
at91_add_device_usbh(&eb9200_usbh_data);
/* USB Device */
at91_add_device_udc(&eb9200_udc_data);
/* I2C */
at91_add_device_i2c(eb9200_i2c_devices, ARRAY_SIZE(eb9200_i2c_devices));
/* Compact Flash */
at91_add_device_cf(&eb9200_cf_data);
/* SPI */
at91_add_device_spi(NULL, 0);
/* MMC */
/* only supports 1 or 4 bit interface, not wired through to SPI */
at91_add_device_mmc(0, &eb9200_mmc_data);
}
MACHINE_START(ATEB9200, "Embest ATEB9200")
.timer = &at91rm9200_timer,
.map_io = at91rm9200_map_io,
.init_early = eb9200_init_early,
.init_irq = eb9200_init_irq,
.init_machine = eb9200_board_init,
MACHINE_END
| gpl-2.0 |
l0rdg3x/AK-OnePlusOne-CAF | arch/arm/mach-msm/qdsp6v2/pcm_out.c | 2548 | 11111 | /*
* Copyright (C) 2009 Google, Inc.
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/msm_audio.h>
#include <linux/slab.h>
#include <linux/wakelock.h>
#include <asm/atomic.h>
#include <sound/q6asm.h>
#include <sound/apr_audio.h>
#include <mach/debug_mm.h>
#include <mach/qdsp6v2/audio_dev_ctl.h>
#define MAX_BUF 2
#define BUFSZ (4800)
struct pcm {
struct mutex lock;
struct mutex write_lock;
spinlock_t dsp_lock;
wait_queue_head_t write_wait;
struct audio_client *ac;
uint32_t sample_rate;
uint32_t channel_count;
uint32_t buffer_size;
uint32_t buffer_count;
uint32_t rec_mode;
uint32_t stream_event;
uint32_t volume;
atomic_t out_count;
atomic_t out_enabled;
atomic_t out_opened;
atomic_t out_stopped;
atomic_t out_prefill;
struct wake_lock wakelock;
};
void pcm_out_cb(uint32_t opcode, uint32_t token,
uint32_t *payload, void *priv)
{
struct pcm *pcm = (struct pcm *) priv;
unsigned long flags;
spin_lock_irqsave(&pcm->dsp_lock, flags);
switch (opcode) {
case ASM_DATA_EVENT_WRITE_DONE:
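/* The DSP finished consuming a buffer: return it to the free count
* and wake any blocked writer */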
atomic_inc(&pcm->out_count);
wake_up(&pcm->write_wait);
break;
case RESET_EVENTS:
reset_device();
break;
default:
break;
}
spin_unlock_irqrestore(&pcm->dsp_lock, flags);
}
static void audio_prevent_sleep(struct pcm *audio)
{
pr_debug("%s:\n", __func__);
wake_lock(&audio->wakelock);
}
static void audio_allow_sleep(struct pcm *audio)
{
pr_debug("%s:\n", __func__);
wake_unlock(&audio->wakelock);
}
static int pcm_out_enable(struct pcm *pcm)
{
if (atomic_read(&pcm->out_enabled))
return 0;
return q6asm_run(pcm->ac, 0, 0, 0);
}
static int pcm_out_disable(struct pcm *pcm)
{
int rc = 0;
if (atomic_read(&pcm->out_opened)) {
atomic_set(&pcm->out_enabled, 0);
atomic_set(&pcm->out_opened, 0);
rc = q6asm_cmd(pcm->ac, CMD_CLOSE);
atomic_set(&pcm->out_stopped, 1);
wake_up(&pcm->write_wait);
}
return rc;
}
static int config(struct pcm *pcm)
{
int rc = 0;
if (!atomic_read(&pcm->out_prefill)) {
pr_debug("%s: pcm prefill\n", __func__);
rc = q6asm_audio_client_buf_alloc(IN, pcm->ac,
pcm->buffer_size, pcm->buffer_count);
if (rc < 0) {
pr_err("Audio Start: Buffer Allocation failed \
rc = %d\n", rc);
goto fail;
}
rc = q6asm_media_format_block_pcm(pcm->ac, pcm->sample_rate,
pcm->channel_count);
if (rc < 0)
pr_err("%s: CMD Format block failed\n", __func__);
atomic_set(&pcm->out_prefill, 1);
atomic_set(&pcm->out_count, pcm->buffer_count);
}
fail:
return rc;
}
static void pcm_event_listner(u32 evt_id, union auddev_evt_data *evt_payload,
void *private_data)
{
struct pcm *pcm = (struct pcm *) private_data;
int rc = 0;
switch (evt_id) {
case AUDDEV_EVT_STREAM_VOL_CHG:
pcm->volume = evt_payload->session_vol;
pr_debug("%s: AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d, "
"enabled = %d\n", __func__, pcm->volume,
atomic_read(&pcm->out_enabled));
if (atomic_read(&pcm->out_enabled)) {
if (pcm->ac) {
rc = q6asm_set_volume(pcm->ac, pcm->volume);
if (rc < 0)
pr_err("%s: Send Volume command"
"failed rc=%d\n", __func__, rc);
}
}
break;
default:
pr_err("%s:ERROR:wrong event\n", __func__);
break;
}
}
static long pcm_out_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct pcm *pcm = file->private_data;
int rc = 0;
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
memset(&stats, 0, sizeof(stats));
if (copy_to_user((void *) arg, &stats, sizeof(stats)))
return -EFAULT;
return 0;
}
mutex_lock(&pcm->lock);
switch (cmd) {
case AUDIO_SET_VOLUME: {
int vol;
if (copy_from_user(&vol, (void *) arg, sizeof(vol))) {
rc = -EFAULT;
break;
}
break;
}
case AUDIO_START: {
pr_info("%s: AUDIO_START\n", __func__);
rc = config(pcm);
if (rc) {
pr_err("%s: Out Configuration failed\n", __func__);
rc = -EFAULT;
break;
}
rc = pcm_out_enable(pcm);
if (rc) {
pr_err("Out enable failed\n");
rc = -EFAULT;
break;
}
audio_prevent_sleep(pcm);
atomic_set(&pcm->out_enabled, 1);
rc = q6asm_set_volume(pcm->ac, pcm->volume);
if (rc < 0)
pr_err("%s: Send Volume command failed rc=%d\n",
__func__, rc);
rc = q6asm_set_lrgain(pcm->ac, 0x2000, 0x2000);
if (rc < 0)
pr_err("%s: Send channel gain failed rc=%d\n",
__func__, rc);
/* disable mute by default */
rc = q6asm_set_mute(pcm->ac, 0);
if (rc < 0)
pr_err("%s: Send mute command failed rc=%d\n",
__func__, rc);
break;
}
case AUDIO_GET_SESSION_ID: {
if (copy_to_user((void *) arg, &pcm->ac->session,
sizeof(unsigned short)))
rc = -EFAULT;
break;
}
case AUDIO_STOP:
break;
case AUDIO_FLUSH:
break;
case AUDIO_SET_CONFIG: {
struct msm_audio_config config;
pr_debug("%s: AUDIO_SET_CONFIG\n", __func__);
if (copy_from_user(&config, (void *) arg, sizeof(config))) {
rc = -EFAULT;
break;
}
if (config.channel_count < 1 || config.channel_count > 2) {
rc = -EINVAL;
break;
}
if (config.sample_rate < 8000 || config.sample_rate > 48000) {
rc = -EINVAL;
break;
}
if (config.buffer_size < 128) {
rc = -EINVAL;
break;
}
pcm->sample_rate = config.sample_rate;
pcm->channel_count = config.channel_count;
pcm->buffer_size = config.buffer_size;
pcm->buffer_count = config.buffer_count;
pr_debug("%s:buffer_size:%d buffer_count:%d sample_rate:%d \
channel_count:%d\n", __func__, pcm->buffer_size,
pcm->buffer_count, pcm->sample_rate,
pcm->channel_count);
break;
}
case AUDIO_GET_CONFIG: {
struct msm_audio_config config;
pr_debug("%s: AUDIO_GET_CONFIG\n", __func__);
memset(&config, 0, sizeof(config));
config.buffer_size = pcm->buffer_size;
config.buffer_count = pcm->buffer_count;
config.sample_rate = pcm->sample_rate;
config.channel_count = pcm->channel_count;
config.unused[0] = 0;
config.unused[1] = 0;
config.unused[2] = 0;
if (copy_to_user((void *) arg, &config, sizeof(config)))
rc = -EFAULT;
break;
}
case AUDIO_SET_EQ: {
struct msm_audio_eq_stream_config eq_config;
if (copy_from_user(&eq_config, (void *) arg,
sizeof(eq_config))) {
rc = -EFAULT;
break;
}
rc = q6asm_equalizer(pcm->ac, (void *) &eq_config);
if (rc < 0)
pr_err("%s: EQUALIZER FAILED\n", __func__);
break;
}
default:
rc = -EINVAL;
}
mutex_unlock(&pcm->lock);
return rc;
}
static int pcm_out_open(struct inode *inode, struct file *file)
{
struct pcm *pcm;
int rc = 0;
char name[24];
pr_info("[%s:%s] open\n", __MM_FILE__, __func__);
pcm = kzalloc(sizeof(struct pcm), GFP_KERNEL);
if (!pcm) {
pr_err("%s: Failed to allocated memory\n", __func__);
return -ENOMEM;
}
pcm->channel_count = 2;
pcm->sample_rate = 44100;
pcm->buffer_size = BUFSZ;
pcm->buffer_count = MAX_BUF;
pcm->stream_event = AUDDEV_EVT_STREAM_VOL_CHG;
pcm->volume = 0x2000;
pcm->ac = q6asm_audio_client_alloc((app_cb)pcm_out_cb, (void *)pcm);
if (!pcm->ac) {
pr_err("%s: Could not allocate memory\n", __func__);
rc = -ENOMEM;
goto fail;
}
rc = q6asm_open_write(pcm->ac, FORMAT_LINEAR_PCM);
if (rc < 0) {
pr_err("%s: pcm out open failed for session %d\n", __func__,
pcm->ac->session);
rc = -EINVAL;
goto fail;
}
mutex_init(&pcm->lock);
mutex_init(&pcm->write_lock);
init_waitqueue_head(&pcm->write_wait);
spin_lock_init(&pcm->dsp_lock);
atomic_set(&pcm->out_enabled, 0);
atomic_set(&pcm->out_stopped, 0);
atomic_set(&pcm->out_count, pcm->buffer_count);
atomic_set(&pcm->out_prefill, 0);
atomic_set(&pcm->out_opened, 1);
snprintf(name, sizeof name, "audio_pcm_%x", pcm->ac->session);
wake_lock_init(&pcm->wakelock, WAKE_LOCK_SUSPEND, name);
rc = auddev_register_evt_listner(pcm->stream_event,
AUDDEV_CLNT_DEC,
pcm->ac->session,
pcm_event_listner,
(void *)pcm);
if (rc < 0) {
pr_err("%s: failed to register listner\n", __func__);
goto fail;
}
file->private_data = pcm;
pr_info("[%s:%s] open session id[%d]\n", __MM_FILE__,
__func__, pcm->ac->session);
return 0;
fail:
if (pcm->ac)
q6asm_audio_client_free(pcm->ac);
kfree(pcm);
return rc;
}
static ssize_t pcm_out_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
struct pcm *pcm = file->private_data;
const char __user *start = buf;
int xfer;
char *bufptr;
uint32_t idx;
void *data;
int rc = 0;
uint32_t size;
if (!pcm->ac)
return -ENODEV;
if (!atomic_read(&pcm->out_enabled)) {
rc = config(pcm);
if (rc < 0)
return rc;
}
mutex_lock(&pcm->write_lock);
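/* Wait for a free DSP buffer (or a stop), then copy up to BUFSZ bytes
* from userspace and queue them to the audio client */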
while (count > 0) {
rc = wait_event_timeout(pcm->write_wait,
(atomic_read(&pcm->out_count) ||
atomic_read(&pcm->out_stopped)), 1 * HZ);
if (!rc) {
pr_err("%s: wait_event_timeout failed for session %d\n",
__func__, pcm->ac->session);
goto fail;
}
if (atomic_read(&pcm->out_stopped) &&
!atomic_read(&pcm->out_count)) {
pr_info("%s: pcm stopped out_count 0\n", __func__);
mutex_unlock(&pcm->write_lock);
return 0;
}
data = q6asm_is_cpu_buf_avail(IN, pcm->ac, &size, &idx);
bufptr = data;
if (bufptr) {
xfer = count;
if (xfer > BUFSZ)
xfer = BUFSZ;
if (copy_from_user(bufptr, buf, xfer)) {
rc = -EFAULT;
goto fail;
}
buf += xfer;
count -= xfer;
rc = q6asm_write(pcm->ac, xfer, 0, 0, NO_TIMESTAMP);
wmb();
if (rc < 0) {
rc = -EFAULT;
goto fail;
}
}
atomic_dec(&pcm->out_count);
}
rc = buf - start;
fail:
mutex_unlock(&pcm->write_lock);
return rc;
}
static int pcm_out_release(struct inode *inode, struct file *file)
{
struct pcm *pcm = file->private_data;
pr_info("[%s:%s] release session id[%d]\n", __MM_FILE__,
__func__, pcm->ac->session);
if (pcm->ac)
pcm_out_disable(pcm);
msm_clear_session_id(pcm->ac->session);
auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, pcm->ac->session);
q6asm_audio_client_free(pcm->ac);
audio_allow_sleep(pcm);
wake_lock_destroy(&pcm->wakelock);
mutex_destroy(&pcm->lock);
mutex_destroy(&pcm->write_lock);
kfree(pcm);
return 0;
}
static const struct file_operations pcm_out_fops = {
.owner = THIS_MODULE,
.open = pcm_out_open,
.write = pcm_out_write,
.release = pcm_out_release,
.unlocked_ioctl = pcm_out_ioctl,
};
struct miscdevice pcm_out_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "msm_pcm_out",
.fops = &pcm_out_fops,
};
static int __init pcm_out_init(void)
{
return misc_register(&pcm_out_misc);
}
device_initcall(pcm_out_init);
| gpl-2.0 |
damienyong/Kernel-3.0.8 | kernel/drivers/hwmon/lm92.c | 4084 | 13438 | /*
* lm92 - Hardware monitoring driver
* Copyright (C) 2005-2008 Jean Delvare <khali@linux-fr.org>
*
* Based on the lm90 driver, with some ideas taken from the lm_sensors
* lm92 driver as well.
*
* The LM92 is a sensor chip made by National Semiconductor. It reports
* its own temperature with a 0.0625 deg resolution and a 0.33 deg
* accuracy. Complete datasheet can be obtained from National's website
* at:
* http://www.national.com/pf/LM/LM92.html
*
* This driver also supports the MAX6635 sensor chip made by Maxim.
* This chip is compatible with the LM92, but has a lesser accuracy
* (1.0 deg). Complete datasheet can be obtained from Maxim's website
* at:
* http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3074
*
* Since the LM92 was the first chipset supported by this driver, most
* comments will refer to this chipset, but are actually general and
* concern all supported chipsets, unless mentioned otherwise.
*
* Support could easily be added for the National Semiconductor LM76
* and Maxim MAX6633 and MAX6634 chips, which are mostly compatible
* with the LM92.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
/* The LM92 and MAX6635 have 2 two-state pins for address selection,
resulting in 4 possible addresses. */
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
I2C_CLIENT_END };
/* The LM92 registers */
#define LM92_REG_CONFIG 0x01 /* 8-bit, RW */
#define LM92_REG_TEMP 0x00 /* 16-bit, RO */
#define LM92_REG_TEMP_HYST 0x02 /* 16-bit, RW */
#define LM92_REG_TEMP_CRIT 0x03 /* 16-bit, RW */
#define LM92_REG_TEMP_LOW 0x04 /* 16-bit, RW */
#define LM92_REG_TEMP_HIGH 0x05 /* 16-bit, RW */
#define LM92_REG_MAN_ID 0x07 /* 16-bit, RO, LM92 only */
/* The LM92 uses signed 13-bit values with LSB = 0.0625 degree Celsius,
left-justified in 16-bit registers. No rounding is done, with such
a resolution it's just not worth it. Note that the MAX6635 doesn't
make use of the 4 lower bits for limits (i.e. effective resolution
for limits is 1 degree Celsius). */
static inline int TEMP_FROM_REG(s16 reg)
{
return reg / 8 * 625 / 10;
}
static inline s16 TEMP_TO_REG(int val)
{
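/* Input is in millidegrees Celsius; clamp to the -60..+160 degC range
* the register format can hold */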
if (val <= -60000)
return -60000 * 10 / 625 * 8;
if (val >= 160000)
return 160000 * 10 / 625 * 8;
return val * 10 / 625 * 8;
}
/* Alarm flags are stored in the 3 LSB of the temperature register */
static inline u8 ALARMS_FROM_REG(s16 reg)
{
return reg & 0x0007;
}
/* Driver data (common to all clients) */
static struct i2c_driver lm92_driver;
/* Client data (each client gets its own) */
struct lm92_data {
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
s16 temp1_input, temp1_crit, temp1_min, temp1_max, temp1_hyst;
};
/*
* Sysfs attributes and callback functions
*/
static struct lm92_data *lm92_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm92_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ)
|| !data->valid) {
dev_dbg(&client->dev, "Updating lm92 data\n");
data->temp1_input = swab16(i2c_smbus_read_word_data(client,
LM92_REG_TEMP));
data->temp1_hyst = swab16(i2c_smbus_read_word_data(client,
LM92_REG_TEMP_HYST));
data->temp1_crit = swab16(i2c_smbus_read_word_data(client,
LM92_REG_TEMP_CRIT));
data->temp1_min = swab16(i2c_smbus_read_word_data(client,
LM92_REG_TEMP_LOW));
data->temp1_max = swab16(i2c_smbus_read_word_data(client,
LM92_REG_TEMP_HIGH));
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
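/*
 * Editor's note (illustrative): the function above refreshes the cached
 * register snapshot at most once per second (last_updated + HZ), so sysfs
 * reads arriving within that window all reuse the same values under
 * update_lock; only the first read after the interval triggers the five
 * SMBus word transfers.
 */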
#define show_temp(value) \
static ssize_t show_##value(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct lm92_data *data = lm92_update_device(dev); \
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->value)); \
}
show_temp(temp1_input);
show_temp(temp1_crit);
show_temp(temp1_min);
show_temp(temp1_max);
#define set_temp(value, reg) \
static ssize_t set_##value(struct device *dev, struct device_attribute *attr, const char *buf, \
size_t count) \
{ \
struct i2c_client *client = to_i2c_client(dev); \
struct lm92_data *data = i2c_get_clientdata(client); \
long val = simple_strtol(buf, NULL, 10); \
\
mutex_lock(&data->update_lock); \
data->value = TEMP_TO_REG(val); \
i2c_smbus_write_word_data(client, reg, swab16(data->value)); \
mutex_unlock(&data->update_lock); \
return count; \
}
set_temp(temp1_crit, LM92_REG_TEMP_CRIT);
set_temp(temp1_min, LM92_REG_TEMP_LOW);
set_temp(temp1_max, LM92_REG_TEMP_HIGH);
static ssize_t show_temp1_crit_hyst(struct device *dev, struct device_attribute *attr, char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp1_crit)
- TEMP_FROM_REG(data->temp1_hyst));
}
static ssize_t show_temp1_max_hyst(struct device *dev, struct device_attribute *attr, char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp1_max)
- TEMP_FROM_REG(data->temp1_hyst));
}
static ssize_t show_temp1_min_hyst(struct device *dev, struct device_attribute *attr, char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp1_min)
+ TEMP_FROM_REG(data->temp1_hyst));
}
static ssize_t set_temp1_crit_hyst(struct device *dev, struct device_attribute *attr, const char *buf,
size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm92_data *data = i2c_get_clientdata(client);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
/* Keep the cached value in register units so later hysteresis reads stay consistent */
data->temp1_hyst = TEMP_TO_REG(TEMP_FROM_REG(data->temp1_crit) - val);
i2c_smbus_write_word_data(client, LM92_REG_TEMP_HYST,
swab16(data->temp1_hyst));
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->temp1_input));
}
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", (data->temp1_input >> bitnr) & 1);
}
static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp1_input, NULL);
static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp1_crit,
set_temp1_crit);
static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp1_crit_hyst,
set_temp1_crit_hyst);
static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp1_min,
set_temp1_min);
static DEVICE_ATTR(temp1_min_hyst, S_IRUGO, show_temp1_min_hyst, NULL);
static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp1_max,
set_temp1_max);
static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp1_max_hyst, NULL);
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
/*
* Detection and registration
*/
static void lm92_init_client(struct i2c_client *client)
{
u8 config;
/* Start the conversions if needed */
config = i2c_smbus_read_byte_data(client, LM92_REG_CONFIG);
if (config & 0x01)
i2c_smbus_write_byte_data(client, LM92_REG_CONFIG,
config & 0xFE);
}
/* The MAX6635 has no identification register, so we have to use tricks
to identify it reliably. This is somewhat slow.
Note that we do NOT rely on the 2 MSB of the configuration register
always reading 0, as suggested by the datasheet, because it was once
reported not to be true. */
static int max6635_check(struct i2c_client *client)
{
u16 temp_low, temp_high, temp_hyst, temp_crit;
u8 conf;
int i;
/* No manufacturer ID register, so a read from this address will
always return the last read value. */
temp_low = i2c_smbus_read_word_data(client, LM92_REG_TEMP_LOW);
if (i2c_smbus_read_word_data(client, LM92_REG_MAN_ID) != temp_low)
return 0;
temp_high = i2c_smbus_read_word_data(client, LM92_REG_TEMP_HIGH);
if (i2c_smbus_read_word_data(client, LM92_REG_MAN_ID) != temp_high)
return 0;
/* Limits are stored as integer values (signed, 9-bit). */
if ((temp_low & 0x7f00) || (temp_high & 0x7f00))
return 0;
temp_hyst = i2c_smbus_read_word_data(client, LM92_REG_TEMP_HYST);
temp_crit = i2c_smbus_read_word_data(client, LM92_REG_TEMP_CRIT);
if ((temp_hyst & 0x7f00) || (temp_crit & 0x7f00))
return 0;
/* Register addresses were found to cycle over 16-byte boundaries.
We don't test all registers with all offsets so as to save some
reads and time, but this should still be sufficient to dismiss
non-MAX6635 chips. */
conf = i2c_smbus_read_byte_data(client, LM92_REG_CONFIG);
for (i=16; i<96; i*=2) {
if (temp_hyst != i2c_smbus_read_word_data(client,
LM92_REG_TEMP_HYST + i - 16)
|| temp_crit != i2c_smbus_read_word_data(client,
LM92_REG_TEMP_CRIT + i)
|| temp_low != i2c_smbus_read_word_data(client,
LM92_REG_TEMP_LOW + i + 16)
|| temp_high != i2c_smbus_read_word_data(client,
LM92_REG_TEMP_HIGH + i + 32)
|| conf != i2c_smbus_read_byte_data(client,
LM92_REG_CONFIG + i))
return 0;
}
return 1;
}
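/*
 * Editor's note (illustrative): in the loop above i takes the values 16, 32
 * and 64, so the limit registers and the configuration register are re-read
 * at a few addresses that are 16-byte multiples away from (or equal to)
 * their base address. A genuine MAX6635 mirrors its register set every 16
 * bytes, so all of these reads must match the reference values captured
 * earlier; a single mismatch is enough to rule the chip out.
 */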
static struct attribute *lm92_attributes[] = {
&dev_attr_temp1_input.attr,
&dev_attr_temp1_crit.attr,
&dev_attr_temp1_crit_hyst.attr,
&dev_attr_temp1_min.attr,
&dev_attr_temp1_min_hyst.attr,
&dev_attr_temp1_max.attr,
&dev_attr_temp1_max_hyst.attr,
&dev_attr_alarms.attr,
&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
NULL
};
static const struct attribute_group lm92_group = {
.attrs = lm92_attributes,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm92_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
u8 config;
u16 man_id;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
config = i2c_smbus_read_byte_data(new_client, LM92_REG_CONFIG);
man_id = i2c_smbus_read_word_data(new_client, LM92_REG_MAN_ID);
if ((config & 0xe0) == 0x00 && man_id == 0x0180)
pr_info("lm92: Found National Semiconductor LM92 chip\n");
else if (max6635_check(new_client))
pr_info("lm92: Found Maxim MAX6635 chip\n");
else
return -ENODEV;
strlcpy(info->type, "lm92", I2C_NAME_SIZE);
return 0;
}
static int lm92_probe(struct i2c_client *new_client,
const struct i2c_device_id *id)
{
struct lm92_data *data;
int err;
data = kzalloc(sizeof(struct lm92_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(new_client, data);
data->valid = 0;
mutex_init(&data->update_lock);
/* Initialize the chipset */
lm92_init_client(new_client);
/* Register sysfs hooks */
if ((err = sysfs_create_group(&new_client->dev.kobj, &lm92_group)))
goto exit_free;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
return 0;
exit_remove:
sysfs_remove_group(&new_client->dev.kobj, &lm92_group);
exit_free:
kfree(data);
exit:
return err;
}
static int lm92_remove(struct i2c_client *client)
{
struct lm92_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm92_group);
kfree(data);
return 0;
}
/*
* Module and driver stuff
*/
static const struct i2c_device_id lm92_id[] = {
{ "lm92", 0 },
/* max6635 could be added here */
{ }
};
MODULE_DEVICE_TABLE(i2c, lm92_id);
static struct i2c_driver lm92_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm92",
},
.probe = lm92_probe,
.remove = lm92_remove,
.id_table = lm92_id,
.detect = lm92_detect,
.address_list = normal_i2c,
};
static int __init sensors_lm92_init(void)
{
return i2c_add_driver(&lm92_driver);
}
static void __exit sensors_lm92_exit(void)
{
i2c_del_driver(&lm92_driver);
}
MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("LM92/MAX6635 driver");
MODULE_LICENSE("GPL");
module_init(sensors_lm92_init);
module_exit(sensors_lm92_exit);
| gpl-2.0 |
wulsic/Hyper_CM11 | drivers/hwmon/lm92.c | 4084 | 13438 | /*
* lm92 - Hardware monitoring driver
* Copyright (C) 2005-2008 Jean Delvare <khali@linux-fr.org>
*
* Based on the lm90 driver, with some ideas taken from the lm_sensors
* lm92 driver as well.
*
* The LM92 is a sensor chip made by National Semiconductor. It reports
* its own temperature with a 0.0625 deg resolution and a 0.33 deg
* accuracy. Complete datasheet can be obtained from National's website
* at:
* http://www.national.com/pf/LM/LM92.html
*
* This driver also supports the MAX6635 sensor chip made by Maxim.
* This chip is compatible with the LM92, but has a lesser accuracy
* (1.0 deg). Complete datasheet can be obtained from Maxim's website
* at:
* http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3074
*
* Since the LM92 was the first chipset supported by this driver, most
* comments will refer to this chipset, but are actually general and
* concern all supported chipsets, unless mentioned otherwise.
*
* Support could easily be added for the National Semiconductor LM76
* and Maxim MAX6633 and MAX6634 chips, which are mostly compatible
* with the LM92.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
/* The LM92 and MAX6635 have 2 two-state pins for address selection,
resulting in 4 possible addresses. */
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
I2C_CLIENT_END };
/* The LM92 registers */
#define LM92_REG_CONFIG 0x01 /* 8-bit, RW */
#define LM92_REG_TEMP 0x00 /* 16-bit, RO */
#define LM92_REG_TEMP_HYST 0x02 /* 16-bit, RW */
#define LM92_REG_TEMP_CRIT 0x03 /* 16-bit, RW */
#define LM92_REG_TEMP_LOW 0x04 /* 16-bit, RW */
#define LM92_REG_TEMP_HIGH 0x05 /* 16-bit, RW */
#define LM92_REG_MAN_ID 0x07 /* 16-bit, RO, LM92 only */
/* The LM92 uses signed 13-bit values with LSB = 0.0625 degree Celsius,
left-justified in 16-bit registers. No rounding is done, with such
a resolution it's just not worth it. Note that the MAX6635 doesn't
make use of the 4 lower bits for limits (i.e. effective resolution
for limits is 1 degree Celsius). */
static inline int TEMP_FROM_REG(s16 reg)
{
return reg / 8 * 625 / 10;
}
static inline s16 TEMP_TO_REG(int val)
{
if (val <= -60000)
return -60000 * 10 / 625 * 8;
if (val >= 160000)
return 160000 * 10 / 625 * 8;
return val * 10 / 625 * 8;
}
/* Alarm flags are stored in the 3 LSB of the temperature register */
static inline u8 ALARMS_FROM_REG(s16 reg)
{
return reg & 0x0007;
}
/* Driver data (common to all clients) */
static struct i2c_driver lm92_driver;
/* Client data (each client gets its own) */
struct lm92_data {
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
s16 temp1_input, temp1_crit, temp1_min, temp1_max, temp1_hyst;
};
/*
* Sysfs attributes and callback functions
*/
static struct lm92_data *lm92_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm92_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ)
|| !data->valid) {
dev_dbg(&client->dev, "Updating lm92 data\n");
data->temp1_input = swab16(i2c_smbus_read_word_data(client,
LM92_REG_TEMP));
data->temp1_hyst = swab16(i2c_smbus_read_word_data(client,
LM92_REG_TEMP_HYST));
data->temp1_crit = swab16(i2c_smbus_read_word_data(client,
LM92_REG_TEMP_CRIT));
data->temp1_min = swab16(i2c_smbus_read_word_data(client,
LM92_REG_TEMP_LOW));
data->temp1_max = swab16(i2c_smbus_read_word_data(client,
LM92_REG_TEMP_HIGH));
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
#define show_temp(value) \
static ssize_t show_##value(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct lm92_data *data = lm92_update_device(dev); \
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->value)); \
}
show_temp(temp1_input);
show_temp(temp1_crit);
show_temp(temp1_min);
show_temp(temp1_max);
#define set_temp(value, reg) \
static ssize_t set_##value(struct device *dev, struct device_attribute *attr, const char *buf, \
size_t count) \
{ \
struct i2c_client *client = to_i2c_client(dev); \
struct lm92_data *data = i2c_get_clientdata(client); \
long val = simple_strtol(buf, NULL, 10); \
\
mutex_lock(&data->update_lock); \
data->value = TEMP_TO_REG(val); \
i2c_smbus_write_word_data(client, reg, swab16(data->value)); \
mutex_unlock(&data->update_lock); \
return count; \
}
set_temp(temp1_crit, LM92_REG_TEMP_CRIT);
set_temp(temp1_min, LM92_REG_TEMP_LOW);
set_temp(temp1_max, LM92_REG_TEMP_HIGH);
static ssize_t show_temp1_crit_hyst(struct device *dev, struct device_attribute *attr, char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp1_crit)
- TEMP_FROM_REG(data->temp1_hyst));
}
static ssize_t show_temp1_max_hyst(struct device *dev, struct device_attribute *attr, char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp1_max)
- TEMP_FROM_REG(data->temp1_hyst));
}
static ssize_t show_temp1_min_hyst(struct device *dev, struct device_attribute *attr, char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp1_min)
+ TEMP_FROM_REG(data->temp1_hyst));
}
static ssize_t set_temp1_crit_hyst(struct device *dev, struct device_attribute *attr, const char *buf,
size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm92_data *data = i2c_get_clientdata(client);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
/* Keep the cached value in register units so later hysteresis reads stay consistent */
data->temp1_hyst = TEMP_TO_REG(TEMP_FROM_REG(data->temp1_crit) - val);
i2c_smbus_write_word_data(client, LM92_REG_TEMP_HYST,
swab16(data->temp1_hyst));
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->temp1_input));
}
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", (data->temp1_input >> bitnr) & 1);
}
static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp1_input, NULL);
static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp1_crit,
set_temp1_crit);
static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp1_crit_hyst,
set_temp1_crit_hyst);
static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp1_min,
set_temp1_min);
static DEVICE_ATTR(temp1_min_hyst, S_IRUGO, show_temp1_min_hyst, NULL);
static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp1_max,
set_temp1_max);
static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp1_max_hyst, NULL);
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
/*
* Detection and registration
*/
static void lm92_init_client(struct i2c_client *client)
{
u8 config;
/* Start the conversions if needed */
config = i2c_smbus_read_byte_data(client, LM92_REG_CONFIG);
if (config & 0x01)
i2c_smbus_write_byte_data(client, LM92_REG_CONFIG,
config & 0xFE);
}
/* The MAX6635 has no identification register, so we have to use tricks
to identify it reliably. This is somewhat slow.
Note that we do NOT rely on the 2 MSB of the configuration register
always reading 0, as suggested by the datasheet, because it was once
reported not to be true. */
static int max6635_check(struct i2c_client *client)
{
u16 temp_low, temp_high, temp_hyst, temp_crit;
u8 conf;
int i;
/* No manufacturer ID register, so a read from this address will
always return the last read value. */
temp_low = i2c_smbus_read_word_data(client, LM92_REG_TEMP_LOW);
if (i2c_smbus_read_word_data(client, LM92_REG_MAN_ID) != temp_low)
return 0;
temp_high = i2c_smbus_read_word_data(client, LM92_REG_TEMP_HIGH);
if (i2c_smbus_read_word_data(client, LM92_REG_MAN_ID) != temp_high)
return 0;
/* Limits are stored as integer values (signed, 9-bit). */
if ((temp_low & 0x7f00) || (temp_high & 0x7f00))
return 0;
temp_hyst = i2c_smbus_read_word_data(client, LM92_REG_TEMP_HYST);
temp_crit = i2c_smbus_read_word_data(client, LM92_REG_TEMP_CRIT);
if ((temp_hyst & 0x7f00) || (temp_crit & 0x7f00))
return 0;
/* Register addresses were found to cycle over 16-byte boundaries.
We don't test all registers with all offsets so as to save some
reads and time, but this should still be sufficient to dismiss
non-MAX6635 chips. */
conf = i2c_smbus_read_byte_data(client, LM92_REG_CONFIG);
for (i=16; i<96; i*=2) {
if (temp_hyst != i2c_smbus_read_word_data(client,
LM92_REG_TEMP_HYST + i - 16)
|| temp_crit != i2c_smbus_read_word_data(client,
LM92_REG_TEMP_CRIT + i)
|| temp_low != i2c_smbus_read_word_data(client,
LM92_REG_TEMP_LOW + i + 16)
|| temp_high != i2c_smbus_read_word_data(client,
LM92_REG_TEMP_HIGH + i + 32)
|| conf != i2c_smbus_read_byte_data(client,
LM92_REG_CONFIG + i))
return 0;
}
return 1;
}
static struct attribute *lm92_attributes[] = {
&dev_attr_temp1_input.attr,
&dev_attr_temp1_crit.attr,
&dev_attr_temp1_crit_hyst.attr,
&dev_attr_temp1_min.attr,
&dev_attr_temp1_min_hyst.attr,
&dev_attr_temp1_max.attr,
&dev_attr_temp1_max_hyst.attr,
&dev_attr_alarms.attr,
&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
NULL
};
static const struct attribute_group lm92_group = {
.attrs = lm92_attributes,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm92_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
u8 config;
u16 man_id;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
config = i2c_smbus_read_byte_data(new_client, LM92_REG_CONFIG);
man_id = i2c_smbus_read_word_data(new_client, LM92_REG_MAN_ID);
if ((config & 0xe0) == 0x00 && man_id == 0x0180)
pr_info("lm92: Found National Semiconductor LM92 chip\n");
else if (max6635_check(new_client))
pr_info("lm92: Found Maxim MAX6635 chip\n");
else
return -ENODEV;
strlcpy(info->type, "lm92", I2C_NAME_SIZE);
return 0;
}
static int lm92_probe(struct i2c_client *new_client,
const struct i2c_device_id *id)
{
struct lm92_data *data;
int err;
data = kzalloc(sizeof(struct lm92_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(new_client, data);
data->valid = 0;
mutex_init(&data->update_lock);
/* Initialize the chipset */
lm92_init_client(new_client);
/* Register sysfs hooks */
if ((err = sysfs_create_group(&new_client->dev.kobj, &lm92_group)))
goto exit_free;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
return 0;
exit_remove:
sysfs_remove_group(&new_client->dev.kobj, &lm92_group);
exit_free:
kfree(data);
exit:
return err;
}
static int lm92_remove(struct i2c_client *client)
{
struct lm92_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm92_group);
kfree(data);
return 0;
}
/*
* Module and driver stuff
*/
static const struct i2c_device_id lm92_id[] = {
{ "lm92", 0 },
/* max6635 could be added here */
{ }
};
MODULE_DEVICE_TABLE(i2c, lm92_id);
static struct i2c_driver lm92_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm92",
},
.probe = lm92_probe,
.remove = lm92_remove,
.id_table = lm92_id,
.detect = lm92_detect,
.address_list = normal_i2c,
};
static int __init sensors_lm92_init(void)
{
return i2c_add_driver(&lm92_driver);
}
static void __exit sensors_lm92_exit(void)
{
i2c_del_driver(&lm92_driver);
}
MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("LM92/MAX6635 driver");
MODULE_LICENSE("GPL");
module_init(sensors_lm92_init);
module_exit(sensors_lm92_exit);
| gpl-2.0 |
1N4148/agni | arch/mips/kernel/8250-platform.c | 4596 | 1091 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/serial_8250.h>
#define PORT(base, int) \
{ \
.iobase = base, \
.irq = int, \
.uartclk = 1843200, \
.iotype = UPIO_PORT, \
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
.regshift = 0, \
}
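/*
 * Editor's note (illustrative): PORT(0x3F8, 4) expands to a
 * plat_serial8250_port entry describing legacy COM1 (I/O port base 0x3F8,
 * IRQ 4) clocked at the standard 1.8432 MHz; the array below simply lists
 * the four conventional PC COM port locations, terminated by an empty entry.
 */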
static struct plat_serial8250_port uart8250_data[] = {
PORT(0x3F8, 4),
PORT(0x2F8, 3),
PORT(0x3E8, 4),
PORT(0x2E8, 3),
{ },
};
static struct platform_device uart8250_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = uart8250_data,
},
};
static int __init uart8250_init(void)
{
return platform_device_register(&uart8250_device);
}
module_init(uart8250_init);
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic 8250 UART probe driver");
| gpl-2.0 |
OptiPurity/kernel_lge_hammerhead | arch/powerpc/platforms/powermac/pci.c | 4596 | 38568 | /*
* Support for PCI bridges found on Power Macintoshes.
*
* Copyright (C) 2003-2005 Benjamin Herrenschmuidt (benh@kernel.crashing.org)
* Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/of_pci.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/grackle.h>
#include <asm/ppc-pci.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
/* XXX Could be per-controller, but I don't think we risk anything by
* assuming we won't have both UniNorth and Bandit */
static int has_uninorth;
#ifdef CONFIG_PPC64
static struct pci_controller *u3_agp;
#else
static int has_second_ohare;
#endif /* CONFIG_PPC64 */
extern int pcibios_assign_bus_offset;
struct device_node *k2_skiplist[2];
/*
* Magic constants for enabling cache coherency in the bandit/PSX bridge.
*/
#define BANDIT_DEVID_2 8
#define BANDIT_REVID 3
#define BANDIT_DEVNUM 11
#define BANDIT_MAGIC 0x50
#define BANDIT_COHERENT 0x40
static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
{
for (; node != 0;node = node->sibling) {
const int * bus_range;
const unsigned int *class_code;
int len;
/* For PCI<->PCI bridges or CardBus bridges, we go down */
class_code = of_get_property(node, "class-code", NULL);
if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
bus_range = of_get_property(node, "bus-range", &len);
if (bus_range != NULL && len > 2 * sizeof(int)) {
if (bus_range[1] > higher)
higher = bus_range[1];
}
higher = fixup_one_level_bus_range(node->child, higher);
}
return higher;
}
/* This routine fixes the "bus-range" property of all bridges in the
* system since they tend to have their "last" member wrong on macs
*
* Note that the bus numbers manipulated here are OF bus numbers, they
* are not Linux bus numbers.
*/
static void __init fixup_bus_range(struct device_node *bridge)
{
int *bus_range, len;
struct property *prop;
/* Lookup the "bus-range" property for the hose */
prop = of_find_property(bridge, "bus-range", &len);
if (prop == NULL || prop->length < 2 * sizeof(int))
return;
bus_range = prop->value;
bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
}
/*
* Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
*
* The "Bandit" version is present in all early PCI PowerMacs,
* and up to the first ones using Grackle. Some machines may
* have 2 bandit controllers (2 PCI busses).
*
* "Chaos" is used in some "Bandit"-type machines as a bridge
* for the separate display bus. It is accessed the same
* way as bandit, but cannot be probed for devices. It therefore
* has its own config access functions.
*
* The "UniNorth" version is present in all Core99 machines
* (iBook, G4, new IMacs, and all the recent Apple machines).
* It contains 3 controllers in one ASIC.
*
* The U3 is the bridge used on G5 machines. It contains an
* AGP bus which is dealt with the old UniNorth access routines
* and a HyperTransport bus which uses its own set of access
* functions.
*/
#define MACRISC_CFA0(devfn, off) \
((1 << (unsigned int)PCI_SLOT(dev_fn)) \
| (((unsigned int)PCI_FUNC(dev_fn)) << 8) \
| (((unsigned int)(off)) & 0xFCUL))
#define MACRISC_CFA1(bus, devfn, off) \
((((unsigned int)(bus)) << 16) \
|(((unsigned int)(devfn)) << 8) \
|(((unsigned int)(off)) & 0xFCUL) \
|1UL)
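/*
 * Editor's note (illustrative): for a device on the root bus, say slot 13
 * function 0 (dev_fn = 0x68) at offset 0x10, MACRISC_CFA0 yields
 * (1 << 13) | 0x10 = 0x2010, a type 0 cycle where the slot is selected by a
 * one-hot bit. Behind a bridge, MACRISC_CFA1(2, 0x08, 0x40) yields 0x20841,
 * with the low bit set to mark a type 1 cycle. Note that the MACRISC_CFA0
 * body uses the caller's dev_fn local rather than its devfn parameter name;
 * this works only because macrisc_cfg_access() below names its argument
 * dev_fn.
 */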
static volatile void __iomem *macrisc_cfg_access(struct pci_controller* hose,
u8 bus, u8 dev_fn, u8 offset)
{
unsigned int caddr;
if (bus == hose->first_busno) {
if (dev_fn < (11 << 3))
return NULL;
caddr = MACRISC_CFA0(dev_fn, offset);
} else
caddr = MACRISC_CFA1(bus, dev_fn, offset);
/* Uninorth will return garbage if we don't read back the value ! */
do {
out_le32(hose->cfg_addr, caddr);
} while (in_le32(hose->cfg_addr) != caddr);
offset &= has_uninorth ? 0x07 : 0x03;
return hose->cfg_data + offset;
}
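/*
 * Editor's note (illustrative): the write/read-back loop above works around
 * UniNorth returning garbage when cfg_addr is not read back before the
 * access. The final masking keeps only the lane within the cfg_data window,
 * which is 8 bytes wide on UniNorth (mask 0x07) and 4 bytes on Bandit/Chaos
 * (mask 0x03); e.g. offset 0x16 lands at cfg_data + 0x06 on UniNorth.
 */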
static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x100)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
*val = in_8(addr);
break;
case 2:
*val = in_le16(addr);
break;
default:
*val = in_le32(addr);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x100)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
out_8(addr, val);
break;
case 2:
out_le16(addr, val);
break;
default:
out_le32(addr, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops macrisc_pci_ops =
{
.read = macrisc_read_config,
.write = macrisc_write_config,
};
#ifdef CONFIG_PPC32
/*
* Verify that a specific (bus, dev_fn) exists on chaos
*/
static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
{
struct device_node *np;
const u32 *vendor, *device;
if (offset >= 0x100)
return PCIBIOS_BAD_REGISTER_NUMBER;
np = of_pci_find_child_device(bus->dev.of_node, devfn);
if (np == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
vendor = of_get_property(np, "vendor-id", NULL);
device = of_get_property(np, "device-id", NULL);
if (vendor == NULL || device == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10)
&& (offset != 0x14) && (offset != 0x18) && (offset <= 0x24))
return PCIBIOS_BAD_REGISTER_NUMBER;
return PCIBIOS_SUCCESSFUL;
}
static int
chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 *val)
{
int result = chaos_validate_dev(bus, devfn, offset);
if (result == PCIBIOS_BAD_REGISTER_NUMBER)
*val = ~0U;
if (result != PCIBIOS_SUCCESSFUL)
return result;
return macrisc_read_config(bus, devfn, offset, len, val);
}
static int
chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 val)
{
int result = chaos_validate_dev(bus, devfn, offset);
if (result != PCIBIOS_SUCCESSFUL)
return result;
return macrisc_write_config(bus, devfn, offset, len, val);
}
static struct pci_ops chaos_pci_ops =
{
.read = chaos_read_config,
.write = chaos_write_config,
};
static void __init setup_chaos(struct pci_controller *hose,
struct resource *addr)
{
/* assume a `chaos' bridge */
hose->ops = &chaos_pci_ops;
hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
}
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
/*
* These versions of U3 HyperTransport config space access ops do not
* implement self-view of the HT host yet
*/
/*
* This function deals with some "special cases" devices.
*
* 0 -> No special case
* 1 -> Skip the device but act as if the access was successful
* (return 0xff's on reads, eventually, cache config space
* accesses in a later version)
* -1 -> Hide the device (unsuccessful access)
*/
static int u3_ht_skip_device(struct pci_controller *hose,
struct pci_bus *bus, unsigned int devfn)
{
struct device_node *busdn, *dn;
int i;
/* We only allow config cycles to devices that are in OF device-tree
* as we are apparently having some weird things going on with some
* revs of K2 on recent G5s, except for the host bridge itself, which
* is missing from the tree but we know we can probe.
*/
if (bus->self)
busdn = pci_device_to_OF_node(bus->self);
else if (devfn == 0)
return 0;
else
busdn = hose->dn;
for (dn = busdn->child; dn; dn = dn->sibling)
if (PCI_DN(dn) && PCI_DN(dn)->devfn == devfn)
break;
if (dn == NULL)
return -1;
/*
* When a device in K2 is powered down, we die on config
* cycle accesses. Fix that here.
*/
for (i=0; i<2; i++)
if (k2_skiplist[i] == dn)
return 1;
return 0;
}
#define U3_HT_CFA0(devfn, off) \
((((unsigned int)devfn) << 8) | offset)
#define U3_HT_CFA1(bus, devfn, off) \
(U3_HT_CFA0(devfn, off) \
+ (((unsigned int)bus) << 16) \
+ 0x01000000UL)
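/*
 * Editor's note: U3_HT_CFA0() references the caller's local "offset" rather
 * than its "off" parameter; this works only because u3_ht_cfg_access() below
 * names its argument offset. In that helper, devfn 0 on the root bus is
 * routed to the bridge's own registers through cfg_addr with a word-indexed
 * offset (offset << 2) and big-endian accessors (*swap = 0); everything else
 * goes through the cfg_data window.
 */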
static void __iomem *u3_ht_cfg_access(struct pci_controller *hose, u8 bus,
u8 devfn, u8 offset, int *swap)
{
*swap = 1;
if (bus == hose->first_busno) {
if (devfn != 0)
return hose->cfg_data + U3_HT_CFA0(devfn, offset);
*swap = 0;
return ((void __iomem *)hose->cfg_addr) + (offset << 2);
} else
return hose->cfg_data + U3_HT_CFA1(bus, devfn, offset);
}
static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose;
void __iomem *addr;
int swap;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x100)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u3_ht_cfg_access(hose, bus->number, devfn, offset, &swap);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
switch (u3_ht_skip_device(hose, bus, devfn)) {
case 0:
break;
case 1:
switch (len) {
case 1:
*val = 0xff; break;
case 2:
*val = 0xffff; break;
default:
*val = 0xfffffffful; break;
}
return PCIBIOS_SUCCESSFUL;
default:
return PCIBIOS_DEVICE_NOT_FOUND;
}
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
*val = in_8(addr);
break;
case 2:
*val = swap ? in_le16(addr) : in_be16(addr);
break;
default:
*val = swap ? in_le32(addr) : in_be32(addr);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose;
void __iomem *addr;
int swap;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x100)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u3_ht_cfg_access(hose, bus->number, devfn, offset, &swap);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
switch (u3_ht_skip_device(hose, bus, devfn)) {
case 0:
break;
case 1:
return PCIBIOS_SUCCESSFUL;
default:
return PCIBIOS_DEVICE_NOT_FOUND;
}
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
out_8(addr, val);
break;
case 2:
swap ? out_le16(addr, val) : out_be16(addr, val);
break;
default:
swap ? out_le32(addr, val) : out_be32(addr, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops u3_ht_pci_ops =
{
.read = u3_ht_read_config,
.write = u3_ht_write_config,
};
#define U4_PCIE_CFA0(devfn, off) \
((1 << ((unsigned int)PCI_SLOT(dev_fn))) \
| (((unsigned int)PCI_FUNC(dev_fn)) << 8) \
| ((((unsigned int)(off)) >> 8) << 28) \
| (((unsigned int)(off)) & 0xfcU))
#define U4_PCIE_CFA1(bus, devfn, off) \
((((unsigned int)(bus)) << 16) \
|(((unsigned int)(devfn)) << 8) \
| ((((unsigned int)(off)) >> 8) << 28) \
|(((unsigned int)(off)) & 0xfcU) \
|1UL)
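/*
 * Editor's note (illustrative): compared with the MACRISC format, these
 * encodings also place bits 8-11 of the register offset into bits 28-31 of
 * the config address, which is what exposes the 4KB PCIe extended config
 * space; accordingly the accessors below reject offsets >= 0x1000 rather
 * than >= 0x100. U4_PCIE_CFA0 has the same dev_fn-versus-devfn naming quirk
 * as MACRISC_CFA0.
 */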
static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose,
u8 bus, u8 dev_fn, int offset)
{
unsigned int caddr;
if (bus == hose->first_busno) {
caddr = U4_PCIE_CFA0(dev_fn, offset);
} else
caddr = U4_PCIE_CFA1(bus, dev_fn, offset);
/* Uninorth will return garbage if we don't read back the value ! */
do {
out_le32(hose->cfg_addr, caddr);
} while (in_le32(hose->cfg_addr) != caddr);
offset &= 0x03;
return hose->cfg_data + offset;
}
static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x1000)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
*val = in_8(addr);
break;
case 2:
*val = in_le16(addr);
break;
default:
*val = in_le32(addr);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x1000)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
out_8(addr, val);
break;
case 2:
out_le16(addr, val);
break;
default:
out_le32(addr, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops u4_pcie_pci_ops =
{
.read = u4_pcie_read_config,
.write = u4_pcie_write_config,
};
static void __devinit pmac_pci_fixup_u4_of_node(struct pci_dev *dev)
{
/* Apple's device-tree "hides" the root complex virtual P2P bridge
* on U4. However, Linux sees it, causing the PCI <-> OF matching
* code to fail to properly match devices below it. This works around
* it by setting the node of the bridge to point to the PHB node,
* which is not entirely correct but fixes the matching code and
* doesn't break anything else. It's also the simplest possible fix.
*/
if (dev->dev.of_node == NULL)
dev->dev.of_node = pcibios_get_phb_of_node(dev->bus);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, 0x5b, pmac_pci_fixup_u4_of_node);
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
/*
* For a bandit bridge, turn on cache coherency if necessary.
* N.B. we could clean this up using the hose ops directly.
*/
static void __init init_bandit(struct pci_controller *bp)
{
unsigned int vendev, magic;
int rev;
/* read the word at offset 0 in config space for device 11 */
out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + PCI_VENDOR_ID);
udelay(2);
vendev = in_le32(bp->cfg_data);
if (vendev == (PCI_DEVICE_ID_APPLE_BANDIT << 16) +
PCI_VENDOR_ID_APPLE) {
/* read the revision id */
out_le32(bp->cfg_addr,
(1UL << BANDIT_DEVNUM) + PCI_REVISION_ID);
udelay(2);
rev = in_8(bp->cfg_data);
if (rev != BANDIT_REVID)
printk(KERN_WARNING
"Unknown revision %d for bandit\n", rev);
} else if (vendev != (BANDIT_DEVID_2 << 16) + PCI_VENDOR_ID_APPLE) {
printk(KERN_WARNING "bandit isn't? (%x)\n", vendev);
return;
}
/* read the word at offset 0x50 */
out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + BANDIT_MAGIC);
udelay(2);
magic = in_le32(bp->cfg_data);
if ((magic & BANDIT_COHERENT) != 0)
return;
magic |= BANDIT_COHERENT;
udelay(2);
out_le32(bp->cfg_data, magic);
printk(KERN_INFO "Cache coherency enabled for bandit/PSX\n");
}
/*
* Tweak the PCI-PCI bridge chip on the blue & white G3s.
*/
static void __init init_p2pbridge(void)
{
struct device_node *p2pbridge;
struct pci_controller* hose;
u8 bus, devfn;
u16 val;
/* XXX it would be better here to identify the specific
PCI-PCI bridge chip we have. */
p2pbridge = of_find_node_by_name(NULL, "pci-bridge");
if (p2pbridge == NULL
|| p2pbridge->parent == NULL
|| strcmp(p2pbridge->parent->name, "pci") != 0)
goto done;
if (pci_device_from_OF_node(p2pbridge, &bus, &devfn) < 0) {
DBG("Can't find PCI infos for PCI<->PCI bridge\n");
goto done;
}
/* Warning: At this point, we have not yet renumbered all busses.
* So we must use OF walking to find the hose
*/
hose = pci_find_hose_for_OF_device(p2pbridge);
if (!hose) {
DBG("Can't find hose for PCI<->PCI bridge\n");
goto done;
}
if (early_read_config_word(hose, bus, devfn,
PCI_BRIDGE_CONTROL, &val) < 0) {
printk(KERN_ERR "init_p2pbridge: couldn't read bridge"
" control\n");
goto done;
}
val &= ~PCI_BRIDGE_CTL_MASTER_ABORT;
early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val);
done:
of_node_put(p2pbridge);
}
static void __init init_second_ohare(void)
{
struct device_node *np = of_find_node_by_name(NULL, "pci106b,7");
unsigned char bus, devfn;
unsigned short cmd;
if (np == NULL)
return;
/* This must run before we initialize the PICs since the second
* ohare hosts a PIC that will be accessed there.
*/
if (pci_device_from_OF_node(np, &bus, &devfn) == 0) {
struct pci_controller* hose =
pci_find_hose_for_OF_device(np);
if (!hose) {
printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
of_node_put(np);
return;
}
early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
cmd &= ~PCI_COMMAND_IO;
early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
}
has_second_ohare = 1;
of_node_put(np);
}
/*
* Some Apple desktop machines have a NEC PD720100A USB2 controller
* on the motherboard. Open Firmware, on these, will disable the
* EHCI part of it so it behaves like a pair of OHCI's. This fixup
* code re-enables it ;)
*/
static void __init fixup_nec_usb2(void)
{
struct device_node *nec;
for (nec = NULL; (nec = of_find_node_by_name(nec, "usb")) != NULL;) {
struct pci_controller *hose;
u32 data;
const u32 *prop;
u8 bus, devfn;
prop = of_get_property(nec, "vendor-id", NULL);
if (prop == NULL)
continue;
if (0x1033 != *prop)
continue;
prop = of_get_property(nec, "device-id", NULL);
if (prop == NULL)
continue;
if (0x0035 != *prop)
continue;
prop = of_get_property(nec, "reg", NULL);
if (prop == NULL)
continue;
devfn = (prop[0] >> 8) & 0xff;
bus = (prop[0] >> 16) & 0xff;
if (PCI_FUNC(devfn) != 0)
continue;
hose = pci_find_hose_for_OF_device(nec);
if (!hose)
continue;
early_read_config_dword(hose, bus, devfn, 0xe4, &data);
if (data & 1UL) {
printk("Found NEC PD720100A USB2 chip with disabled"
" EHCI, fixing up...\n");
data &= ~1UL;
early_write_config_dword(hose, bus, devfn, 0xe4, data);
}
}
}
static void __init setup_bandit(struct pci_controller *hose,
struct resource *addr)
{
hose->ops = &macrisc_pci_ops;
hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
init_bandit(hose);
}
static int __init setup_uninorth(struct pci_controller *hose,
struct resource *addr)
{
pci_add_flags(PCI_REASSIGN_ALL_BUS);
has_uninorth = 1;
hose->ops = &macrisc_pci_ops;
hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
/* We "know" that the bridge at f2000000 has the PCI slots. */
return addr->start == 0xf2000000;
}
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
static void __init setup_u3_agp(struct pci_controller* hose)
{
/* On G5, we move AGP up to high bus number so we don't need
* to reassign bus numbers for HT. If we ever have P2P bridges
* on AGP, we'll have to move pci_assign_all_busses to the
* pci_controller structure so we enable it for AGP and not for
* HT children.
* We hard code the address because of the different size of
* the reg address cell, we shall fix that by killing struct
* reg_property and using some accessor functions instead
*/
hose->first_busno = 0xf0;
hose->last_busno = 0xff;
has_uninorth = 1;
hose->ops = &macrisc_pci_ops;
hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
u3_agp = hose;
}
static void __init setup_u4_pcie(struct pci_controller* hose)
{
/* We currently only implement the "non-atomic" config space, to
* be optimised later.
*/
hose->ops = &u4_pcie_pci_ops;
hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
/* The bus contains a bridge from root -> device, we need to
* make it visible on bus 0 so that we pick the right type
* of config cycles. If we didn't, we would have to force all
* config cycles to be type 1. So we override the "bus-range"
* property here
*/
hose->first_busno = 0x00;
hose->last_busno = 0xff;
}
static void __init parse_region_decode(struct pci_controller *hose,
u32 decode)
{
unsigned long base, end, next = -1;
int i, cur = -1;
/* Iterate through all bits. We ignore the last bit as this region is
* reserved for the ROM among other niceties
*/
for (i = 0; i < 31; i++) {
if ((decode & (0x80000000 >> i)) == 0)
continue;
if (i < 16) {
base = 0xf0000000 | (((u32)i) << 24);
end = base + 0x00ffffff;
} else {
base = ((u32)i-16) << 28;
end = base + 0x0fffffff;
}
if (base != next) {
if (++cur >= 3) {
printk(KERN_WARNING "PCI: Too many ranges !\n");
break;
}
hose->mem_resources[cur].flags = IORESOURCE_MEM;
hose->mem_resources[cur].name = hose->dn->full_name;
hose->mem_resources[cur].start = base;
hose->mem_resources[cur].end = end;
DBG(" %d: 0x%08lx-0x%08lx\n", cur, base, end);
} else {
DBG(" : -0x%08lx\n", end);
hose->mem_resources[cur].end = end;
}
next = end + 1;
}
}
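/*
 * Editor's note (illustrative): the decode register is scanned from the MSB
 * down. Set positions 0-15 map to 16MB windows at 0xf0000000 + i * 16MB
 * (e.g. i = 10 -> 0xfa000000..0xfaffffff), while positions 16-30 map to
 * 256MB windows at (i - 16) * 256MB (e.g. i = 20 -> 0x40000000..0x4fffffff).
 * Runs of adjacent windows are merged into a single resource by the
 * "base != next" test, subject to the 3-resource limit.
 */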
static void __init setup_u3_ht(struct pci_controller* hose)
{
struct device_node *np = hose->dn;
struct resource cfg_res, self_res;
u32 decode;
hose->ops = &u3_ht_pci_ops;
/* Get base addresses from OF tree
*/
if (of_address_to_resource(np, 0, &cfg_res) ||
of_address_to_resource(np, 1, &self_res)) {
printk(KERN_ERR "PCI: Failed to get U3/U4 HT resources !\n");
return;
}
/* Map external cfg space access into cfg_data and self registers
* into cfg_addr
*/
hose->cfg_data = ioremap(cfg_res.start, 0x02000000);
hose->cfg_addr = ioremap(self_res.start, resource_size(&self_res));
/*
* /ht node doesn't expose a "ranges" property, we read the register
* that controls the decoding logic and use that for memory regions.
* The IO region is hard coded since it is fixed in HW as well.
*/
hose->io_base_phys = 0xf4000000;
hose->pci_io_size = 0x00400000;
hose->io_resource.name = np->full_name;
hose->io_resource.start = 0;
hose->io_resource.end = 0x003fffff;
hose->io_resource.flags = IORESOURCE_IO;
hose->pci_mem_offset = 0;
hose->first_busno = 0;
hose->last_busno = 0xef;
/* Note: fix offset when cfg_addr becomes a void * */
decode = in_be32(hose->cfg_addr + 0x80);
DBG("PCI: Apple HT bridge decode register: 0x%08x\n", decode);
/* NOTE: The decode register setup is a bit weird... region
* 0xf8000000 for example is marked as enabled in there while it's
* actually the memory controller registers.
* That means that we are incorrectly attributing it to HT.
*
* In a similar vein, region 0xf4000000 is actually the HT IO space but
* also marked as enabled in here and 0xf9000000 is used by some other
* internal bits of the northbridge.
*
* Unfortunately, we can't just mask those bits out as we would end
* up with more regions than we can cope with (linux can only cope with
* 3 memory regions for a PHB at this stage).
*
* So for now, we just do a little hack. We happen to -know- that
* Apple firmware doesn't assign things below 0xfa000000 for that
* bridge anyway so we mask out all bits we don't want.
*/
decode &= 0x003fffff;
/* Now parse the resulting bits and build resources */
parse_region_decode(hose, decode);
}
#endif /* CONFIG_PPC64 */
/*
* We assume that if we have a G3 powermac, we have one bridge called
* "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
* if we have one or more bandit or chaos bridges, we don't have a MPC106.
*/
static int __init pmac_add_bridge(struct device_node *dev)
{
int len;
struct pci_controller *hose;
struct resource rsrc;
char *disp_name;
const int *bus_range;
int primary = 1, has_address = 0;
DBG("Adding PCI host bridge %s\n", dev->full_name);
/* Fetch host bridge registers address */
has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
/* Get bus range if any */
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get bus-range for %s, assume"
" bus 0\n", dev->full_name);
}
hose = pcibios_alloc_controller(dev);
if (!hose)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
disp_name = NULL;
/* 64 bits only bridges */
#ifdef CONFIG_PPC64
if (of_device_is_compatible(dev, "u3-agp")) {
setup_u3_agp(hose);
disp_name = "U3-AGP";
primary = 0;
} else if (of_device_is_compatible(dev, "u3-ht")) {
setup_u3_ht(hose);
disp_name = "U3-HT";
primary = 1;
} else if (of_device_is_compatible(dev, "u4-pcie")) {
setup_u4_pcie(hose);
disp_name = "U4-PCIE";
primary = 0;
}
printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number:"
" %d->%d\n", disp_name, hose->first_busno, hose->last_busno);
#endif /* CONFIG_PPC64 */
/* 32 bits only bridges */
#ifdef CONFIG_PPC32
if (of_device_is_compatible(dev, "uni-north")) {
primary = setup_uninorth(hose, &rsrc);
disp_name = "UniNorth";
} else if (strcmp(dev->name, "pci") == 0) {
/* XXX assume this is a mpc106 (grackle) */
setup_grackle(hose);
disp_name = "Grackle (MPC106)";
} else if (strcmp(dev->name, "bandit") == 0) {
setup_bandit(hose, &rsrc);
disp_name = "Bandit";
} else if (strcmp(dev->name, "chaos") == 0) {
setup_chaos(hose, &rsrc);
disp_name = "Chaos";
primary = 0;
}
printk(KERN_INFO "Found %s PCI host bridge at 0x%016llx. "
"Firmware bus number: %d->%d\n",
disp_name, (unsigned long long)rsrc.start, hose->first_busno,
hose->last_busno);
#endif /* CONFIG_PPC32 */
DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
hose, hose->cfg_addr, hose->cfg_data);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
pci_process_bridge_OF_ranges(hose, dev, primary);
/* Fixup "bus-range" OF property */
fixup_bus_range(dev);
return 0;
}
void __devinit pmac_pci_irq_fixup(struct pci_dev *dev)
{
#ifdef CONFIG_PPC32
/* Fixup interrupt for the modem/ethernet combo controller.
* on machines with a second ohare chip.
* The number in the device tree (27) is bogus (correct for
* the ethernet-only board but not the combo ethernet/modem
* board). The real interrupt is 28 on the second controller
* -> 28+32 = 60.
*/
if (has_second_ohare &&
dev->vendor == PCI_VENDOR_ID_DEC &&
dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) {
dev->irq = irq_create_mapping(NULL, 60);
irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
}
#endif /* CONFIG_PPC32 */
}
void __init pmac_pci_init(void)
{
struct device_node *np, *root;
struct device_node *ht = NULL;
pci_set_flags(PCI_CAN_SKIP_ISA_ALIGN);
root = of_find_node_by_path("/");
if (root == NULL) {
printk(KERN_CRIT "pmac_pci_init: can't find root "
"of device tree\n");
return;
}
for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
if (np->name == NULL)
continue;
if (strcmp(np->name, "bandit") == 0
|| strcmp(np->name, "chaos") == 0
|| strcmp(np->name, "pci") == 0) {
if (pmac_add_bridge(np) == 0)
of_node_get(np);
}
if (strcmp(np->name, "ht") == 0) {
of_node_get(np);
ht = np;
}
}
of_node_put(root);
#ifdef CONFIG_PPC64
/* Probe HT last as it relies on the agp resources to be already
* setup
*/
if (ht && pmac_add_bridge(ht) != 0)
of_node_put(ht);
/* Setup the linkage between OF nodes and PHBs */
pci_devs_phb_init();
/* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
* assume there is no P2P bridge on the AGP bus, which should be a
* safe assumption for now. We should do something better in the
* future though
*/
if (u3_agp) {
struct device_node *np = u3_agp->dn;
PCI_DN(np)->busno = 0xf0;
for (np = np->child; np; np = np->sibling)
PCI_DN(np)->busno = 0xf0;
}
/* pmac_check_ht_link(); */
#else /* CONFIG_PPC64 */
init_p2pbridge();
init_second_ohare();
fixup_nec_usb2();
/* We are still having some issues with the Xserve G4; enabling
* an offset between bus numbers and domains when we reassign
* all busses should help for now
*/
if (pci_has_flag(PCI_REASSIGN_ALL_BUS))
pcibios_assign_bus_offset = 0x10;
#endif
}
#ifdef CONFIG_PPC32
int pmac_pci_enable_device_hook(struct pci_dev *dev)
{
struct device_node* node;
int updatecfg = 0;
int uninorth_child;
node = pci_device_to_OF_node(dev);
/* We don't want to enable USB controllers absent from the OF tree
* (iBook second controller)
*/
if (dev->vendor == PCI_VENDOR_ID_APPLE
&& dev->class == PCI_CLASS_SERIAL_USB_OHCI
&& !node) {
printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n",
pci_name(dev));
return -EINVAL;
}
if (!node)
return 0;
uninorth_child = node->parent &&
of_device_is_compatible(node->parent, "uni-north");
/* Firewire & GMAC were disabled after PCI probe, the driver is
* claiming them, we must re-enable them now.
*/
if (uninorth_child && !strcmp(node->name, "firewire") &&
(of_device_is_compatible(node, "pci106b,18") ||
of_device_is_compatible(node, "pci106b,30") ||
of_device_is_compatible(node, "pci11c1,5811"))) {
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, node, 0, 1);
pmac_call_feature(PMAC_FTR_1394_ENABLE, node, 0, 1);
updatecfg = 1;
}
if (uninorth_child && !strcmp(node->name, "ethernet") &&
of_device_is_compatible(node, "gmac")) {
pmac_call_feature(PMAC_FTR_GMAC_ENABLE, node, 0, 1);
updatecfg = 1;
}
/*
* Fixup various header fields on 32 bits. We don't do that on
* 64 bits as some of these have strange values behind the HT
* bridge and we must not, for example, enable MWI or set the
* cache line size on them.
*/
if (updatecfg) {
u16 cmd;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
| PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 16);
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
L1_CACHE_BYTES >> 2);
}
return 0;
}
void __devinit pmac_pci_fixup_ohci(struct pci_dev *dev)
{
struct device_node *node = pci_device_to_OF_node(dev);
/* We don't want to assign resources to USB controllers
* absent from the OF tree (iBook second controller)
*/
if (dev->class == PCI_CLASS_SERIAL_USB_OHCI && !node)
dev->resource[0].flags = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_ANY_ID, pmac_pci_fixup_ohci);
/* We power down some devices after they have been probed. They'll
* be powered back on later on
*/
void __init pmac_pcibios_after_init(void)
{
struct device_node* nd;
for_each_node_by_name(nd, "firewire") {
if (nd->parent && (of_device_is_compatible(nd, "pci106b,18") ||
of_device_is_compatible(nd, "pci106b,30") ||
of_device_is_compatible(nd, "pci11c1,5811"))
&& of_device_is_compatible(nd->parent, "uni-north")) {
pmac_call_feature(PMAC_FTR_1394_ENABLE, nd, 0, 0);
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
}
}
for_each_node_by_name(nd, "ethernet") {
if (nd->parent && of_device_is_compatible(nd, "gmac")
&& of_device_is_compatible(nd->parent, "uni-north"))
pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
}
}
void pmac_pci_fixup_cardbus(struct pci_dev* dev)
{
if (!machine_is(powermac))
return;
/*
* Fix the interrupt routing on the various cardbus bridges
* used on powerbooks
*/
if (dev->vendor != PCI_VENDOR_ID_TI)
return;
if (dev->device == PCI_DEVICE_ID_TI_1130 ||
dev->device == PCI_DEVICE_ID_TI_1131) {
u8 val;
/* Enable PCI interrupt */
if (pci_read_config_byte(dev, 0x91, &val) == 0)
pci_write_config_byte(dev, 0x91, val | 0x30);
/* Disable ISA interrupt mode */
if (pci_read_config_byte(dev, 0x92, &val) == 0)
pci_write_config_byte(dev, 0x92, val & ~0x06);
}
if (dev->device == PCI_DEVICE_ID_TI_1210 ||
dev->device == PCI_DEVICE_ID_TI_1211 ||
dev->device == PCI_DEVICE_ID_TI_1410 ||
dev->device == PCI_DEVICE_ID_TI_1510) {
u8 val;
/* 0x8c == TI122X_IRQMUX, 2 says to route the INTA
signal out the MFUNC0 pin */
if (pci_read_config_byte(dev, 0x8c, &val) == 0)
pci_write_config_byte(dev, 0x8c, (val & ~0x0f) | 2);
/* Disable ISA interrupt mode */
if (pci_read_config_byte(dev, 0x92, &val) == 0)
pci_write_config_byte(dev, 0x92, val & ~0x06);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_ANY_ID, pmac_pci_fixup_cardbus);
void pmac_pci_fixup_pciata(struct pci_dev* dev)
{
u8 progif = 0;
/*
* On PowerMacs, we try to switch any PCI ATA controller to
* fully native mode
*/
if (!machine_is(powermac))
return;
/* Some controllers don't have the class IDE */
if (dev->vendor == PCI_VENDOR_ID_PROMISE)
switch(dev->device) {
case PCI_DEVICE_ID_PROMISE_20246:
case PCI_DEVICE_ID_PROMISE_20262:
case PCI_DEVICE_ID_PROMISE_20263:
case PCI_DEVICE_ID_PROMISE_20265:
case PCI_DEVICE_ID_PROMISE_20267:
case PCI_DEVICE_ID_PROMISE_20268:
case PCI_DEVICE_ID_PROMISE_20269:
case PCI_DEVICE_ID_PROMISE_20270:
case PCI_DEVICE_ID_PROMISE_20271:
case PCI_DEVICE_ID_PROMISE_20275:
case PCI_DEVICE_ID_PROMISE_20276:
case PCI_DEVICE_ID_PROMISE_20277:
goto good;
}
/* Others, check PCI class */
if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
return;
good:
pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
if ((progif & 5) != 5) {
printk(KERN_INFO "PCI: %s Forcing PCI IDE into native mode\n",
pci_name(dev));
(void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
(progif & 5) != 5)
printk(KERN_ERR "Rewrite of PROGIF failed !\n");
else {
/* Clear IO BARs, they will be reassigned */
pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0);
pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0);
pci_write_config_dword(dev, PCI_BASE_ADDRESS_2, 0);
pci_write_config_dword(dev, PCI_BASE_ADDRESS_3, 0);
}
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
#endif /* CONFIG_PPC32 */
/*
* Disable second function on K2-SATA, it's broken
* and disable IO BARs on first one
*/
static void fixup_k2_sata(struct pci_dev* dev)
{
int i;
u16 cmd;
if (PCI_FUNC(dev->devfn) > 0) {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
pci_write_config_word(dev, PCI_COMMAND, cmd);
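/* Zero both the kernel's view of the BARs (dev->resource[]) and the
 * hardware BARs themselves, so nothing is ever assigned to or mapped
 * through this disabled function.
 */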
for (i = 0; i < 6; i++) {
dev->resource[i].start = dev->resource[i].end = 0;
dev->resource[i].flags = 0;
pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i,
0);
}
} else {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd &= ~PCI_COMMAND_IO;
pci_write_config_word(dev, PCI_COMMAND, cmd);
for (i = 0; i < 5; i++) {
dev->resource[i].start = dev->resource[i].end = 0;
dev->resource[i].flags = 0;
pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i,
0);
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
/*
* On U4 (aka CPC945) the PCIe root complex "P2P" bridge resource ranges aren't
* configured by the firmware. The bridge itself seems to ignore them but it
* causes problems with Linux which then re-assigns devices below the bridge,
* thus changing addresses of those devices from what was in the device-tree,
* which sucks when those are video cards using offb
*
* We could just mark it transparent but I prefer fixing up the resources to
* properly show what's going on here, as I have some doubts about having them
* badly configured potentially being an issue for DMA.
*
* We leave PIO alone, it seems to be fine
*
* Oh and there's another funny bug. The OF properties advertise the region
* 0xf1000000..0xf1ffffff as being forwarded as memory space. But that's
* actually not true, this region is the memory mapped config space. So we
* also need to filter it out or we'll map things in the wrong place.
*/
static void fixup_u4_pcie(struct pci_dev* dev)
{
struct pci_controller *host = pci_bus_to_host(dev->bus);
struct resource *region = NULL;
u32 reg;
int i;
/* Only do that on PowerMac */
if (!machine_is(powermac))
return;
/* Find the largest MMIO region */
for (i = 0; i < 3; i++) {
struct resource *r = &host->mem_resources[i];
if (!(r->flags & IORESOURCE_MEM))
continue;
/* Skip the 0xf0xxxxxx..f2xxxxxx regions, we know they
* are reserved by HW for other things
*/
if (r->start >= 0xf0000000 && r->start < 0xf3000000)
continue;
if (!region || resource_size(r) > resource_size(region))
region = r;
}
/* Nothing found, bail */
if (!region)
return;
/* Print things out */
printk(KERN_INFO "PCI: Fixup U4 PCIe bridge range: %pR\n", region);
/* Fixup bridge config space. We know it's a Mac, resources aren't
* offset so let's just blast them as-is. We also know that they
* fit in 32 bits
*/
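/* Illustrative example (assumed values): for a region 0x90000000..0x9fffffff,
 * (start >> 16) & 0xfff0 = 0x9000 lands in PCI_MEMORY_BASE and
 * end & 0xfff00000 = 0x9ff00000 puts 0x9ff0 into PCI_MEMORY_LIMIT, so the
 * single dword write of 0x9ff09000 programs the bridge's non-prefetchable
 * window to 0x90000000-0x9fffffff (1MB granularity).
 */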
reg = ((region->start >> 16) & 0xfff0) | (region->end & 0xfff00000);
pci_write_config_dword(dev, PCI_MEMORY_BASE, reg);
pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0);
pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0);
pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_U4_PCIE, fixup_u4_pcie);
| gpl-2.0 |
Cl3Kener/UBER-L | drivers/acpi/acpica/evsci.c | 4852 | 6233 | /*******************************************************************************
*
* Module Name: evsci - System Control Interrupt configuration and
* legacy to ACPI mode state transition functions
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evsci")
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/* Local prototypes */
static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
/*******************************************************************************
*
* FUNCTION: acpi_ev_sci_xrupt_handler
*
* PARAMETERS: Context - Calling Context
*
* RETURN: Status code indicates whether interrupt was handled.
*
* DESCRIPTION: Interrupt handler that will figure out what function or
* control method to call to deal with a SCI.
*
******************************************************************************/
static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
{
struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
ACPI_FUNCTION_TRACE(ev_sci_xrupt_handler);
/*
* We are guaranteed by the ACPI CA initialization/shutdown code that
* if this interrupt handler is installed, ACPI is enabled.
*/
/*
* Fixed Events:
* Check for and dispatch any Fixed Events that have occurred
*/
interrupt_handled |= acpi_ev_fixed_event_detect();
/*
* General Purpose Events:
* Check for and dispatch any GPEs that have occurred
*/
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
return_UINT32(interrupt_handled);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_gpe_xrupt_handler
*
* PARAMETERS: Context - Calling Context
*
* RETURN: Status code indicates whether interrupt was handled.
*
* DESCRIPTION: Handler for GPE Block Device interrupts
*
******************************************************************************/
u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
{
struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
/*
* We are guaranteed by the ACPI CA initialization/shutdown code that
* if this interrupt handler is installed, ACPI is enabled.
*/
/* GPEs: Check for and dispatch any GPEs that have occurred */
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
return_UINT32(interrupt_handled);
}
/******************************************************************************
*
* FUNCTION: acpi_ev_install_sci_handler
*
* PARAMETERS: none
*
* RETURN: Status
*
* DESCRIPTION: Installs SCI handler.
*
******************************************************************************/
u32 acpi_ev_install_sci_handler(void)
{
u32 status = AE_OK;
ACPI_FUNCTION_TRACE(ev_install_sci_handler);
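/* The interrupt number comes from the FADT's SCI_INT field; the head of the
 * GPE interrupt-block list is passed as the handler context so that
 * acpi_ev_sci_xrupt_handler can run GPE detection from the SCI.
 */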
status =
acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
acpi_ev_sci_xrupt_handler,
acpi_gbl_gpe_xrupt_list_head);
return_ACPI_STATUS(status);
}
/******************************************************************************
*
* FUNCTION: acpi_ev_remove_sci_handler
*
* PARAMETERS: none
*
* RETURN: E_OK if handler uninstalled OK, E_ERROR if handler was not
* installed to begin with
*
* DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be
* taken.
*
* Note: It doesn't seem important to disable all events or set the event
* enable registers to their original values. The OS should disable
* the SCI interrupt level when the handler is removed, so no more
* events will come in.
*
******************************************************************************/
acpi_status acpi_ev_remove_sci_handler(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
/* Just let the OS remove the handler and disable the level */
status =
acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
acpi_ev_sci_xrupt_handler);
return_ACPI_STATUS(status);
}
#endif /* !ACPI_REDUCED_HARDWARE */
| gpl-2.0 |
googyanas/Googy-Max4-Kernel | drivers/scsi/aacraid/sa.c | 4852 | 10338 | /*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
* 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* sa.c
*
* Abstract: Drawbridge specific support functions
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>
#include "aacraid.h"
static irqreturn_t aac_sa_intr(int irq, void *dev_id)
{
struct aac_dev *dev = dev_id;
unsigned short intstat, mask;
intstat = sa_readw(dev, DoorbellReg_p);
/*
* Read mask and invert because drawbridge is reversed.
* This allows us to only service interrupts that have been enabled.
*/
mask = ~(sa_readw(dev, SaDbCSR.PRISETIRQMASK));
/* Check to see if this is our interrupt. If it isn't just return */
if (intstat & mask) {
if (intstat & PrintfReady) {
aac_printf(dev, sa_readl(dev, Mailbox5));
sa_writew(dev, DoorbellClrReg_p, PrintfReady); /* clear PrintfReady */
sa_writew(dev, DoorbellReg_s, PrintfDone);
} else if (intstat & DOORBELL_1) { // dev -> Host Normal Command Ready
sa_writew(dev, DoorbellClrReg_p, DOORBELL_1);
aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
} else if (intstat & DOORBELL_2) { // dev -> Host Normal Response Ready
sa_writew(dev, DoorbellClrReg_p, DOORBELL_2);
aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
} else if (intstat & DOORBELL_3) { // dev -> Host Normal Command Not Full
sa_writew(dev, DoorbellClrReg_p, DOORBELL_3);
} else if (intstat & DOORBELL_4) { // dev -> Host Normal Response Not Full
sa_writew(dev, DoorbellClrReg_p, DOORBELL_4);
}
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/**
* aac_sa_disable_interrupt - disable interrupt
* @dev: Which adapter to disable interrupts on.
*/
static void aac_sa_disable_interrupt (struct aac_dev *dev)
{
sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
}
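/* PRISETIRQMASK/PRICLEARIRQMASK appear to be a set/clear pair for the
 * doorbell interrupt mask: writing 1s to the SET register masks those
 * sources (as done above), while writing 1s to the CLEAR register unmasks
 * them (as done in aac_sa_enable_interrupt below). This also matches the
 * mask inversion performed in aac_sa_intr.
 */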
/**
* aac_sa_enable_interrupt - enable interrupt
* @dev: Which adapter to enable.
*/
static void aac_sa_enable_interrupt (struct aac_dev *dev)
{
sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 |
DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
}
/**
* aac_sa_notify_adapter - handle adapter notification
* @dev: Adapter that notification is for
* @event: Event to notify
*
* Notify the adapter of an event
*/
static void aac_sa_notify_adapter(struct aac_dev *dev, u32 event)
{
switch (event) {
case AdapNormCmdQue:
sa_writew(dev, DoorbellReg_s,DOORBELL_1);
break;
case HostNormRespNotFull:
sa_writew(dev, DoorbellReg_s,DOORBELL_4);
break;
case AdapNormRespQue:
sa_writew(dev, DoorbellReg_s,DOORBELL_2);
break;
case HostNormCmdNotFull:
sa_writew(dev, DoorbellReg_s,DOORBELL_3);
break;
case HostShutdown:
/*
sa_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL);
*/
break;
case FastIo:
sa_writew(dev, DoorbellReg_s,DOORBELL_6);
break;
case AdapPrintfDone:
sa_writew(dev, DoorbellReg_s,DOORBELL_5);
break;
default:
BUG();
break;
}
}
/**
* sa_sync_cmd - send a command and wait
* @dev: Adapter
* @command: Command to execute
* @p1: first command parameter (written to Mailbox 1)
* @p2: second command parameter (written to Mailbox 2)
* @p3: third command parameter (written to Mailbox 3)
* @p4: fourth command parameter (written to Mailbox 4)
* @p5: fifth command parameter (accepted but not passed to this adapter)
* @p6: sixth command parameter (accepted but not passed to this adapter)
* @ret: adapter status from Mailbox 0, if non-NULL
* @r1: extra return value from Mailbox 1, if non-NULL
* @r2: extra return value from Mailbox 2, if non-NULL
* @r3: extra return value from Mailbox 3, if non-NULL
* @r4: extra return value from Mailbox 4, if non-NULL
*
* This routine will send a synchronous command to the adapter and wait
* for its completion.
*/
static int sa_sync_cmd(struct aac_dev *dev, u32 command,
u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
u32 *ret, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
unsigned long start;
int ok;
/*
* Write the Command into Mailbox 0
*/
sa_writel(dev, Mailbox0, command);
/*
* Write the parameters into Mailboxes 1 - 4
*/
sa_writel(dev, Mailbox1, p1);
sa_writel(dev, Mailbox2, p2);
sa_writel(dev, Mailbox3, p3);
sa_writel(dev, Mailbox4, p4);
/*
* Clear the synch command doorbell to start on a clean slate.
*/
sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
/*
* Signal that there is a new synch command
*/
sa_writew(dev, DoorbellReg_s, DOORBELL_0);
ok = 0;
start = jiffies;
while(time_before(jiffies, start+30*HZ))
{
/*
* Delay 5uS so that the monitor gets access
*/
udelay(5);
/*
* Mon110 will set doorbell0 bit when it has
* completed the command.
*/
if(sa_readw(dev, DoorbellReg_p) & DOORBELL_0) {
ok = 1;
break;
}
msleep(1);
}
if (ok != 1)
return -ETIMEDOUT;
/*
* Clear the synch command doorbell.
*/
sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
/*
* Pull the synch status from Mailbox 0.
*/
if (ret)
*ret = sa_readl(dev, Mailbox0);
if (r1)
*r1 = sa_readl(dev, Mailbox1);
if (r2)
*r2 = sa_readl(dev, Mailbox2);
if (r3)
*r3 = sa_readl(dev, Mailbox3);
if (r4)
*r4 = sa_readl(dev, Mailbox4);
return 0;
}
/**
* aac_sa_interrupt_adapter - interrupt an adapter
* @dev: Adapter to send the breakpoint request to.
*
* Breakpoint an adapter.
*/
static void aac_sa_interrupt_adapter (struct aac_dev *dev)
{
sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL);
}
/**
* aac_sa_start_adapter - activate adapter
* @dev: Adapter
*
* Start up processing on an ARM based AAC adapter
*/
static void aac_sa_start_adapter(struct aac_dev *dev)
{
struct aac_init *init;
/*
* Fill in the remaining pieces of the init.
*/
init = dev->init;
init->HostElapsedSeconds = cpu_to_le32(get_seconds());
/* We can only use a 32 bit address here */
sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL);
}
static int aac_sa_restart_adapter(struct aac_dev *dev, int bled)
{
return -EINVAL;
}
/**
* aac_sa_check_health
* @dev: device to check if healthy
*
* Will attempt to determine if the specified adapter is alive and
* capable of handling requests, returning 0 if alive.
*/
static int aac_sa_check_health(struct aac_dev *dev)
{
long status = sa_readl(dev, Mailbox7);
/*
* Check to see if the board failed any self tests.
*/
if (status & SELF_TEST_FAILED)
return -1;
/*
* Check to see if the board panic'd while booting.
*/
if (status & KERNEL_PANIC)
return -2;
/*
* Check that the adapter is up and running
*/
if (!(status & KERNEL_UP_AND_RUNNING))
return -3;
/*
* Everything is OK
*/
return 0;
}
/**
* aac_sa_ioremap
* @dev: adapter whose register space to map
* @size: mapping resize request (0 unmaps the current mapping)
*
*/
static int aac_sa_ioremap(struct aac_dev * dev, u32 size)
{
if (!size) {
iounmap(dev->regs.sa);
return 0;
}
dev->base = dev->regs.sa = ioremap(dev->scsi_host_ptr->base, size);
return (dev->base == NULL) ? -1 : 0;
}
/**
* aac_sa_init - initialize an ARM based AAC card
* @dev: device to configure
*
* Allocate and set up resources for the ARM based AAC variants. The
* device_interface in the commregion will be allocated and linked
* to the comm region.
*/
int aac_sa_init(struct aac_dev *dev)
{
unsigned long start;
unsigned long status;
int instance;
const char *name;
instance = dev->id;
name = dev->name;
if (aac_sa_ioremap(dev, dev->base_size)) {
printk(KERN_WARNING "%s: unable to map adapter.\n", name);
goto error_iounmap;
}
/*
* Check to see if the board failed any self tests.
*/
if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) {
printk(KERN_WARNING "%s%d: adapter self-test failed.\n", name, instance);
goto error_iounmap;
}
/*
* Check to see if the board panic'd while booting.
*/
if (sa_readl(dev, Mailbox7) & KERNEL_PANIC) {
printk(KERN_WARNING "%s%d: adapter kernel panic'd.\n", name, instance);
goto error_iounmap;
}
start = jiffies;
/*
* Wait for the adapter to be up and running. Wait up to 3 minutes.
*/
while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
if (time_after(jiffies, start+startup_timeout*HZ)) {
status = sa_readl(dev, Mailbox7);
printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %lx.\n",
name, instance, status);
goto error_iounmap;
}
msleep(1);
}
/*
* Fill in the function dispatch table.
*/
dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
dev->a_ops.adapter_notify = aac_sa_notify_adapter;
dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
dev->a_ops.adapter_check_health = aac_sa_check_health;
dev->a_ops.adapter_restart = aac_sa_restart_adapter;
dev->a_ops.adapter_intr = aac_sa_intr;
dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
dev->a_ops.adapter_ioremap = aac_sa_ioremap;
/*
* First clear out all interrupts. Then enable the ones that
* we can handle.
*/
aac_adapter_disable_int(dev);
aac_adapter_enable_int(dev);
if(aac_init_adapter(dev) == NULL)
goto error_irq;
dev->sync_mode = 0; /* sync. mode not supported */
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
IRQF_SHARED|IRQF_DISABLED,
"aacraid", (void *)dev ) < 0) {
printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
name, instance);
goto error_iounmap;
}
dev->dbg_base = dev->scsi_host_ptr->base;
dev->dbg_base_mapped = dev->base;
dev->dbg_size = dev->base_size;
aac_adapter_enable_int(dev);
/*
* Tell the adapter that all is configured, and it can start
* accepting requests
*/
aac_sa_start_adapter(dev);
return 0;
error_irq:
aac_sa_disable_interrupt(dev);
free_irq(dev->pdev->irq, (void *)dev);
error_iounmap:
return -1;
}
| gpl-2.0 |