repo_name
string
path
string
copies
string
size
string
content
string
license
string
jduhamel/linux
arch/xtensa/variants/s6000/dmac.c
3285
4808
/* * Authors: Oskar Schirmer <oskar@scara.com> * Daniel Gloeckner <dg@emlix.com> * (c) 2008 emlix GmbH http://www.emlix.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/io.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <asm/cacheflush.h> #include <variant/dmac.h> /* DMA engine lookup */ struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB]; /* DMA control, per engine */ void s6dmac_put_fifo_cache(u32 dmac, int chan, u32 src, u32 dst, u32 size) { if (xtensa_need_flush_dma_source(src)) { u32 base = src; u32 span = size; u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK); if (chunk && (size > chunk)) { s32 skip = readl(DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP); u32 gaps = (size+chunk-1)/chunk - 1; if (skip >= 0) { span += gaps * skip; } else if (-skip > chunk) { s32 decr = gaps * (chunk + skip); base += decr; span = chunk - decr; } else { span = max(span + gaps * skip, (chunk + skip) * gaps - skip); } } flush_dcache_unaligned(base, span); } if (xtensa_need_invalidate_dma_destination(dst)) { u32 base = dst; u32 span = size; u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK); if (chunk && (size > chunk)) { s32 skip = readl(DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP); u32 gaps = (size+chunk-1)/chunk - 1; if (skip >= 0) { span += gaps * skip; } else if (-skip > chunk) { s32 decr = gaps * (chunk + skip); base += decr; span = chunk - decr; } else { span = max(span + gaps * skip, (chunk + skip) * gaps - skip); } } invalidate_dcache_unaligned(base, span); } s6dmac_put_fifo(dmac, chan, src, dst, size); } void s6dmac_disable_error_irqs(u32 dmac, u32 mask) { unsigned long flags; spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock; spin_lock_irqsave(spinl, flags); 
_s6dmac_disable_error_irqs(dmac, mask); spin_unlock_irqrestore(spinl, flags); } u32 s6dmac_int_sources(u32 dmac, u32 channel) { u32 mask, ret, tmp; mask = 1 << channel; tmp = readl(dmac + S6_DMA_TERMCNTIRQSTAT); tmp &= mask; writel(tmp, dmac + S6_DMA_TERMCNTIRQCLR); ret = tmp >> channel; tmp = readl(dmac + S6_DMA_PENDCNTIRQSTAT); tmp &= mask; writel(tmp, dmac + S6_DMA_PENDCNTIRQCLR); ret |= (tmp >> channel) << 1; tmp = readl(dmac + S6_DMA_LOWWMRKIRQSTAT); tmp &= mask; writel(tmp, dmac + S6_DMA_LOWWMRKIRQCLR); ret |= (tmp >> channel) << 2; tmp = readl(dmac + S6_DMA_INTRAW0); tmp &= (mask << S6_DMA_INT0_OVER) | (mask << S6_DMA_INT0_UNDER); writel(tmp, dmac + S6_DMA_INTCLEAR0); if (tmp & (mask << S6_DMA_INT0_UNDER)) ret |= 1 << 3; if (tmp & (mask << S6_DMA_INT0_OVER)) ret |= 1 << 4; tmp = readl(dmac + S6_DMA_MASTERERRINFO); mask <<= S6_DMA_INT1_CHANNEL; if (((tmp >> S6_DMA_MASTERERR_CHAN(0)) & S6_DMA_MASTERERR_CHAN_MASK) == channel) mask |= 1 << S6_DMA_INT1_MASTER; if (((tmp >> S6_DMA_MASTERERR_CHAN(1)) & S6_DMA_MASTERERR_CHAN_MASK) == channel) mask |= 1 << (S6_DMA_INT1_MASTER + 1); if (((tmp >> S6_DMA_MASTERERR_CHAN(2)) & S6_DMA_MASTERERR_CHAN_MASK) == channel) mask |= 1 << (S6_DMA_INT1_MASTER + 2); tmp = readl(dmac + S6_DMA_INTRAW1) & mask; writel(tmp, dmac + S6_DMA_INTCLEAR1); ret |= ((tmp >> channel) & 1) << 5; ret |= ((tmp >> S6_DMA_INT1_MASTER) & S6_DMA_INT1_MASTER_MASK) << 6; return ret; } void s6dmac_release_chan(u32 dmac, int chan) { if (chan >= 0) s6dmac_disable_chan(dmac, chan); } /* global init */ static inline void __init dmac_init(u32 dmac, u8 chan_nb) { s6dmac_ctrl[S6_DMAC_INDEX(dmac)].dmac = dmac; spin_lock_init(&s6dmac_ctrl[S6_DMAC_INDEX(dmac)].lock); s6dmac_ctrl[S6_DMAC_INDEX(dmac)].chan_nb = chan_nb; writel(S6_DMA_INT1_MASTER_MASK << S6_DMA_INT1_MASTER, dmac + S6_DMA_INTCLEAR1); } static inline void __init dmac_master(u32 dmac, u32 m0start, u32 m0end, u32 m1start, u32 m1end) { writel(m0start, dmac + S6_DMA_MASTER0START); writel(m0end - 1, dmac + 
S6_DMA_MASTER0END); writel(m1start, dmac + S6_DMA_MASTER1START); writel(m1end - 1, dmac + S6_DMA_MASTER1END); } static void __init s6_dmac_init(void) { dmac_init(S6_REG_LMSDMA, S6_LMSDMA_NB); dmac_master(S6_REG_LMSDMA, S6_MEM_DDR, S6_MEM_PCIE_APER, S6_MEM_EFI, S6_MEM_GMAC); dmac_init(S6_REG_NIDMA, S6_NIDMA_NB); dmac_init(S6_REG_DPDMA, S6_DPDMA_NB); dmac_master(S6_REG_DPDMA, S6_MEM_DDR, S6_MEM_PCIE_APER, S6_REG_DP, S6_REG_DPDMA); dmac_init(S6_REG_HIFDMA, S6_HIFDMA_NB); dmac_master(S6_REG_HIFDMA, S6_MEM_GMAC, S6_MEM_PCIE_CFG, S6_MEM_PCIE_APER, S6_MEM_AUX); } arch_initcall(s6_dmac_init);
gpl-2.0
ivanmeler/android_kernel_samsung_smdk4412
drivers/isdn/hisax/hfc_2bs0.c
3285
15669
/* $Id: hfc_2bs0.c,v 1.20.2.6 2004/02/11 13:21:33 keil Exp $ * * specific routines for CCD's HFC 2BS0 * * Author Karsten Keil * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include "hisax.h" #include "hfc_2bs0.h" #include "isac.h" #include "isdnl1.h" #include <linux/interrupt.h> #include <linux/slab.h> static inline int WaitForBusy(struct IsdnCardState *cs) { int to = 130; u_char val; while (!(cs->BC_Read_Reg(cs, HFC_STATUS, 0) & HFC_BUSY) && to) { val = cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2 | (cs->hw.hfc.cip & 3)); udelay(1); to--; } if (!to) { printk(KERN_WARNING "HiSax: waitforBusy timeout\n"); return (0); } else return (to); } static inline int WaitNoBusy(struct IsdnCardState *cs) { int to = 125; while ((cs->BC_Read_Reg(cs, HFC_STATUS, 0) & HFC_BUSY) && to) { udelay(1); to--; } if (!to) { printk(KERN_WARNING "HiSax: waitforBusy timeout\n"); return (0); } else return (to); } static int GetFreeFifoBytes(struct BCState *bcs) { int s; if (bcs->hw.hfc.f1 == bcs->hw.hfc.f2) return (bcs->cs->hw.hfc.fifosize); s = bcs->hw.hfc.send[bcs->hw.hfc.f1] - bcs->hw.hfc.send[bcs->hw.hfc.f2]; if (s <= 0) s += bcs->cs->hw.hfc.fifosize; s = bcs->cs->hw.hfc.fifosize - s; return (s); } static int ReadZReg(struct BCState *bcs, u_char reg) { int val; WaitNoBusy(bcs->cs); val = 256 * bcs->cs->BC_Read_Reg(bcs->cs, HFC_DATA, reg | HFC_CIP | HFC_Z_HIGH); WaitNoBusy(bcs->cs); val += bcs->cs->BC_Read_Reg(bcs->cs, HFC_DATA, reg | HFC_CIP | HFC_Z_LOW); return (val); } static void hfc_clear_fifo(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; int idx, cnt; int rcnt, z1, z2; u_char cip, f1, f2; if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO)) debugl1(cs, "hfc_clear_fifo"); cip = HFC_CIP | HFC_F1 | HFC_REC | HFC_CHANNEL(bcs->channel); if ((cip & 0xc3) != (cs->hw.hfc.cip & 0xc3)) { 
cs->BC_Write_Reg(cs, HFC_STATUS, cip, cip); WaitForBusy(cs); } WaitNoBusy(cs); f1 = cs->BC_Read_Reg(cs, HFC_DATA, cip); cip = HFC_CIP | HFC_F2 | HFC_REC | HFC_CHANNEL(bcs->channel); WaitNoBusy(cs); f2 = cs->BC_Read_Reg(cs, HFC_DATA, cip); z1 = ReadZReg(bcs, HFC_Z1 | HFC_REC | HFC_CHANNEL(bcs->channel)); z2 = ReadZReg(bcs, HFC_Z2 | HFC_REC | HFC_CHANNEL(bcs->channel)); cnt = 32; while (((f1 != f2) || (z1 != z2)) && cnt--) { if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfc clear %d f1(%d) f2(%d)", bcs->channel, f1, f2); rcnt = z1 - z2; if (rcnt < 0) rcnt += cs->hw.hfc.fifosize; if (rcnt) rcnt++; if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfc clear %d z1(%x) z2(%x) cnt(%d)", bcs->channel, z1, z2, rcnt); cip = HFC_CIP | HFC_FIFO_OUT | HFC_REC | HFC_CHANNEL(bcs->channel); idx = 0; while ((idx < rcnt) && WaitNoBusy(cs)) { cs->BC_Read_Reg(cs, HFC_DATA_NODEB, cip); idx++; } if (f1 != f2) { WaitNoBusy(cs); cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC | HFC_CHANNEL(bcs->channel)); WaitForBusy(cs); } cip = HFC_CIP | HFC_F1 | HFC_REC | HFC_CHANNEL(bcs->channel); WaitNoBusy(cs); f1 = cs->BC_Read_Reg(cs, HFC_DATA, cip); cip = HFC_CIP | HFC_F2 | HFC_REC | HFC_CHANNEL(bcs->channel); WaitNoBusy(cs); f2 = cs->BC_Read_Reg(cs, HFC_DATA, cip); z1 = ReadZReg(bcs, HFC_Z1 | HFC_REC | HFC_CHANNEL(bcs->channel)); z2 = ReadZReg(bcs, HFC_Z2 | HFC_REC | HFC_CHANNEL(bcs->channel)); } return; } static struct sk_buff * hfc_empty_fifo(struct BCState *bcs, int count) { u_char *ptr; struct sk_buff *skb; struct IsdnCardState *cs = bcs->cs; int idx; int chksum; u_char stat, cip; if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO)) debugl1(cs, "hfc_empty_fifo"); idx = 0; if (count > HSCX_BUFMAX + 3) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "hfc_empty_fifo: incoming packet too large"); cip = HFC_CIP | HFC_FIFO_OUT | HFC_REC | HFC_CHANNEL(bcs->channel); while ((idx++ < count) && WaitNoBusy(cs)) cs->BC_Read_Reg(cs, HFC_DATA_NODEB, cip); WaitNoBusy(cs); stat = cs->BC_Read_Reg(cs, 
HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC | HFC_CHANNEL(bcs->channel)); WaitForBusy(cs); return (NULL); } if ((count < 4) && (bcs->mode != L1_MODE_TRANS)) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "hfc_empty_fifo: incoming packet too small"); cip = HFC_CIP | HFC_FIFO_OUT | HFC_REC | HFC_CHANNEL(bcs->channel); while ((idx++ < count) && WaitNoBusy(cs)) cs->BC_Read_Reg(cs, HFC_DATA_NODEB, cip); WaitNoBusy(cs); stat = cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC | HFC_CHANNEL(bcs->channel)); WaitForBusy(cs); #ifdef ERROR_STATISTIC bcs->err_inv++; #endif return (NULL); } if (bcs->mode == L1_MODE_TRANS) count -= 1; else count -= 3; if (!(skb = dev_alloc_skb(count))) printk(KERN_WARNING "HFC: receive out of memory\n"); else { ptr = skb_put(skb, count); idx = 0; cip = HFC_CIP | HFC_FIFO_OUT | HFC_REC | HFC_CHANNEL(bcs->channel); while ((idx < count) && WaitNoBusy(cs)) { *ptr++ = cs->BC_Read_Reg(cs, HFC_DATA_NODEB, cip); idx++; } if (idx != count) { debugl1(cs, "RFIFO BUSY error"); printk(KERN_WARNING "HFC FIFO channel %d BUSY Error\n", bcs->channel); dev_kfree_skb_any(skb); if (bcs->mode != L1_MODE_TRANS) { WaitNoBusy(cs); stat = cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC | HFC_CHANNEL(bcs->channel)); WaitForBusy(cs); } return (NULL); } if (bcs->mode != L1_MODE_TRANS) { WaitNoBusy(cs); chksum = (cs->BC_Read_Reg(cs, HFC_DATA, cip) << 8); WaitNoBusy(cs); chksum += cs->BC_Read_Reg(cs, HFC_DATA, cip); WaitNoBusy(cs); stat = cs->BC_Read_Reg(cs, HFC_DATA, cip); if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfc_empty_fifo %d chksum %x stat %x", bcs->channel, chksum, stat); if (stat) { debugl1(cs, "FIFO CRC error"); dev_kfree_skb_any(skb); skb = NULL; #ifdef ERROR_STATISTIC bcs->err_crc++; #endif } WaitNoBusy(cs); stat = cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC | HFC_CHANNEL(bcs->channel)); WaitForBusy(cs); } } return (skb); } static void hfc_fill_fifo(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; int idx, fcnt; int 
count; int z1, z2; u_char cip; if (!bcs->tx_skb) return; if (bcs->tx_skb->len <= 0) return; cip = HFC_CIP | HFC_F1 | HFC_SEND | HFC_CHANNEL(bcs->channel); if ((cip & 0xc3) != (cs->hw.hfc.cip & 0xc3)) { cs->BC_Write_Reg(cs, HFC_STATUS, cip, cip); WaitForBusy(cs); } WaitNoBusy(cs); if (bcs->mode != L1_MODE_TRANS) { bcs->hw.hfc.f1 = cs->BC_Read_Reg(cs, HFC_DATA, cip); cip = HFC_CIP | HFC_F2 | HFC_SEND | HFC_CHANNEL(bcs->channel); WaitNoBusy(cs); bcs->hw.hfc.f2 = cs->BC_Read_Reg(cs, HFC_DATA, cip); bcs->hw.hfc.send[bcs->hw.hfc.f1] = ReadZReg(bcs, HFC_Z1 | HFC_SEND | HFC_CHANNEL(bcs->channel)); if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfc_fill_fifo %d f1(%d) f2(%d) z1(%x)", bcs->channel, bcs->hw.hfc.f1, bcs->hw.hfc.f2, bcs->hw.hfc.send[bcs->hw.hfc.f1]); fcnt = bcs->hw.hfc.f1 - bcs->hw.hfc.f2; if (fcnt < 0) fcnt += 32; if (fcnt > 30) { if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfc_fill_fifo more as 30 frames"); return; } count = GetFreeFifoBytes(bcs); } else { WaitForBusy(cs); z1 = ReadZReg(bcs, HFC_Z1 | HFC_REC | HFC_CHANNEL(bcs->channel)); z2 = ReadZReg(bcs, HFC_Z2 | HFC_REC | HFC_CHANNEL(bcs->channel)); count = z1 - z2; if (count < 0) count += cs->hw.hfc.fifosize; } /* L1_MODE_TRANS */ if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfc_fill_fifo %d count(%u/%d)", bcs->channel, bcs->tx_skb->len, count); if (count < bcs->tx_skb->len) { if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfc_fill_fifo no fifo mem"); return; } cip = HFC_CIP | HFC_FIFO_IN | HFC_SEND | HFC_CHANNEL(bcs->channel); idx = 0; while ((idx < bcs->tx_skb->len) && WaitNoBusy(cs)) cs->BC_Write_Reg(cs, HFC_DATA_NODEB, cip, bcs->tx_skb->data[idx++]); if (idx != bcs->tx_skb->len) { debugl1(cs, "FIFO Send BUSY error"); printk(KERN_WARNING "HFC S FIFO channel %d BUSY Error\n", bcs->channel); } else { count = bcs->tx_skb->len; bcs->tx_cnt -= count; if (PACKET_NOACK == bcs->tx_skb->pkt_type) count = -1; dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; if (bcs->mode != L1_MODE_TRANS) { WaitForBusy(cs); WaitNoBusy(cs); 
cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F1_INC | HFC_SEND | HFC_CHANNEL(bcs->channel)); } if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) && (count >= 0)) { u_long flags; spin_lock_irqsave(&bcs->aclock, flags); bcs->ackcnt += count; spin_unlock_irqrestore(&bcs->aclock, flags); schedule_event(bcs, B_ACKPENDING); } test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); } return; } void main_irq_hfc(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; int z1, z2, rcnt; u_char f1, f2, cip; int receive, transmit, count = 5; struct sk_buff *skb; Begin: count--; cip = HFC_CIP | HFC_F1 | HFC_REC | HFC_CHANNEL(bcs->channel); if ((cip & 0xc3) != (cs->hw.hfc.cip & 0xc3)) { cs->BC_Write_Reg(cs, HFC_STATUS, cip, cip); WaitForBusy(cs); } WaitNoBusy(cs); receive = 0; if (bcs->mode == L1_MODE_HDLC) { f1 = cs->BC_Read_Reg(cs, HFC_DATA, cip); cip = HFC_CIP | HFC_F2 | HFC_REC | HFC_CHANNEL(bcs->channel); WaitNoBusy(cs); f2 = cs->BC_Read_Reg(cs, HFC_DATA, cip); if (f1 != f2) { if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfc rec %d f1(%d) f2(%d)", bcs->channel, f1, f2); receive = 1; } } if (receive || (bcs->mode == L1_MODE_TRANS)) { WaitForBusy(cs); z1 = ReadZReg(bcs, HFC_Z1 | HFC_REC | HFC_CHANNEL(bcs->channel)); z2 = ReadZReg(bcs, HFC_Z2 | HFC_REC | HFC_CHANNEL(bcs->channel)); rcnt = z1 - z2; if (rcnt < 0) rcnt += cs->hw.hfc.fifosize; if ((bcs->mode == L1_MODE_HDLC) || (rcnt)) { rcnt++; if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfc rec %d z1(%x) z2(%x) cnt(%d)", bcs->channel, z1, z2, rcnt); /* sti(); */ if ((skb = hfc_empty_fifo(bcs, rcnt))) { skb_queue_tail(&bcs->rqueue, skb); schedule_event(bcs, B_RCVBUFREADY); } } receive = 1; } if (bcs->tx_skb) { transmit = 1; test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); hfc_fill_fifo(bcs); if (test_bit(BC_FLG_BUSY, &bcs->Flag)) transmit = 0; } else { if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) { transmit = 1; test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); hfc_fill_fifo(bcs); if (test_bit(BC_FLG_BUSY, &bcs->Flag)) transmit = 0; } else { transmit = 
0; schedule_event(bcs, B_XMTBUFREADY); } } if ((receive || transmit) && count) goto Begin; return; } static void mode_hfc(struct BCState *bcs, int mode, int bc) { struct IsdnCardState *cs = bcs->cs; if (cs->debug & L1_DEB_HSCX) debugl1(cs, "HFC 2BS0 mode %d bchan %d/%d", mode, bc, bcs->channel); bcs->mode = mode; bcs->channel = bc; switch (mode) { case (L1_MODE_NULL): if (bc) { cs->hw.hfc.ctmt &= ~1; cs->hw.hfc.isac_spcr &= ~0x03; } else { cs->hw.hfc.ctmt &= ~2; cs->hw.hfc.isac_spcr &= ~0x0c; } break; case (L1_MODE_TRANS): cs->hw.hfc.ctmt &= ~(1 << bc); /* set HDLC mode */ cs->BC_Write_Reg(cs, HFC_STATUS, cs->hw.hfc.ctmt, cs->hw.hfc.ctmt); hfc_clear_fifo(bcs); /* complete fifo clear */ if (bc) { cs->hw.hfc.ctmt |= 1; cs->hw.hfc.isac_spcr &= ~0x03; cs->hw.hfc.isac_spcr |= 0x02; } else { cs->hw.hfc.ctmt |= 2; cs->hw.hfc.isac_spcr &= ~0x0c; cs->hw.hfc.isac_spcr |= 0x08; } break; case (L1_MODE_HDLC): if (bc) { cs->hw.hfc.ctmt &= ~1; cs->hw.hfc.isac_spcr &= ~0x03; cs->hw.hfc.isac_spcr |= 0x02; } else { cs->hw.hfc.ctmt &= ~2; cs->hw.hfc.isac_spcr &= ~0x0c; cs->hw.hfc.isac_spcr |= 0x08; } break; } cs->BC_Write_Reg(cs, HFC_STATUS, cs->hw.hfc.ctmt, cs->hw.hfc.ctmt); cs->writeisac(cs, ISAC_SPCR, cs->hw.hfc.isac_spcr); if (mode == L1_MODE_HDLC) hfc_clear_fifo(bcs); } static void hfc_l2l1(struct PStack *st, int pr, void *arg) { struct BCState *bcs = st->l1.bcs; struct sk_buff *skb = arg; u_long flags; switch (pr) { case (PH_DATA | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->tx_skb) { skb_queue_tail(&bcs->squeue, skb); } else { bcs->tx_skb = skb; test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->cs->BC_Send_Data(bcs); } spin_unlock_irqrestore(&bcs->cs->lock, flags); break; case (PH_PULL | INDICATION): spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->tx_skb) { printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n"); } else { test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->tx_skb = skb; bcs->cs->BC_Send_Data(bcs); } spin_unlock_irqrestore(&bcs->cs->lock, flags); 
break; case (PH_PULL | REQUEST): if (!bcs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case (PH_ACTIVATE | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag); mode_hfc(bcs, st->l1.mode, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | REQUEST): l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | CONFIRM): spin_lock_irqsave(&bcs->cs->lock, flags); test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag); test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); mode_hfc(bcs, 0, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL); break; } } static void close_hfcstate(struct BCState *bcs) { mode_hfc(bcs, 0, bcs->channel); if (test_bit(BC_FLG_INIT, &bcs->Flag)) { skb_queue_purge(&bcs->rqueue); skb_queue_purge(&bcs->squeue); if (bcs->tx_skb) { dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); } } test_and_clear_bit(BC_FLG_INIT, &bcs->Flag); } static int open_hfcstate(struct IsdnCardState *cs, struct BCState *bcs) { if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) { skb_queue_head_init(&bcs->rqueue); skb_queue_head_init(&bcs->squeue); } bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); bcs->event = 0; bcs->tx_cnt = 0; return (0); } static int setstack_hfc(struct PStack *st, struct BCState *bcs) { bcs->channel = st->l1.bc; if (open_hfcstate(st->l1.hardware, bcs)) return (-1); st->l1.bcs = bcs; st->l2.l2l1 = hfc_l2l1; setstack_manager(st); bcs->st = st; setstack_l1_B(st); return (0); } static void init_send(struct BCState *bcs) { int i; if (!(bcs->hw.hfc.send = kmalloc(32 * sizeof(unsigned int), GFP_ATOMIC))) { printk(KERN_WARNING "HiSax: No memory for hfc.send\n"); return; } for (i = 0; i < 32; i++) bcs->hw.hfc.send[i] = 0x1fff; } void inithfc(struct 
IsdnCardState *cs) { init_send(&cs->bcs[0]); init_send(&cs->bcs[1]); cs->BC_Send_Data = &hfc_fill_fifo; cs->bcs[0].BC_SetStack = setstack_hfc; cs->bcs[1].BC_SetStack = setstack_hfc; cs->bcs[0].BC_Close = close_hfcstate; cs->bcs[1].BC_Close = close_hfcstate; mode_hfc(cs->bcs, 0, 0); mode_hfc(cs->bcs + 1, 0, 0); } void releasehfc(struct IsdnCardState *cs) { kfree(cs->bcs[0].hw.hfc.send); cs->bcs[0].hw.hfc.send = NULL; kfree(cs->bcs[1].hw.hfc.send); cs->bcs[1].hw.hfc.send = NULL; }
gpl-2.0
CoolDevelopment/MoshKernel-amami
arch/arm/plat-omap/devices.c
4821
5595
/* * linux/arch/arm/plat-omap/devices.c * * Common platform device setup/initialization for OMAP1 and OMAP2 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/gpio.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/memblock.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/map.h> #include <asm/memblock.h> #include <plat/tc.h> #include <plat/board.h> #include <plat/mmc.h> #include <plat/menelaus.h> #include <plat/omap44xx.h> #if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \ defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE) #define OMAP_MMC_NR_RES 2 /* * Register MMC devices. Called from mach-omap1 and mach-omap2 device init. 
*/ int __init omap_mmc_add(const char *name, int id, unsigned long base, unsigned long size, unsigned int irq, struct omap_mmc_platform_data *data) { struct platform_device *pdev; struct resource res[OMAP_MMC_NR_RES]; int ret; pdev = platform_device_alloc(name, id); if (!pdev) return -ENOMEM; memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource)); res[0].start = base; res[0].end = base + size - 1; res[0].flags = IORESOURCE_MEM; res[1].start = res[1].end = irq; res[1].flags = IORESOURCE_IRQ; ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret == 0) ret = platform_device_add_data(pdev, data, sizeof(*data)); if (ret) goto fail; ret = platform_device_add(pdev); if (ret) goto fail; /* return device handle to board setup code */ data->dev = &pdev->dev; return 0; fail: platform_device_put(pdev); return ret; } #endif /*-------------------------------------------------------------------------*/ #if defined(CONFIG_HW_RANDOM_OMAP) || defined(CONFIG_HW_RANDOM_OMAP_MODULE) #ifdef CONFIG_ARCH_OMAP2 #define OMAP_RNG_BASE 0x480A0000 #else #define OMAP_RNG_BASE 0xfffe5000 #endif static struct resource rng_resources[] = { { .start = OMAP_RNG_BASE, .end = OMAP_RNG_BASE + 0x4f, .flags = IORESOURCE_MEM, }, }; static struct platform_device omap_rng_device = { .name = "omap_rng", .id = -1, .num_resources = ARRAY_SIZE(rng_resources), .resource = rng_resources, }; static void omap_init_rng(void) { (void) platform_device_register(&omap_rng_device); } #else static inline void omap_init_rng(void) {} #endif /*-------------------------------------------------------------------------*/ /* Numbering for the SPI-capable controllers when used for SPI: * spi = 1 * uwire = 2 * mmc1..2 = 3..4 * mcbsp1..3 = 5..7 */ #if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE) #define OMAP_UWIRE_BASE 0xfffb3000 static struct resource uwire_resources[] = { { .start = OMAP_UWIRE_BASE, .end = OMAP_UWIRE_BASE + 0x20, .flags = IORESOURCE_MEM, }, }; static struct 
platform_device omap_uwire_device = { .name = "omap_uwire", .id = -1, .num_resources = ARRAY_SIZE(uwire_resources), .resource = uwire_resources, }; static void omap_init_uwire(void) { /* FIXME define and use a boot tag; not all boards will be hooking * up devices to the microwire controller, and multi-board configs * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway... */ /* board-specific code must configure chipselects (only a few * are normally used) and SCLK/SDI/SDO (each has two choices). */ (void) platform_device_register(&omap_uwire_device); } #else static inline void omap_init_uwire(void) {} #endif #if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE) static phys_addr_t omap_dsp_phys_mempool_base; void __init omap_dsp_reserve_sdram_memblock(void) { phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE; phys_addr_t paddr; if (!size) return; paddr = arm_memblock_steal(size, SZ_1M); if (!paddr) { pr_err("%s: failed to reserve %x bytes\n", __func__, size); return; } omap_dsp_phys_mempool_base = paddr; } phys_addr_t omap_dsp_get_mempool_base(void) { return omap_dsp_phys_mempool_base; } EXPORT_SYMBOL(omap_dsp_get_mempool_base); #endif /* * This gets called after board-specific INIT_MACHINE, and initializes most * on-chip peripherals accessible on this board (except for few like USB): * * (a) Does any "standard config" pin muxing needed. Board-specific * code will have muxed GPIO pins and done "nonstandard" setup; * that code could live in the boot loader. * (b) Populating board-specific platform_data with the data drivers * rely on to handle wiring variations. * (c) Creating platform devices as meaningful on this board and * with this kernel configuration. * * Claiming GPIOs, and setting their direction and initial values, is the * responsibility of the device drivers. So is responding to probe(). * * Board-specific knowledge like creating devices or pin setup is to be * kept out of drivers as much as possible. 
In particular, pin setup * may be handled by the boot loader, and drivers should expect it will * normally have been done by the time they're probed. */ static int __init omap_init_devices(void) { /* please keep these calls, and their implementations above, * in alphabetical order so they're easier to sort through. */ omap_init_rng(); omap_init_uwire(); return 0; } arch_initcall(omap_init_devices);
gpl-2.0
TheBootloader/android_kernel_samsung_msm8930-common
sound/isa/sb/jazz16.c
5077
10703
/* * jazz16.c - driver for Media Vision Jazz16 based soundcards. * Copyright (C) 2009 Krzysztof Helt <krzysztof.h1@wp.pl> * Based on patches posted by Rask Ingemann Lambertsen and Rene Herman. * Based on OSS Sound Blaster driver. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/io.h> #include <linux/delay.h> #include <asm/dma.h> #include <linux/isa.h> #include <sound/core.h> #include <sound/mpu401.h> #include <sound/opl3.h> #include <sound/sb.h> #define SNDRV_LEGACY_FIND_FREE_IRQ #define SNDRV_LEGACY_FIND_FREE_DMA #include <sound/initval.h> #define PFX "jazz16: " MODULE_DESCRIPTION("Media Vision Jazz16"); MODULE_SUPPORTED_DEVICE("{{Media Vision ??? }," "{RTL,RTL3000}}"); MODULE_AUTHOR("Krzysztof Helt <krzysztof.h1@wp.pl>"); MODULE_LICENSE("GPL"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */ static unsigned long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static unsigned long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static int dma8[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; static int dma16[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Media Vision Jazz16 based soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Media Vision Jazz16 based soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Media Vision Jazz16 based soundcard."); module_param_array(port, long, NULL, 0444); MODULE_PARM_DESC(port, "Port # for jazz16 driver."); module_param_array(mpu_port, long, NULL, 
0444); MODULE_PARM_DESC(mpu_port, "MPU-401 port # for jazz16 driver."); module_param_array(irq, int, NULL, 0444); MODULE_PARM_DESC(irq, "IRQ # for jazz16 driver."); module_param_array(mpu_irq, int, NULL, 0444); MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ # for jazz16 driver."); module_param_array(dma8, int, NULL, 0444); MODULE_PARM_DESC(dma8, "DMA8 # for jazz16 driver."); module_param_array(dma16, int, NULL, 0444); MODULE_PARM_DESC(dma16, "DMA16 # for jazz16 driver."); #define SB_JAZZ16_WAKEUP 0xaf #define SB_JAZZ16_SET_PORTS 0x50 #define SB_DSP_GET_JAZZ_BRD_REV 0xfa #define SB_JAZZ16_SET_DMAINTR 0xfb #define SB_DSP_GET_JAZZ_MODEL 0xfe struct snd_card_jazz16 { struct snd_sb *chip; }; static irqreturn_t jazz16_interrupt(int irq, void *chip) { return snd_sb8dsp_interrupt(chip); } static int __devinit jazz16_configure_ports(unsigned long port, unsigned long mpu_port, int idx) { unsigned char val; if (!request_region(0x201, 1, "jazz16 config")) { snd_printk(KERN_ERR "config port region is already in use.\n"); return -EBUSY; } outb(SB_JAZZ16_WAKEUP - idx, 0x201); udelay(100); outb(SB_JAZZ16_SET_PORTS + idx, 0x201); udelay(100); val = port & 0x70; val |= (mpu_port & 0x30) >> 4; outb(val, 0x201); release_region(0x201, 1); return 0; } static int __devinit jazz16_detect_board(unsigned long port, unsigned long mpu_port) { int err; int val; struct snd_sb chip; if (!request_region(port, 0x10, "jazz16")) { snd_printk(KERN_ERR "I/O port region is already in use.\n"); return -EBUSY; } /* just to call snd_sbdsp_command/reset/get_byte() */ chip.port = port; err = snd_sbdsp_reset(&chip); if (err < 0) for (val = 0; val < 4; val++) { err = jazz16_configure_ports(port, mpu_port, val); if (err < 0) break; err = snd_sbdsp_reset(&chip); if (!err) break; } if (err < 0) { err = -ENODEV; goto err_unmap; } if (!snd_sbdsp_command(&chip, SB_DSP_GET_JAZZ_BRD_REV)) { err = -EBUSY; goto err_unmap; } val = snd_sbdsp_get_byte(&chip); if (val >= 0x30) snd_sbdsp_get_byte(&chip); if ((val & 0xf0) != 0x10) { 
err = -ENODEV; goto err_unmap; } if (!snd_sbdsp_command(&chip, SB_DSP_GET_JAZZ_MODEL)) { err = -EBUSY; goto err_unmap; } snd_sbdsp_get_byte(&chip); err = snd_sbdsp_get_byte(&chip); snd_printd("Media Vision Jazz16 board detected: rev 0x%x, model 0x%x\n", val, err); err = 0; err_unmap: release_region(port, 0x10); return err; } static int __devinit jazz16_configure_board(struct snd_sb *chip, int mpu_irq) { static unsigned char jazz_irq_bits[] = { 0, 0, 2, 3, 0, 1, 0, 4, 0, 2, 5, 0, 0, 0, 0, 6 }; static unsigned char jazz_dma_bits[] = { 0, 1, 0, 2, 0, 3, 0, 4 }; if (jazz_dma_bits[chip->dma8] == 0 || jazz_dma_bits[chip->dma16] == 0 || jazz_irq_bits[chip->irq] == 0) return -EINVAL; if (!snd_sbdsp_command(chip, SB_JAZZ16_SET_DMAINTR)) return -EBUSY; if (!snd_sbdsp_command(chip, jazz_dma_bits[chip->dma8] | (jazz_dma_bits[chip->dma16] << 4))) return -EBUSY; if (!snd_sbdsp_command(chip, jazz_irq_bits[chip->irq] | (jazz_irq_bits[mpu_irq] << 4))) return -EBUSY; return 0; } static int __devinit snd_jazz16_match(struct device *devptr, unsigned int dev) { if (!enable[dev]) return 0; if (port[dev] == SNDRV_AUTO_PORT) { snd_printk(KERN_ERR "please specify port\n"); return 0; } else if (port[dev] == 0x200 || (port[dev] & ~0x270)) { snd_printk(KERN_ERR "incorrect port specified\n"); return 0; } if (dma8[dev] != SNDRV_AUTO_DMA && dma8[dev] != 1 && dma8[dev] != 3) { snd_printk(KERN_ERR "dma8 must be 1 or 3\n"); return 0; } if (dma16[dev] != SNDRV_AUTO_DMA && dma16[dev] != 5 && dma16[dev] != 7) { snd_printk(KERN_ERR "dma16 must be 5 or 7\n"); return 0; } if (mpu_port[dev] != SNDRV_AUTO_PORT && (mpu_port[dev] & ~0x030) != 0x300) { snd_printk(KERN_ERR "incorrect mpu_port specified\n"); return 0; } if (mpu_irq[dev] != SNDRV_AUTO_DMA && mpu_irq[dev] != 2 && mpu_irq[dev] != 3 && mpu_irq[dev] != 5 && mpu_irq[dev] != 7) { snd_printk(KERN_ERR "mpu_irq must be 2, 3, 5 or 7\n"); return 0; } return 1; } static int __devinit snd_jazz16_probe(struct device *devptr, unsigned int dev) { struct 
snd_card *card; struct snd_card_jazz16 *jazz16; struct snd_sb *chip; struct snd_opl3 *opl3; static int possible_irqs[] = {2, 3, 5, 7, 9, 10, 15, -1}; static int possible_dmas8[] = {1, 3, -1}; static int possible_dmas16[] = {5, 7, -1}; int err, xirq, xdma8, xdma16, xmpu_port, xmpu_irq; err = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_card_jazz16), &card); if (err < 0) return err; jazz16 = card->private_data; xirq = irq[dev]; if (xirq == SNDRV_AUTO_IRQ) { xirq = snd_legacy_find_free_irq(possible_irqs); if (xirq < 0) { snd_printk(KERN_ERR "unable to find a free IRQ\n"); err = -EBUSY; goto err_free; } } xdma8 = dma8[dev]; if (xdma8 == SNDRV_AUTO_DMA) { xdma8 = snd_legacy_find_free_dma(possible_dmas8); if (xdma8 < 0) { snd_printk(KERN_ERR "unable to find a free DMA8\n"); err = -EBUSY; goto err_free; } } xdma16 = dma16[dev]; if (xdma16 == SNDRV_AUTO_DMA) { xdma16 = snd_legacy_find_free_dma(possible_dmas16); if (xdma16 < 0) { snd_printk(KERN_ERR "unable to find a free DMA16\n"); err = -EBUSY; goto err_free; } } xmpu_port = mpu_port[dev]; if (xmpu_port == SNDRV_AUTO_PORT) xmpu_port = 0; err = jazz16_detect_board(port[dev], xmpu_port); if (err < 0) { printk(KERN_ERR "Media Vision Jazz16 board not detected\n"); goto err_free; } err = snd_sbdsp_create(card, port[dev], irq[dev], jazz16_interrupt, dma8[dev], dma16[dev], SB_HW_JAZZ16, &chip); if (err < 0) goto err_free; xmpu_irq = mpu_irq[dev]; if (xmpu_irq == SNDRV_AUTO_IRQ || mpu_port[dev] == SNDRV_AUTO_PORT) xmpu_irq = 0; err = jazz16_configure_board(chip, xmpu_irq); if (err < 0) { printk(KERN_ERR "Media Vision Jazz16 configuration failed\n"); goto err_free; } jazz16->chip = chip; strcpy(card->driver, "jazz16"); strcpy(card->shortname, "Media Vision Jazz16"); sprintf(card->longname, "Media Vision Jazz16 at 0x%lx, irq %d, dma8 %d, dma16 %d", port[dev], xirq, xdma8, xdma16); err = snd_sb8dsp_pcm(chip, 0, NULL); if (err < 0) goto err_free; err = snd_sbmixer_new(chip); if (err < 0) goto err_free; err = 
snd_opl3_create(card, chip->port, chip->port + 2, OPL3_HW_AUTO, 1, &opl3); if (err < 0) snd_printk(KERN_WARNING "no OPL device at 0x%lx-0x%lx\n", chip->port, chip->port + 2); else { err = snd_opl3_hwdep_new(opl3, 0, 1, NULL); if (err < 0) goto err_free; } if (mpu_port[dev] > 0 && mpu_port[dev] != SNDRV_AUTO_PORT) { if (mpu_irq[dev] == SNDRV_AUTO_IRQ) mpu_irq[dev] = -1; if (snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, mpu_port[dev], 0, mpu_irq[dev], NULL) < 0) snd_printk(KERN_ERR "no MPU-401 device at 0x%lx\n", mpu_port[dev]); } snd_card_set_dev(card, devptr); err = snd_card_register(card); if (err < 0) goto err_free; dev_set_drvdata(devptr, card); return 0; err_free: snd_card_free(card); return err; } static int __devexit snd_jazz16_remove(struct device *devptr, unsigned int dev) { struct snd_card *card = dev_get_drvdata(devptr); dev_set_drvdata(devptr, NULL); snd_card_free(card); return 0; } #ifdef CONFIG_PM static int snd_jazz16_suspend(struct device *pdev, unsigned int n, pm_message_t state) { struct snd_card *card = dev_get_drvdata(pdev); struct snd_card_jazz16 *acard = card->private_data; struct snd_sb *chip = acard->chip; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); snd_sbmixer_suspend(chip); return 0; } static int snd_jazz16_resume(struct device *pdev, unsigned int n) { struct snd_card *card = dev_get_drvdata(pdev); struct snd_card_jazz16 *acard = card->private_data; struct snd_sb *chip = acard->chip; snd_sbdsp_reset(chip); snd_sbmixer_resume(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif static struct isa_driver snd_jazz16_driver = { .match = snd_jazz16_match, .probe = snd_jazz16_probe, .remove = __devexit_p(snd_jazz16_remove), #ifdef CONFIG_PM .suspend = snd_jazz16_suspend, .resume = snd_jazz16_resume, #endif .driver = { .name = "jazz16" }, }; static int __init alsa_card_jazz16_init(void) { return isa_register_driver(&snd_jazz16_driver, SNDRV_CARDS); } static void __exit 
alsa_card_jazz16_exit(void) { isa_unregister_driver(&snd_jazz16_driver); } module_init(alsa_card_jazz16_init) module_exit(alsa_card_jazz16_exit)
gpl-2.0
TeamHackLG/lge-kernel-lproj
arch/blackfin/mach-bf561/boards/tepla.c
7381
3527
/*
 * Copyright 2004-2007 Analog Devices Inc.
 *           2005 National ICT Australia (NICTA)
 *                Aidan Williams <aidan@nicta.com.au>
 *
 * Thanks to Jamey Hicks.
 *
 * Only SMSC91C1111 was registered, may do more later.
 *
 * Licensed under the GPL-2
 */

/*
 * Board support for the Tepla-BF561 (Blackfin BF561) board: registers the
 * platform devices present on the board (SMC91x Ethernet, and optionally
 * the on-chip UART and SIR/IrDA port, depending on kernel config).
 */

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/irq.h>

const char bfin_board_name[] = "Tepla-BF561";

/*
 * Driver needs to know address, irq and flag pin.
 */
static struct resource smc91x_resources[] = {
	{
		/* memory-mapped register window of the SMC91x chip */
		.start = 0x2C000300,
		.end = 0x2C000320,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_PROG_INTB,
		.end = IRQ_PROG_INTB,
		.flags = IORESOURCE_IRQ|IORESOURCE_IRQ_HIGHLEVEL,
	}, {
		/* flag pin used by the smc91x driver */
		.start = IRQ_PF7,
		.end = IRQ_PF7,
		.flags = IORESOURCE_IRQ|IORESOURCE_IRQ_HIGHLEVEL,
	},
};

static struct platform_device smc91x_device = {
	.name = "smc91x",
	.id = 0,
	.num_resources = ARRAY_SIZE(smc91x_resources),
	.resource = smc91x_resources,
};

#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
/* UART0: register block, TX/RX/error IRQs and TX/RX DMA channels */
static struct resource bfin_uart0_resources[] = {
	{
		.start = BFIN_UART_THR,
		.end = BFIN_UART_GCTL+2,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART_TX,
		.end = IRQ_UART_TX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART_RX,
		.end = IRQ_UART_RX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART_ERROR,
		.end = IRQ_UART_ERROR,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART_TX,
		.end = CH_UART_TX,
		.flags = IORESOURCE_DMA,
	},
	{
		.start = CH_UART_RX,
		.end = CH_UART_RX,
		.flags = IORESOURCE_DMA,
	},
};

/* zero-terminated peripheral pin list handed to the UART driver */
static unsigned short bfin_uart0_peripherals[] = {
	P_UART0_TX, P_UART0_RX, 0
};

static struct platform_device bfin_uart0_device = {
	.name = "bfin-uart",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_uart0_resources),
	.resource = bfin_uart0_resources,
	.dev = {
		.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
	},
};
#endif
#endif

#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
/* SIR (IrDA) port sharing the UART0 hardware */
static struct resource bfin_sir0_resources[] = {
	{
		.start = 0xFFC00400,
		.end = 0xFFC004FF,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART0_RX,
		.end = IRQ_UART0_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART0_RX,
		.end = CH_UART0_RX+1,
		.flags = IORESOURCE_DMA,
	},
};

static struct platform_device bfin_sir0_device = {
	.name = "bfin_sir",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
	.resource = bfin_sir0_resources,
};
#endif
#endif

/* All devices registered at arch_initcall time */
static struct platform_device *tepla_devices[] __initdata = {
	&smc91x_device,
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
	&bfin_sir0_device,
#endif
#endif
};

/* Register the board's platform devices with the driver core. */
static int __init tepla_init(void)
{
	printk(KERN_INFO "%s(): registering device resources\n", __func__);
	return platform_add_devices(tepla_devices, ARRAY_SIZE(tepla_devices));
}

arch_initcall(tepla_init);

/* UART0 must exist early when it is the console / early-printk device */
static struct platform_device *tepla_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#endif
};

void __init native_machine_early_platform_add_devices(void)
{
	printk(KERN_INFO "register early platform devices\n");
	early_platform_add_devices(tepla_early_devices,
		ARRAY_SIZE(tepla_early_devices));
}
gpl-2.0
edoko/Air_Kernel-N5
drivers/media/rc/keymaps/rc-proteus-2309.c
7637
1879
/* proteus-2309.h - Keytable for proteus_2309 Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Michal Majchrowicz <mmajchrowicz@gmail.com> */
/* Scancode -> input-layer keycode table for the Proteus 2309 remote. */
static struct rc_map_table proteus_2309[] = {
	/* numeric */
	{ 0x00, KEY_0 },
	{ 0x01, KEY_1 },
	{ 0x02, KEY_2 },
	{ 0x03, KEY_3 },
	{ 0x04, KEY_4 },
	{ 0x05, KEY_5 },
	{ 0x06, KEY_6 },
	{ 0x07, KEY_7 },
	{ 0x08, KEY_8 },
	{ 0x09, KEY_9 },

	{ 0x5c, KEY_POWER },		/* power       */
	{ 0x20, KEY_ZOOM },		/* full screen */
	{ 0x0f, KEY_BACKSPACE },	/* recall      */
	{ 0x1b, KEY_ENTER },		/* mute        */
	{ 0x41, KEY_RECORD },		/* record      */
	{ 0x43, KEY_STOP },		/* stop        */
	{ 0x16, KEY_S },
	{ 0x1a, KEY_POWER2 },		/* off         */
	{ 0x2e, KEY_RED },
	{ 0x1f, KEY_CHANNELDOWN },	/* channel -   */
	{ 0x1c, KEY_CHANNELUP },	/* channel +   */
	{ 0x10, KEY_VOLUMEDOWN },	/* volume -    */
	{ 0x1e, KEY_VOLUMEUP },		/* volume +    */
	{ 0x14, KEY_F1 },
};

static struct rc_map_list proteus_2309_map = {
	.map = {
		.scan    = proteus_2309,
		.size    = ARRAY_SIZE(proteus_2309),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_PROTEUS_2309,
	}
};

/* Register the keymap with rc-core at module load. */
static int __init init_rc_map_proteus_2309(void)
{
	return rc_map_register(&proteus_2309_map);
}

static void __exit exit_rc_map_proteus_2309(void)
{
	rc_map_unregister(&proteus_2309_map);
}

module_init(init_rc_map_proteus_2309)
module_exit(exit_rc_map_proteus_2309)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
netico-solutions/linux-amxx
drivers/media/rc/keymaps/rc-avermedia.c
7637
2240
/* avermedia.h - Keytable for avermedia Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Alex Hermann <gaaf@gmx.net> */
/* Scancode -> input-layer keycode table for the AVerMedia remote. */
static struct rc_map_table avermedia[] = {
	{ 0x28, KEY_1 },
	{ 0x18, KEY_2 },
	{ 0x38, KEY_3 },
	{ 0x24, KEY_4 },
	{ 0x14, KEY_5 },
	{ 0x34, KEY_6 },
	{ 0x2c, KEY_7 },
	{ 0x1c, KEY_8 },
	{ 0x3c, KEY_9 },
	{ 0x22, KEY_0 },

	{ 0x20, KEY_TV },		/* TV/FM */
	{ 0x10, KEY_CD },		/* CD */
	{ 0x30, KEY_TEXT },		/* TELETEXT */
	{ 0x00, KEY_POWER },		/* POWER */

	{ 0x08, KEY_VIDEO },		/* VIDEO */
	{ 0x04, KEY_AUDIO },		/* AUDIO */
	{ 0x0c, KEY_ZOOM },		/* FULL SCREEN */

	{ 0x12, KEY_SUBTITLE },		/* DISPLAY */
	{ 0x32, KEY_REWIND },		/* LOOP	*/
	{ 0x02, KEY_PRINT },		/* PREVIEW */

	{ 0x2a, KEY_SEARCH },		/* AUTOSCAN */
	{ 0x1a, KEY_SLEEP },		/* FREEZE */
	{ 0x3a, KEY_CAMERA },		/* SNAPSHOT */
	{ 0x0a, KEY_MUTE },		/* MUTE */

	{ 0x26, KEY_RECORD },		/* RECORD */
	{ 0x16, KEY_PAUSE },		/* PAUSE */
	{ 0x36, KEY_STOP },		/* STOP */
	{ 0x06, KEY_PLAY },		/* PLAY */

	{ 0x2e, KEY_RED },		/* RED */
	{ 0x21, KEY_GREEN },		/* GREEN */
	{ 0x0e, KEY_YELLOW },		/* YELLOW */
	{ 0x01, KEY_BLUE },		/* BLUE */

	{ 0x1e, KEY_VOLUMEDOWN },	/* VOLUME- */
	{ 0x3e, KEY_VOLUMEUP },		/* VOLUME+ */
	{ 0x11, KEY_CHANNELDOWN },	/* CHANNEL/PAGE- */
	{ 0x31, KEY_CHANNELUP }		/* CHANNEL/PAGE+ */
};

static struct rc_map_list avermedia_map = {
	.map = {
		.scan    = avermedia,
		.size    = ARRAY_SIZE(avermedia),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_AVERMEDIA,
	}
};

/* Register the keymap with rc-core at module load. */
static int __init init_rc_map_avermedia(void)
{
	return rc_map_register(&avermedia_map);
}

static void __exit exit_rc_map_avermedia(void)
{
	rc_map_unregister(&avermedia_map);
}

module_init(init_rc_map_avermedia)
module_exit(exit_rc_map_avermedia)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
SOKP/kernel_xiaomi_armani
drivers/media/rc/keymaps/rc-pixelview-002t.c
7637
1925
/* rc-pixelview-mk12.h - Keytable for pixelview Remote Controller
 *
 * NOTE(review): the header above appears to have been copied from the
 * mk12 keymap; this file actually implements the 002-T table below.
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/*
 * Keytable for 002-T IR remote provided together with Pixelview
 * SBTVD Hybrid Remote Controller. Uses NEC extended format.
 */
static struct rc_map_table pixelview_002t[] = {
	{ 0x866b13, KEY_MUTE },
	{ 0x866b12, KEY_POWER2 },	/* power */

	{ 0x866b01, KEY_1 },
	{ 0x866b02, KEY_2 },
	{ 0x866b03, KEY_3 },
	{ 0x866b04, KEY_4 },
	{ 0x866b05, KEY_5 },
	{ 0x866b06, KEY_6 },
	{ 0x866b07, KEY_7 },
	{ 0x866b08, KEY_8 },
	{ 0x866b09, KEY_9 },
	{ 0x866b00, KEY_0 },

	{ 0x866b0d, KEY_CHANNELUP },
	{ 0x866b19, KEY_CHANNELDOWN },
	{ 0x866b10, KEY_VOLUMEUP },	/* vol + */
	{ 0x866b0c, KEY_VOLUMEDOWN },	/* vol - */

	{ 0x866b0a, KEY_CAMERA },	/* snapshot */
	{ 0x866b0b, KEY_ZOOM },		/* zoom */

	{ 0x866b1b, KEY_BACKSPACE },
	{ 0x866b15, KEY_ENTER },

	{ 0x866b1d, KEY_UP },
	{ 0x866b1e, KEY_DOWN },
	{ 0x866b0e, KEY_LEFT },
	{ 0x866b0f, KEY_RIGHT },

	{ 0x866b18, KEY_RECORD },
	{ 0x866b1a, KEY_STOP },
};

static struct rc_map_list pixelview_map = {
	.map = {
		.scan    = pixelview_002t,
		.size    = ARRAY_SIZE(pixelview_002t),
		.rc_type = RC_TYPE_NEC,
		.name    = RC_MAP_PIXELVIEW_002T,
	}
};

/* Register the keymap with rc-core at module load. */
static int __init init_rc_map_pixelview(void)
{
	return rc_map_register(&pixelview_map);
}

static void __exit exit_rc_map_pixelview(void)
{
	rc_map_unregister(&pixelview_map);
}

module_init(init_rc_map_pixelview)
module_exit(exit_rc_map_pixelview)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
SlimRoms/kernel_motorola_ghost
drivers/media/rc/keymaps/rc-cinergy-1400.c
7637
1917
/* cinergy-1400.h - Keytable for cinergy_1400 Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Cinergy 1400 DVB-T */
/* Scancode -> input-layer keycode table. */
static struct rc_map_table cinergy_1400[] = {
	{ 0x01, KEY_POWER },
	{ 0x02, KEY_1 },
	{ 0x03, KEY_2 },
	{ 0x04, KEY_3 },
	{ 0x05, KEY_4 },
	{ 0x06, KEY_5 },
	{ 0x07, KEY_6 },
	{ 0x08, KEY_7 },
	{ 0x09, KEY_8 },
	{ 0x0a, KEY_9 },
	{ 0x0c, KEY_0 },

	{ 0x0b, KEY_VIDEO },
	{ 0x0d, KEY_REFRESH },
	{ 0x0e, KEY_SELECT },
	{ 0x0f, KEY_EPG },
	{ 0x10, KEY_UP },
	{ 0x11, KEY_LEFT },
	{ 0x12, KEY_OK },
	{ 0x13, KEY_RIGHT },
	{ 0x14, KEY_DOWN },
	{ 0x15, KEY_TEXT },
	{ 0x16, KEY_INFO },

	{ 0x17, KEY_RED },
	{ 0x18, KEY_GREEN },
	{ 0x19, KEY_YELLOW },
	{ 0x1a, KEY_BLUE },

	{ 0x1b, KEY_CHANNELUP },
	{ 0x1c, KEY_VOLUMEUP },
	{ 0x1d, KEY_MUTE },
	{ 0x1e, KEY_VOLUMEDOWN },
	{ 0x1f, KEY_CHANNELDOWN },

	{ 0x40, KEY_PAUSE },
	{ 0x4c, KEY_PLAY },
	{ 0x58, KEY_RECORD },
	{ 0x54, KEY_PREVIOUS },
	{ 0x48, KEY_STOP },
	{ 0x5c, KEY_NEXT },
};

static struct rc_map_list cinergy_1400_map = {
	.map = {
		.scan    = cinergy_1400,
		.size    = ARRAY_SIZE(cinergy_1400),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_CINERGY_1400,
	}
};

/* Register the keymap with rc-core at module load. */
static int __init init_rc_map_cinergy_1400(void)
{
	return rc_map_register(&cinergy_1400_map);
}

static void __exit exit_rc_map_cinergy_1400(void)
{
	rc_map_unregister(&cinergy_1400_map);
}

module_init(init_rc_map_cinergy_1400)
module_exit(exit_rc_map_cinergy_1400)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
shakalaca/ASUS_ZenFone_ZE500KL
kernel/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
7637
2058
/* encore-enltv-fm53.h - Keytable for encore_enltv_fm53 Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Encore ENLTV-FM v5.3
   Mauro Carvalho Chehab <mchehab@infradead.org>
 */
/* Scancode -> input-layer keycode table. */
static struct rc_map_table encore_enltv_fm53[] = {
	{ 0x10, KEY_POWER2},
	{ 0x06, KEY_MUTE},

	{ 0x09, KEY_1},
	{ 0x1d, KEY_2},
	{ 0x1f, KEY_3},
	{ 0x19, KEY_4},
	{ 0x1b, KEY_5},
	{ 0x11, KEY_6},
	{ 0x17, KEY_7},
	{ 0x12, KEY_8},
	{ 0x16, KEY_9},
	{ 0x48, KEY_0},

	{ 0x04, KEY_LIST},		/* -/-- */
	{ 0x40, KEY_LAST},		/* recall */

	{ 0x02, KEY_MODE},		/* TV/AV */
	{ 0x05, KEY_CAMERA},		/* SNAPSHOT */

	{ 0x4c, KEY_CHANNELUP},		/* UP */
	{ 0x00, KEY_CHANNELDOWN},	/* DOWN */
	{ 0x0d, KEY_VOLUMEUP},		/* RIGHT */
	{ 0x15, KEY_VOLUMEDOWN},	/* LEFT */
	{ 0x49, KEY_ENTER},		/* OK */

	{ 0x54, KEY_RECORD},
	{ 0x4d, KEY_PLAY},		/* pause */

	{ 0x1e, KEY_MENU},		/* video setting */
	{ 0x0e, KEY_RIGHT},		/* <- */
	{ 0x1a, KEY_LEFT},		/* -> */

	{ 0x0a, KEY_CLEAR},		/* video default */
	{ 0x0c, KEY_ZOOM},		/* hide panel */
	{ 0x47, KEY_SLEEP},		/* shutdown */
};

static struct rc_map_list encore_enltv_fm53_map = {
	.map = {
		.scan    = encore_enltv_fm53,
		.size    = ARRAY_SIZE(encore_enltv_fm53),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_ENCORE_ENLTV_FM53,
	}
};

/* Register the keymap with rc-core at module load. */
static int __init init_rc_map_encore_enltv_fm53(void)
{
	return rc_map_register(&encore_enltv_fm53_map);
}

static void __exit exit_rc_map_encore_enltv_fm53(void)
{
	rc_map_unregister(&encore_enltv_fm53_map);
}

module_init(init_rc_map_encore_enltv_fm53)
module_exit(exit_rc_map_encore_enltv_fm53)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
jamison904/Nexus7_kernel
drivers/media/rc/keymaps/rc-flyvideo.c
7637
1914
/* flyvideo.h - Keytable for flyvideo Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Scancode -> input-layer keycode table for the FlyVideo remote. */
static struct rc_map_table flyvideo[] = {
	{ 0x0f, KEY_0 },
	{ 0x03, KEY_1 },
	{ 0x04, KEY_2 },
	{ 0x05, KEY_3 },
	{ 0x07, KEY_4 },
	{ 0x08, KEY_5 },
	{ 0x09, KEY_6 },
	{ 0x0b, KEY_7 },
	{ 0x0c, KEY_8 },
	{ 0x0d, KEY_9 },

	{ 0x0e, KEY_MODE },	/* Air/Cable */
	{ 0x11, KEY_VIDEO },	/* Video */
	{ 0x15, KEY_AUDIO },	/* Audio */
	{ 0x00, KEY_POWER },	/* Power */
	{ 0x18, KEY_TUNER },	/* AV Source */
	{ 0x02, KEY_ZOOM },	/* Fullscreen */
	{ 0x1a, KEY_LANGUAGE },	/* Stereo */
	{ 0x1b, KEY_MUTE },	/* Mute */
	{ 0x14, KEY_VOLUMEUP },	/* Volume + */
	{ 0x17, KEY_VOLUMEDOWN },/* Volume - */
	{ 0x12, KEY_CHANNELUP },/* Channel + */
	{ 0x13, KEY_CHANNELDOWN },/* Channel - */
	{ 0x06, KEY_AGAIN },	/* Recall */
	{ 0x10, KEY_ENTER },	/* Enter */

	{ 0x19, KEY_BACK },	/* Rewind  ( <<< ) */
	{ 0x1f, KEY_FORWARD },	/* Forward ( >>> ) */
	{ 0x0a, KEY_ANGLE },	/* no label, may be used as the PAUSE button */
};

static struct rc_map_list flyvideo_map = {
	.map = {
		.scan    = flyvideo,
		.size    = ARRAY_SIZE(flyvideo),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_FLYVIDEO,
	}
};

/* Register the keymap with rc-core at module load. */
static int __init init_rc_map_flyvideo(void)
{
	return rc_map_register(&flyvideo_map);
}

static void __exit exit_rc_map_flyvideo(void)
{
	rc_map_unregister(&flyvideo_map);
}

module_init(init_rc_map_flyvideo)
module_exit(exit_rc_map_flyvideo)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
Dev-Ghost/android_kernel_taoshan
drivers/md/dm-target.c
8149
2670
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited
 *
 * This file is released under the GPL.
 */

/*
 * Registry of device-mapper target types.  Target modules register a
 * struct target_type here; table construction looks types up by name
 * (auto-loading "dm-<name>" modules on demand).  The list is protected
 * by the _lock rw-semaphore.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/bio.h>

#define DM_MSG_PREFIX "target"

static LIST_HEAD(_targets);
static DECLARE_RWSEM(_lock);

#define DM_MOD_NAME_SIZE 32

/* Look up a target type by name.  Caller must hold _lock (read or write). */
static inline struct target_type *__find_target_type(const char *name)
{
	struct target_type *tt;

	list_for_each_entry(tt, &_targets, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

/*
 * Look up a target type and take a module reference on it, so it cannot
 * be unloaded while in use.  Returns NULL if the type is not registered
 * or its module is going away (try_module_get() failed).
 */
static struct target_type *get_target_type(const char *name)
{
	struct target_type *tt;

	down_read(&_lock);

	tt = __find_target_type(name);
	if (tt && !try_module_get(tt->module))
		tt = NULL;

	up_read(&_lock);
	return tt;
}

/* Best-effort demand-load of the module implementing target "name". */
static void load_module(const char *name)
{
	request_module("dm-%s", name);
}

/*
 * Get a referenced target type, loading its module first if it is not
 * yet registered.  Pair with dm_put_target_type().
 */
struct target_type *dm_get_target_type(const char *name)
{
	struct target_type *tt = get_target_type(name);

	if (!tt) {
		load_module(name);
		tt = get_target_type(name);
	}

	return tt;
}

/*
 * Drop the module reference taken by dm_get_target_type().
 * NOTE(review): the read lock here presumably serialises the put against
 * concurrent (un)registration — confirm before restructuring the locking.
 */
void dm_put_target_type(struct target_type *tt)
{
	down_read(&_lock);
	module_put(tt->module);
	up_read(&_lock);
}

/*
 * Call iter_func(tt, param) for every registered target type, holding
 * the registry read lock across the walk.  Always returns 0.
 */
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param)
{
	struct target_type *tt;

	down_read(&_lock);
	list_for_each_entry(tt, &_targets, list)
		iter_func(tt, param);
	up_read(&_lock);

	return 0;
}

/* Add a target type to the registry; -EEXIST if the name is taken. */
int dm_register_target(struct target_type *tt)
{
	int rv = 0;

	down_write(&_lock);
	if (__find_target_type(tt->name))
		rv = -EEXIST;
	else
		list_add(&tt->list, &_targets);

	up_write(&_lock);
	return rv;
}

/* Remove a target type; BUG()s if it was never registered. */
void dm_unregister_target(struct target_type *tt)
{
	down_write(&_lock);
	if (!__find_target_type(tt->name)) {
		DMCRIT("Unregistering unrecognised target: %s", tt->name);
		BUG();
	}

	list_del(&tt->list);

	up_write(&_lock);
}

/*
 * io-err: always fails an io, useful for bringing
 * up LVs that have holes in them.
 */
static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
{
	/*
	 * Return error for discards instead of -EOPNOTSUPP
	 */
	tt->num_discard_requests = 1;

	return 0;
}

static void io_err_dtr(struct dm_target *tt)
{
	/* empty */
}

/* Fail every bio mapped to an "error" target. */
static int io_err_map(struct dm_target *tt, struct bio *bio,
		      union map_info *map_context)
{
	return -EIO;
}

static struct target_type error_target = {
	.name = "error",
	.version = {1, 0, 1},
	.ctr = io_err_ctr,
	.dtr = io_err_dtr,
	.map = io_err_map,
};

/* Register the built-in "error" target at core init. */
int __init dm_target_init(void)
{
	return dm_register_target(&error_target);
}

void dm_target_exit(void)
{
	dm_unregister_target(&error_target);
}

EXPORT_SYMBOL(dm_register_target);
EXPORT_SYMBOL(dm_unregister_target);
gpl-2.0
hiikezoe/android_kernel_panasonic_p02e
drivers/staging/comedi/drivers/adl_pci6208.c
8149
13009
/*
   comedi/drivers/adl_pci6208.c

   Hardware driver for ADLink 6208 series cards:
	card	     | voltage output    | current output
	-------------+-------------------+---------------
	PCI-6208V    |  8 channels       | -
	PCI-6216V    | 16 channels       | -
	PCI-6208A    |  8 channels       | 8 channels

   COMEDI - Linux Control and Measurement Device Interface
   Copyright (C) 2000 David A. Schleef <ds@schleef.org>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
Driver: adl_pci6208
Description: ADLink PCI-6208A
Devices: [ADLink] PCI-6208A (adl_pci6208)
Author: nsyeow <nsyeow@pd.jaring.my>
Updated: Fri, 30 Jan 2004 14:44:27 +0800
Status: untested
Configuration Options: none
References:
	- ni_660x.c
	- adl_pci9111.c		copied the entire pci setup section
	- adl_pci9118.c
*/
/*
 * These headers should be followed by a blank line, and any comments
 * you wish to say about the driver.  The comment area is the place
 * to put any known bugs, limitations, unsupported features, supported
 * command triggers, whether or not commands are supported on particular
 * subdevices, etc.
 *
 * Somewhere in the comment should be information about configuration
 * options that are used with comedi_config.
 */
#include "../comedidev.h"
#include "comedi_pci.h"

#define PCI6208_DRIVER_NAME	"adl_pci6208"

/* Board descriptions */
struct pci6208_board {
	const char *name;
	unsigned short dev_id;	/* `lspci` will show you this */
	int ao_chans;		/* number of analog output channels */
	/* int ao_bits; */
};

static const struct pci6208_board pci6208_boards[] = {
	/*{
	   .name = "pci6208v",
	   .dev_id = 0x6208,	// not sure
	   .ao_chans = 8
	   // , .ao_bits = 16
	   },
	   {
	   .name = "pci6216v",
	   .dev_id = 0x6208,	// not sure
	   .ao_chans = 16
	   // , .ao_bits = 16
	   }, */
	{
	 .name = "pci6208a",
	 .dev_id = 0x6208,
	 .ao_chans = 8
	 /* , .ao_bits = 16 */
	 }
};

/* This is used by modprobe to translate PCI IDs to drivers.  Should
 * only be used for PCI and ISA-PnP devices */
static DEFINE_PCI_DEVICE_TABLE(pci6208_pci_table) = {
	/* { PCI_VENDOR_ID_ADLINK, 0x6208, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, */
	/* { PCI_VENDOR_ID_ADLINK, 0x6208, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, */
	{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, 0x6208) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pci6208_pci_table);

/* Will be initialized in pci6208_find device(). */
#define thisboard ((const struct pci6208_board *)dev->board_ptr)

struct pci6208_private {
	int data;
	struct pci_dev *pci_dev;	/* for a PCI device */
	/*
	 * Last raw value written to each AO channel, for readback.
	 * Sized for the largest supported board (16 channels on the
	 * PCI-6216V); the previous size of 2 overflowed for chan >= 2.
	 */
	unsigned int ao_readback[16];
};

#define devpriv ((struct pci6208_private *)dev->private)

static int pci6208_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it);
static int pci6208_detach(struct comedi_device *dev);

static struct comedi_driver driver_pci6208 = {
	.driver_name = PCI6208_DRIVER_NAME,
	.module = THIS_MODULE,
	.attach = pci6208_attach,
	.detach = pci6208_detach,
};

/* Glue for automatic PCI configuration through the comedi core. */
static int __devinit driver_pci6208_pci_probe(struct pci_dev *dev,
					      const struct pci_device_id *ent)
{
	return comedi_pci_auto_config(dev, driver_pci6208.driver_name);
}

static void __devexit driver_pci6208_pci_remove(struct pci_dev *dev)
{
	comedi_pci_auto_unconfig(dev);
}

static struct pci_driver driver_pci6208_pci_driver = {
	.id_table = pci6208_pci_table,
	.probe = &driver_pci6208_pci_probe,
	.remove = __devexit_p(&driver_pci6208_pci_remove)
};

static int __init driver_pci6208_init_module(void)
{
	int retval;

	retval = comedi_driver_register(&driver_pci6208);
	if (retval < 0)
		return retval;

	driver_pci6208_pci_driver.name = (char *)driver_pci6208.driver_name;
	return pci_register_driver(&driver_pci6208_pci_driver);
}

static void __exit driver_pci6208_cleanup_module(void)
{
	pci_unregister_driver(&driver_pci6208_pci_driver);
	comedi_driver_unregister(&driver_pci6208);
}

module_init(driver_pci6208_init_module);
module_exit(driver_pci6208_cleanup_module);

static int pci6208_find_device(struct comedi_device *dev, int bus, int slot);
static int
pci6208_pci_setup(struct pci_dev *pci_dev, unsigned long *io_base_ptr,
		  int dev_minor);

/*read/write functions*/
static int pci6208_ao_winsn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data);
static int pci6208_ao_rinsn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data);
/* static int pci6208_dio_insn_bits (struct comedi_device *dev,
 * struct comedi_subdevice *s, */
/* struct comedi_insn *insn,unsigned int *data); */
/* static int pci6208_dio_insn_config(struct comedi_device *dev,
 * struct comedi_subdevice *s, */
/* struct comedi_insn *insn,unsigned int *data); */

/*
 * Attach is called by the Comedi core to configure the driver
 * for a particular board.  If you specified a board_name array
 * in the driver structure, dev->board_ptr contains that
 * address.
 */
static int pci6208_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it)
{
	struct comedi_subdevice *s;
	int retval;
	unsigned long io_base;

	printk(KERN_INFO "comedi%d: pci6208: ", dev->minor);

	retval = alloc_private(dev, sizeof(struct pci6208_private));
	if (retval < 0)
		return retval;

	retval = pci6208_find_device(dev, it->options[0], it->options[1]);
	if (retval < 0)
		return retval;

	retval = pci6208_pci_setup(devpriv->pci_dev, &io_base, dev->minor);
	if (retval < 0)
		return retval;

	dev->iobase = io_base;
	dev->board_name = thisboard->name;

	/*
	 * Allocate the subdevice structures.  alloc_subdevice() is a
	 * convenient macro defined in comedidev.h.
	 */
	if (alloc_subdevices(dev, 2) < 0)
		return -ENOMEM;

	s = dev->subdevices + 0;
	/* analog output subdevice */
	s->type = COMEDI_SUBD_AO;
	s->subdev_flags = SDF_WRITABLE;	/* anything else to add here?? */
	s->n_chan = thisboard->ao_chans;
	s->maxdata = 0xffff;	/* 16-bit DAC */
	s->range_table = &range_bipolar10;	/* this needs to be checked. */
	s->insn_write = pci6208_ao_winsn;
	s->insn_read = pci6208_ao_rinsn;

	/* s=dev->subdevices+1; */
	/* digital i/o subdevice */
	/* s->type=COMEDI_SUBD_DIO; */
	/* s->subdev_flags=SDF_READABLE|SDF_WRITABLE; */
	/* s->n_chan=16; */
	/* s->maxdata=1; */
	/* s->range_table=&range_digital; */
	/* s->insn_bits = pci6208_dio_insn_bits; */
	/* s->insn_config = pci6208_dio_insn_config; */

	printk(KERN_INFO "attached\n");

	return 1;
}

/*
 * _detach is called to deconfigure a device.  It should deallocate
 * resources.
 * This function is also called when _attach() fails, so it should be
 * careful not to release resources that were not necessarily
 * allocated by _attach().  dev->private and dev->subdevices are
 * deallocated automatically by the core.
 */
static int pci6208_detach(struct comedi_device *dev)
{
	printk(KERN_INFO "comedi%d: pci6208: remove\n", dev->minor);

	if (devpriv && devpriv->pci_dev) {
		if (dev->iobase)
			comedi_pci_disable(devpriv->pci_dev);
		pci_dev_put(devpriv->pci_dev);
	}

	return 0;
}

/*
 * Write insn->n samples to AO channel CR_CHAN(insn->chanspec).
 * Values are converted from comedi's offset-binary form by flipping
 * the MSB, and the DAC busy flag (bit 0 of the base register) is
 * polled before each write.
 *
 * NOTE(review): ao_readback stores the MSB-flipped hardware value, so
 * pci6208_ao_rinsn returns the encoded value rather than the value the
 * caller wrote — confirm whether this is intended.
 */
static int pci6208_ao_winsn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	int i = 0, Data_Read;
	unsigned short chan = CR_CHAN(insn->chanspec);
	unsigned long invert = 1 << (16 - 1);
	unsigned long out_value;
	/* Writing a list of values to an AO channel is probably not
	 * very useful, but that's how the interface is defined. */
	for (i = 0; i < insn->n; i++) {
		out_value = data[i] ^ invert;
		/* a typical programming sequence */
		do {
			Data_Read = (inw(dev->iobase) & 1);
		} while (Data_Read);
		outw(out_value, dev->iobase + (0x02 * chan));
		devpriv->ao_readback[chan] = out_value;
	}

	/* return the number of samples read/written */
	return i;
}

/* AO subdevices should have a read insn as well as a write insn.
 * Usually this means copying a value stored in devpriv. */
static int pci6208_ao_rinsn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	int i;
	int chan = CR_CHAN(insn->chanspec);

	for (i = 0; i < insn->n; i++)
		data[i] = devpriv->ao_readback[chan];

	return i;
}

/* DIO devices are slightly special.  Although it is possible to
 * implement the insn_read/insn_write interface, it is much more
 * useful to applications if you implement the insn_bits interface.
 * This allows packed reading/writing of the DIO channels.  The
 * comedi core can convert between insn_bits and insn_read/write */
/* static int pci6208_dio_insn_bits(struct comedi_device *dev,
 * struct comedi_subdevice *s, */
/* struct comedi_insn *insn,unsigned int *data) */
/* { */
/* if(insn->n!=2)return -EINVAL; */

/* The insn data is a mask in data[0] and the new data
 * in data[1], each channel corresponding to a bit. */
/* if(data[0]){ */
/* s->state &= ~data[0]; */
/* s->state |= data[0]&data[1]; */
/* Write out the new digital output lines */
/* outw(s->state,dev->iobase + SKEL_DIO); */
/* } */

/* on return, data[1] contains the value of the digital
 * input and output lines. */
/* data[1]=inw(dev->iobase + SKEL_DIO); */
/* or we could just return the software copy of the output values if
 * it was a purely digital output subdevice */
/* data[1]=s->state; */

/* return 2; */
/* } */

/* static int pci6208_dio_insn_config(struct comedi_device *dev,
 * struct comedi_subdevice *s, */
/* struct comedi_insn *insn,unsigned int *data) */
/* { */
/* int chan=CR_CHAN(insn->chanspec); */

/* The input or output configuration of each digital line is
 * configured by a special insn_config instruction.  chanspec
 * contains the channel to be changed, and data[0] contains the
 * value COMEDI_INPUT or COMEDI_OUTPUT. */

/* if(data[0]==COMEDI_OUTPUT){ */
/* s->io_bits |= 1<<chan; */
/* }else{ */
/* s->io_bits &= ~(1<<chan); */
/* } */
/* outw(s->io_bits,dev->iobase + SKEL_DIO_CONFIG); */

/* return 1; */
/* } */

/*
 * Scan the PCI bus for a supported ADLink board.  If bus/slot options
 * were given, only that location matches.  On success dev->board_ptr
 * and devpriv->pci_dev are filled in (with a reference held).
 */
static int pci6208_find_device(struct comedi_device *dev, int bus, int slot)
{
	struct pci_dev *pci_dev = NULL;
	int i;

	for_each_pci_dev(pci_dev) {
		if (pci_dev->vendor == PCI_VENDOR_ID_ADLINK) {
			for (i = 0; i < ARRAY_SIZE(pci6208_boards); i++) {
				if (pci6208_boards[i].dev_id ==
					pci_dev->device) {
					/*
					 * was a particular bus/slot requested?
					 */
					if ((bus != 0) || (slot != 0)) {
						/*
						 * are we on the
						 * wrong bus/slot?
						 */
						if (pci_dev->bus->number
						    != bus ||
						    PCI_SLOT(pci_dev->devfn)
						    != slot) {
							continue;
						}
					}
					dev->board_ptr = pci6208_boards + i;
					goto found;
				}
			}
		}
	}

	printk(KERN_ERR "comedi%d: no supported board found! "
	       "(req. bus/slot : %d/%d)\n",
	       dev->minor, bus, slot);
	return -EIO;

found:
	printk("comedi%d: found %s (b:s:f=%d:%d:%d) , irq=%d\n",
	       dev->minor,
	       pci6208_boards[i].name,
	       pci_dev->bus->number,
	       PCI_SLOT(pci_dev->devfn),
	       PCI_FUNC(pci_dev->devfn), pci_dev->irq);

	/* TODO: Warn about non-tested boards. */
	/* switch(board->device_id) */
	/* { */
	/* }; */

	devpriv->pci_dev = pci_dev;

	return 0;
}

/*
 * Enable the PCI device, request its regions, and report/return the
 * base address of the 6208 register block (PCI BAR 2) via io_base_ptr.
 */
static int
pci6208_pci_setup(struct pci_dev *pci_dev, unsigned long *io_base_ptr,
		  int dev_minor)
{
	unsigned long io_base, io_range, lcr_io_base, lcr_io_range;

	/*  Enable PCI device and request regions */
	if (comedi_pci_enable(pci_dev, PCI6208_DRIVER_NAME) < 0) {
		printk(KERN_ERR "comedi%d: Failed to enable PCI device "
			"and request regions\n",
			dev_minor);
		return -EIO;
	}
	/* Read local configuration register
	 * base address [PCI_BASE_ADDRESS #1].
	 */
	lcr_io_base = pci_resource_start(pci_dev, 1);
	lcr_io_range = pci_resource_len(pci_dev, 1);

	printk(KERN_INFO "comedi%d: local config registers at address"
			" 0x%4lx [0x%4lx]\n",
		dev_minor, lcr_io_base, lcr_io_range);

	/*  Read PCI6208 register base address [PCI_BASE_ADDRESS #2]. */
	io_base = pci_resource_start(pci_dev, 2);
	io_range = pci_resource_end(pci_dev, 2) - io_base + 1;

	printk("comedi%d: 6208 registers at address 0x%4lx [0x%4lx]\n",
	       dev_minor, io_base, io_range);

	*io_base_ptr = io_base;
	/* devpriv->io_range = io_range; */
	/* devpriv->is_valid=0; */
	/* devpriv->lcr_io_base=lcr_io_base; */
	/* devpriv->lcr_io_range=lcr_io_range; */

	return 0;
}

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
zzpu/linux-stack
security/integrity/ima/ima_template_lib.c
214
8817
/*
 * Copyright (C) 2013 Politecnico di Torino, Italy
 * TORSEC group -- http://security.polito.it
 *
 * Author: Roberto Sassu <roberto.sassu@polito.it>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * File: ima_template_lib.c
 *      Library of supported template fields.
 */
#include "ima_template_lib.h"

/*
 * Return true only for SHA1 and MD5, the two algorithms whose digests the
 * legacy template format stores without an algorithm prefix (see
 * ima_eventdigest_init_common(), which selects DATA_FMT_DIGEST_WITH_ALGO
 * for every other algorithm).
 */
static bool ima_template_hash_algo_allowed(u8 algo)
{
	if (algo == HASH_ALGO_SHA1 || algo == HASH_ALGO_MD5)
		return true;
	return false;
}

/* Encodings a template field's payload can use. */
enum data_formats {
	DATA_FMT_DIGEST = 0,		/* raw digest bytes */
	DATA_FMT_DIGEST_WITH_ALGO,	/* "<algo>:" + '\0' + raw digest */
	DATA_FMT_STRING,		/* NUL-terminated string */
	DATA_FMT_HEX			/* raw bytes, shown via ima_print_digest() */
};

/*
 * Copy @datalen bytes of @data into a freshly allocated buffer and attach
 * it to @field_data (->data and ->len).
 *
 * For DATA_FMT_STRING one extra byte is allocated; because the buffer
 * comes from kzalloc() that byte is zero, so the stored string is always
 * NUL-terminated.
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.  On success the
 * caller owns field_data->data.
 */
static int ima_write_template_field_data(const void *data, const u32 datalen,
					 enum data_formats datafmt,
					 struct ima_field_data *field_data)
{
	u8 *buf, *buf_ptr;
	u32 buflen = datalen;

	if (datafmt == DATA_FMT_STRING)
		buflen = datalen + 1;

	buf = kzalloc(buflen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, data, datalen);

	/*
	 * Replace all space characters with underscore for event names and
	 * strings. This avoid that, during the parsing of a measurements list,
	 * filenames with spaces or that end with the suffix ' (deleted)' are
	 * split into multiple template fields (the space is the delimitator
	 * character for measurements lists in ASCII format).
*/ if (datafmt == DATA_FMT_STRING) { for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++) if (*buf_ptr == ' ') *buf_ptr = '_'; } field_data->data = buf; field_data->len = buflen; return 0; } static void ima_show_template_data_ascii(struct seq_file *m, enum ima_show_type show, enum data_formats datafmt, struct ima_field_data *field_data) { u8 *buf_ptr = field_data->data; u32 buflen = field_data->len; switch (datafmt) { case DATA_FMT_DIGEST_WITH_ALGO: buf_ptr = strnchr(field_data->data, buflen, ':'); if (buf_ptr != field_data->data) seq_printf(m, "%s", field_data->data); /* skip ':' and '\0' */ buf_ptr += 2; buflen -= buf_ptr - field_data->data; case DATA_FMT_DIGEST: case DATA_FMT_HEX: if (!buflen) break; ima_print_digest(m, buf_ptr, buflen); break; case DATA_FMT_STRING: seq_printf(m, "%s", buf_ptr); break; default: break; } } static void ima_show_template_data_binary(struct seq_file *m, enum ima_show_type show, enum data_formats datafmt, struct ima_field_data *field_data) { u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ? 
strlen(field_data->data) : field_data->len; if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) ima_putc(m, &len, sizeof(len)); if (!len) return; ima_putc(m, field_data->data, len); } static void ima_show_template_field_data(struct seq_file *m, enum ima_show_type show, enum data_formats datafmt, struct ima_field_data *field_data) { switch (show) { case IMA_SHOW_ASCII: ima_show_template_data_ascii(m, show, datafmt, field_data); break; case IMA_SHOW_BINARY: case IMA_SHOW_BINARY_NO_FIELD_LEN: case IMA_SHOW_BINARY_OLD_STRING_FMT: ima_show_template_data_binary(m, show, datafmt, field_data); break; default: break; } } void ima_show_template_digest(struct seq_file *m, enum ima_show_type show, struct ima_field_data *field_data) { ima_show_template_field_data(m, show, DATA_FMT_DIGEST, field_data); } void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show, struct ima_field_data *field_data) { ima_show_template_field_data(m, show, DATA_FMT_DIGEST_WITH_ALGO, field_data); } void ima_show_template_string(struct seq_file *m, enum ima_show_type show, struct ima_field_data *field_data) { ima_show_template_field_data(m, show, DATA_FMT_STRING, field_data); } void ima_show_template_sig(struct seq_file *m, enum ima_show_type show, struct ima_field_data *field_data) { ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data); } static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo, struct ima_field_data *field_data) { /* * digest formats: * - DATA_FMT_DIGEST: digest * - DATA_FMT_DIGEST_WITH_ALGO: [<hash algo>] + ':' + '\0' + digest, * where <hash algo> is provided if the hash algoritm is not * SHA1 or MD5 */ u8 buffer[CRYPTO_MAX_ALG_NAME + 2 + IMA_MAX_DIGEST_SIZE] = { 0 }; enum data_formats fmt = DATA_FMT_DIGEST; u32 offset = 0; if (hash_algo < HASH_ALGO__LAST) { fmt = DATA_FMT_DIGEST_WITH_ALGO; offset += snprintf(buffer, CRYPTO_MAX_ALG_NAME + 1, "%s", hash_algo_name[hash_algo]); buffer[offset] = ':'; offset += 2; } if (digest) memcpy(buffer + 
offset, digest, digestsize); else /* * If digest is NULL, the event being recorded is a violation. * Make room for the digest by increasing the offset of * IMA_DIGEST_SIZE. */ offset += IMA_DIGEST_SIZE; return ima_write_template_field_data(buffer, offset + digestsize, fmt, field_data); } /* * This function writes the digest of an event (with size limit). */ int ima_eventdigest_init(struct ima_event_data *event_data, struct ima_field_data *field_data) { struct { struct ima_digest_data hdr; char digest[IMA_MAX_DIGEST_SIZE]; } hash; u8 *cur_digest = NULL; u32 cur_digestsize = 0; struct inode *inode; int result; memset(&hash, 0, sizeof(hash)); if (event_data->violation) /* recording a violation. */ goto out; if (ima_template_hash_algo_allowed(event_data->iint->ima_hash->algo)) { cur_digest = event_data->iint->ima_hash->digest; cur_digestsize = event_data->iint->ima_hash->length; goto out; } if (!event_data->file) /* missing info to re-calculate the digest */ return -EINVAL; inode = file_inode(event_data->file); hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ? ima_hash_algo : HASH_ALGO_SHA1; result = ima_calc_file_hash(event_data->file, &hash.hdr); if (result) { integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, event_data->filename, "collect_data", "failed", result, 0); return result; } cur_digest = hash.hdr.digest; cur_digestsize = hash.hdr.length; out: return ima_eventdigest_init_common(cur_digest, cur_digestsize, HASH_ALGO__LAST, field_data); } /* * This function writes the digest of an event (without size limit). */ int ima_eventdigest_ng_init(struct ima_event_data *event_data, struct ima_field_data *field_data) { u8 *cur_digest = NULL, hash_algo = HASH_ALGO_SHA1; u32 cur_digestsize = 0; if (event_data->violation) /* recording a violation. 
*/ goto out; cur_digest = event_data->iint->ima_hash->digest; cur_digestsize = event_data->iint->ima_hash->length; hash_algo = event_data->iint->ima_hash->algo; out: return ima_eventdigest_init_common(cur_digest, cur_digestsize, hash_algo, field_data); } static int ima_eventname_init_common(struct ima_event_data *event_data, struct ima_field_data *field_data, bool size_limit) { const char *cur_filename = NULL; u32 cur_filename_len = 0; BUG_ON(event_data->filename == NULL && event_data->file == NULL); if (event_data->filename) { cur_filename = event_data->filename; cur_filename_len = strlen(event_data->filename); if (!size_limit || cur_filename_len <= IMA_EVENT_NAME_LEN_MAX) goto out; } if (event_data->file) { cur_filename = event_data->file->f_path.dentry->d_name.name; cur_filename_len = strlen(cur_filename); } else /* * Truncate filename if the latter is too long and * the file descriptor is not available. */ cur_filename_len = IMA_EVENT_NAME_LEN_MAX; out: return ima_write_template_field_data(cur_filename, cur_filename_len, DATA_FMT_STRING, field_data); } /* * This function writes the name of an event (with size limit). */ int ima_eventname_init(struct ima_event_data *event_data, struct ima_field_data *field_data) { return ima_eventname_init_common(event_data, field_data, true); } /* * This function writes the name of an event (without size limit). 
*/
int ima_eventname_ng_init(struct ima_event_data *event_data,
			  struct ima_field_data *field_data)
{
	/* "ng" variant: size_limit=false, so no truncation to
	 * IMA_EVENT_NAME_LEN_MAX is applied by the common helper. */
	return ima_eventname_init_common(event_data, field_data, false);
}

/*
 * ima_eventsig_init - include the file signature as part of the template data
 *
 * Writes event_data->xattr_value into @field_data only when the xattr is
 * present and its type is EVM_IMA_XATTR_DIGSIG; otherwise the field is left
 * empty and 0 is returned.  Returns -ENOMEM if the field buffer cannot be
 * allocated (from ima_write_template_field_data()).
 */
int ima_eventsig_init(struct ima_event_data *event_data,
		      struct ima_field_data *field_data)
{
	enum data_formats fmt = DATA_FMT_HEX;
	struct evm_ima_xattr_data *xattr_value = event_data->xattr_value;
	int xattr_len = event_data->xattr_len;
	int rc = 0;

	/* No xattr, or not a digital signature: record nothing. */
	if ((!xattr_value) || (xattr_value->type != EVM_IMA_XATTR_DIGSIG))
		goto out;

	rc = ima_write_template_field_data(xattr_value, xattr_len, fmt,
					   field_data);
out:
	return rc;
}
gpl-2.0
mobius1484/Satori
drivers/broadcast/tdmb/lg2102/LGD_EWSDEC.c
214
7193
#include "LGD_INCLUDES.h" // Emergency. Warning. Systems #ifdef LGD_EWS_SOURCE_ENABLE extern ST_TRANSMISSION m_ucTransMode; /*********************************************************************** LGD_EWS_INIT()ÇÔ¼ö´Â ¹Ýµå½Ã LGD_CHANNEL_START()ÇÔ¼ö È£ÃâÀü¿¡ È£ÃâµÇ¾î¾ß ÇÑ´Ù. LGD_CHANNEL_START()¿¡ ¸®ÅϰªÀÌ LGD_SUCCESSÀ̸é48ms ÁÖ±â·Î LGD_EWS_FRAMECHECK() ÇÔ¼ö¸¦ È£ÃâÇÑ´Ù. LGD_EWS_FRAMECHECK() == LGD_SUCCESS À̸é g_stEWSMsgº¯¼ö¿¡ °ªÀ» º¹»çÇϰí, LGD_EWS_INIT()ÇÔ¼ö¸¦ È£ÃâÇÑ´Ù. typedef struct _tagST_OUTPUT_EWS { LGD_INT16 nNextSeqNo; Next Segment No LGD_INT16 nTotalSeqNo; Total Segment No LGD_UINT16 nDataPos; data posion LGD_UINT8 ucEWSStartEn; EWS starting flag LGD_UINT8 ucIsEWSGood; EWS Parsing flag LGD_UINT8 ucMsgGovernment; ¸Þ½ÃÁö ¹ß·É±â°ü LGD_UINT8 ucMsgID; ¸Þ½ÃÁö °íÀ¯¹øÈ£ ST_DATE_T stDate; ÀϽà LGD_INT8 acKinds[4]; Àç³­ Á¾·ù LGD_UINT8 cPrecedence; ¿ì¼± ¼øÀ§ LGD_UINT32 ulTime; Àç³­ ½Ã°£ LGD_UINT8 ucForm; Àç³­ Áö¿ªÇü½Ä LGD_UINT8 ucResionCnt; Àç³­ Áö¿ª¼ö LGD_UINT8 aucResionCode[11]; Áö¿ª ÄÚµå LGD_DOUBLE32 fEWSCode; LGD_INT8 acOutputBuff[EWS_OUTPUT_BUFF_MAX]; Àç³­ ³»¿ë. 
}ST_OUTPUT_EWS, *PST_OUTPUT_EWS; ***********************************************************************/ ST_OUTPUT_EWS g_stEWSMsg; ST_OUTPUT_EWS* LGD_GET_EWS_DB(void) { return &g_stEWSMsg; } LGD_UINT32 YMDtoMJD(ST_DATE_T stDate) { LGD_UINT16 wMJD; LGD_UINT32 lYear, lMouth, lDay, L; LGD_UINT32 lTemp1, lTemp2; lYear = (LGD_UINT32)stDate.usYear - (LGD_UINT32)1900; lMouth = stDate.ucMonth; lDay = stDate.ucDay; if(lMouth == 1 || lMouth == 2) L = 1; else L = 0; lTemp1 = (lYear - L) * 36525L / 100L; lTemp2 = (lMouth + 1L + L * 12L) * 306001L / 10000L; wMJD = (LGD_UINT16)(14956 + lDay + lTemp1 + lTemp2); return wMJD; } void MJDtoYMD(LGD_UINT16 wMJD, ST_DATE_T *pstDate) { LGD_UINT32 lYear, lMouth, lTemp; lYear = (wMJD * 100L - 1507820L) / 36525L; lMouth = ((wMJD * 10000L - 149561000L) - (lYear * 36525L / 100L) * 10000L) / 306001L; pstDate->ucDay = (LGD_UINT8)(wMJD - 14956L - (lYear * 36525L / 100L) - (lMouth * 306001L / 10000L)); if(lMouth == 14 || lMouth == 15) lTemp = 1; else lTemp = 0; pstDate->usYear = (LGD_UINT16)(lYear + lTemp + 1900); pstDate->ucMonth = (LGD_UINT8)(lMouth - 1 - lTemp * 12); } void LGD_EWS_INIT(void) { memset(&g_stEWSMsg, 0 , sizeof(ST_OUTPUT_EWS)); } void LGD_TYPE5_EXTENSION2(ST_FIB_INFO* pFibInfo) { ST_FIG_HEAD* pHeader; ST_TYPE_5* pType; ST_EWS_INFO* pEwsInfo; ST_EWS_TIME* pstEWSTime; LGD_UINT16 unData, nLoop; LGD_UINT32 ulData; LGD_UINT8 aucInfoBuff[5]; pHeader = (ST_FIG_HEAD*)&pFibInfo->aucBuff[pFibInfo->ucDataPos++]; pType = (ST_TYPE_5*)&pFibInfo->aucBuff[pFibInfo->ucDataPos++]; if(pType->ITEM.bitD2 == 1) { unData = LGD_GET_WORDDATA(pFibInfo); pEwsInfo = (ST_EWS_INFO*)&unData; if(!pEwsInfo->ITEM.bitThisSeqNo) { LGD_EWS_INIT(); for(nLoop = 0; nLoop < 3; nLoop++) g_stEWSMsg.acKinds[nLoop] = LGD_GET_BYTEDATA(pFibInfo); for(nLoop = 5; nLoop > 0; nLoop--) aucInfoBuff[nLoop-1] = LGD_GET_BYTEDATA(pFibInfo); ulData = ((aucInfoBuff[4]&0x3f)<<24) | (aucInfoBuff[3]<<16) | (aucInfoBuff[2]<<8) | aucInfoBuff[1]; ulData = ulData >> 2; pstEWSTime = 
(ST_EWS_TIME*)&ulData; MJDtoYMD(pstEWSTime->ITEM.bitMJD, &g_stEWSMsg.stDate); g_stEWSMsg.stDate.ucHour = (pstEWSTime->ITEM.bitUTCHours + 9) % 24; g_stEWSMsg.stDate.ucMinutes = pstEWSTime->ITEM.bitUTCMinutes; g_stEWSMsg.nTotalSeqNo = pEwsInfo->ITEM.bitTotalNo; g_stEWSMsg.ucMsgGovernment = pEwsInfo->ITEM.bitMsgGovernment; g_stEWSMsg.ucMsgID = pEwsInfo->ITEM.bitID; g_stEWSMsg.cPrecedence = ((aucInfoBuff[4] >> 6) & 0x3); g_stEWSMsg.ulTime = ulData; g_stEWSMsg.ucForm = (((aucInfoBuff[1] & 0x3)<<1) | (aucInfoBuff[0] >> 7)); g_stEWSMsg.ucResionCnt = ((aucInfoBuff[0]>>3) & 0xf); g_stEWSMsg.nNextSeqNo++; if(g_stEWSMsg.nTotalSeqNo) g_stEWSMsg.ucEWSStartEn = LGD_SUCCESS; for(nLoop = 0; nLoop < 10; nLoop++) g_stEWSMsg.aucResionCode[nLoop] = LGD_GET_BYTEDATA(pFibInfo); g_stEWSMsg.fEWSCode = atof((char*)g_stEWSMsg.aucResionCode); for( ; nLoop < (pHeader->ITEM.bitLength - 11); nLoop++) g_stEWSMsg.acOutputBuff[g_stEWSMsg.nDataPos++] = LGD_GET_BYTEDATA(pFibInfo); } else if(g_stEWSMsg.ucEWSStartEn == LGD_SUCCESS) { if(g_stEWSMsg.nNextSeqNo != pEwsInfo->ITEM.bitThisSeqNo){ LGD_EWS_INIT(); pFibInfo->ucDataPos += (pHeader->ITEM.bitLength + 1); return; } g_stEWSMsg.nNextSeqNo = pEwsInfo->ITEM.bitThisSeqNo + 1; for(nLoop = 0; nLoop < (pHeader->ITEM.bitLength - 3); nLoop++) g_stEWSMsg.acOutputBuff[g_stEWSMsg.nDataPos++] = LGD_GET_BYTEDATA(pFibInfo); if(pEwsInfo->ITEM.bitThisSeqNo == g_stEWSMsg.nTotalSeqNo) g_stEWSMsg.ucIsEWSGood = LGD_SUCCESS; } else pFibInfo->ucDataPos += (pHeader->ITEM.bitLength + 1); } else pFibInfo->ucDataPos += (pHeader->ITEM.bitLength + 1); } void LGD_SET_TYPE_5(ST_FIB_INFO* pFibInfo) { ST_TYPE_5* pExtern; ST_FIG_HEAD* pHeader; LGD_UINT8 ucType, ucHeader; ucHeader = LGD_GETAT_HEADER(pFibInfo); ucType = LGD_GETAT_TYPE(pFibInfo); pHeader = (ST_FIG_HEAD*)&ucHeader; pExtern = (ST_TYPE_5*)&ucType; switch(pExtern->ITEM.bitExtension){ case EXTENSION_2: LGD_TYPE5_EXTENSION2(pFibInfo); break; default: pFibInfo->ucDataPos += (pHeader->ITEM.bitLength + 1); break; } } LGD_UINT8 
LGD_EWS_PARSING(LGD_UINT8* pucFicBuff, LGD_INT32 uFicLength) { ST_FIB_INFO* pstFib; ST_FIG_HEAD* pHeader; ST_FIC stEWS; LGD_UINT8 ucLoop, ucHeader, ucBlockNum; LGD_UINT16 uiTempIndex = 0; ucBlockNum = uFicLength / FIB_SIZE; pstFib = &stEWS.stBlock; for(ucLoop = 0; ucLoop < ucBlockNum; ucLoop++) { LGD_SET_UPDATEFIC(pstFib, &pucFicBuff[ucLoop*FIB_SIZE]); if(!pstFib->uiIsCRC) continue; while(pstFib->ucDataPos < FIB_SIZE-2) { ucHeader = LGD_GETAT_HEADER(pstFib); pHeader = (ST_FIG_HEAD*)&ucHeader; if(!LGD_GET_FINDTYPE(pHeader) || !LGD_GET_NULLBLOCK(pHeader) || !LGD_GET_FINDLENGTH(pHeader)) break; switch(pHeader->ITEM.bitType) { case FIG_FICDATA_CHANNEL : LGD_SET_TYPE_5(pstFib); break; default : pstFib->ucDataPos += pHeader->ITEM.bitLength + 1;break; } } if(g_stEWSMsg.ucIsEWSGood == LGD_SUCCESS) return LGD_SUCCESS; } return LGD_ERROR; } LGD_UINT8 LGD_EWS_FRAMECHECK(LGD_UINT8 ucI2CID) { LGD_UINT16 wFicLen, uFIBCnt; LGD_UINT8 abyBuff[MAX_FIC_SIZE]; uFIBCnt = LGD_GET_FIB_CNT(m_ucTransMode); if(!(LGD_CMD_READ(ucI2CID, APB_VTB_BASE+ 0x00) & 0x4000)) return LGD_ERROR; wFicLen = LGD_CMD_READ(ucI2CID, APB_VTB_BASE+ 0x09); if(!wFicLen) return LGD_ERROR; wFicLen++; if(wFicLen != (uFIBCnt*FIB_SIZE)) return LGD_ERROR; LGD_CMD_READ_BURST(ucI2CID, APB_FIC_BASE, abyBuff, wFicLen); if(LGD_EWS_PARSING(abyBuff, wFicLen)) return LGD_SUCCESS; return LGD_ERROR; } #endif
gpl-2.0
ryo-on/gcc-4.2.4-SCO-OpenServer5
gcc/testsuite/gcc.c-torture/execute/20040311-1.c
214
1051
/* Copyright (C) 2004 Free Software Foundation.

   Check that constant folding and RTL simplification of -(x >> y)
   doesn't break anything and produces the expected results.

   Written by Roger Sayle, 11th March 2004.  */

extern void abort (void);

/* Bit width of int (assumes 8-bit bytes, as the expected results below do).  */
#define INT_BITS  (sizeof(int)*8)

/* -(x >> (INT_BITS-1)) with a constant shift count: with GCC's
   arithmetic (sign-extending) right shift, x >> 31 is 0 for x >= 0 and
   -1 for x < 0, so the negation yields 0 or 1 ("is x negative?").  */
int test1(int x)
{
  return -(x >> (INT_BITS-1));
}

/* Unsigned variant: x >> 31 is just the top bit (0 or 1); casting to
   int and negating yields 0 or -1.  */
int test2(unsigned int x)
{
  return -((int)(x >> (INT_BITS-1)));
}

/* Same as test1 but the shift count is a variable, so the simplification
   must be done in RTL rather than by front-end constant folding.  */
int test3(int x)
{
  int y;
  y = INT_BITS-1;
  return -(x >> y);
}

/* Same as test2 with a variable shift count.  */
int test4(unsigned int x)
{
  int y;
  y = INT_BITS-1;
  return -((int)(x >> y));
}

int main()
{
  /* Each helper is probed at 0, 1 and -1; any mismatch aborts.  */
  if (test1(0) != 0)
    abort ();
  if (test1(1) != 0)
    abort ();
  if (test1(-1) != 1)
    abort ();

  if (test2(0) != 0)
    abort ();
  if (test2(1) != 0)
    abort ();
  if (test2((unsigned int)-1) != -1)
    abort ();

  if (test3(0) != 0)
    abort ();
  if (test3(1) != 0)
    abort ();
  if (test3(-1) != 1)
    abort ();

  if (test4(0) != 0)
    abort ();
  if (test4(1) != 0)
    abort ();
  if (test4((unsigned int)-1) != -1)
    abort ();

  return 0;
}
gpl-2.0
djmax81/Suemax-kernel_Exynos5433_new
net/bridge/br_netlink.c
214
12332
/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"

/*
 * Worst-case payload of the nested IFLA_PROTINFO attribute: one netlink
 * attribute per bridge-port property emitted by br_port_fill_attrs().
 * Must be kept in sync with that function.
 */
static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE  */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ 0;
}

/*
 * Upper bound on the size of one RTM_NEWLINK notification; used to size
 * the skb allocated in br_ifinfo_notify().
 */
static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()); /* IFLA_PROTINFO */
}

/*
 * Append the per-port attributes (STP state, priority, path cost, and the
 * boolean flags derived from p->flags) to @skb.
 *
 * Returns 0 on success or -EMSGSIZE if the skb runs out of tailroom.
 */
static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)))
		return -EMSGSIZE;

	return 0;
}

/*
 * Create one netlink message for one interface
 *
Contains port and master info as well as carrier and bridge state. */ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *port, u32 pid, u32 seq, int event, unsigned int flags, u32 filter_mask, const struct net_device *dev) { const struct net_bridge *br; struct ifinfomsg *hdr; struct nlmsghdr *nlh; u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; if (port) br = port->br; else br = netdev_priv(dev); br_debug(br, "br_fill_info event %d port %s master %s\n", event, dev->name, br->dev->name); nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); if (nlh == NULL) return -EMSGSIZE; hdr = nlmsg_data(nlh); hdr->ifi_family = AF_BRIDGE; hdr->__ifi_pad = 0; hdr->ifi_type = dev->type; hdr->ifi_index = dev->ifindex; hdr->ifi_flags = dev_get_flags(dev); hdr->ifi_change = 0; if (nla_put_string(skb, IFLA_IFNAME, dev->name) || nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) || nla_put_u32(skb, IFLA_MTU, dev->mtu) || nla_put_u8(skb, IFLA_OPERSTATE, operstate) || (dev->addr_len && nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || (dev->ifindex != dev->iflink && nla_put_u32(skb, IFLA_LINK, dev->iflink))) goto nla_put_failure; if (event == RTM_NEWLINK && port) { struct nlattr *nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); if (nest == NULL || br_port_fill_attrs(skb, port) < 0) goto nla_put_failure; nla_nest_end(skb, nest); } /* Check if the VID information is requested */ if (filter_mask & RTEXT_FILTER_BRVLAN) { struct nlattr *af; const struct net_port_vlans *pv; struct bridge_vlan_info vinfo; u16 vid; u16 pvid; if (port) pv = nbp_get_vlan_info(port); else pv = br_get_vlan_info(br); if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) goto done; af = nla_nest_start(skb, IFLA_AF_SPEC); if (!af) goto nla_put_failure; pvid = br_get_pvid(pv); for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { vinfo.vid = vid; vinfo.flags = 0; if (vid == pvid) vinfo.flags |= BRIDGE_VLAN_INFO_PVID; if (test_bit(vid, 
pv->untagged_bitmap)) vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED; if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO, sizeof(vinfo), &vinfo)) goto nla_put_failure; } nla_nest_end(skb, af); } done: return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } /* * Notify listeners of a change in port information */ void br_ifinfo_notify(int event, struct net_bridge_port *port) { struct net *net; struct sk_buff *skb; int err = -ENOBUFS; if (!port) return; net = dev_net(port->dev); br_debug(port->br, "port %u(%s) event %d\n", (unsigned int)port->port_no, port->dev->name, event); skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC); if (skb == NULL) goto errout; err = br_fill_ifinfo(skb, port, 0, 0, event, 0, 0, port->dev); if (err < 0) { /* -EMSGSIZE implies BUG in br_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); return; errout: if (err < 0) rtnl_set_sk_err(net, RTNLGRP_LINK, err); } /* * Dump information about all ports, in response to GETLINK */ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask) { int err = 0; struct net_bridge_port *port = br_port_get_rcu(dev); /* not a bridge port and */ if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN)) goto out; err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI, filter_mask, dev); out: return err; } static const struct nla_policy ifla_br_policy[IFLA_MAX+1] = { [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 }, [IFLA_BRIDGE_MODE] = { .type = NLA_U16 }, [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY, .len = sizeof(struct bridge_vlan_info), }, }; static int br_afspec(struct net_bridge *br, struct net_bridge_port *p, struct nlattr *af_spec, int cmd) { struct nlattr *tb[IFLA_BRIDGE_MAX+1]; int err = 0; err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, af_spec, ifla_br_policy); if (err) return err; if (tb[IFLA_BRIDGE_VLAN_INFO]) { struct bridge_vlan_info *vinfo; vinfo = 
nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); if (vinfo->vid >= VLAN_N_VID) return -EINVAL; switch (cmd) { case RTM_SETLINK: if (p) { err = nbp_vlan_add(p, vinfo->vid, vinfo->flags); if (err) break; if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER) err = br_vlan_add(p->br, vinfo->vid, vinfo->flags); } else err = br_vlan_add(br, vinfo->vid, vinfo->flags); if (err) break; break; case RTM_DELLINK: if (p) { nbp_vlan_delete(p, vinfo->vid); if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER) br_vlan_delete(p->br, vinfo->vid); } else br_vlan_delete(br, vinfo->vid); break; } } return err; } static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_STATE] = { .type = NLA_U8 }, [IFLA_BRPORT_COST] = { .type = NLA_U32 }, [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 }, [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, }; /* Change the state of the port and notify spanning tree */ static int br_set_port_state(struct net_bridge_port *p, u8 state) { if (state > BR_STATE_BLOCKING) return -EINVAL; /* if kernel STP is running, don't allow changes */ if (p->br->stp_enabled == BR_KERNEL_STP) return -EBUSY; /* if device is not up, change is not allowed * if link is not present, only allowable state is disabled */ if (!netif_running(p->dev) || (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED)) return -ENETDOWN; p->state = state; br_log_state(p); br_port_state_selection(p->br); return 0; } /* Set/clear or port flags based on attribute */ static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[], int attrtype, unsigned long mask) { if (tb[attrtype]) { u8 flag = nla_get_u8(tb[attrtype]); if (flag) p->flags |= mask; else p->flags &= ~mask; } } /* Process bridge protocol info on port */ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) { int err; br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); 
br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); if (tb[IFLA_BRPORT_COST]) { err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); if (err) return err; } if (tb[IFLA_BRPORT_PRIORITY]) { err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY])); if (err) return err; } if (tb[IFLA_BRPORT_STATE]) { err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE])); if (err) return err; } return 0; } /* Change state and parameters on port. */ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh) { struct nlattr *protinfo; struct nlattr *afspec; struct net_bridge_port *p; struct nlattr *tb[IFLA_BRPORT_MAX + 1]; int err = 0; protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO); afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (!protinfo && !afspec) return 0; p = br_port_get_rtnl(dev); /* We want to accept dev as bridge itself if the AF_SPEC * is set to see if someone is setting vlan info on the brigde */ if (!p && !afspec) return -EINVAL; if (p && protinfo) { if (protinfo->nla_type & NLA_F_NESTED) { err = nla_parse_nested(tb, IFLA_BRPORT_MAX, protinfo, ifla_brport_policy); if (err) return err; spin_lock_bh(&p->br->lock); err = br_setport(p, tb); spin_unlock_bh(&p->br->lock); } else { /* Binary compatability with old RSTP */ if (nla_len(protinfo) < sizeof(u8)) return -EINVAL; spin_lock_bh(&p->br->lock); err = br_set_port_state(p, nla_get_u8(protinfo)); spin_unlock_bh(&p->br->lock); } if (err) goto out; } if (afspec) { err = br_afspec((struct net_bridge *)netdev_priv(dev), p, afspec, RTM_SETLINK); } if (err == 0) br_ifinfo_notify(RTM_NEWLINK, p); out: return err; } /* Delete port information */ int br_dellink(struct net_device *dev, struct nlmsghdr *nlh) { struct nlattr *afspec; struct net_bridge_port *p; int err; afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (!afspec) return 0; p = 
br_port_get_rtnl(dev); /* We want to accept dev as bridge itself as well */ if (!p && !(dev->priv_flags & IFF_EBRIDGE)) return -EINVAL; err = br_afspec((struct net_bridge *)netdev_priv(dev), p, afspec, RTM_DELLINK); return err; } static int br_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } return 0; } static size_t br_get_link_af_size(const struct net_device *dev) { struct net_port_vlans *pv; if (br_port_exists(dev)) pv = nbp_get_vlan_info(br_port_get_rcu(dev)); else if (dev->priv_flags & IFF_EBRIDGE) pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev)); else return 0; if (!pv) return 0; /* Each VLAN is returned in bridge_vlan_info along with flags */ return pv->num_vlans * nla_total_size(sizeof(struct bridge_vlan_info)); } static struct rtnl_af_ops br_af_ops = { .family = AF_BRIDGE, .get_link_af_size = br_get_link_af_size, }; struct rtnl_link_ops br_link_ops __read_mostly = { .kind = "bridge", .priv_size = sizeof(struct net_bridge), .setup = br_dev_setup, .validate = br_validate, .dellink = br_dev_delete, }; int __init br_netlink_init(void) { int err; br_mdb_init(); err = rtnl_af_register(&br_af_ops); if (err) goto out; err = rtnl_link_register(&br_link_ops); if (err) goto out_af; return 0; out_af: rtnl_af_unregister(&br_af_ops); out: br_mdb_uninit(); return err; } void __exit br_netlink_fini(void) { br_mdb_uninit(); rtnl_af_unregister(&br_af_ops); rtnl_link_unregister(&br_link_ops); }
gpl-2.0
ziozzang/kernel-rhel6
fs/fuse/inode.c
470
28799
/* FUSE: Filesystem in Userspace Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> This program can be distributed under the terms of the GNU GPL. See the file COPYING. */ #include "fuse_i.h" #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/parser.h> #include <linux/statfs.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/exportfs.h> MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); MODULE_DESCRIPTION("Filesystem in Userspace"); MODULE_LICENSE("GPL"); static struct kmem_cache *fuse_inode_cachep; struct list_head fuse_conn_list; DEFINE_MUTEX(fuse_mutex); static int set_global_limit(const char *val, struct kernel_param *kp); unsigned max_user_bgreq; module_param_call(max_user_bgreq, set_global_limit, param_get_uint, &max_user_bgreq, 0644); __MODULE_PARM_TYPE(max_user_bgreq, "uint"); MODULE_PARM_DESC(max_user_bgreq, "Global limit for the maximum number of backgrounded requests an " "unprivileged user can set"); unsigned max_user_congthresh; module_param_call(max_user_congthresh, set_global_limit, param_get_uint, &max_user_congthresh, 0644); __MODULE_PARM_TYPE(max_user_congthresh, "uint"); MODULE_PARM_DESC(max_user_congthresh, "Global limit for the maximum congestion threshold an " "unprivileged user can set"); #define FUSE_SUPER_MAGIC 0x65735546 #define FUSE_DEFAULT_BLKSIZE 512 /** Maximum number of outstanding background requests */ #define FUSE_DEFAULT_MAX_BACKGROUND 12 /** Congestion starts at 75% of maximum */ #define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4) struct fuse_mount_data { int fd; unsigned rootmode; unsigned user_id; unsigned group_id; unsigned fd_present:1; unsigned rootmode_present:1; unsigned user_id_present:1; unsigned group_id_present:1; unsigned flags; unsigned max_read; unsigned blksize; }; static struct inode *fuse_alloc_inode(struct 
super_block *sb) { struct inode *inode; struct fuse_inode *fi; inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL); if (!inode) return NULL; fi = get_fuse_inode(inode); fi->i_time = 0; fi->nodeid = 0; fi->nlookup = 0; fi->attr_version = 0; fi->writectr = 0; INIT_LIST_HEAD(&fi->write_files); INIT_LIST_HEAD(&fi->queued_writes); INIT_LIST_HEAD(&fi->writepages); init_waitqueue_head(&fi->page_waitq); fi->forget_req = fuse_request_alloc(); if (!fi->forget_req) { kmem_cache_free(fuse_inode_cachep, inode); return NULL; } return inode; } static void fuse_destroy_inode(struct inode *inode) { struct fuse_inode *fi = get_fuse_inode(inode); BUG_ON(!list_empty(&fi->write_files)); BUG_ON(!list_empty(&fi->queued_writes)); if (fi->forget_req) fuse_request_free(fi->forget_req); kmem_cache_free(fuse_inode_cachep, inode); } void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req, u64 nodeid, u64 nlookup) { struct fuse_forget_in *inarg = &req->misc.forget_in; inarg->nlookup = nlookup; req->in.h.opcode = FUSE_FORGET; req->in.h.nodeid = nodeid; req->in.numargs = 1; req->in.args[0].size = sizeof(struct fuse_forget_in); req->in.args[0].value = inarg; fuse_request_send_noreply(fc, req); } static void fuse_clear_inode(struct inode *inode) { if (inode->i_sb->s_flags & MS_ACTIVE) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); fuse_send_forget(fc, fi->forget_req, fi->nodeid, fi->nlookup); fi->forget_req = NULL; } } static int fuse_remount_fs(struct super_block *sb, int *flags, char *data) { if (*flags & MS_MANDLOCK) return -EINVAL; return 0; } void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, u64 attr_valid) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); fi->attr_version = ++fc->attr_version; fi->i_time = attr_valid; inode->i_ino = attr->ino; inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); inode->i_nlink = attr->nlink; inode->i_uid = 
attr->uid; inode->i_gid = attr->gid; inode->i_blocks = attr->blocks; inode->i_atime.tv_sec = attr->atime; inode->i_atime.tv_nsec = attr->atimensec; inode->i_mtime.tv_sec = attr->mtime; inode->i_mtime.tv_nsec = attr->mtimensec; inode->i_ctime.tv_sec = attr->ctime; inode->i_ctime.tv_nsec = attr->ctimensec; if (attr->blksize != 0) inode->i_blkbits = ilog2(attr->blksize); else inode->i_blkbits = inode->i_sb->s_blocksize_bits; /* * Don't set the sticky bit in i_mode, unless we want the VFS * to check permissions. This prevents failures due to the * check in may_delete(). */ fi->orig_i_mode = inode->i_mode; if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) inode->i_mode &= ~S_ISVTX; } void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, u64 attr_valid, u64 attr_version) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); loff_t oldsize; spin_lock(&fc->lock); if (attr_version != 0 && fi->attr_version > attr_version) { spin_unlock(&fc->lock); return; } fuse_change_attributes_common(inode, attr, attr_valid); oldsize = inode->i_size; i_size_write(inode, attr->size); spin_unlock(&fc->lock); if (S_ISREG(inode->i_mode) && oldsize != attr->size) { truncate_pagecache(inode, oldsize, attr->size); invalidate_inode_pages2(inode->i_mapping); } } static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr) { inode->i_mode = attr->mode & S_IFMT; inode->i_size = attr->size; if (S_ISREG(inode->i_mode)) { fuse_init_common(inode); fuse_init_file_inode(inode); } else if (S_ISDIR(inode->i_mode)) fuse_init_dir(inode); else if (S_ISLNK(inode->i_mode)) fuse_init_symlink(inode); else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { fuse_init_common(inode); init_special_inode(inode, inode->i_mode, new_decode_dev(attr->rdev)); } else BUG(); } int fuse_inode_eq(struct inode *inode, void *_nodeidp) { u64 nodeid = *(u64 *) _nodeidp; if (get_node_id(inode) == nodeid) return 
1; else return 0; } static int fuse_inode_set(struct inode *inode, void *_nodeidp) { u64 nodeid = *(u64 *) _nodeidp; get_fuse_inode(inode)->nodeid = nodeid; return 0; } struct inode *fuse_iget(struct super_block *sb, u64 nodeid, int generation, struct fuse_attr *attr, u64 attr_valid, u64 attr_version) { struct inode *inode; struct fuse_inode *fi; struct fuse_conn *fc = get_fuse_conn_super(sb); retry: inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid); if (!inode) return NULL; if ((inode->i_state & I_NEW)) { inode->i_flags |= S_NOATIME|S_NOCMTIME; inode->i_generation = generation; inode->i_data.backing_dev_info = &fc->bdi; fuse_init_inode(inode, attr); unlock_new_inode(inode); } else if ((inode->i_mode ^ attr->mode) & S_IFMT) { /* Inode has changed type, any I/O on the old should fail */ make_bad_inode(inode); iput(inode); goto retry; } fi = get_fuse_inode(inode); spin_lock(&fc->lock); fi->nlookup++; spin_unlock(&fc->lock); fuse_change_attributes(inode, attr, attr_valid, attr_version); return inode; } int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, loff_t offset, loff_t len) { struct inode *inode; pgoff_t pg_start; pgoff_t pg_end; inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid); if (!inode) return -ENOENT; fuse_invalidate_attr(inode); if (offset >= 0) { pg_start = offset >> PAGE_CACHE_SHIFT; if (len <= 0) pg_end = -1; else pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT; invalidate_inode_pages2_range(inode->i_mapping, pg_start, pg_end); } iput(inode); return 0; } static void fuse_umount_begin(struct super_block *sb) { fuse_abort_conn(get_fuse_conn_super(sb)); } static void fuse_send_destroy(struct fuse_conn *fc) { struct fuse_req *req = fc->destroy_req; if (req && fc->conn_init) { fc->destroy_req = NULL; req->in.h.opcode = FUSE_DESTROY; req->force = 1; fuse_request_send(fc, req); fuse_put_request(fc, req); } } static void fuse_bdi_destroy(struct fuse_conn *fc) { if (fc->bdi_initialized) bdi_destroy(&fc->bdi); } void 
fuse_conn_kill(struct fuse_conn *fc) { spin_lock(&fc->lock); fc->connected = 0; fc->blocked = 0; spin_unlock(&fc->lock); /* Flush all readers on this fs */ kill_fasync(&fc->fasync, SIGIO, POLL_IN); wake_up_all(&fc->waitq); wake_up_all(&fc->blocked_waitq); wake_up_all(&fc->reserved_req_waitq); mutex_lock(&fuse_mutex); list_del(&fc->entry); fuse_ctl_remove_conn(fc); mutex_unlock(&fuse_mutex); fuse_bdi_destroy(fc); } EXPORT_SYMBOL_GPL(fuse_conn_kill); static void fuse_put_super(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); fuse_send_destroy(fc); fuse_conn_kill(fc); fuse_conn_put(fc); } static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr) { stbuf->f_type = FUSE_SUPER_MAGIC; stbuf->f_bsize = attr->bsize; stbuf->f_frsize = attr->frsize; stbuf->f_blocks = attr->blocks; stbuf->f_bfree = attr->bfree; stbuf->f_bavail = attr->bavail; stbuf->f_files = attr->files; stbuf->f_ffree = attr->ffree; stbuf->f_namelen = attr->namelen; /* fsid is left zero */ } static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct fuse_conn *fc = get_fuse_conn_super(sb); struct fuse_req *req; struct fuse_statfs_out outarg; int err; if (!fuse_allow_task(fc, current)) { buf->f_type = FUSE_SUPER_MAGIC; return 0; } req = fuse_get_req(fc); if (IS_ERR(req)) return PTR_ERR(req); memset(&outarg, 0, sizeof(outarg)); req->in.numargs = 0; req->in.h.opcode = FUSE_STATFS; req->in.h.nodeid = get_node_id(dentry->d_inode); req->out.numargs = 1; req->out.args[0].size = fc->minor < 4 ? 
FUSE_COMPAT_STATFS_SIZE : sizeof(outarg); req->out.args[0].value = &outarg; fuse_request_send(fc, req); err = req->out.h.error; if (!err) convert_fuse_statfs(buf, &outarg.st); fuse_put_request(fc, req); return err; } enum { OPT_FD, OPT_ROOTMODE, OPT_USER_ID, OPT_GROUP_ID, OPT_DEFAULT_PERMISSIONS, OPT_ALLOW_OTHER, OPT_MAX_READ, OPT_BLKSIZE, OPT_ERR }; static const match_table_t tokens = { {OPT_FD, "fd=%u"}, {OPT_ROOTMODE, "rootmode=%o"}, {OPT_USER_ID, "user_id=%u"}, {OPT_GROUP_ID, "group_id=%u"}, {OPT_DEFAULT_PERMISSIONS, "default_permissions"}, {OPT_ALLOW_OTHER, "allow_other"}, {OPT_MAX_READ, "max_read=%u"}, {OPT_BLKSIZE, "blksize=%u"}, {OPT_ERR, NULL} }; static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) { char *p; memset(d, 0, sizeof(struct fuse_mount_data)); d->max_read = ~0; d->blksize = FUSE_DEFAULT_BLKSIZE; while ((p = strsep(&opt, ",")) != NULL) { int token; int value; substring_t args[MAX_OPT_ARGS]; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case OPT_FD: if (match_int(&args[0], &value)) return 0; d->fd = value; d->fd_present = 1; break; case OPT_ROOTMODE: if (match_octal(&args[0], &value)) return 0; if (!fuse_valid_type(value)) return 0; d->rootmode = value; d->rootmode_present = 1; break; case OPT_USER_ID: if (match_int(&args[0], &value)) return 0; d->user_id = value; d->user_id_present = 1; break; case OPT_GROUP_ID: if (match_int(&args[0], &value)) return 0; d->group_id = value; d->group_id_present = 1; break; case OPT_DEFAULT_PERMISSIONS: d->flags |= FUSE_DEFAULT_PERMISSIONS; break; case OPT_ALLOW_OTHER: d->flags |= FUSE_ALLOW_OTHER; break; case OPT_MAX_READ: if (match_int(&args[0], &value)) return 0; d->max_read = value; break; case OPT_BLKSIZE: if (!is_bdev || match_int(&args[0], &value)) return 0; d->blksize = value; break; default: return 0; } } if (!d->fd_present || !d->rootmode_present || !d->user_id_present || !d->group_id_present) return 0; return 1; } static int fuse_show_options(struct 
seq_file *m, struct vfsmount *mnt) { struct fuse_conn *fc = get_fuse_conn_super(mnt->mnt_sb); seq_printf(m, ",user_id=%u", fc->user_id); seq_printf(m, ",group_id=%u", fc->group_id); if (fc->flags & FUSE_DEFAULT_PERMISSIONS) seq_puts(m, ",default_permissions"); if (fc->flags & FUSE_ALLOW_OTHER) seq_puts(m, ",allow_other"); if (fc->max_read != ~0) seq_printf(m, ",max_read=%u", fc->max_read); if (mnt->mnt_sb->s_bdev && mnt->mnt_sb->s_blocksize != FUSE_DEFAULT_BLKSIZE) seq_printf(m, ",blksize=%lu", mnt->mnt_sb->s_blocksize); return 0; } void fuse_conn_init(struct fuse_conn *fc) { memset(fc, 0, sizeof(*fc)); spin_lock_init(&fc->lock); mutex_init(&fc->inst_mutex); init_rwsem(&fc->killsb); atomic_set(&fc->count, 1); init_waitqueue_head(&fc->waitq); init_waitqueue_head(&fc->blocked_waitq); init_waitqueue_head(&fc->reserved_req_waitq); INIT_LIST_HEAD(&fc->pending); INIT_LIST_HEAD(&fc->processing); INIT_LIST_HEAD(&fc->io); INIT_LIST_HEAD(&fc->interrupts); INIT_LIST_HEAD(&fc->bg_queue); INIT_LIST_HEAD(&fc->entry); atomic_set(&fc->num_waiting, 0); fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND; fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD; fc->khctr = 0; fc->polled_files = RB_ROOT; fc->reqctr = 0; fc->blocked = 1; fc->attr_version = 1; get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); } EXPORT_SYMBOL_GPL(fuse_conn_init); void fuse_conn_put(struct fuse_conn *fc) { if (atomic_dec_and_test(&fc->count)) { if (fc->destroy_req) fuse_request_free(fc->destroy_req); mutex_destroy(&fc->inst_mutex); fc->release(fc); } } EXPORT_SYMBOL_GPL(fuse_conn_put); struct fuse_conn *fuse_conn_get(struct fuse_conn *fc) { atomic_inc(&fc->count); return fc; } EXPORT_SYMBOL_GPL(fuse_conn_get); static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode) { struct fuse_attr attr; memset(&attr, 0, sizeof(attr)); attr.mode = mode; attr.ino = FUSE_ROOT_ID; attr.nlink = 1; return fuse_iget(sb, 1, 0, &attr, 0, 0); } struct fuse_inode_handle { u64 nodeid; u32 
generation; }; static struct dentry *fuse_get_dentry(struct super_block *sb, struct fuse_inode_handle *handle) { struct fuse_conn *fc = get_fuse_conn_super(sb); struct inode *inode; struct dentry *entry; int err = -ESTALE; if (handle->nodeid == 0) goto out_err; inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid); if (!inode) { struct fuse_entry_out outarg; struct qstr name; if (!fc->export_support) goto out_err; name.len = 1; name.name = "."; err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg, &inode); if (err && err != -ENOENT) goto out_err; if (err || !inode) { err = -ESTALE; goto out_err; } err = -EIO; if (get_node_id(inode) != handle->nodeid) goto out_iput; } err = -ESTALE; if (inode->i_generation != handle->generation) goto out_iput; entry = d_obtain_alias(inode); if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) { entry->d_op = &fuse_dentry_operations; fuse_invalidate_entry_cache(entry); } return entry; out_iput: iput(inode); out_err: return ERR_PTR(err); } static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, int connectable) { struct inode *inode = dentry->d_inode; bool encode_parent = connectable && !S_ISDIR(inode->i_mode); int len = encode_parent ? 6 : 3; u64 nodeid; u32 generation; if (*max_len < len) return 255; nodeid = get_fuse_inode(inode)->nodeid; generation = inode->i_generation; fh[0] = (u32)(nodeid >> 32); fh[1] = (u32)(nodeid & 0xffffffff); fh[2] = generation; if (encode_parent) { struct inode *parent; spin_lock(&dentry->d_lock); parent = dentry->d_parent->d_inode; nodeid = get_fuse_inode(parent)->nodeid; generation = parent->i_generation; spin_unlock(&dentry->d_lock); fh[3] = (u32)(nodeid >> 32); fh[4] = (u32)(nodeid & 0xffffffff); fh[5] = generation; } *max_len = len; return encode_parent ? 
0x82 : 0x81; } static struct dentry *fuse_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct fuse_inode_handle handle; if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3) return NULL; handle.nodeid = (u64) fid->raw[0] << 32; handle.nodeid |= (u64) fid->raw[1]; handle.generation = fid->raw[2]; return fuse_get_dentry(sb, &handle); } static struct dentry *fuse_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct fuse_inode_handle parent; if (fh_type != 0x82 || fh_len < 6) return NULL; parent.nodeid = (u64) fid->raw[3] << 32; parent.nodeid |= (u64) fid->raw[4]; parent.generation = fid->raw[5]; return fuse_get_dentry(sb, &parent); } static struct dentry *fuse_get_parent(struct dentry *child) { struct inode *child_inode = child->d_inode; struct fuse_conn *fc = get_fuse_conn(child_inode); struct inode *inode; struct dentry *parent; struct fuse_entry_out outarg; struct qstr name; int err; if (!fc->export_support) return ERR_PTR(-ESTALE); name.len = 2; name.name = ".."; err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode), &name, &outarg, &inode); if (err) { if (err == -ENOENT) return ERR_PTR(-ESTALE); return ERR_PTR(err); } parent = d_obtain_alias(inode); if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) { parent->d_op = &fuse_dentry_operations; fuse_invalidate_entry_cache(parent); } return parent; } static const struct export_operations fuse_export_operations = { .fh_to_dentry = fuse_fh_to_dentry, .fh_to_parent = fuse_fh_to_parent, .encode_fh = fuse_encode_fh, .get_parent = fuse_get_parent, }; static const struct super_operations fuse_super_operations = { .alloc_inode = fuse_alloc_inode, .destroy_inode = fuse_destroy_inode, .clear_inode = fuse_clear_inode, .drop_inode = generic_delete_inode, .remount_fs = fuse_remount_fs, .put_super = fuse_put_super, .umount_begin = fuse_umount_begin, .statfs = fuse_statfs, .show_options = fuse_show_options, }; static void 
sanitize_global_limit(unsigned *limit) { if (*limit == 0) *limit = ((num_physpages << PAGE_SHIFT) >> 13) / sizeof(struct fuse_req); if (*limit >= 1 << 16) *limit = (1 << 16) - 1; } static int set_global_limit(const char *val, struct kernel_param *kp) { int rv; rv = param_set_uint(val, kp); if (rv) return rv; sanitize_global_limit((unsigned *)kp->arg); return 0; } static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg) { int cap_sys_admin = capable(CAP_SYS_ADMIN); if (arg->minor < 13) return; sanitize_global_limit(&max_user_bgreq); sanitize_global_limit(&max_user_congthresh); if (arg->max_background) { fc->max_background = arg->max_background; if (!cap_sys_admin && fc->max_background > max_user_bgreq) fc->max_background = max_user_bgreq; } if (arg->congestion_threshold) { fc->congestion_threshold = arg->congestion_threshold; if (!cap_sys_admin && fc->congestion_threshold > max_user_congthresh) fc->congestion_threshold = max_user_congthresh; } } static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_init_out *arg = &req->misc.init_out; if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION) fc->conn_error = 1; else { unsigned long ra_pages; process_init_limits(fc, arg); if (arg->minor >= 6) { ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; if (arg->flags & FUSE_ASYNC_READ) fc->async_read = 1; if (!(arg->flags & FUSE_POSIX_LOCKS)) fc->no_lock = 1; if (arg->flags & FUSE_ATOMIC_O_TRUNC) fc->atomic_o_trunc = 1; if (arg->minor >= 9) { /* LOOKUP has dependency on proto version */ if (arg->flags & FUSE_EXPORT_SUPPORT) fc->export_support = 1; } if (arg->flags & FUSE_BIG_WRITES) fc->big_writes = 1; if (arg->flags & FUSE_DONT_MASK) fc->dont_mask = 1; } else { ra_pages = fc->max_read / PAGE_CACHE_SIZE; fc->no_lock = 1; } fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); fc->minor = arg->minor; fc->max_write = arg->minor < 5 ? 
4096 : arg->max_write; fc->max_write = max_t(unsigned, 4096, fc->max_write); fc->conn_init = 1; } fc->blocked = 0; wake_up_all(&fc->blocked_waitq); } static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_init_in *arg = &req->misc.init_in; arg->major = FUSE_KERNEL_VERSION; arg->minor = FUSE_KERNEL_MINOR_VERSION; arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK; req->in.h.opcode = FUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(*arg); req->in.args[0].value = arg; req->out.numargs = 1; /* Variable length arguement used for backward compatibility with interface version < 7.5. Rest of init_out is zeroed by do_get_request(), so a short reply is not a problem */ req->out.argvar = 1; req->out.args[0].size = sizeof(struct fuse_init_out); req->out.args[0].value = &req->misc.init_out; req->end = process_init_reply; fuse_request_send_background(fc, req); } static void fuse_free_conn(struct fuse_conn *fc) { kfree(fc); } static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) { int err; fc->bdi.name = "fuse"; fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; fc->bdi.unplug_io_fn = default_unplug_io_fn; /* fuse does it's own writeback accounting */ fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; err = bdi_init(&fc->bdi); if (err) return err; fc->bdi_initialized = 1; if (sb->s_bdev) { err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk", MAJOR(fc->dev), MINOR(fc->dev)); } else { err = bdi_register_dev(&fc->bdi, fc->dev); } if (err) return err; /* * For a single fuse filesystem use max 1% of dirty + * writeback threshold. * * This gives about 1M of write buffer for memory maps on a * machine with 1G and 10% dirty_ratio, which should be more * than enough. 
* * Privileged users can raise it by writing to * * /sys/class/bdi/<bdi>/max_ratio */ bdi_set_max_ratio(&fc->bdi, 1); return 0; } static int fuse_fill_super(struct super_block *sb, void *data, int silent) { struct fuse_conn *fc; struct inode *root; struct fuse_mount_data d; struct file *file; struct dentry *root_dentry; struct fuse_req *init_req; int err; int is_bdev = sb->s_bdev != NULL; err = -EINVAL; if (sb->s_flags & MS_MANDLOCK) goto err; if (!parse_fuse_opt((char *) data, &d, is_bdev)) goto err; if (is_bdev) { #ifdef CONFIG_BLOCK err = -EINVAL; if (!sb_set_blocksize(sb, d.blksize)) goto err; #endif } else { sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; } sb->s_magic = FUSE_SUPER_MAGIC; sb->s_op = &fuse_super_operations; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_export_op = &fuse_export_operations; file = fget(d.fd); err = -EINVAL; if (!file) goto err; if (file->f_op != &fuse_dev_operations) goto err_fput; fc = kmalloc(sizeof(*fc), GFP_KERNEL); err = -ENOMEM; if (!fc) goto err_fput; fuse_conn_init(fc); fc->dev = sb->s_dev; fc->sb = sb; err = fuse_bdi_init(fc, sb); if (err) goto err_put_conn; sb->s_bdi = &fc->bdi; /* Handle umasking inside the fuse code */ if (sb->s_flags & MS_POSIXACL) fc->dont_mask = 1; sb->s_flags |= MS_POSIXACL; fc->release = fuse_free_conn; fc->flags = d.flags; fc->user_id = d.user_id; fc->group_id = d.group_id; fc->max_read = max_t(unsigned, 4096, d.max_read); /* Used by get_root_inode() */ sb->s_fs_info = fc; err = -ENOMEM; root = fuse_get_root_inode(sb, d.rootmode); if (!root) goto err_put_conn; root_dentry = d_alloc_root(root); if (!root_dentry) { iput(root); goto err_put_conn; } init_req = fuse_request_alloc(); if (!init_req) goto err_put_root; if (is_bdev) { fc->destroy_req = fuse_request_alloc(); if (!fc->destroy_req) goto err_free_init_req; } mutex_lock(&fuse_mutex); err = -EINVAL; if (file->private_data) goto err_unlock; err = fuse_ctl_add_conn(fc); if (err) goto err_unlock; list_add_tail(&fc->entry, 
&fuse_conn_list); sb->s_root = root_dentry; fc->connected = 1; file->private_data = fuse_conn_get(fc); mutex_unlock(&fuse_mutex); /* * atomic_dec_and_test() in fput() provides the necessary * memory barrier for file->private_data to be visible on all * CPUs after this */ fput(file); fuse_send_init(fc, init_req); return 0; err_unlock: mutex_unlock(&fuse_mutex); err_free_init_req: fuse_request_free(init_req); err_put_root: dput(root_dentry); err_put_conn: fuse_bdi_destroy(fc); fuse_conn_put(fc); err_fput: fput(file); err: return err; } static int fuse_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt) { return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt); } static void fuse_kill_sb_anon(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); if (fc) { down_write(&fc->killsb); fc->sb = NULL; up_write(&fc->killsb); } kill_anon_super(sb); } static struct file_system_type fuse_fs_type = { .owner = THIS_MODULE, .name = "fuse", .fs_flags = FS_HAS_SUBTYPE, .get_sb = fuse_get_sb, .kill_sb = fuse_kill_sb_anon, }; #ifdef CONFIG_BLOCK static int fuse_get_sb_blk(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt) { return get_sb_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super, mnt); } static void fuse_kill_sb_blk(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); if (fc) { down_write(&fc->killsb); fc->sb = NULL; up_write(&fc->killsb); } kill_block_super(sb); } static struct file_system_type fuseblk_fs_type = { .owner = THIS_MODULE, .name = "fuseblk", .get_sb = fuse_get_sb_blk, .kill_sb = fuse_kill_sb_blk, .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE, }; static inline int register_fuseblk(void) { return register_filesystem(&fuseblk_fs_type); } static inline void unregister_fuseblk(void) { unregister_filesystem(&fuseblk_fs_type); } #else static inline int register_fuseblk(void) { return 0; } static 
inline void unregister_fuseblk(void) { } #endif static void fuse_inode_init_once(void *foo) { struct inode *inode = foo; inode_init_once(inode); } static int __init fuse_fs_init(void) { int err; err = register_filesystem(&fuse_fs_type); if (err) goto out; err = register_fuseblk(); if (err) goto out_unreg; fuse_inode_cachep = kmem_cache_create("fuse_inode", sizeof(struct fuse_inode), 0, SLAB_HWCACHE_ALIGN, fuse_inode_init_once); err = -ENOMEM; if (!fuse_inode_cachep) goto out_unreg2; return 0; out_unreg2: unregister_fuseblk(); out_unreg: unregister_filesystem(&fuse_fs_type); out: return err; } static void fuse_fs_cleanup(void) { unregister_filesystem(&fuse_fs_type); unregister_fuseblk(); kmem_cache_destroy(fuse_inode_cachep); } static struct kobject *fuse_kobj; static struct kobject *connections_kobj; static int fuse_sysfs_init(void) { int err; fuse_kobj = kobject_create_and_add("fuse", fs_kobj); if (!fuse_kobj) { err = -ENOMEM; goto out_err; } connections_kobj = kobject_create_and_add("connections", fuse_kobj); if (!connections_kobj) { err = -ENOMEM; goto out_fuse_unregister; } return 0; out_fuse_unregister: kobject_put(fuse_kobj); out_err: return err; } static void fuse_sysfs_cleanup(void) { kobject_put(connections_kobj); kobject_put(fuse_kobj); } static int __init fuse_init(void) { int res; printk(KERN_INFO "fuse init (API version %i.%i)\n", FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION); INIT_LIST_HEAD(&fuse_conn_list); res = fuse_fs_init(); if (res) goto err; res = fuse_dev_init(); if (res) goto err_fs_cleanup; res = fuse_sysfs_init(); if (res) goto err_dev_cleanup; res = fuse_ctl_init(); if (res) goto err_sysfs_cleanup; sanitize_global_limit(&max_user_bgreq); sanitize_global_limit(&max_user_congthresh); return 0; err_sysfs_cleanup: fuse_sysfs_cleanup(); err_dev_cleanup: fuse_dev_cleanup(); err_fs_cleanup: fuse_fs_cleanup(); err: return res; } static void __exit fuse_exit(void) { printk(KERN_DEBUG "fuse exit\n"); fuse_ctl_cleanup(); fuse_sysfs_cleanup(); 
fuse_fs_cleanup(); fuse_dev_cleanup(); } module_init(fuse_init); module_exit(fuse_exit);
gpl-2.0
kannu1994/NoFrills-Samsung-kernel
arch/arm/mach-omap1/board-perseus2.c
726
7691
/* * linux/arch/arm/mach-omap1/board-perseus2.c * * Modified from board-generic.c * * Original OMAP730 support by Jean Pihet <j-pihet@ti.com> * Updated for 2.6 by Kevin Hilman <kjh@hilman.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/input.h> #include <linux/smc91x.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <plat/tc.h> #include <mach/gpio.h> #include <plat/mux.h> #include <plat/fpga.h> #include <plat/flash.h> #include <plat/keypad.h> #include <plat/common.h> #include <plat/board.h> static int p2_keymap[] = { KEY(0,0,KEY_UP), KEY(0,1,KEY_RIGHT), KEY(0,2,KEY_LEFT), KEY(0,3,KEY_DOWN), KEY(0,4,KEY_ENTER), KEY(1,0,KEY_F10), KEY(1,1,KEY_SEND), KEY(1,2,KEY_END), KEY(1,3,KEY_VOLUMEDOWN), KEY(1,4,KEY_VOLUMEUP), KEY(1,5,KEY_RECORD), KEY(2,0,KEY_F9), KEY(2,1,KEY_3), KEY(2,2,KEY_6), KEY(2,3,KEY_9), KEY(2,4,KEY_KPDOT), KEY(3,0,KEY_BACK), KEY(3,1,KEY_2), KEY(3,2,KEY_5), KEY(3,3,KEY_8), KEY(3,4,KEY_0), KEY(3,5,KEY_KPSLASH), KEY(4,0,KEY_HOME), KEY(4,1,KEY_1), KEY(4,2,KEY_4), KEY(4,3,KEY_7), KEY(4,4,KEY_KPASTERISK), KEY(4,5,KEY_POWER), 0 }; static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { [0] = { .start = H2P2_DBG_FPGA_ETHR_START, /* Physical */ .end = H2P2_DBG_FPGA_ETHR_START + 0xf, .flags = IORESOURCE_MEM, }, [1] = { .start = INT_7XX_MPU_EXT_NIRQ, .end = 0, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; static struct mtd_partition nor_partitions[] = { /* bootloader (U-Boot, 
etc) in first sector */ { .name = "bootloader", .offset = 0, .size = SZ_128K, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, /* bootloader params in the next sector */ { .name = "params", .offset = MTDPART_OFS_APPEND, .size = SZ_128K, .mask_flags = 0, }, /* kernel */ { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = SZ_2M, .mask_flags = 0 }, /* rest of flash is a file system */ { .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, .mask_flags = 0 }, }; static struct physmap_flash_data nor_data = { .width = 2, .set_vpp = omap1_set_vpp, .parts = nor_partitions, .nr_parts = ARRAY_SIZE(nor_partitions), }; static struct resource nor_resource = { .start = OMAP_CS0_PHYS, .end = OMAP_CS0_PHYS + SZ_32M - 1, .flags = IORESOURCE_MEM, }; static struct platform_device nor_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &nor_data, }, .num_resources = 1, .resource = &nor_resource, }; static void nand_cmd_ctl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *this = mtd->priv; unsigned long mask; if (cmd == NAND_CMD_NONE) return; mask = (ctrl & NAND_CLE) ? 
0x02 : 0; if (ctrl & NAND_ALE) mask |= 0x04; writeb(cmd, (unsigned long)this->IO_ADDR_W | mask); } #define P2_NAND_RB_GPIO_PIN 62 static int nand_dev_ready(struct mtd_info *mtd) { return gpio_get_value(P2_NAND_RB_GPIO_PIN); } static const char *part_probes[] = { "cmdlinepart", NULL }; static struct platform_nand_data nand_data = { .chip = { .nr_chips = 1, .chip_offset = 0, .options = NAND_SAMSUNG_LP_OPTIONS, .part_probe_types = part_probes, }, .ctrl = { .cmd_ctrl = nand_cmd_ctl, .dev_ready = nand_dev_ready, }, }; static struct resource nand_resource = { .start = OMAP_CS3_PHYS, .end = OMAP_CS3_PHYS + SZ_4K - 1, .flags = IORESOURCE_MEM, }; static struct platform_device nand_device = { .name = "gen_nand", .id = 0, .dev = { .platform_data = &nand_data, }, .num_resources = 1, .resource = &nand_resource, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .dev = { .platform_data = &smc91x_info, }, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; static struct resource kp_resources[] = { [0] = { .start = INT_7XX_MPUIO_KEYPAD, .end = INT_7XX_MPUIO_KEYPAD, .flags = IORESOURCE_IRQ, }, }; static struct omap_kp_platform_data kp_data = { .rows = 8, .cols = 8, .keymap = p2_keymap, .keymapsize = ARRAY_SIZE(p2_keymap), .delay = 4, .dbounce = 1, }; static struct platform_device kp_device = { .name = "omap-keypad", .id = -1, .dev = { .platform_data = &kp_data, }, .num_resources = ARRAY_SIZE(kp_resources), .resource = kp_resources, }; static struct platform_device lcd_device = { .name = "lcd_p2", .id = -1, }; static struct platform_device *devices[] __initdata = { &nor_device, &nand_device, &smc91x_device, &kp_device, &lcd_device, }; static struct omap_lcd_config perseus2_lcd_config __initdata = { .ctrl_name = "internal", }; static struct omap_board_config_kernel perseus2_config[] __initdata = { { OMAP_TAG_LCD, &perseus2_lcd_config }, }; static void __init omap_perseus2_init(void) { if (gpio_request(P2_NAND_RB_GPIO_PIN, "NAND 
ready") < 0) BUG(); gpio_direction_input(P2_NAND_RB_GPIO_PIN); omap_cfg_reg(L3_1610_FLASH_CS2B_OE); omap_cfg_reg(M8_1610_FLASH_CS2B_WE); platform_add_devices(devices, ARRAY_SIZE(devices)); omap_board_config = perseus2_config; omap_board_config_size = ARRAY_SIZE(perseus2_config); omap_serial_init(); omap_register_i2c_bus(1, 100, NULL, 0); } static void __init perseus2_init_smc91x(void) { fpga_write(1, H2P2_DBG_FPGA_LAN_RESET); mdelay(50); fpga_write(fpga_read(H2P2_DBG_FPGA_LAN_RESET) & ~1, H2P2_DBG_FPGA_LAN_RESET); mdelay(50); } static void __init omap_perseus2_init_irq(void) { omap1_init_common_hw(); omap_init_irq(); omap_gpio_init(); perseus2_init_smc91x(); } /* Only FPGA needs to be mapped here. All others are done with ioremap */ static struct map_desc omap_perseus2_io_desc[] __initdata = { { .virtual = H2P2_DBG_FPGA_BASE, .pfn = __phys_to_pfn(H2P2_DBG_FPGA_START), .length = H2P2_DBG_FPGA_SIZE, .type = MT_DEVICE } }; static void __init omap_perseus2_map_io(void) { omap1_map_common_io(); iotable_init(omap_perseus2_io_desc, ARRAY_SIZE(omap_perseus2_io_desc)); /* Early, board-dependent init */ /* * Hold GSM Reset until needed */ omap_writew(omap_readw(OMAP7XX_DSP_M_CTL) & ~1, OMAP7XX_DSP_M_CTL); /* * UARTs -> done automagically by 8250 driver */ /* * CSx timings, GPIO Mux ... 
setup */ /* Flash: CS0 timings setup */ omap_writel(0x0000fff3, OMAP7XX_FLASH_CFG_0); omap_writel(0x00000088, OMAP7XX_FLASH_ACFG_0); /* * Ethernet support through the debug board * CS1 timings setup */ omap_writel(0x0000fff3, OMAP7XX_FLASH_CFG_1); omap_writel(0x00000000, OMAP7XX_FLASH_ACFG_1); /* * Configure MPU_EXT_NIRQ IO in IO_CONF9 register, * It is used as the Ethernet controller interrupt */ omap_writel(omap_readl(OMAP7XX_IO_CONF_9) & 0x1FFFFFFF, OMAP7XX_IO_CONF_9); } MACHINE_START(OMAP_PERSEUS2, "OMAP730 Perseus2") /* Maintainer: Kevin Hilman <kjh@hilman.org> */ .phys_io = 0xfff00000, .io_pg_offst = ((0xfef00000) >> 18) & 0xfffc, .boot_params = 0x10000100, .map_io = omap_perseus2_map_io, .init_irq = omap_perseus2_init_irq, .init_machine = omap_perseus2_init, .timer = &omap_timer, MACHINE_END
gpl-2.0
spock1104/vm696-kernel
arch/sparc/kernel/power.c
726
1594
/* power.c: Power management driver. * * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/reboot.h> #include <linux/of_device.h> #include <asm/prom.h> #include <asm/io.h> static void __iomem *power_reg; static irqreturn_t power_handler(int irq, void *dev_id) { orderly_poweroff(true); /* FIXME: Check registers for status... */ return IRQ_HANDLED; } static int __devinit has_button_interrupt(unsigned int irq, struct device_node *dp) { if (irq == 0xffffffff) return 0; if (!of_find_property(dp, "button", NULL)) return 0; return 1; } static int __devinit power_probe(struct of_device *op, const struct of_device_id *match) { struct resource *res = &op->resource[0]; unsigned int irq= op->irqs[0]; power_reg = of_ioremap(res, 0, 0x4, "power"); printk(KERN_INFO "%s: Control reg at %llx\n", op->dev.of_node->name, res->start); if (has_button_interrupt(irq, op->dev.of_node)) { if (request_irq(irq, power_handler, 0, "power", NULL) < 0) printk(KERN_ERR "power: Cannot setup IRQ handler.\n"); } return 0; } static struct of_device_id __initdata power_match[] = { { .name = "power", }, {}, }; static struct of_platform_driver power_driver = { .probe = power_probe, .driver = { .name = "power", .owner = THIS_MODULE, .of_match_table = power_match, }, }; static int __init power_init(void) { return of_register_driver(&power_driver, &of_platform_bus_type); } device_initcall(power_init);
gpl-2.0
jomeister15/SGH-I727-kernel
arch/arm/mach-omap1/board-sx1.c
726
10377
/* * linux/arch/arm/mach-omap1/board-sx1.c * * Modified from board-generic.c * * Support for the Siemens SX1 mobile phone. * * Original version : Vladimir Ananiev (Vovan888-at-gmail com) * * Maintainters : Vladimir Ananiev (aka Vovan888), Sergge * oslik.ru * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/input.h> #include <linux/platform_device.h> #include <linux/notifier.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/types.h> #include <linux/i2c.h> #include <linux/errno.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/gpio.h> #include <plat/flash.h> #include <plat/mux.h> #include <plat/dma.h> #include <plat/irda.h> #include <plat/usb.h> #include <plat/tc.h> #include <plat/board.h> #include <plat/common.h> #include <plat/keypad.h> #include <plat/board-sx1.h> /* Write to I2C device */ int sx1_i2c_write_byte(u8 devaddr, u8 regoffset, u8 value) { struct i2c_adapter *adap; int err; struct i2c_msg msg[1]; unsigned char data[2]; adap = i2c_get_adapter(0); if (!adap) return -ENODEV; msg->addr = devaddr; /* I2C address of chip */ msg->flags = 0; msg->len = 2; msg->buf = data; data[0] = regoffset; /* register num */ data[1] = value; /* register data */ err = i2c_transfer(adap, msg, 1); i2c_put_adapter(adap); if (err >= 0) return 0; return err; } /* Read from I2C device */ int sx1_i2c_read_byte(u8 devaddr, u8 regoffset, u8 *value) { struct i2c_adapter *adap; int err; struct i2c_msg msg[1]; unsigned char data[2]; adap = i2c_get_adapter(0); if (!adap) return -ENODEV; msg->addr = devaddr; /* I2C address of chip */ msg->flags = 0; msg->len = 1; msg->buf = data; data[0] = regoffset; /* register num */ err = i2c_transfer(adap, msg, 1); 
msg->addr = devaddr; /* I2C address */ msg->flags = I2C_M_RD; msg->len = 1; msg->buf = data; err = i2c_transfer(adap, msg, 1); *value = data[0]; i2c_put_adapter(adap); if (err >= 0) return 0; return err; } /* set keyboard backlight intensity */ int sx1_setkeylight(u8 keylight) { if (keylight > SOFIA_MAX_LIGHT_VAL) keylight = SOFIA_MAX_LIGHT_VAL; return sx1_i2c_write_byte(SOFIA_I2C_ADDR, SOFIA_KEYLIGHT_REG, keylight); } /* get current keylight intensity */ int sx1_getkeylight(u8 * keylight) { return sx1_i2c_read_byte(SOFIA_I2C_ADDR, SOFIA_KEYLIGHT_REG, keylight); } /* set LCD backlight intensity */ int sx1_setbacklight(u8 backlight) { if (backlight > SOFIA_MAX_LIGHT_VAL) backlight = SOFIA_MAX_LIGHT_VAL; return sx1_i2c_write_byte(SOFIA_I2C_ADDR, SOFIA_BACKLIGHT_REG, backlight); } /* get current LCD backlight intensity */ int sx1_getbacklight (u8 * backlight) { return sx1_i2c_read_byte(SOFIA_I2C_ADDR, SOFIA_BACKLIGHT_REG, backlight); } /* set LCD backlight power on/off */ int sx1_setmmipower(u8 onoff) { int err; u8 dat = 0; err = sx1_i2c_read_byte(SOFIA_I2C_ADDR, SOFIA_POWER1_REG, &dat); if (err < 0) return err; if (onoff) dat |= SOFIA_MMILIGHT_POWER; else dat &= ~SOFIA_MMILIGHT_POWER; return sx1_i2c_write_byte(SOFIA_I2C_ADDR, SOFIA_POWER1_REG, dat); } /* set USB power on/off */ int sx1_setusbpower(u8 onoff) { int err; u8 dat = 0; err = sx1_i2c_read_byte(SOFIA_I2C_ADDR, SOFIA_POWER1_REG, &dat); if (err < 0) return err; if (onoff) dat |= SOFIA_USB_POWER; else dat &= ~SOFIA_USB_POWER; return sx1_i2c_write_byte(SOFIA_I2C_ADDR, SOFIA_POWER1_REG, dat); } EXPORT_SYMBOL(sx1_setkeylight); EXPORT_SYMBOL(sx1_getkeylight); EXPORT_SYMBOL(sx1_setbacklight); EXPORT_SYMBOL(sx1_getbacklight); EXPORT_SYMBOL(sx1_setmmipower); EXPORT_SYMBOL(sx1_setusbpower); /*----------- Keypad -------------------------*/ static int sx1_keymap[] = { KEY(5, 3, GROUP_0 | 117), /* camera Qt::Key_F17 */ KEY(0, 4, GROUP_0 | 114), /* voice memo Qt::Key_F14 */ KEY(1, 4, GROUP_2 | 114), /* voice memo */ KEY(2, 
4, GROUP_3 | 114), /* voice memo */ KEY(0, 0, GROUP_1 | KEY_F12), /* red button Qt::Key_Hangup */ KEY(4, 3, GROUP_1 | KEY_LEFT), KEY(2, 3, GROUP_1 | KEY_DOWN), KEY(1, 3, GROUP_1 | KEY_RIGHT), KEY(0, 3, GROUP_1 | KEY_UP), KEY(3, 3, GROUP_1 | KEY_POWER), /* joystick press or Qt::Key_Select */ KEY(5, 0, GROUP_1 | KEY_1), KEY(4, 0, GROUP_1 | KEY_2), KEY(3, 0, GROUP_1 | KEY_3), KEY(3, 4, GROUP_1 | KEY_4), KEY(4, 4, GROUP_1 | KEY_5), KEY(5, 4, GROUP_1 | KEY_KPASTERISK),/* "*" */ KEY(4, 1, GROUP_1 | KEY_6), KEY(5, 1, GROUP_1 | KEY_7), KEY(3, 1, GROUP_1 | KEY_8), KEY(3, 2, GROUP_1 | KEY_9), KEY(5, 2, GROUP_1 | KEY_0), KEY(4, 2, GROUP_1 | 113), /* # F13 Toggle input method Qt::Key_F13 */ KEY(0, 1, GROUP_1 | KEY_F11), /* green button Qt::Key_Call */ KEY(1, 2, GROUP_1 | KEY_YEN), /* left soft Qt::Key_Context1 */ KEY(2, 2, GROUP_1 | KEY_F8), /* right soft Qt::Key_Back */ KEY(2, 1, GROUP_1 | KEY_LEFTSHIFT), /* shift */ KEY(1, 1, GROUP_1 | KEY_BACKSPACE), /* C (clear) */ KEY(0, 2, GROUP_1 | KEY_F7), /* menu Qt::Key_Menu */ 0 }; static struct resource sx1_kp_resources[] = { [0] = { .start = INT_KEYBOARD, .end = INT_KEYBOARD, .flags = IORESOURCE_IRQ, }, }; static struct omap_kp_platform_data sx1_kp_data = { .rows = 6, .cols = 6, .keymap = sx1_keymap, .keymapsize = ARRAY_SIZE(sx1_keymap), .delay = 80, }; static struct platform_device sx1_kp_device = { .name = "omap-keypad", .id = -1, .dev = { .platform_data = &sx1_kp_data, }, .num_resources = ARRAY_SIZE(sx1_kp_resources), .resource = sx1_kp_resources, }; /*----------- IRDA -------------------------*/ static struct omap_irda_config sx1_irda_data = { .transceiver_cap = IR_SIRMODE, .rx_channel = OMAP_DMA_UART3_RX, .tx_channel = OMAP_DMA_UART3_TX, .dest_start = UART3_THR, .src_start = UART3_RHR, .tx_trigger = 0, .rx_trigger = 0, }; static struct resource sx1_irda_resources[] = { [0] = { .start = INT_UART3, .end = INT_UART3, .flags = IORESOURCE_IRQ, }, }; static u64 irda_dmamask = 0xffffffff; static struct platform_device 
sx1_irda_device = { .name = "omapirda", .id = 0, .dev = { .platform_data = &sx1_irda_data, .dma_mask = &irda_dmamask, }, .num_resources = ARRAY_SIZE(sx1_irda_resources), .resource = sx1_irda_resources, }; /*----------- MTD -------------------------*/ static struct mtd_partition sx1_partitions[] = { /* bootloader (U-Boot, etc) in first sector */ { .name = "bootloader", .offset = 0x01800000, .size = SZ_128K, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, /* bootloader params in the next sector */ { .name = "params", .offset = MTDPART_OFS_APPEND, .size = SZ_128K, .mask_flags = 0, }, /* kernel */ { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = SZ_2M - 2 * SZ_128K, .mask_flags = 0 }, /* file system */ { .name = "filesystem", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, .mask_flags = 0 } }; static struct physmap_flash_data sx1_flash_data = { .width = 2, .set_vpp = omap1_set_vpp, .parts = sx1_partitions, .nr_parts = ARRAY_SIZE(sx1_partitions), }; #ifdef CONFIG_SX1_OLD_FLASH /* MTD Intel StrataFlash - old flashes */ static struct resource sx1_old_flash_resource[] = { [0] = { .start = OMAP_CS0_PHYS, /* Physical */ .end = OMAP_CS0_PHYS + SZ_16M - 1,, .flags = IORESOURCE_MEM, }, [1] = { .start = OMAP_CS1_PHYS, .end = OMAP_CS1_PHYS + SZ_8M - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device sx1_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &sx1_flash_data, }, .num_resources = 2, .resource = &sx1_old_flash_resource, }; #else /* MTD Intel 4000 flash - new flashes */ static struct resource sx1_new_flash_resource = { .start = OMAP_CS0_PHYS, .end = OMAP_CS0_PHYS + SZ_32M - 1, .flags = IORESOURCE_MEM, }; static struct platform_device sx1_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &sx1_flash_data, }, .num_resources = 1, .resource = &sx1_new_flash_resource, }; #endif /*----------- USB -------------------------*/ static struct omap_usb_config sx1_usb_config __initdata = { .otg = 
0, .register_dev = 1, .register_host = 0, .hmc_mode = 0, .pins[0] = 2, .pins[1] = 0, .pins[2] = 0, }; /*----------- LCD -------------------------*/ static struct platform_device sx1_lcd_device = { .name = "lcd_sx1", .id = -1, }; static struct omap_lcd_config sx1_lcd_config __initdata = { .ctrl_name = "internal", }; /*-----------------------------------------*/ static struct platform_device *sx1_devices[] __initdata = { &sx1_flash_device, &sx1_kp_device, &sx1_lcd_device, &sx1_irda_device, }; /*-----------------------------------------*/ static struct omap_board_config_kernel sx1_config[] __initdata = { { OMAP_TAG_LCD, &sx1_lcd_config }, }; /*-----------------------------------------*/ static void __init omap_sx1_init(void) { /* mux pins for uarts */ omap_cfg_reg(UART1_TX); omap_cfg_reg(UART1_RTS); omap_cfg_reg(UART2_TX); omap_cfg_reg(UART2_RTS); omap_cfg_reg(UART3_TX); omap_cfg_reg(UART3_RX); platform_add_devices(sx1_devices, ARRAY_SIZE(sx1_devices)); omap_board_config = sx1_config; omap_board_config_size = ARRAY_SIZE(sx1_config); omap_serial_init(); omap_register_i2c_bus(1, 100, NULL, 0); omap_usb_init(&sx1_usb_config); sx1_mmc_init(); /* turn on USB power */ /* sx1_setusbpower(1); cant do it here because i2c is not ready */ gpio_request(1, "A_IRDA_OFF"); gpio_request(11, "A_SWITCH"); gpio_request(15, "A_USB_ON"); gpio_direction_output(1, 1); /*A_IRDA_OFF = 1 */ gpio_direction_output(11, 0); /*A_SWITCH = 0 */ gpio_direction_output(15, 0); /*A_USB_ON = 0 */ } /*----------------------------------------*/ static void __init omap_sx1_init_irq(void) { omap1_init_common_hw(); omap_init_irq(); omap_gpio_init(); } /*----------------------------------------*/ static void __init omap_sx1_map_io(void) { omap1_map_common_io(); } MACHINE_START(SX1, "OMAP310 based Siemens SX1") .phys_io = 0xfff00000, .io_pg_offst = ((0xfef00000) >> 18) & 0xfffc, .boot_params = 0x10000100, .map_io = omap_sx1_map_io, .init_irq = omap_sx1_init_irq, .init_machine = omap_sx1_init, .timer = 
&omap_timer, MACHINE_END
gpl-2.0
scruiser/kernel
drivers/md/dm-delay.c
982
8131
/* * Copyright (C) 2005-2007 Red Hat GmbH * * A target that delays reads and/or writes and can send * them to different devices. * * This file is released under the GPL. */ #include <linux/module.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/slab.h> #include <linux/device-mapper.h> #define DM_MSG_PREFIX "delay" struct delay_c { struct timer_list delay_timer; struct mutex timer_lock; struct workqueue_struct *kdelayd_wq; struct work_struct flush_expired_bios; struct list_head delayed_bios; atomic_t may_delay; struct dm_dev *dev_read; sector_t start_read; unsigned read_delay; unsigned reads; struct dm_dev *dev_write; sector_t start_write; unsigned write_delay; unsigned writes; }; struct dm_delay_info { struct delay_c *context; struct list_head list; unsigned long expires; }; static DEFINE_MUTEX(delayed_bios_lock); static void handle_delayed_timer(unsigned long data) { struct delay_c *dc = (struct delay_c *)data; queue_work(dc->kdelayd_wq, &dc->flush_expired_bios); } static void queue_timeout(struct delay_c *dc, unsigned long expires) { mutex_lock(&dc->timer_lock); if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires) mod_timer(&dc->delay_timer, expires); mutex_unlock(&dc->timer_lock); } static void flush_bios(struct bio *bio) { struct bio *n; while (bio) { n = bio->bi_next; bio->bi_next = NULL; generic_make_request(bio); bio = n; } } static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all) { struct dm_delay_info *delayed, *next; unsigned long next_expires = 0; int start_timer = 0; struct bio_list flush_bios = { }; mutex_lock(&delayed_bios_lock); list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) { if (flush_all || time_after_eq(jiffies, delayed->expires)) { struct bio *bio = dm_bio_from_per_bio_data(delayed, sizeof(struct dm_delay_info)); list_del(&delayed->list); bio_list_add(&flush_bios, bio); if ((bio_data_dir(bio) == WRITE)) delayed->context->writes--; else 
delayed->context->reads--; continue; } if (!start_timer) { start_timer = 1; next_expires = delayed->expires; } else next_expires = min(next_expires, delayed->expires); } mutex_unlock(&delayed_bios_lock); if (start_timer) queue_timeout(dc, next_expires); return bio_list_get(&flush_bios); } static void flush_expired_bios(struct work_struct *work) { struct delay_c *dc; dc = container_of(work, struct delay_c, flush_expired_bios); flush_bios(flush_delayed_bios(dc, 0)); } /* * Mapping parameters: * <device> <offset> <delay> [<write_device> <write_offset> <write_delay>] * * With separate write parameters, the first set is only used for reads. * Delays are specified in milliseconds. */ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct delay_c *dc; unsigned long long tmpll; char dummy; if (argc != 3 && argc != 6) { ti->error = "requires exactly 3 or 6 arguments"; return -EINVAL; } dc = kmalloc(sizeof(*dc), GFP_KERNEL); if (!dc) { ti->error = "Cannot allocate context"; return -ENOMEM; } dc->reads = dc->writes = 0; if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) { ti->error = "Invalid device sector"; goto bad; } dc->start_read = tmpll; if (sscanf(argv[2], "%u%c", &dc->read_delay, &dummy) != 1) { ti->error = "Invalid delay"; goto bad; } if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dc->dev_read)) { ti->error = "Device lookup failed"; goto bad; } dc->dev_write = NULL; if (argc == 3) goto out; if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) { ti->error = "Invalid write device sector"; goto bad_dev_read; } dc->start_write = tmpll; if (sscanf(argv[5], "%u%c", &dc->write_delay, &dummy) != 1) { ti->error = "Invalid write delay"; goto bad_dev_read; } if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &dc->dev_write)) { ti->error = "Write device lookup failed"; goto bad_dev_read; } out: dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); if (!dc->kdelayd_wq) { DMERR("Couldn't start kdelayd"); goto 
bad_queue; } setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc); INIT_WORK(&dc->flush_expired_bios, flush_expired_bios); INIT_LIST_HEAD(&dc->delayed_bios); mutex_init(&dc->timer_lock); atomic_set(&dc->may_delay, 1); ti->num_flush_bios = 1; ti->num_discard_bios = 1; ti->per_bio_data_size = sizeof(struct dm_delay_info); ti->private = dc; return 0; bad_queue: if (dc->dev_write) dm_put_device(ti, dc->dev_write); bad_dev_read: dm_put_device(ti, dc->dev_read); bad: kfree(dc); return -EINVAL; } static void delay_dtr(struct dm_target *ti) { struct delay_c *dc = ti->private; destroy_workqueue(dc->kdelayd_wq); dm_put_device(ti, dc->dev_read); if (dc->dev_write) dm_put_device(ti, dc->dev_write); kfree(dc); } static int delay_bio(struct delay_c *dc, int delay, struct bio *bio) { struct dm_delay_info *delayed; unsigned long expires = 0; if (!delay || !atomic_read(&dc->may_delay)) return 1; delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info)); delayed->context = dc; delayed->expires = expires = jiffies + (delay * HZ / 1000); mutex_lock(&delayed_bios_lock); if (bio_data_dir(bio) == WRITE) dc->writes++; else dc->reads++; list_add_tail(&delayed->list, &dc->delayed_bios); mutex_unlock(&delayed_bios_lock); queue_timeout(dc, expires); return 0; } static void delay_presuspend(struct dm_target *ti) { struct delay_c *dc = ti->private; atomic_set(&dc->may_delay, 0); del_timer_sync(&dc->delay_timer); flush_bios(flush_delayed_bios(dc, 1)); } static void delay_resume(struct dm_target *ti) { struct delay_c *dc = ti->private; atomic_set(&dc->may_delay, 1); } static int delay_map(struct dm_target *ti, struct bio *bio) { struct delay_c *dc = ti->private; if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { bio->bi_bdev = dc->dev_write->bdev; if (bio_sectors(bio)) bio->bi_iter.bi_sector = dc->start_write + dm_target_offset(ti, bio->bi_iter.bi_sector); return delay_bio(dc, dc->write_delay, bio); } bio->bi_bdev = dc->dev_read->bdev; bio->bi_iter.bi_sector = 
dc->start_read + dm_target_offset(ti, bio->bi_iter.bi_sector); return delay_bio(dc, dc->read_delay, bio); } static void delay_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) { struct delay_c *dc = ti->private; int sz = 0; switch (type) { case STATUSTYPE_INFO: DMEMIT("%u %u", dc->reads, dc->writes); break; case STATUSTYPE_TABLE: DMEMIT("%s %llu %u", dc->dev_read->name, (unsigned long long) dc->start_read, dc->read_delay); if (dc->dev_write) DMEMIT(" %s %llu %u", dc->dev_write->name, (unsigned long long) dc->start_write, dc->write_delay); break; } } static int delay_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct delay_c *dc = ti->private; int ret = 0; ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data); if (ret) goto out; if (dc->dev_write) ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data); out: return ret; } static struct target_type delay_target = { .name = "delay", .version = {1, 2, 1}, .module = THIS_MODULE, .ctr = delay_ctr, .dtr = delay_dtr, .map = delay_map, .presuspend = delay_presuspend, .resume = delay_resume, .status = delay_status, .iterate_devices = delay_iterate_devices, }; static int __init dm_delay_init(void) { int r; r = dm_register_target(&delay_target); if (r < 0) { DMERR("register failed %d", r); goto bad_register; } return 0; bad_register: return r; } static void __exit dm_delay_exit(void) { dm_unregister_target(&delay_target); } /* Module hooks */ module_init(dm_delay_init); module_exit(dm_delay_exit); MODULE_DESCRIPTION(DM_NAME " delay target"); MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>"); MODULE_LICENSE("GPL");
gpl-2.0
tjarnold/android_kernel_jewel_3.4.49
arch/mips/kernel/process.c
1494
11546
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others. * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2004 Thiemo Seufer */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/tick.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/export.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/personality.h> #include <linux/sys.h> #include <linux/user.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/kallsyms.h> #include <linux/random.h> #include <asm/asm.h> #include <asm/bootinfo.h> #include <asm/cpu.h> #include <asm/dsp.h> #include <asm/fpu.h> #include <asm/pgtable.h> #include <asm/mipsregs.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/elf.h> #include <asm/isadep.h> #include <asm/inst.h> #include <asm/stacktrace.h> /* * The idle thread. There's no useful work to be done, so just try to conserve * power and have a low exit latency (ie sit in a loop waiting for somebody to * say that they'd like to reschedule) */ void __noreturn cpu_idle(void) { int cpu; /* CPU is going idle. 
*/ cpu = smp_processor_id(); /* endless idle loop with no priority at all */ while (1) { tick_nohz_idle_enter(); rcu_idle_enter(); while (!need_resched() && cpu_online(cpu)) { #ifdef CONFIG_MIPS_MT_SMTC extern void smtc_idle_loop_hook(void); smtc_idle_loop_hook(); #endif if (cpu_wait) { /* Don't trace irqs off for idle */ stop_critical_timings(); (*cpu_wait)(); start_critical_timings(); } } #ifdef CONFIG_HOTPLUG_CPU if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map)) play_dead(); #endif rcu_idle_exit(); tick_nohz_idle_exit(); schedule_preempt_disabled(); } } asmlinkage void ret_from_fork(void); void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) { unsigned long status; /* New thread loses kernel privileges. */ status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK); #ifdef CONFIG_64BIT status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR; #endif status |= KU_USER; regs->cp0_status = status; clear_used_math(); clear_fpu_owner(); if (cpu_has_dsp) __init_dsp(); regs->cp0_epc = pc; regs->regs[29] = sp; } void exit_thread(void) { } void flush_thread(void) { } int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct thread_info *ti = task_thread_info(p); struct pt_regs *childregs; unsigned long childksp; p->set_child_tid = p->clear_child_tid = NULL; childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; preempt_disable(); if (is_fpu_owner()) save_fp(p); if (cpu_has_dsp) save_dsp(p); preempt_enable(); /* set up new TSS. */ childregs = (struct pt_regs *) childksp - 1; /* Put the stack after the struct pt_regs. 
*/ childksp = (unsigned long) childregs; *childregs = *regs; childregs->regs[7] = 0; /* Clear error flag */ childregs->regs[2] = 0; /* Child gets zero as return value */ if (childregs->cp0_status & ST0_CU0) { childregs->regs[28] = (unsigned long) ti; childregs->regs[29] = childksp; ti->addr_limit = KERNEL_DS; } else { childregs->regs[29] = usp; ti->addr_limit = USER_DS; } p->thread.reg29 = (unsigned long) childregs; p->thread.reg31 = (unsigned long) ret_from_fork; /* * New tasks lose permission to use the fpu. This accelerates context * switching for most programs since they don't use the fpu. */ p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); #ifdef CONFIG_MIPS_MT_SMTC /* * SMTC restores TCStatus after Status, and the CU bits * are aliased there. */ childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); #endif clear_tsk_thread_flag(p, TIF_USEDFPU); #ifdef CONFIG_MIPS_MT_FPAFF clear_tsk_thread_flag(p, TIF_FPUBOUND); #endif /* CONFIG_MIPS_MT_FPAFF */ if (clone_flags & CLONE_SETTLS) ti->tp_value = regs->regs[7]; return 0; } /* Fill in the fpu structure for a core dump.. 
*/ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r) { memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu)); return 1; } void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs) { int i; for (i = 0; i < EF_R0; i++) gp[i] = 0; gp[EF_R0] = 0; for (i = 1; i <= 31; i++) gp[EF_R0 + i] = regs->regs[i]; gp[EF_R26] = 0; gp[EF_R27] = 0; gp[EF_LO] = regs->lo; gp[EF_HI] = regs->hi; gp[EF_CP0_EPC] = regs->cp0_epc; gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr; gp[EF_CP0_STATUS] = regs->cp0_status; gp[EF_CP0_CAUSE] = regs->cp0_cause; #ifdef EF_UNUSED0 gp[EF_UNUSED0] = 0; #endif } int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) { elf_dump_regs(*regs, task_pt_regs(tsk)); return 1; } int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr) { memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu)); return 1; } /* * Create a kernel thread */ static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *)) { do_exit(fn(arg)); } long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) { struct pt_regs regs; memset(&regs, 0, sizeof(regs)); regs.regs[4] = (unsigned long) arg; regs.regs[5] = (unsigned long) fn; regs.cp0_epc = (unsigned long) kernel_thread_helper; regs.cp0_status = read_c0_status(); #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) | ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2); #else regs.cp0_status |= ST0_EXL; #endif /* Ok, create the new process.. 
*/ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); } /* * */ struct mips_frame_info { void *func; unsigned long func_size; int frame_size; int pc_offset; }; static inline int is_ra_save_ins(union mips_instruction *ip) { /* sw / sd $ra, offset($sp) */ return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && ip->i_format.rs == 29 && ip->i_format.rt == 31; } static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) { if (ip->j_format.opcode == jal_op) return 1; if (ip->r_format.opcode != spec_op) return 0; return ip->r_format.func == jalr_op || ip->r_format.func == jr_op; } static inline int is_sp_move_ins(union mips_instruction *ip) { /* addiu/daddiu sp,sp,-imm */ if (ip->i_format.rs != 29 || ip->i_format.rt != 29) return 0; if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) return 1; return 0; } static int get_frame_info(struct mips_frame_info *info) { union mips_instruction *ip = info->func; unsigned max_insns = info->func_size / sizeof(union mips_instruction); unsigned i; info->pc_offset = -1; info->frame_size = 0; if (!ip) goto err; if (max_insns == 0) max_insns = 128U; /* unknown function size */ max_insns = min(128U, max_insns); for (i = 0; i < max_insns; i++, ip++) { if (is_jal_jalr_jr_ins(ip)) break; if (!info->frame_size) { if (is_sp_move_ins(ip)) info->frame_size = - ip->i_format.simmediate; continue; } if (info->pc_offset == -1 && is_ra_save_ins(ip)) { info->pc_offset = ip->i_format.simmediate / sizeof(long); break; } } if (info->frame_size && info->pc_offset >= 0) /* nested */ return 0; if (info->pc_offset < 0) /* leaf */ return 1; /* prologue seems boggus... 
*/ err: return -1; } static struct mips_frame_info schedule_mfi __read_mostly; static int __init frame_info_init(void) { unsigned long size = 0; #ifdef CONFIG_KALLSYMS unsigned long ofs; kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs); #endif schedule_mfi.func = schedule; schedule_mfi.func_size = size; get_frame_info(&schedule_mfi); /* * Without schedule() frame info, result given by * thread_saved_pc() and get_wchan() are not reliable. */ if (schedule_mfi.pc_offset < 0) printk("Can't analyze schedule() prologue at %p\n", schedule); return 0; } arch_initcall(frame_info_init); /* * Return saved PC of a blocked thread. */ unsigned long thread_saved_pc(struct task_struct *tsk) { struct thread_struct *t = &tsk->thread; /* New born processes are a special case */ if (t->reg31 == (unsigned long) ret_from_fork) return t->reg31; if (schedule_mfi.pc_offset < 0) return 0; return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset]; } #ifdef CONFIG_KALLSYMS /* generic stack unwinding function */ unsigned long notrace unwind_stack_by_address(unsigned long stack_page, unsigned long *sp, unsigned long pc, unsigned long *ra) { struct mips_frame_info info; unsigned long size, ofs; int leaf; extern void ret_from_irq(void); extern void ret_from_exception(void); if (!stack_page) return 0; /* * If we reached the bottom of interrupt context, * return saved pc in pt_regs. 
*/ if (pc == (unsigned long)ret_from_irq || pc == (unsigned long)ret_from_exception) { struct pt_regs *regs; if (*sp >= stack_page && *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) { regs = (struct pt_regs *)*sp; pc = regs->cp0_epc; if (__kernel_text_address(pc)) { *sp = regs->regs[29]; *ra = regs->regs[31]; return pc; } } return 0; } if (!kallsyms_lookup_size_offset(pc, &size, &ofs)) return 0; /* * Return ra if an exception occurred at the first instruction */ if (unlikely(ofs == 0)) { pc = *ra; *ra = 0; return pc; } info.func = (void *)(pc - ofs); info.func_size = ofs; /* analyze from start to ofs */ leaf = get_frame_info(&info); if (leaf < 0) return 0; if (*sp < stack_page || *sp + info.frame_size > stack_page + THREAD_SIZE - 32) return 0; if (leaf) /* * For some extreme cases, get_frame_info() can * consider wrongly a nested function as a leaf * one. In that cases avoid to return always the * same value. */ pc = pc != *ra ? *ra : 0; else pc = ((unsigned long *)(*sp))[info.pc_offset]; *sp += info.frame_size; *ra = 0; return __kernel_text_address(pc) ? pc : 0; } EXPORT_SYMBOL(unwind_stack_by_address); /* used by show_backtrace() */ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, unsigned long pc, unsigned long *ra) { unsigned long stack_page = (unsigned long)task_stack_page(task); return unwind_stack_by_address(stack_page, sp, pc, ra); } #endif /* * get_wchan - a maintenance nightmare^W^Wpain in the ass ... 
*/ unsigned long get_wchan(struct task_struct *task) { unsigned long pc = 0; #ifdef CONFIG_KALLSYMS unsigned long sp; unsigned long ra = 0; #endif if (!task || task == current || task->state == TASK_RUNNING) goto out; if (!task_stack_page(task)) goto out; pc = thread_saved_pc(task); #ifdef CONFIG_KALLSYMS sp = task->thread.reg29 + schedule_mfi.frame_size; while (in_sched_functions(pc)) pc = unwind_stack(task, &sp, pc, &ra); #endif out: return pc; } /* * Don't forget that the stack pointer must be aligned on a 8 bytes * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. */ unsigned long arch_align_stack(unsigned long sp) { if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) sp -= get_random_int() & ~PAGE_MASK; return sp & ALMASK; }
gpl-2.0
sjp38/linux.rpi
arch/sh/kernel/cpu/sh2a/setup-mxg.c
1750
5346
/* * Renesas MX-G (R8A03022BG) Setup * * Copyright (C) 2008, 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/serial_sci.h> #include <linux/sh_timer.h> enum { UNUSED = 0, /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, IRQ8, IRQ9, IRQ10, IRQ11, IRQ12, IRQ13, IRQ14, IRQ15, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, SINT8, SINT7, SINT6, SINT5, SINT4, SINT3, SINT2, SINT1, SCIF0, SCIF1, MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3, MTU2_GROUP4, MTU2_GROUP5, MTU2_TGI3B, MTU2_TGI3C, /* interrupt groups */ PINT, }; static struct intc_vect vectors[] __initdata = { INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65), INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69), INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71), INTC_IRQ(IRQ8, 72), INTC_IRQ(IRQ9, 73), INTC_IRQ(IRQ10, 74), INTC_IRQ(IRQ11, 75), INTC_IRQ(IRQ12, 76), INTC_IRQ(IRQ13, 77), INTC_IRQ(IRQ14, 78), INTC_IRQ(IRQ15, 79), INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81), INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83), INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87), INTC_IRQ(SINT8, 94), INTC_IRQ(SINT7, 95), INTC_IRQ(SINT6, 96), INTC_IRQ(SINT5, 97), INTC_IRQ(SINT4, 98), INTC_IRQ(SINT3, 99), INTC_IRQ(SINT2, 100), INTC_IRQ(SINT1, 101), INTC_IRQ(SCIF0, 220), INTC_IRQ(SCIF0, 221), INTC_IRQ(SCIF0, 222), INTC_IRQ(SCIF0, 223), INTC_IRQ(SCIF1, 224), INTC_IRQ(SCIF1, 225), INTC_IRQ(SCIF1, 226), INTC_IRQ(SCIF1, 227), INTC_IRQ(MTU2_GROUP1, 228), INTC_IRQ(MTU2_GROUP1, 229), INTC_IRQ(MTU2_GROUP1, 230), INTC_IRQ(MTU2_GROUP1, 231), INTC_IRQ(MTU2_GROUP1, 232), INTC_IRQ(MTU2_GROUP1, 233), INTC_IRQ(MTU2_GROUP2, 234), INTC_IRQ(MTU2_GROUP2, 235), INTC_IRQ(MTU2_GROUP2, 236), INTC_IRQ(MTU2_GROUP2, 237), INTC_IRQ(MTU2_GROUP2, 238), 
INTC_IRQ(MTU2_GROUP2, 239), INTC_IRQ(MTU2_GROUP3, 240), INTC_IRQ(MTU2_GROUP3, 241), INTC_IRQ(MTU2_GROUP3, 242), INTC_IRQ(MTU2_GROUP3, 243), INTC_IRQ(MTU2_TGI3B, 244), INTC_IRQ(MTU2_TGI3C, 245), INTC_IRQ(MTU2_GROUP4, 246), INTC_IRQ(MTU2_GROUP4, 247), INTC_IRQ(MTU2_GROUP4, 248), INTC_IRQ(MTU2_GROUP4, 249), INTC_IRQ(MTU2_GROUP4, 250), INTC_IRQ(MTU2_GROUP4, 251), INTC_IRQ(MTU2_GROUP5, 252), INTC_IRQ(MTU2_GROUP5, 253), INTC_IRQ(MTU2_GROUP5, 254), INTC_IRQ(MTU2_GROUP5, 255), }; static struct intc_group groups[] __initdata = { INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7), }; static struct intc_prio_reg prio_registers[] __initdata = { { 0xfffd9418, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, { 0xfffd941a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, { 0xfffd941c, 0, 16, 4, /* IPR03 */ { IRQ8, IRQ9, IRQ10, IRQ11 } }, { 0xfffd941e, 0, 16, 4, /* IPR04 */ { IRQ12, IRQ13, IRQ14, IRQ15 } }, { 0xfffd9420, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } }, { 0xfffd9800, 0, 16, 4, /* IPR06 */ { } }, { 0xfffd9802, 0, 16, 4, /* IPR07 */ { } }, { 0xfffd9804, 0, 16, 4, /* IPR08 */ { } }, { 0xfffd9806, 0, 16, 4, /* IPR09 */ { } }, { 0xfffd9808, 0, 16, 4, /* IPR10 */ { } }, { 0xfffd980a, 0, 16, 4, /* IPR11 */ { } }, { 0xfffd980c, 0, 16, 4, /* IPR12 */ { } }, { 0xfffd980e, 0, 16, 4, /* IPR13 */ { } }, { 0xfffd9810, 0, 16, 4, /* IPR14 */ { 0, 0, 0, SCIF0 } }, { 0xfffd9812, 0, 16, 4, /* IPR15 */ { SCIF1, MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3 } }, { 0xfffd9814, 0, 16, 4, /* IPR16 */ { MTU2_TGI3B, MTU2_TGI3C, MTU2_GROUP4, MTU2_GROUP5 } }, }; static struct intc_mask_reg mask_registers[] __initdata = { { 0xfffd9408, 0, 16, /* PINTER */ { 0, 0, 0, 0, 0, 0, 0, 0, PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } }, }; static DECLARE_INTC_DESC(intc_desc, "mxg", vectors, groups, mask_registers, prio_registers, NULL); static struct resource mtu2_resources[] = { DEFINE_RES_MEM(0xff801000, 0x400), DEFINE_RES_IRQ_NAMED(228, "tgi0a"), DEFINE_RES_IRQ_NAMED(234, 
"tgi1a"), DEFINE_RES_IRQ_NAMED(240, "tgi2a"), }; static struct platform_device mtu2_device = { .name = "sh-mtu2", .id = -1, .resource = mtu2_resources, .num_resources = ARRAY_SIZE(mtu2_resources), }; static struct plat_sci_port scif0_platform_data = { .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .type = PORT_SCIF, }; static struct resource scif0_resources[] = { DEFINE_RES_MEM(0xff804000, 0x100), DEFINE_RES_IRQ(220), }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .resource = scif0_resources, .num_resources = ARRAY_SIZE(scif0_resources), .dev = { .platform_data = &scif0_platform_data, }, }; static struct platform_device *mxg_devices[] __initdata = { &scif0_device, &mtu2_device, }; static int __init mxg_devices_setup(void) { return platform_add_devices(mxg_devices, ARRAY_SIZE(mxg_devices)); } arch_initcall(mxg_devices_setup); void __init plat_irq_setup(void) { register_intc_controller(&intc_desc); } static struct platform_device *mxg_early_devices[] __initdata = { &scif0_device, &mtu2_device, }; void __init plat_early_device_setup(void) { early_platform_add_devices(mxg_early_devices, ARRAY_SIZE(mxg_early_devices)); }
gpl-2.0
jonathanfisher/wl18xx
arch/sh/kernel/traps_32.c
1750
18859
/* * 'traps.c' handles hardware traps and faults after we have saved some * state in 'entry.S'. * * SuperH version: Copyright (C) 1999 Niibe Yutaka * Copyright (C) 2000 Philipp Rumpf * Copyright (C) 2000 David Howells * Copyright (C) 2002 - 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/ptrace.h> #include <linux/hardirq.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/kallsyms.h> #include <linux/io.h> #include <linux/bug.h> #include <linux/debug_locks.h> #include <linux/kdebug.h> #include <linux/limits.h> #include <linux/sysfs.h> #include <linux/uaccess.h> #include <linux/perf_event.h> #include <asm/alignment.h> #include <asm/fpu.h> #include <asm/kprobes.h> #include <asm/traps.h> #include <asm/bl_bit.h> #ifdef CONFIG_CPU_SH2 # define TRAP_RESERVED_INST 4 # define TRAP_ILLEGAL_SLOT_INST 6 # define TRAP_ADDRESS_ERROR 9 # ifdef CONFIG_CPU_SH2A # define TRAP_UBC 12 # define TRAP_FPU_ERROR 13 # define TRAP_DIVZERO_ERROR 17 # define TRAP_DIVOVF_ERROR 18 # endif #else #define TRAP_RESERVED_INST 12 #define TRAP_ILLEGAL_SLOT_INST 13 #endif static inline void sign_extend(unsigned int count, unsigned char *dst) { #ifdef __LITTLE_ENDIAN__ if ((count == 1) && dst[0] & 0x80) { dst[1] = 0xff; dst[2] = 0xff; dst[3] = 0xff; } if ((count == 2) && dst[1] & 0x80) { dst[2] = 0xff; dst[3] = 0xff; } #else if ((count == 1) && dst[3] & 0x80) { dst[2] = 0xff; dst[1] = 0xff; dst[0] = 0xff; } if ((count == 2) && dst[2] & 0x80) { dst[1] = 0xff; dst[0] = 0xff; } #endif } static struct mem_access user_mem_access = { copy_from_user, copy_to_user, }; /* * handle an instruction that does an unaligned memory access by emulating the * desired behaviour * - note that PC _may not_ point to the faulting instruction * (if that instruction is in a branch delay slot) * - return 0 if emulation okay, 
-EFAULT on existential error */ static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs, struct mem_access *ma) { int ret, index, count; unsigned long *rm, *rn; unsigned char *src, *dst; unsigned char __user *srcu, *dstu; index = (instruction>>8)&15; /* 0x0F00 */ rn = &regs->regs[index]; index = (instruction>>4)&15; /* 0x00F0 */ rm = &regs->regs[index]; count = 1<<(instruction&3); switch (count) { case 1: inc_unaligned_byte_access(); break; case 2: inc_unaligned_word_access(); break; case 4: inc_unaligned_dword_access(); break; case 8: inc_unaligned_multi_access(); break; } ret = -EFAULT; switch (instruction>>12) { case 0: /* mov.[bwl] to/from memory via r0+rn */ if (instruction & 8) { /* from memory */ srcu = (unsigned char __user *)*rm; srcu += regs->regs[0]; dst = (unsigned char *)rn; *(unsigned long *)dst = 0; #if !defined(__LITTLE_ENDIAN__) dst += 4-count; #endif if (ma->from(dst, srcu, count)) goto fetch_fault; sign_extend(count, dst); } else { /* to memory */ src = (unsigned char *)rm; #if !defined(__LITTLE_ENDIAN__) src += 4-count; #endif dstu = (unsigned char __user *)*rn; dstu += regs->regs[0]; if (ma->to(dstu, src, count)) goto fetch_fault; } ret = 0; break; case 1: /* mov.l Rm,@(disp,Rn) */ src = (unsigned char*) rm; dstu = (unsigned char __user *)*rn; dstu += (instruction&0x000F)<<2; if (ma->to(dstu, src, 4)) goto fetch_fault; ret = 0; break; case 2: /* mov.[bwl] to memory, possibly with pre-decrement */ if (instruction & 4) *rn -= count; src = (unsigned char*) rm; dstu = (unsigned char __user *)*rn; #if !defined(__LITTLE_ENDIAN__) src += 4-count; #endif if (ma->to(dstu, src, count)) goto fetch_fault; ret = 0; break; case 5: /* mov.l @(disp,Rm),Rn */ srcu = (unsigned char __user *)*rm; srcu += (instruction & 0x000F) << 2; dst = (unsigned char *)rn; *(unsigned long *)dst = 0; if (ma->from(dst, srcu, 4)) goto fetch_fault; ret = 0; break; case 6: /* mov.[bwl] from memory, possibly with post-increment */ srcu = (unsigned char __user 
*)*rm; if (instruction & 4) *rm += count; dst = (unsigned char*) rn; *(unsigned long*)dst = 0; #if !defined(__LITTLE_ENDIAN__) dst += 4-count; #endif if (ma->from(dst, srcu, count)) goto fetch_fault; sign_extend(count, dst); ret = 0; break; case 8: switch ((instruction&0xFF00)>>8) { case 0x81: /* mov.w R0,@(disp,Rn) */ src = (unsigned char *) &regs->regs[0]; #if !defined(__LITTLE_ENDIAN__) src += 2; #endif dstu = (unsigned char __user *)*rm; /* called Rn in the spec */ dstu += (instruction & 0x000F) << 1; if (ma->to(dstu, src, 2)) goto fetch_fault; ret = 0; break; case 0x85: /* mov.w @(disp,Rm),R0 */ srcu = (unsigned char __user *)*rm; srcu += (instruction & 0x000F) << 1; dst = (unsigned char *) &regs->regs[0]; *(unsigned long *)dst = 0; #if !defined(__LITTLE_ENDIAN__) dst += 2; #endif if (ma->from(dst, srcu, 2)) goto fetch_fault; sign_extend(2, dst); ret = 0; break; } break; case 9: /* mov.w @(disp,PC),Rn */ srcu = (unsigned char __user *)regs->pc; srcu += 4; srcu += (instruction & 0x00FF) << 1; dst = (unsigned char *)rn; *(unsigned long *)dst = 0; #if !defined(__LITTLE_ENDIAN__) dst += 2; #endif if (ma->from(dst, srcu, 2)) goto fetch_fault; sign_extend(2, dst); ret = 0; break; case 0xd: /* mov.l @(disp,PC),Rn */ srcu = (unsigned char __user *)(regs->pc & ~0x3); srcu += 4; srcu += (instruction & 0x00FF) << 2; dst = (unsigned char *)rn; *(unsigned long *)dst = 0; if (ma->from(dst, srcu, 4)) goto fetch_fault; ret = 0; break; } return ret; fetch_fault: /* Argh. Address not only misaligned but also non-existent. 
* Raise an EFAULT and see if it's trapped */ die_if_no_fixup("Fault in unaligned fixup", regs, 0); return -EFAULT; } /* * emulate the instruction in the delay slot * - fetches the instruction from PC+2 */ static inline int handle_delayslot(struct pt_regs *regs, insn_size_t old_instruction, struct mem_access *ma) { insn_size_t instruction; void __user *addr = (void __user *)(regs->pc + instruction_size(old_instruction)); if (copy_from_user(&instruction, addr, sizeof(instruction))) { /* the instruction-fetch faulted */ if (user_mode(regs)) return -EFAULT; /* kernel */ die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0); } return handle_unaligned_ins(instruction, regs, ma); } /* * handle an instruction that does an unaligned memory access * - have to be careful of branch delay-slot instructions that fault * SH3: * - if the branch would be taken PC points to the branch * - if the branch would not be taken, PC points to delay-slot * SH4: * - PC always points to delayed branch * - return 0 if handled, -EFAULT if failed (may not return if in kernel) */ /* Macros to determine offset from current PC for branch instructions */ /* Explicit type coercion is used to force sign extension where needed */ #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4) #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4) int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, struct mem_access *ma, int expected, unsigned long address) { u_int rm; int ret, index; /* * XXX: We can't handle mixed 16/32-bit instructions yet */ if (instruction_size(instruction) != 2) return -EINVAL; index = (instruction>>8)&15; /* 0x0F00 */ rm = regs->regs[index]; /* * Log the unexpected fixups, and then pass them on to perf. * * We intentionally don't report the expected cases to perf as * otherwise the trapped I/O case will skew the results too much * to be useful. 
*/ if (!expected) { unaligned_fixups_notify(current, instruction, regs); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); } ret = -EFAULT; switch (instruction&0xF000) { case 0x0000: if (instruction==0x000B) { /* rts */ ret = handle_delayslot(regs, instruction, ma); if (ret==0) regs->pc = regs->pr; } else if ((instruction&0x00FF)==0x0023) { /* braf @Rm */ ret = handle_delayslot(regs, instruction, ma); if (ret==0) regs->pc += rm + 4; } else if ((instruction&0x00FF)==0x0003) { /* bsrf @Rm */ ret = handle_delayslot(regs, instruction, ma); if (ret==0) { regs->pr = regs->pc + 4; regs->pc += rm + 4; } } else { /* mov.[bwl] to/from memory via r0+rn */ goto simple; } break; case 0x1000: /* mov.l Rm,@(disp,Rn) */ goto simple; case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */ goto simple; case 0x4000: if ((instruction&0x00FF)==0x002B) { /* jmp @Rm */ ret = handle_delayslot(regs, instruction, ma); if (ret==0) regs->pc = rm; } else if ((instruction&0x00FF)==0x000B) { /* jsr @Rm */ ret = handle_delayslot(regs, instruction, ma); if (ret==0) { regs->pr = regs->pc + 4; regs->pc = rm; } } else { /* mov.[bwl] to/from memory via r0+rn */ goto simple; } break; case 0x5000: /* mov.l @(disp,Rm),Rn */ goto simple; case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */ goto simple; case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */ switch (instruction&0x0F00) { case 0x0100: /* mov.w R0,@(disp,Rm) */ goto simple; case 0x0500: /* mov.w @(disp,Rm),R0 */ goto simple; case 0x0B00: /* bf lab - no delayslot*/ ret = 0; break; case 0x0F00: /* bf/s lab */ ret = handle_delayslot(regs, instruction, ma); if (ret==0) { #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) if ((regs->sr & 0x00000001) != 0) regs->pc += 4; /* next after slot */ else #endif regs->pc += SH_PC_8BIT_OFFSET(instruction); } break; case 0x0900: /* bt lab - no delayslot */ ret = 0; break; case 0x0D00: /* bt/s lab */ ret = handle_delayslot(regs, instruction, ma); if 
(ret==0) { #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) if ((regs->sr & 0x00000001) == 0) regs->pc += 4; /* next after slot */ else #endif regs->pc += SH_PC_8BIT_OFFSET(instruction); } break; } break; case 0x9000: /* mov.w @(disp,Rm),Rn */ goto simple; case 0xA000: /* bra label */ ret = handle_delayslot(regs, instruction, ma); if (ret==0) regs->pc += SH_PC_12BIT_OFFSET(instruction); break; case 0xB000: /* bsr label */ ret = handle_delayslot(regs, instruction, ma); if (ret==0) { regs->pr = regs->pc + 4; regs->pc += SH_PC_12BIT_OFFSET(instruction); } break; case 0xD000: /* mov.l @(disp,Rm),Rn */ goto simple; } return ret; /* handle non-delay-slot instruction */ simple: ret = handle_unaligned_ins(instruction, regs, ma); if (ret==0) regs->pc += instruction_size(instruction); return ret; } /* * Handle various address error exceptions: * - instruction address error: * misaligned PC * PC >= 0x80000000 in user mode * - data address error (read and write) * misaligned data access * access to >= 0x80000000 is user mode * Unfortuntaly we can't distinguish between instruction address error * and data address errors caused by read accesses. 
*/ asmlinkage void do_address_error(struct pt_regs *regs, unsigned long writeaccess, unsigned long address) { unsigned long error_code = 0; mm_segment_t oldfs; siginfo_t info; insn_size_t instruction; int tmp; /* Intentional ifdef */ #ifdef CONFIG_CPU_HAS_SR_RB error_code = lookup_exception_vector(); #endif oldfs = get_fs(); if (user_mode(regs)) { int si_code = BUS_ADRERR; unsigned int user_action; local_irq_enable(); inc_unaligned_user_access(); set_fs(USER_DS); if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1), sizeof(instruction))) { set_fs(oldfs); goto uspace_segv; } set_fs(oldfs); /* shout about userspace fixups */ unaligned_fixups_notify(current, instruction, regs); user_action = unaligned_user_action(); if (user_action & UM_FIXUP) goto fixup; if (user_action & UM_SIGNAL) goto uspace_segv; else { /* ignore */ regs->pc += instruction_size(instruction); return; } fixup: /* bad PC is not something we can fix */ if (regs->pc & 1) { si_code = BUS_ADRALN; goto uspace_segv; } set_fs(USER_DS); tmp = handle_unaligned_access(instruction, regs, &user_mem_access, 0, address); set_fs(oldfs); if (tmp == 0) return; /* sorted */ uspace_segv: printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned " "access (PC %lx PR %lx)\n", current->comm, regs->pc, regs->pr); info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = si_code; info.si_addr = (void __user *)address; force_sig_info(SIGBUS, &info, current); } else { inc_unaligned_kernel_access(); if (regs->pc & 1) die("unaligned program counter", regs, error_code); set_fs(KERNEL_DS); if (copy_from_user(&instruction, (void __user *)(regs->pc), sizeof(instruction))) { /* Argh. Fault on the instruction itself. 
This should never happen non-SMP */ set_fs(oldfs); die("insn faulting in do_address_error", regs, 0); } unaligned_fixups_notify(current, instruction, regs); handle_unaligned_access(instruction, regs, &user_mem_access, 0, address); set_fs(oldfs); } } #ifdef CONFIG_SH_DSP /* * SH-DSP support gerg@snapgear.com. */ int is_dsp_inst(struct pt_regs *regs) { unsigned short inst = 0; /* * Safe guard if DSP mode is already enabled or we're lacking * the DSP altogether. */ if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP)) return 0; get_user(inst, ((unsigned short *) regs->pc)); inst &= 0xf000; /* Check for any type of DSP or support instruction */ if ((inst == 0xf000) || (inst == 0x4000)) return 1; return 0; } #else #define is_dsp_inst(regs) (0) #endif /* CONFIG_SH_DSP */ #ifdef CONFIG_CPU_SH2A asmlinkage void do_divide_error(unsigned long r4) { siginfo_t info; switch (r4) { case TRAP_DIVZERO_ERROR: info.si_code = FPE_INTDIV; break; case TRAP_DIVOVF_ERROR: info.si_code = FPE_INTOVF; break; } force_sig_info(SIGFPE, &info, current); } #endif asmlinkage void do_reserved_inst(void) { struct pt_regs *regs = current_pt_regs(); unsigned long error_code; struct task_struct *tsk = current; #ifdef CONFIG_SH_FPU_EMU unsigned short inst = 0; int err; get_user(inst, (unsigned short*)regs->pc); err = do_fpu_inst(inst, regs); if (!err) { regs->pc += instruction_size(inst); return; } /* not a FPU inst. */ #endif #ifdef CONFIG_SH_DSP /* Check if it's a DSP instruction */ if (is_dsp_inst(regs)) { /* Enable DSP mode, and restart instruction. 
*/ regs->sr |= SR_DSP; /* Save DSP mode */ tsk->thread.dsp_status.status |= SR_DSP; return; } #endif error_code = lookup_exception_vector(); local_irq_enable(); force_sig(SIGILL, tsk); die_if_no_fixup("reserved instruction", regs, error_code); } #ifdef CONFIG_SH_FPU_EMU static int emulate_branch(unsigned short inst, struct pt_regs *regs) { /* * bfs: 8fxx: PC+=d*2+4; * bts: 8dxx: PC+=d*2+4; * bra: axxx: PC+=D*2+4; * bsr: bxxx: PC+=D*2+4 after PR=PC+4; * braf:0x23: PC+=Rn*2+4; * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4; * jmp: 4x2b: PC=Rn; * jsr: 4x0b: PC=Rn after PR=PC+4; * rts: 000b: PC=PR; */ if (((inst & 0xf000) == 0xb000) || /* bsr */ ((inst & 0xf0ff) == 0x0003) || /* bsrf */ ((inst & 0xf0ff) == 0x400b)) /* jsr */ regs->pr = regs->pc + 4; if ((inst & 0xfd00) == 0x8d00) { /* bfs, bts */ regs->pc += SH_PC_8BIT_OFFSET(inst); return 0; } if ((inst & 0xe000) == 0xa000) { /* bra, bsr */ regs->pc += SH_PC_12BIT_OFFSET(inst); return 0; } if ((inst & 0xf0df) == 0x0003) { /* braf, bsrf */ regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4; return 0; } if ((inst & 0xf0df) == 0x400b) { /* jmp, jsr */ regs->pc = regs->regs[(inst & 0x0f00) >> 8]; return 0; } if ((inst & 0xffff) == 0x000b) { /* rts */ regs->pc = regs->pr; return 0; } return 1; } #endif asmlinkage void do_illegal_slot_inst(void) { struct pt_regs *regs = current_pt_regs(); unsigned long inst; struct task_struct *tsk = current; if (kprobe_handle_illslot(regs->pc) == 0) return; #ifdef CONFIG_SH_FPU_EMU get_user(inst, (unsigned short *)regs->pc + 1); if (!do_fpu_inst(inst, regs)) { get_user(inst, (unsigned short *)regs->pc); if (!emulate_branch(inst, regs)) return; /* fault in branch.*/ } /* not a FPU inst. 
*/ #endif inst = lookup_exception_vector(); local_irq_enable(); force_sig(SIGILL, tsk); die_if_no_fixup("illegal slot instruction", regs, inst); } asmlinkage void do_exception_error(void) { long ex; ex = lookup_exception_vector(); die_if_kernel("exception", current_pt_regs(), ex); } void per_cpu_trap_init(void) { extern void *vbr_base; /* NOTE: The VBR value should be at P1 (or P2, virtural "fixed" address space). It's definitely should not in physical address. */ asm volatile("ldc %0, vbr" : /* no output */ : "r" (&vbr_base) : "memory"); /* disable exception blocking now when the vbr has been setup */ clear_bl_bit(); } void *set_exception_table_vec(unsigned int vec, void *handler) { extern void *exception_handling_table[]; void *old_handler; old_handler = exception_handling_table[vec]; exception_handling_table[vec] = handler; return old_handler; } void __init trap_init(void) { set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst); set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst); #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \ defined(CONFIG_SH_FPU_EMU) /* * For SH-4 lacking an FPU, treat floating point instructions as * reserved. They'll be handled in the math-emu case, or faulted on * otherwise. */ set_exception_table_evt(0x800, do_reserved_inst); set_exception_table_evt(0x820, do_illegal_slot_inst); #elif defined(CONFIG_SH_FPU) set_exception_table_evt(0x800, fpu_state_restore_trap_handler); set_exception_table_evt(0x820, fpu_state_restore_trap_handler); #endif #ifdef CONFIG_CPU_SH2 set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler); #endif #ifdef CONFIG_CPU_SH2A set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error); set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error); #ifdef CONFIG_SH_FPU set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler); #endif #endif #ifdef TRAP_UBC set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler); #endif }
gpl-2.0
TeamDS/htc-kernel-doubleshot_26
drivers/mtd/maps/l440gx.c
2006
3754
/* * BIOS Flash chip on Intel 440GX board. * * Bugs this currently does not work under linuxBIOS. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #define PIIXE_IOBASE_RESOURCE 11 #define WINDOW_ADDR 0xfff00000 #define WINDOW_SIZE 0x00100000 #define BUSWIDTH 1 static u32 iobase; #define IOBASE iobase #define TRIBUF_PORT (IOBASE+0x37) #define VPP_PORT (IOBASE+0x28) static struct mtd_info *mymtd; /* Is this really the vpp port? */ static void l440gx_set_vpp(struct map_info *map, int vpp) { unsigned long l; l = inl(VPP_PORT); if (vpp) { l |= 1; } else { l &= ~1; } outl(l, VPP_PORT); } static struct map_info l440gx_map = { .name = "L440GX BIOS", .size = WINDOW_SIZE, .bankwidth = BUSWIDTH, .phys = WINDOW_ADDR, #if 0 /* FIXME verify that this is the * appripriate code for vpp enable/disable */ .set_vpp = l440gx_set_vpp #endif }; static int __init init_l440gx(void) { struct pci_dev *dev, *pm_dev; struct resource *pm_iobase; __u16 word; dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, NULL); pm_dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, NULL); pci_dev_put(dev); if (!dev || !pm_dev) { printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n"); pci_dev_put(pm_dev); return -ENODEV; } l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE); if (!l440gx_map.virt) { printk(KERN_WARNING "Failed to ioremap L440GX flash region\n"); pci_dev_put(pm_dev); return -ENOMEM; } simple_map_init(&l440gx_map); printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt); /* Setup the pm iobase resource * This code should move into some kind of generic bridge * driver but for the moment I'm content with getting the * allocation correct. 
*/ pm_iobase = &pm_dev->resource[PIIXE_IOBASE_RESOURCE]; if (!(pm_iobase->flags & IORESOURCE_IO)) { pm_iobase->name = "pm iobase"; pm_iobase->start = 0; pm_iobase->end = 63; pm_iobase->flags = IORESOURCE_IO; /* Put the current value in the resource */ pci_read_config_dword(pm_dev, 0x40, &iobase); iobase &= ~1; pm_iobase->start += iobase & ~1; pm_iobase->end += iobase & ~1; pci_dev_put(pm_dev); /* Allocate the resource region */ if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) { pci_dev_put(dev); pci_dev_put(pm_dev); printk(KERN_WARNING "Could not allocate pm iobase resource\n"); iounmap(l440gx_map.virt); return -ENXIO; } } /* Set the iobase */ iobase = pm_iobase->start; pci_write_config_dword(pm_dev, 0x40, iobase | 1); /* Set XBCS# */ pci_read_config_word(dev, 0x4e, &word); word |= 0x4; pci_write_config_word(dev, 0x4e, word); /* Supply write voltage to the chip */ l440gx_set_vpp(&l440gx_map, 1); /* Enable the gate on the WE line */ outb(inb(TRIBUF_PORT) & ~1, TRIBUF_PORT); printk(KERN_NOTICE "Enabled WE line to L440GX BIOS flash chip.\n"); mymtd = do_map_probe("jedec_probe", &l440gx_map); if (!mymtd) { printk(KERN_NOTICE "JEDEC probe on BIOS chip failed. Using ROM\n"); mymtd = do_map_probe("map_rom", &l440gx_map); } if (mymtd) { mymtd->owner = THIS_MODULE; add_mtd_device(mymtd); return 0; } iounmap(l440gx_map.virt); return -ENXIO; } static void __exit cleanup_l440gx(void) { del_mtd_device(mymtd); map_destroy(mymtd); iounmap(l440gx_map.virt); } module_init(init_l440gx); module_exit(cleanup_l440gx); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("MTD map driver for BIOS chips on Intel L440GX motherboards");
gpl-2.0
transi/kernel_amazon_bowser-common
fs/xfs/linux-2.6/xfs_aops.c
2518
37523
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_trans.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_alloc.h" #include "xfs_error.h" #include "xfs_rw.h" #include "xfs_iomap.h" #include "xfs_vnodeops.h" #include "xfs_trace.h" #include "xfs_bmap.h" #include <linux/gfp.h> #include <linux/mpage.h> #include <linux/pagevec.h> #include <linux/writeback.h> /* * Prime number of hash buckets since address is used as the key. 
*/ #define NVSYNC 37 #define to_ioend_wq(v) (&xfs_ioend_wq[((unsigned long)v) % NVSYNC]) static wait_queue_head_t xfs_ioend_wq[NVSYNC]; void __init xfs_ioend_init(void) { int i; for (i = 0; i < NVSYNC; i++) init_waitqueue_head(&xfs_ioend_wq[i]); } void xfs_ioend_wait( xfs_inode_t *ip) { wait_queue_head_t *wq = to_ioend_wq(ip); wait_event(*wq, (atomic_read(&ip->i_iocount) == 0)); } STATIC void xfs_ioend_wake( xfs_inode_t *ip) { if (atomic_dec_and_test(&ip->i_iocount)) wake_up(to_ioend_wq(ip)); } void xfs_count_page_state( struct page *page, int *delalloc, int *unwritten) { struct buffer_head *bh, *head; *delalloc = *unwritten = 0; bh = head = page_buffers(page); do { if (buffer_unwritten(bh)) (*unwritten) = 1; else if (buffer_delay(bh)) (*delalloc) = 1; } while ((bh = bh->b_this_page) != head); } STATIC struct block_device * xfs_find_bdev_for_inode( struct inode *inode) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; if (XFS_IS_REALTIME_INODE(ip)) return mp->m_rtdev_targp->bt_bdev; else return mp->m_ddev_targp->bt_bdev; } /* * We're now finished for good with this ioend structure. * Update the page state via the associated buffer_heads, * release holds on the inode and bio, and finally free * up memory. Do not use the ioend after this. */ STATIC void xfs_destroy_ioend( xfs_ioend_t *ioend) { struct buffer_head *bh, *next; struct xfs_inode *ip = XFS_I(ioend->io_inode); for (bh = ioend->io_buffer_head; bh; bh = next) { next = bh->b_private; bh->b_end_io(bh, !ioend->io_error); } /* * Volume managers supporting multiple paths can send back ENODEV * when the final path disappears. In this case continuing to fill * the page cache with dirty data which cannot be written out is * evil, so prevent that. 
*/ if (unlikely(ioend->io_error == -ENODEV)) { xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, __FILE__, __LINE__); } xfs_ioend_wake(ip); mempool_free(ioend, xfs_ioend_pool); } /* * If the end of the current ioend is beyond the current EOF, * return the new EOF value, otherwise zero. */ STATIC xfs_fsize_t xfs_ioend_new_eof( xfs_ioend_t *ioend) { xfs_inode_t *ip = XFS_I(ioend->io_inode); xfs_fsize_t isize; xfs_fsize_t bsize; bsize = ioend->io_offset + ioend->io_size; isize = MAX(ip->i_size, ip->i_new_size); isize = MIN(isize, bsize); return isize > ip->i_d.di_size ? isize : 0; } /* * Update on-disk file size now that data has been written to disk. The * current in-memory file size is i_size. If a write is beyond eof i_new_size * will be the intended file size until i_size is updated. If this write does * not extend all the way to the valid file size then restrict this update to * the end of the write. * * This function does not block as blocking on the inode lock in IO completion * can lead to IO completion order dependency deadlocks.. If it can't get the * inode ilock it will return EAGAIN. Callers must handle this. */ STATIC int xfs_setfilesize( xfs_ioend_t *ioend) { xfs_inode_t *ip = XFS_I(ioend->io_inode); xfs_fsize_t isize; if (unlikely(ioend->io_error)) return 0; if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) return EAGAIN; isize = xfs_ioend_new_eof(ioend); if (isize) { ip->i_d.di_size = isize; xfs_mark_inode_dirty(ip); } xfs_iunlock(ip, XFS_ILOCK_EXCL); return 0; } /* * Schedule IO completion handling on the final put of an ioend. */ STATIC void xfs_finish_ioend( struct xfs_ioend *ioend) { if (atomic_dec_and_test(&ioend->io_remaining)) { if (ioend->io_type == IO_UNWRITTEN) queue_work(xfsconvertd_workqueue, &ioend->io_work); else queue_work(xfsdatad_workqueue, &ioend->io_work); } } /* * IO write completion. 
*/ STATIC void xfs_end_io( struct work_struct *work) { xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work); struct xfs_inode *ip = XFS_I(ioend->io_inode); int error = 0; /* * For unwritten extents we need to issue transactions to convert a * range to normal written extens after the data I/O has finished. */ if (ioend->io_type == IO_UNWRITTEN && likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) { error = xfs_iomap_write_unwritten(ip, ioend->io_offset, ioend->io_size); if (error) ioend->io_error = error; } /* * We might have to update the on-disk file size after extending * writes. */ error = xfs_setfilesize(ioend); ASSERT(!error || error == EAGAIN); /* * If we didn't complete processing of the ioend, requeue it to the * tail of the workqueue for another attempt later. Otherwise destroy * it. */ if (error == EAGAIN) { atomic_inc(&ioend->io_remaining); xfs_finish_ioend(ioend); /* ensure we don't spin on blocked ioends */ delay(1); } else { if (ioend->io_iocb) aio_complete(ioend->io_iocb, ioend->io_result, 0); xfs_destroy_ioend(ioend); } } /* * Call IO completion handling in caller context on the final put of an ioend. */ STATIC void xfs_finish_ioend_sync( struct xfs_ioend *ioend) { if (atomic_dec_and_test(&ioend->io_remaining)) xfs_end_io(&ioend->io_work); } /* * Allocate and initialise an IO completion structure. * We need to track unwritten extent write completion here initially. * We'll need to extend this for updating the ondisk inode size later * (vs. incore size). */ STATIC xfs_ioend_t * xfs_alloc_ioend( struct inode *inode, unsigned int type) { xfs_ioend_t *ioend; ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS); /* * Set the count to 1 initially, which will prevent an I/O * completion callback from happening before we have started * all the I/O from calling the completion routine too early. 
*/ atomic_set(&ioend->io_remaining, 1); ioend->io_error = 0; ioend->io_list = NULL; ioend->io_type = type; ioend->io_inode = inode; ioend->io_buffer_head = NULL; ioend->io_buffer_tail = NULL; atomic_inc(&XFS_I(ioend->io_inode)->i_iocount); ioend->io_offset = 0; ioend->io_size = 0; ioend->io_iocb = NULL; ioend->io_result = 0; INIT_WORK(&ioend->io_work, xfs_end_io); return ioend; } STATIC int xfs_map_blocks( struct inode *inode, loff_t offset, struct xfs_bmbt_irec *imap, int type, int nonblocking) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; ssize_t count = 1 << inode->i_blkbits; xfs_fileoff_t offset_fsb, end_fsb; int error = 0; int bmapi_flags = XFS_BMAPI_ENTIRE; int nimaps = 1; if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); if (type == IO_UNWRITTEN) bmapi_flags |= XFS_BMAPI_IGSTATE; if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { if (nonblocking) return -XFS_ERROR(EAGAIN); xfs_ilock(ip, XFS_ILOCK_SHARED); } ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || (ip->i_df.if_flags & XFS_IFEXTENTS)); ASSERT(offset <= mp->m_maxioffset); if (offset + count > mp->m_maxioffset) count = mp->m_maxioffset - offset; end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); offset_fsb = XFS_B_TO_FSBT(mp, offset); error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb, bmapi_flags, NULL, 0, imap, &nimaps, NULL); xfs_iunlock(ip, XFS_ILOCK_SHARED); if (error) return -XFS_ERROR(error); if (type == IO_DELALLOC && (!nimaps || isnullstartblock(imap->br_startblock))) { error = xfs_iomap_write_allocate(ip, offset, count, imap); if (!error) trace_xfs_map_blocks_alloc(ip, offset, count, type, imap); return -XFS_ERROR(error); } #ifdef DEBUG if (type == IO_UNWRITTEN) { ASSERT(nimaps); ASSERT(imap->br_startblock != HOLESTARTBLOCK); ASSERT(imap->br_startblock != DELAYSTARTBLOCK); } #endif if (nimaps) trace_xfs_map_blocks_found(ip, offset, count, type, imap); return 0; } STATIC int xfs_imap_valid( struct inode *inode, struct xfs_bmbt_irec *imap, 
xfs_off_t offset) { offset >>= inode->i_blkbits; return offset >= imap->br_startoff && offset < imap->br_startoff + imap->br_blockcount; } /* * BIO completion handler for buffered IO. */ STATIC void xfs_end_bio( struct bio *bio, int error) { xfs_ioend_t *ioend = bio->bi_private; ASSERT(atomic_read(&bio->bi_cnt) >= 1); ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error; /* Toss bio and pass work off to an xfsdatad thread */ bio->bi_private = NULL; bio->bi_end_io = NULL; bio_put(bio); xfs_finish_ioend(ioend); } STATIC void xfs_submit_ioend_bio( struct writeback_control *wbc, xfs_ioend_t *ioend, struct bio *bio) { atomic_inc(&ioend->io_remaining); bio->bi_private = ioend; bio->bi_end_io = xfs_end_bio; /* * If the I/O is beyond EOF we mark the inode dirty immediately * but don't update the inode size until I/O completion. */ if (xfs_ioend_new_eof(ioend)) xfs_mark_inode_dirty(XFS_I(ioend->io_inode)); submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio); } STATIC struct bio * xfs_alloc_ioend_bio( struct buffer_head *bh) { int nvecs = bio_get_nr_vecs(bh->b_bdev); struct bio *bio = bio_alloc(GFP_NOIO, nvecs); ASSERT(bio->bi_private == NULL); bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_bdev = bh->b_bdev; return bio; } STATIC void xfs_start_buffer_writeback( struct buffer_head *bh) { ASSERT(buffer_mapped(bh)); ASSERT(buffer_locked(bh)); ASSERT(!buffer_delay(bh)); ASSERT(!buffer_unwritten(bh)); mark_buffer_async_write(bh); set_buffer_uptodate(bh); clear_buffer_dirty(bh); } STATIC void xfs_start_page_writeback( struct page *page, int clear_dirty, int buffers) { ASSERT(PageLocked(page)); ASSERT(!PageWriteback(page)); if (clear_dirty) clear_page_dirty_for_io(page); set_page_writeback(page); unlock_page(page); /* If no buffers on the page are to be written, finish it here */ if (!buffers) end_page_writeback(page); } static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh) { return bio_add_page(bio, bh->b_page, 
bh->b_size, bh_offset(bh)); } /* * Submit all of the bios for all of the ioends we have saved up, covering the * initial writepage page and also any probed pages. * * Because we may have multiple ioends spanning a page, we need to start * writeback on all the buffers before we submit them for I/O. If we mark the * buffers as we got, then we can end up with a page that only has buffers * marked async write and I/O complete on can occur before we mark the other * buffers async write. * * The end result of this is that we trip a bug in end_page_writeback() because * we call it twice for the one page as the code in end_buffer_async_write() * assumes that all buffers on the page are started at the same time. * * The fix is two passes across the ioend list - one to start writeback on the * buffer_heads, and then submit them for I/O on the second pass. */ STATIC void xfs_submit_ioend( struct writeback_control *wbc, xfs_ioend_t *ioend) { xfs_ioend_t *head = ioend; xfs_ioend_t *next; struct buffer_head *bh; struct bio *bio; sector_t lastblock = 0; /* Pass 1 - start writeback */ do { next = ioend->io_list; for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) xfs_start_buffer_writeback(bh); } while ((ioend = next) != NULL); /* Pass 2 - submit I/O */ ioend = head; do { next = ioend->io_list; bio = NULL; for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { if (!bio) { retry: bio = xfs_alloc_ioend_bio(bh); } else if (bh->b_blocknr != lastblock + 1) { xfs_submit_ioend_bio(wbc, ioend, bio); goto retry; } if (bio_add_buffer(bio, bh) != bh->b_size) { xfs_submit_ioend_bio(wbc, ioend, bio); goto retry; } lastblock = bh->b_blocknr; } if (bio) xfs_submit_ioend_bio(wbc, ioend, bio); xfs_finish_ioend(ioend); } while ((ioend = next) != NULL); } /* * Cancel submission of all buffer_heads so far in this endio. * Toss the endio too. Only ever called for the initial page * in a writepage request, so only ever one page. 
*/ STATIC void xfs_cancel_ioend( xfs_ioend_t *ioend) { xfs_ioend_t *next; struct buffer_head *bh, *next_bh; do { next = ioend->io_list; bh = ioend->io_buffer_head; do { next_bh = bh->b_private; clear_buffer_async_write(bh); unlock_buffer(bh); } while ((bh = next_bh) != NULL); xfs_ioend_wake(XFS_I(ioend->io_inode)); mempool_free(ioend, xfs_ioend_pool); } while ((ioend = next) != NULL); } /* * Test to see if we've been building up a completion structure for * earlier buffers -- if so, we try to append to this ioend if we * can, otherwise we finish off any current ioend and start another. * Return true if we've finished the given ioend. */ STATIC void xfs_add_to_ioend( struct inode *inode, struct buffer_head *bh, xfs_off_t offset, unsigned int type, xfs_ioend_t **result, int need_ioend) { xfs_ioend_t *ioend = *result; if (!ioend || need_ioend || type != ioend->io_type) { xfs_ioend_t *previous = *result; ioend = xfs_alloc_ioend(inode, type); ioend->io_offset = offset; ioend->io_buffer_head = bh; ioend->io_buffer_tail = bh; if (previous) previous->io_list = ioend; *result = ioend; } else { ioend->io_buffer_tail->b_private = bh; ioend->io_buffer_tail = bh; } bh->b_private = NULL; ioend->io_size += bh->b_size; } STATIC void xfs_map_buffer( struct inode *inode, struct buffer_head *bh, struct xfs_bmbt_irec *imap, xfs_off_t offset) { sector_t bn; struct xfs_mount *m = XFS_I(inode)->i_mount; xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff); xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock); ASSERT(imap->br_startblock != HOLESTARTBLOCK); ASSERT(imap->br_startblock != DELAYSTARTBLOCK); bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) + ((offset - iomap_offset) >> inode->i_blkbits); ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode))); bh->b_blocknr = bn; set_buffer_mapped(bh); } STATIC void xfs_map_at_offset( struct inode *inode, struct buffer_head *bh, struct xfs_bmbt_irec *imap, xfs_off_t offset) { ASSERT(imap->br_startblock != HOLESTARTBLOCK); 
ASSERT(imap->br_startblock != DELAYSTARTBLOCK); xfs_map_buffer(inode, bh, imap, offset); set_buffer_mapped(bh); clear_buffer_delay(bh); clear_buffer_unwritten(bh); } /* * Test if a given page is suitable for writing as part of an unwritten * or delayed allocate extent. */ STATIC int xfs_is_delayed_page( struct page *page, unsigned int type) { if (PageWriteback(page)) return 0; if (page->mapping && page_has_buffers(page)) { struct buffer_head *bh, *head; int acceptable = 0; bh = head = page_buffers(page); do { if (buffer_unwritten(bh)) acceptable = (type == IO_UNWRITTEN); else if (buffer_delay(bh)) acceptable = (type == IO_DELALLOC); else if (buffer_dirty(bh) && buffer_mapped(bh)) acceptable = (type == IO_OVERWRITE); else break; } while ((bh = bh->b_this_page) != head); if (acceptable) return 1; } return 0; } /* * Allocate & map buffers for page given the extent map. Write it out. * except for the original page of a writepage, this is called on * delalloc/unwritten pages only, for the original page it is possible * that the page has no mapping at all. */ STATIC int xfs_convert_page( struct inode *inode, struct page *page, loff_t tindex, struct xfs_bmbt_irec *imap, xfs_ioend_t **ioendp, struct writeback_control *wbc) { struct buffer_head *bh, *head; xfs_off_t end_offset; unsigned long p_offset; unsigned int type; int len, page_dirty; int count = 0, done = 0, uptodate = 1; xfs_off_t offset = page_offset(page); if (page->index != tindex) goto fail; if (!trylock_page(page)) goto fail; if (PageWriteback(page)) goto fail_unlock_page; if (page->mapping != inode->i_mapping) goto fail_unlock_page; if (!xfs_is_delayed_page(page, (*ioendp)->io_type)) goto fail_unlock_page; /* * page_dirty is initially a count of buffers on the page before * EOF and is decremented as we move each into a cleanable state. * * Derivation: * * End offset is the highest offset that this page should represent. 
* If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1)) * will evaluate non-zero and be less than PAGE_CACHE_SIZE and * hence give us the correct page_dirty count. On any other page, * it will be zero and in that case we need page_dirty to be the * count of buffers on the page. */ end_offset = min_t(unsigned long long, (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, i_size_read(inode)); len = 1 << inode->i_blkbits; p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1), PAGE_CACHE_SIZE); p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE; page_dirty = p_offset / len; bh = head = page_buffers(page); do { if (offset >= end_offset) break; if (!buffer_uptodate(bh)) uptodate = 0; if (!(PageUptodate(page) || buffer_uptodate(bh))) { done = 1; continue; } if (buffer_unwritten(bh) || buffer_delay(bh) || buffer_mapped(bh)) { if (buffer_unwritten(bh)) type = IO_UNWRITTEN; else if (buffer_delay(bh)) type = IO_DELALLOC; else type = IO_OVERWRITE; if (!xfs_imap_valid(inode, imap, offset)) { done = 1; continue; } lock_buffer(bh); if (type != IO_OVERWRITE) xfs_map_at_offset(inode, bh, imap, offset); xfs_add_to_ioend(inode, bh, offset, type, ioendp, done); page_dirty--; count++; } else { done = 1; } } while (offset += len, (bh = bh->b_this_page) != head); if (uptodate && bh == head) SetPageUptodate(page); if (count) { if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) done = 1; } xfs_start_page_writeback(page, !page_dirty, count); return done; fail_unlock_page: unlock_page(page); fail: return 1; } /* * Convert & write out a cluster of pages in the same extent as defined * by mp and following the start page. 
*/ STATIC void xfs_cluster_write( struct inode *inode, pgoff_t tindex, struct xfs_bmbt_irec *imap, xfs_ioend_t **ioendp, struct writeback_control *wbc, pgoff_t tlast) { struct pagevec pvec; int done = 0, i; pagevec_init(&pvec, 0); while (!done && tindex <= tlast) { unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1); if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len)) break; for (i = 0; i < pagevec_count(&pvec); i++) { done = xfs_convert_page(inode, pvec.pages[i], tindex++, imap, ioendp, wbc); if (done) break; } pagevec_release(&pvec); cond_resched(); } } STATIC void xfs_vm_invalidatepage( struct page *page, unsigned long offset) { trace_xfs_invalidatepage(page->mapping->host, page, offset); block_invalidatepage(page, offset); } /* * If the page has delalloc buffers on it, we need to punch them out before we * invalidate the page. If we don't, we leave a stale delalloc mapping on the * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read * is done on that same region - the delalloc extent is returned when none is * supposed to be there. * * We prevent this by truncating away the delalloc regions on the page before * invalidating it. Because they are delalloc, we can do this without needing a * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this * truncation without a transaction as there is no space left for block * reservation (typically why we see a ENOSPC in writeback). * * This is not a performance critical path, so for now just do the punching a * buffer head at a time. 
*/ STATIC void xfs_aops_discard_page( struct page *page) { struct inode *inode = page->mapping->host; struct xfs_inode *ip = XFS_I(inode); struct buffer_head *bh, *head; loff_t offset = page_offset(page); if (!xfs_is_delayed_page(page, IO_DELALLOC)) goto out_invalidate; if (XFS_FORCED_SHUTDOWN(ip->i_mount)) goto out_invalidate; xfs_alert(ip->i_mount, "page discard on page %p, inode 0x%llx, offset %llu.", page, ip->i_ino, offset); xfs_ilock(ip, XFS_ILOCK_EXCL); bh = head = page_buffers(page); do { int error; xfs_fileoff_t start_fsb; if (!buffer_delay(bh)) goto next_buffer; start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset); error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1); if (error) { /* something screwed, just bail */ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { xfs_alert(ip->i_mount, "page discard unable to remove delalloc mapping."); } break; } next_buffer: offset += 1 << inode->i_blkbits; } while ((bh = bh->b_this_page) != head); xfs_iunlock(ip, XFS_ILOCK_EXCL); out_invalidate: xfs_vm_invalidatepage(page, 0); return; } /* * Write out a dirty page. * * For delalloc space on the page we need to allocate space and flush it. * For unwritten space on the page we need to start the conversion to * regular allocated space. * For any other dirty buffer heads on the page we should flush them. * * If we detect that a transaction would be required to flush the page, we * have to check the process flags first, if we are already in a transaction * or disk I/O during allocations is off, we need to fail the writepage and * redirty the page. 
*/ STATIC int xfs_vm_writepage( struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; int delalloc, unwritten; struct buffer_head *bh, *head; struct xfs_bmbt_irec imap; xfs_ioend_t *ioend = NULL, *iohead = NULL; loff_t offset; unsigned int type; __uint64_t end_offset; pgoff_t end_index, last_index; ssize_t len; int err, imap_valid = 0, uptodate = 1; int count = 0; int nonblocking = 0; trace_xfs_writepage(inode, page, 0); ASSERT(page_has_buffers(page)); /* * Refuse to write the page out if we are called from reclaim context. * * This avoids stack overflows when called from deeply used stacks in * random callers for direct reclaim or memcg reclaim. We explicitly * allow reclaim from kswapd as the stack usage there is relatively low. * * This should really be done by the core VM, but until that happens * filesystems like XFS, btrfs and ext4 have to take care of this * by themselves. */ if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC) goto redirty; /* * We need a transaction if there are delalloc or unwritten buffers * on the page. * * If we need a transaction and the process flags say we are already * in a transaction, or no IO is allowed then mark the page dirty * again and leave the page as is. */ xfs_count_page_state(page, &delalloc, &unwritten); if ((current->flags & PF_FSTRANS) && (delalloc || unwritten)) goto redirty; /* Is this page beyond the end of the file? 
*/ offset = i_size_read(inode); end_index = offset >> PAGE_CACHE_SHIFT; last_index = (offset - 1) >> PAGE_CACHE_SHIFT; if (page->index >= end_index) { if ((page->index >= end_index + 1) || !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) { unlock_page(page); return 0; } } end_offset = min_t(unsigned long long, (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset); len = 1 << inode->i_blkbits; bh = head = page_buffers(page); offset = page_offset(page); type = IO_OVERWRITE; if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking) nonblocking = 1; do { int new_ioend = 0; if (offset >= end_offset) break; if (!buffer_uptodate(bh)) uptodate = 0; /* * set_page_dirty dirties all buffers in a page, independent * of their state. The dirty state however is entirely * meaningless for holes (!mapped && uptodate), so skip * buffers covering holes here. */ if (!buffer_mapped(bh) && buffer_uptodate(bh)) { imap_valid = 0; continue; } if (buffer_unwritten(bh)) { if (type != IO_UNWRITTEN) { type = IO_UNWRITTEN; imap_valid = 0; } } else if (buffer_delay(bh)) { if (type != IO_DELALLOC) { type = IO_DELALLOC; imap_valid = 0; } } else if (buffer_uptodate(bh)) { if (type != IO_OVERWRITE) { type = IO_OVERWRITE; imap_valid = 0; } } else { if (PageUptodate(page)) { ASSERT(buffer_mapped(bh)); imap_valid = 0; } continue; } if (imap_valid) imap_valid = xfs_imap_valid(inode, &imap, offset); if (!imap_valid) { /* * If we didn't have a valid mapping then we need to * put the new mapping into a separate ioend structure. * This ensures non-contiguous extents always have * separate ioends, which is particularly important * for unwritten extent conversion at I/O completion * time. 
*/ new_ioend = 1; err = xfs_map_blocks(inode, offset, &imap, type, nonblocking); if (err) goto error; imap_valid = xfs_imap_valid(inode, &imap, offset); } if (imap_valid) { lock_buffer(bh); if (type != IO_OVERWRITE) xfs_map_at_offset(inode, bh, &imap, offset); xfs_add_to_ioend(inode, bh, offset, type, &ioend, new_ioend); count++; } if (!iohead) iohead = ioend; } while (offset += len, ((bh = bh->b_this_page) != head)); if (uptodate && bh == head) SetPageUptodate(page); xfs_start_page_writeback(page, 1, count); if (ioend && imap_valid) { xfs_off_t end_index; end_index = imap.br_startoff + imap.br_blockcount; /* to bytes */ end_index <<= inode->i_blkbits; /* to pages */ end_index = (end_index - 1) >> PAGE_CACHE_SHIFT; /* check against file size */ if (end_index > last_index) end_index = last_index; xfs_cluster_write(inode, page->index + 1, &imap, &ioend, wbc, end_index); } if (iohead) xfs_submit_ioend(wbc, iohead); return 0; error: if (iohead) xfs_cancel_ioend(iohead); if (err == -EAGAIN) goto redirty; xfs_aops_discard_page(page); ClearPageUptodate(page); unlock_page(page); return err; redirty: redirty_page_for_writepage(wbc, page); unlock_page(page); return 0; } STATIC int xfs_vm_writepages( struct address_space *mapping, struct writeback_control *wbc) { xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); return generic_writepages(mapping, wbc); } /* * Called to move a page into cleanable state - and from there * to be released. The page should already be clean. We always * have buffer heads in this call. * * Returns 1 if the page is ok to release, 0 otherwise. 
*/ STATIC int xfs_vm_releasepage( struct page *page, gfp_t gfp_mask) { int delalloc, unwritten; trace_xfs_releasepage(page->mapping->host, page, 0); xfs_count_page_state(page, &delalloc, &unwritten); if (WARN_ON(delalloc)) return 0; if (WARN_ON(unwritten)) return 0; return try_to_free_buffers(page); } STATIC int __xfs_get_blocks( struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create, int direct) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t offset_fsb, end_fsb; int error = 0; int lockmode = 0; struct xfs_bmbt_irec imap; int nimaps = 1; xfs_off_t offset; ssize_t size; int new = 0; if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); offset = (xfs_off_t)iblock << inode->i_blkbits; ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); size = bh_result->b_size; if (!create && direct && offset >= i_size_read(inode)) return 0; if (create) { lockmode = XFS_ILOCK_EXCL; xfs_ilock(ip, lockmode); } else { lockmode = xfs_ilock_map_shared(ip); } ASSERT(offset <= mp->m_maxioffset); if (offset + size > mp->m_maxioffset) size = mp->m_maxioffset - offset; end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); offset_fsb = XFS_B_TO_FSBT(mp, offset); error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb, XFS_BMAPI_ENTIRE, NULL, 0, &imap, &nimaps, NULL); if (error) goto out_unlock; if (create && (!nimaps || (imap.br_startblock == HOLESTARTBLOCK || imap.br_startblock == DELAYSTARTBLOCK))) { if (direct) { error = xfs_iomap_write_direct(ip, offset, size, &imap, nimaps); } else { error = xfs_iomap_write_delay(ip, offset, size, &imap); } if (error) goto out_unlock; trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap); } else if (nimaps) { trace_xfs_get_blocks_found(ip, offset, size, 0, &imap); } else { trace_xfs_get_blocks_notfound(ip, offset, size); goto out_unlock; } xfs_iunlock(ip, lockmode); if (imap.br_startblock != HOLESTARTBLOCK && imap.br_startblock != DELAYSTARTBLOCK) { /* * For unwritten extents do 
not report a disk address on * the read case (treat as if we're reading into a hole). */ if (create || !ISUNWRITTEN(&imap)) xfs_map_buffer(inode, bh_result, &imap, offset); if (create && ISUNWRITTEN(&imap)) { if (direct) bh_result->b_private = inode; set_buffer_unwritten(bh_result); } } /* * If this is a realtime file, data may be on a different device. * to that pointed to from the buffer_head b_bdev currently. */ bh_result->b_bdev = xfs_find_bdev_for_inode(inode); /* * If we previously allocated a block out beyond eof and we are now * coming back to use it then we will need to flag it as new even if it * has a disk address. * * With sub-block writes into unwritten extents we also need to mark * the buffer as new so that the unwritten parts of the buffer gets * correctly zeroed. */ if (create && ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) || (offset >= i_size_read(inode)) || (new || ISUNWRITTEN(&imap)))) set_buffer_new(bh_result); if (imap.br_startblock == DELAYSTARTBLOCK) { BUG_ON(direct); if (create) { set_buffer_uptodate(bh_result); set_buffer_mapped(bh_result); set_buffer_delay(bh_result); } } /* * If this is O_DIRECT or the mpage code calling tell them how large * the mapping is, so that we can avoid repeated get_blocks calls. 
*/ if (direct || size > (1 << inode->i_blkbits)) { xfs_off_t mapping_size; mapping_size = imap.br_startoff + imap.br_blockcount - iblock; mapping_size <<= inode->i_blkbits; ASSERT(mapping_size > 0); if (mapping_size > size) mapping_size = size; if (mapping_size > LONG_MAX) mapping_size = LONG_MAX; bh_result->b_size = mapping_size; } return 0; out_unlock: xfs_iunlock(ip, lockmode); return -error; } int xfs_get_blocks( struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { return __xfs_get_blocks(inode, iblock, bh_result, create, 0); } STATIC int xfs_get_blocks_direct( struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { return __xfs_get_blocks(inode, iblock, bh_result, create, 1); } /* * Complete a direct I/O write request. * * If the private argument is non-NULL __xfs_get_blocks signals us that we * need to issue a transaction to convert the range from unwritten to written * extents. In case this is regular synchronous I/O we just call xfs_end_io * to do this and we are done. But in case this was a successful AIO * request this handler is called from interrupt context, from which we * can't start transactions. In that case offload the I/O completion to * the workqueues we also use for buffered I/O completion. */ STATIC void xfs_end_io_direct_write( struct kiocb *iocb, loff_t offset, ssize_t size, void *private, int ret, bool is_async) { struct xfs_ioend *ioend = iocb->private; /* * blockdev_direct_IO can return an error even after the I/O * completion handler was called. Thus we need to protect * against double-freeing. */ iocb->private = NULL; ioend->io_offset = offset; ioend->io_size = size; if (private && size > 0) ioend->io_type = IO_UNWRITTEN; if (is_async) { /* * If we are converting an unwritten extent we need to delay * the AIO completion until after the unwrittent extent * conversion has completed, otherwise do it ASAP. 
*/ if (ioend->io_type == IO_UNWRITTEN) { ioend->io_iocb = iocb; ioend->io_result = ret; } else { aio_complete(iocb, ret, 0); } xfs_finish_ioend(ioend); } else { xfs_finish_ioend_sync(ioend); } } STATIC ssize_t xfs_vm_direct_IO( int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { struct inode *inode = iocb->ki_filp->f_mapping->host; struct block_device *bdev = xfs_find_bdev_for_inode(inode); ssize_t ret; if (rw & WRITE) { iocb->private = xfs_alloc_ioend(inode, IO_DIRECT); ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, nr_segs, xfs_get_blocks_direct, xfs_end_io_direct_write, NULL, 0); if (ret != -EIOCBQUEUED && iocb->private) xfs_destroy_ioend(iocb->private); } else { ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, nr_segs, xfs_get_blocks_direct, NULL, NULL, 0); } return ret; } STATIC void xfs_vm_write_failed( struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) { /* * punch out the delalloc blocks we have already allocated. We * don't call xfs_setattr() to do this as we may be in the * middle of a multi-iovec write and so the vfs inode->i_size * will not match the xfs ip->i_size and so it will zero too * much. Hence we jus truncate the page cache to zero what is * necessary and punch the delalloc blocks directly. */ struct xfs_inode *ip = XFS_I(inode); xfs_fileoff_t start_fsb; xfs_fileoff_t end_fsb; int error; truncate_pagecache(inode, to, inode->i_size); /* * Check if there are any blocks that are outside of i_size * that need to be trimmed back. 
*/ start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1; end_fsb = XFS_B_TO_FSB(ip->i_mount, to); if (end_fsb <= start_fsb) return; xfs_ilock(ip, XFS_ILOCK_EXCL); error = xfs_bmap_punch_delalloc_range(ip, start_fsb, end_fsb - start_fsb); if (error) { /* something screwed, just bail */ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { xfs_alert(ip->i_mount, "xfs_vm_write_failed: unable to clean up ino %lld", ip->i_ino); } } xfs_iunlock(ip, XFS_ILOCK_EXCL); } } STATIC int xfs_vm_write_begin( struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS, pagep, xfs_get_blocks); if (unlikely(ret)) xfs_vm_write_failed(mapping, pos + len); return ret; } STATIC int xfs_vm_write_end( struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { int ret; ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); if (unlikely(ret < len)) xfs_vm_write_failed(mapping, pos + len); return ret; } STATIC sector_t xfs_vm_bmap( struct address_space *mapping, sector_t block) { struct inode *inode = (struct inode *)mapping->host; struct xfs_inode *ip = XFS_I(inode); trace_xfs_vm_bmap(XFS_I(inode)); xfs_ilock(ip, XFS_IOLOCK_SHARED); xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); xfs_iunlock(ip, XFS_IOLOCK_SHARED); return generic_block_bmap(mapping, block, xfs_get_blocks); } STATIC int xfs_vm_readpage( struct file *unused, struct page *page) { return mpage_readpage(page, xfs_get_blocks); } STATIC int xfs_vm_readpages( struct file *unused, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks); } const struct address_space_operations xfs_address_space_operations = { .readpage = xfs_vm_readpage, .readpages = xfs_vm_readpages, .writepage = xfs_vm_writepage, .writepages = 
xfs_vm_writepages, .releasepage = xfs_vm_releasepage, .invalidatepage = xfs_vm_invalidatepage, .write_begin = xfs_vm_write_begin, .write_end = xfs_vm_write_end, .bmap = xfs_vm_bmap, .direct_IO = xfs_vm_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, };
gpl-2.0
ChronoMonochrome/Chrono_Kernel
drivers/media/video/cx88/cx88-dvb.c
2518
46815
/* * * device driver for Conexant 2388x based TV cards * MPEG Transport Stream (DVB) routines * * (c) 2004, 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au> * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs] * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/kthread.h> #include <linux/file.h> #include <linux/suspend.h> #include "cx88.h" #include "dvb-pll.h" #include <media/v4l2-common.h> #include "mt352.h" #include "mt352_priv.h" #include "cx88-vp3054-i2c.h" #include "zl10353.h" #include "cx22702.h" #include "or51132.h" #include "lgdt330x.h" #include "s5h1409.h" #include "xc5000.h" #include "nxt200x.h" #include "cx24123.h" #include "isl6421.h" #include "tuner-simple.h" #include "tda9887.h" #include "s5h1411.h" #include "stv0299.h" #include "z0194a.h" #include "stv0288.h" #include "stb6000.h" #include "cx24116.h" #include "stv0900.h" #include "stb6100.h" #include "stb6100_proc.h" #include "mb86a16.h" #include "ds3000.h" MODULE_DESCRIPTION("driver for cx2388x based DVB cards"); MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>"); MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); MODULE_LICENSE("GPL"); static unsigned int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug,"enable debug messages [dvb]"); 
/* Number of TS buffers per DVB stream; runtime-tunable module parameter. */
static unsigned int dvb_buf_tscnt = 32;
module_param(dvb_buf_tscnt, int, 0644);
MODULE_PARM_DESC(dvb_buf_tscnt, "DVB Buffer TS count [dvb]");

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* NOTE(review): expects a local `core` in scope at every call site. */
#define dprintk(level,fmt, arg...) if (debug >= level) \
	printk(KERN_DEBUG "%s/2-dvb: " fmt, core->name, ## arg)

/* ------------------------------------------------------------------ */

/*
 * videobuf callback: size the queue.  Each buffer holds 4 TS packets
 * (188 * 4 bytes); the buffer count comes from the dvb_buf_tscnt
 * module parameter.
 */
static int dvb_buf_setup(struct videobuf_queue *q,
			 unsigned int *count, unsigned int *size)
{
	struct cx8802_dev *dev = q->priv_data;

	dev->ts_packet_size  = 188 * 4;
	dev->ts_packet_count = dvb_buf_tscnt;

	*size  = dev->ts_packet_size * dev->ts_packet_count;
	*count = dvb_buf_tscnt;
	return 0;
}

/* videobuf callback: delegate buffer preparation to the cx8802 core. */
static int dvb_buf_prepare(struct videobuf_queue *q,
			   struct videobuf_buffer *vb, enum v4l2_field field)
{
	struct cx8802_dev *dev = q->priv_data;
	return cx8802_buf_prepare(q, dev, (struct cx88_buffer*)vb, field);
}

/* videobuf callback: queue a prepared buffer for DMA. */
static void dvb_buf_queue(struct videobuf_queue *q,
			  struct videobuf_buffer *vb)
{
	struct cx8802_dev *dev = q->priv_data;
	cx8802_buf_queue(dev, (struct cx88_buffer*)vb);
}

/* videobuf callback: release a buffer's resources. */
static void dvb_buf_release(struct videobuf_queue *q,
			    struct videobuf_buffer *vb)
{
	cx88_free_buffer(q, (struct cx88_buffer*)vb);
}

static const struct videobuf_queue_ops dvb_qops = {
	.buf_setup    = dvb_buf_setup,
	.buf_prepare  = dvb_buf_prepare,
	.buf_queue    = dvb_buf_queue,
	.buf_release  = dvb_buf_release,
};

/* ------------------------------------------------------------------ */

/*
 * ts_bus_ctrl callback: arbitrate TS bus access between frontends.
 * On acquire, records which frontend became active and asks the
 * attached cx8802 sub-driver for the bus; on release, gives it back
 * and clears active_fe_id.  (Function body continues past this chunk.)
 */
static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
{
	struct cx8802_dev *dev = fe->dvb->priv;
	struct cx8802_driver *drv = NULL;
	int ret = 0;
	int fe_id;

	fe_id = videobuf_dvb_find_frontend(&dev->frontends, fe);
	if (!fe_id) {
		printk(KERN_ERR "%s() No frontend found\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&dev->core->lock);
	drv = cx8802_get_driver(dev, CX88_MPEG_DVB);
	if (drv) {
		if (acquire){
			dev->frontends.active_fe_id = fe_id;
			ret = drv->request_acquire(drv);
		} else {
			ret = drv->request_release(drv);
			dev->frontends.active_fe_id = 0;
		}
	}
	mutex_unlock(&dev->core->lock);
	return ret;
}

/*
 * Open/close the I2C gate on the currently gated frontend's demod so
 * tuner commands can reach the tuner behind it.  frontends.gate selects
 * which frontend owns the gate (values <= 1 mean frontend 1).
 */
static void cx88_dvb_gate_ctrl(struct cx88_core *core, int open)
{
	struct videobuf_dvb_frontends *f;
	struct videobuf_dvb_frontend *fe;

	if (!core->dvbdev)
		return;

	f = &core->dvbdev->frontends;

	if (!f)
		return;

	if (f->gate <= 1) /* undefined or fe0 */
		fe = videobuf_dvb_get_frontend(f, 1);
	else
		fe = videobuf_dvb_get_frontend(f, f->gate);

	if (fe && fe->dvb.frontend && fe->dvb.frontend->ops.i2c_gate_ctrl)
		fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, open);
}

/* ------------------------------------------------------------------ */

/*
 * MT352 demod init sequence for DViCO FusionHDTV boards: program
 * clock, reset the demod, then set ADC, AGC target, GPIO and capture
 * range registers.  Register/value bytes are written via mt352_write().
 */
static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
{
	static const u8 clock_config []  = { CLOCK_CTL,  0x38, 0x39 };
	static const u8 reset []         = { RESET,      0x80 };
	static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1,  0x40 };
	static const u8 agc_cfg []       = { AGC_TARGET, 0x24, 0x20 };
	static const u8 gpp_ctl_cfg []   = { GPP_CTL,    0x33 };
	static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };

	mt352_write(fe, clock_config,   sizeof(clock_config));
	udelay(200);	/* let the clock settle before further writes */
	mt352_write(fe, reset,          sizeof(reset));
	mt352_write(fe, adc_ctl_1_cfg,  sizeof(adc_ctl_1_cfg));
	mt352_write(fe, agc_cfg,        sizeof(agc_cfg));
	mt352_write(fe, gpp_ctl_cfg,    sizeof(gpp_ctl_cfg));
	mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));

	return 0;
}

/*
 * Same MT352 init as above, but with clock/AGC values used by the
 * DViCO dual-tuner board (0x38 clock, AGC target 0x28).
 * (Function body continues past this chunk.)
 */
static int dvico_dual_demod_init(struct dvb_frontend *fe)
{
	static const u8 clock_config []  = { CLOCK_CTL,  0x38, 0x38 };
	static const u8 reset []         = { RESET,      0x80 };
	static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1,  0x40 };
	static const u8 agc_cfg []       = { AGC_TARGET, 0x28, 0x20 };
	static const u8 gpp_ctl_cfg []   = { GPP_CTL,    0x33 };
	static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };

	mt352_write(fe, clock_config,   sizeof(clock_config));
	udelay(200);	/* let the clock settle before further writes */
	mt352_write(fe, reset,          sizeof(reset));
	mt352_write(fe, adc_ctl_1_cfg,  sizeof(adc_ctl_1_cfg));
	mt352_write(fe, agc_cfg,        sizeof(agc_cfg));
	mt352_write(fe, gpp_ctl_cfg,    sizeof(gpp_ctl_cfg));
	mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));
return 0; } static int dntv_live_dvbt_demod_init(struct dvb_frontend* fe) { static const u8 clock_config [] = { 0x89, 0x38, 0x39 }; static const u8 reset [] = { 0x50, 0x80 }; static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 }; static const u8 agc_cfg [] = { 0x67, 0x10, 0x23, 0x00, 0xFF, 0xFF, 0x00, 0xFF, 0x00, 0x40, 0x40 }; static const u8 dntv_extra[] = { 0xB5, 0x7A }; static const u8 capt_range_cfg[] = { 0x75, 0x32 }; mt352_write(fe, clock_config, sizeof(clock_config)); udelay(2000); mt352_write(fe, reset, sizeof(reset)); mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg)); mt352_write(fe, agc_cfg, sizeof(agc_cfg)); udelay(2000); mt352_write(fe, dntv_extra, sizeof(dntv_extra)); mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg)); return 0; } static const struct mt352_config dvico_fusionhdtv = { .demod_address = 0x0f, .demod_init = dvico_fusionhdtv_demod_init, }; static const struct mt352_config dntv_live_dvbt_config = { .demod_address = 0x0f, .demod_init = dntv_live_dvbt_demod_init, }; static const struct mt352_config dvico_fusionhdtv_dual = { .demod_address = 0x0f, .demod_init = dvico_dual_demod_init, }; static const struct zl10353_config cx88_terratec_cinergy_ht_pci_mkii_config = { .demod_address = (0x1e >> 1), .no_tuner = 1, .if2 = 45600, }; static struct mb86a16_config twinhan_vp1027 = { .demod_address = 0x08, }; #if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE)) static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe) { static const u8 clock_config [] = { 0x89, 0x38, 0x38 }; static const u8 reset [] = { 0x50, 0x80 }; static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 }; static const u8 agc_cfg [] = { 0x67, 0x10, 0x20, 0x00, 0xFF, 0xFF, 0x00, 0xFF, 0x00, 0x40, 0x40 }; static const u8 dntv_extra[] = { 0xB5, 0x7A }; static const u8 capt_range_cfg[] = { 0x75, 0x32 }; mt352_write(fe, clock_config, sizeof(clock_config)); udelay(2000); mt352_write(fe, reset, sizeof(reset)); mt352_write(fe, 
adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg)); mt352_write(fe, agc_cfg, sizeof(agc_cfg)); udelay(2000); mt352_write(fe, dntv_extra, sizeof(dntv_extra)); mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg)); return 0; } static const struct mt352_config dntv_live_dvbt_pro_config = { .demod_address = 0x0f, .no_tuner = 1, .demod_init = dntv_live_dvbt_pro_demod_init, }; #endif static const struct zl10353_config dvico_fusionhdtv_hybrid = { .demod_address = 0x0f, .no_tuner = 1, }; static const struct zl10353_config dvico_fusionhdtv_xc3028 = { .demod_address = 0x0f, .if2 = 45600, .no_tuner = 1, }; static const struct mt352_config dvico_fusionhdtv_mt352_xc3028 = { .demod_address = 0x0f, .if2 = 4560, .no_tuner = 1, .demod_init = dvico_fusionhdtv_demod_init, }; static const struct zl10353_config dvico_fusionhdtv_plus_v1_1 = { .demod_address = 0x0f, }; static const struct cx22702_config connexant_refboard_config = { .demod_address = 0x43, .output_mode = CX22702_SERIAL_OUTPUT, }; static const struct cx22702_config hauppauge_hvr_config = { .demod_address = 0x63, .output_mode = CX22702_SERIAL_OUTPUT, }; static int or51132_set_ts_param(struct dvb_frontend* fe, int is_punctured) { struct cx8802_dev *dev= fe->dvb->priv; dev->ts_gen_cntrl = is_punctured ? 
0x04 : 0x00;	/* (tail of or51132_set_ts_param, started in previous chunk) */
	return 0;
}

static const struct or51132_config pchdtv_hd3000 = {
	.demod_address = 0x15,
	.set_ts_params = or51132_set_ts_param,
};

/*
 * Select the RF input connector for the FusionHDTV3 Gold-Q by toggling
 * GPIO0 bit 3 (index 0 clears it, any other index sets it).
 */
static int lgdt330x_pll_rf_set(struct dvb_frontend* fe, int index)
{
	struct cx8802_dev *dev = fe->dvb->priv;
	struct cx88_core *core = dev->core;

	dprintk(1, "%s: index = %d\n", __func__, index);
	if (index == 0)
		cx_clear(MO_GP0_IO, 8);
	else
		cx_set(MO_GP0_IO, 8);
	return 0;
}

/* Toggle the punctured-stream bit (0x04) in the TS generator control. */
static int lgdt330x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
{
	struct cx8802_dev *dev = fe->dvb->priv;

	if (is_punctured)
		dev->ts_gen_cntrl |= 0x04;
	else
		dev->ts_gen_cntrl &= ~0x04;
	return 0;
}

/* Non-const: .pll_rf_set is patched in at attach time for the Gold-Q. */
static struct lgdt330x_config fusionhdtv_3_gold = {
	.demod_address = 0x0e,
	.demod_chip    = LGDT3302,
	.serial_mpeg   = 0x04, /* TPSERIAL for 3302 in TOP_CONTROL */
	.set_ts_params = lgdt330x_set_ts_param,
};

static const struct lgdt330x_config fusionhdtv_5_gold = {
	.demod_address = 0x0e,
	.demod_chip    = LGDT3303,
	.serial_mpeg   = 0x40, /* TPSERIAL for 3303 in TOP_CONTROL */
	.set_ts_params = lgdt330x_set_ts_param,
};

static const struct lgdt330x_config pchdtv_hd5500 = {
	.demod_address = 0x59,
	.demod_chip    = LGDT3303,
	.serial_mpeg   = 0x40, /* TPSERIAL for 3303 in TOP_CONTROL */
	.set_ts_params = lgdt330x_set_ts_param,
};

/*
 * Set TS generator control from the punctured flag.
 * (Function body continues into the next chunk.)
 */
static int nxt200x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
{
	struct cx8802_dev *dev = fe->dvb->priv;

	dev->ts_gen_cntrl = is_punctured ?
0x04 : 0x00;	/* (tail of nxt200x_set_ts_param, started in previous chunk) */
	return 0;
}

static const struct nxt200x_config ati_hdtvwonder = {
	.demod_address = 0x0a,
	.set_ts_params = nxt200x_set_ts_param,
};

/* cx24123 boards always use TS generator control value 0x02. */
static int cx24123_set_ts_param(struct dvb_frontend* fe, int is_punctured)
{
	struct cx8802_dev *dev = fe->dvb->priv;

	dev->ts_gen_cntrl = 0x02;
	return 0;
}

/*
 * KWorld DVB-S 100 LNB power: drive the GPIO0 lines, then chain to the
 * demod's original set_voltage (saved in core->prev_set_voltage).
 */
static int kworld_dvbs_100_set_voltage(struct dvb_frontend* fe,
				       fe_sec_voltage_t voltage)
{
	struct cx8802_dev *dev = fe->dvb->priv;
	struct cx88_core *core = dev->core;

	if (voltage == SEC_VOLTAGE_OFF)
		cx_write(MO_GP0_IO, 0x000006fb);
	else
		cx_write(MO_GP0_IO, 0x000006f9);

	if (core->prev_set_voltage)
		return core->prev_set_voltage(fe, voltage);
	return 0;
}

/* Geniatech DVB-S: GPIO only handles the OFF case; otherwise chain. */
static int geniatech_dvbs_set_voltage(struct dvb_frontend *fe,
				      fe_sec_voltage_t voltage)
{
	struct cx8802_dev *dev = fe->dvb->priv;
	struct cx88_core *core = dev->core;

	if (voltage == SEC_VOLTAGE_OFF) {
		dprintk(1,"LNB Voltage OFF\n");
		cx_write(MO_GP0_IO, 0x0000efff);
	}

	if (core->prev_set_voltage)
		return core->prev_set_voltage(fe, voltage);
	return 0;
}

/*
 * TeVii DVB-S LNB power: GPIO0 bit 0x20 selects 13V/18V.
 * NOTE(review): SEC_VOLTAGE_OFF clears the same bit as 13V — the demod
 * callback chained below presumably does the actual power-off; confirm.
 */
static int tevii_dvbs_set_voltage(struct dvb_frontend *fe,
				  fe_sec_voltage_t voltage)
{
	struct cx8802_dev *dev = fe->dvb->priv;
	struct cx88_core *core = dev->core;

	cx_set(MO_GP0_IO, 0x6040);
	switch (voltage) {
	case SEC_VOLTAGE_13:
		cx_clear(MO_GP0_IO, 0x20);
		break;
	case SEC_VOLTAGE_18:
		cx_set(MO_GP0_IO, 0x20);
		break;
	case SEC_VOLTAGE_OFF:
		cx_clear(MO_GP0_IO, 0x20);
		break;
	}

	if (core->prev_set_voltage)
		return core->prev_set_voltage(fe, voltage);
	return 0;
}

/*
 * Twinhan VP-1027 LNB power via dedicated GPIO0 values per voltage.
 * (Function body continues into the next chunk.)
 */
static int vp1027_set_voltage(struct dvb_frontend *fe,
			      fe_sec_voltage_t voltage)
{
	struct cx8802_dev *dev = fe->dvb->priv;
	struct cx88_core *core = dev->core;

	switch (voltage) {
	case SEC_VOLTAGE_13:
		dprintk(1, "LNB SEC Voltage=13\n");
		cx_write(MO_GP0_IO, 0x00001220);
		break;
	case SEC_VOLTAGE_18:
		dprintk(1, "LNB SEC Voltage=18\n");
		cx_write(MO_GP0_IO, 0x00001222);
		break;
	case SEC_VOLTAGE_OFF:
		dprintk(1, "LNB Voltage OFF\n");
		cx_write(MO_GP0_IO, 0x00001230);
		break;
	}

	if (core->prev_set_voltage)
		return core->prev_set_voltage(fe,
voltage);	/* (tail of vp1027_set_voltage, started in previous chunk) */
	return 0;
}

/* Per-board demod/tuner attach configuration tables. */

static const struct cx24123_config geniatech_dvbs_config = {
	.demod_address = 0x55,
	.set_ts_params = cx24123_set_ts_param,
};

static const struct cx24123_config hauppauge_novas_config = {
	.demod_address = 0x55,
	.set_ts_params = cx24123_set_ts_param,
};

static const struct cx24123_config kworld_dvbs_100_config = {
	.demod_address = 0x15,
	.set_ts_params = cx24123_set_ts_param,
	.lnb_polarity  = 1,
};

static const struct s5h1409_config pinnacle_pctv_hd_800i_config = {
	.demod_address = 0x32 >> 1,	/* 8-bit bus address halved to 7-bit */
	.output_mode   = S5H1409_PARALLEL_OUTPUT,
	.gpio	       = S5H1409_GPIO_ON,
	.qam_if	       = 44000,
	.inversion     = S5H1409_INVERSION_OFF,
	.status_mode   = S5H1409_DEMODLOCKING,
	.mpeg_timing   = S5H1409_MPEGTIMING_NONCONTINOUS_NONINVERTING_CLOCK,
};

static const struct s5h1409_config dvico_hdtv5_pci_nano_config = {
	.demod_address = 0x32 >> 1,
	.output_mode   = S5H1409_SERIAL_OUTPUT,
	.gpio          = S5H1409_GPIO_OFF,
	.inversion     = S5H1409_INVERSION_OFF,
	.status_mode   = S5H1409_DEMODLOCKING,
	.mpeg_timing   = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};

static const struct s5h1409_config kworld_atsc_120_config = {
	.demod_address = 0x32 >> 1,
	.output_mode   = S5H1409_SERIAL_OUTPUT,
	.gpio          = S5H1409_GPIO_OFF,
	.inversion     = S5H1409_INVERSION_OFF,
	.status_mode   = S5H1409_DEMODLOCKING,
	.mpeg_timing   = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};

static const struct xc5000_config pinnacle_pctv_hd_800i_tuner_config = {
	.i2c_address	= 0x64,
	.if_khz		= 5380,
};

static const struct zl10353_config cx88_pinnacle_hybrid_pctv = {
	.demod_address = (0x1e >> 1),
	.no_tuner      = 1,
	.if2           = 45600,
};

static const struct zl10353_config cx88_geniatech_x8000_mt = {
	.demod_address = (0x1e >> 1),
	.no_tuner = 1,
	.disable_i2c_gate_ctrl = 1,
};

/* (Struct initializer continues into the next chunk.) */
static const struct s5h1411_config dvico_fusionhdtv7_config = {
	.output_mode   = S5H1411_SERIAL_OUTPUT,
	.gpio          = S5H1411_GPIO_ON,
	.mpeg_timing   = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
	.qam_if        = S5H1411_IF_44000,
	.vsb_if        = S5H1411_IF_44000,
	.inversion     = S5H1411_INVERSION_OFF,
.status_mode = S5H1411_DEMODLOCKING }; static const struct xc5000_config dvico_fusionhdtv7_tuner_config = { .i2c_address = 0xc2 >> 1, .if_khz = 5380, }; static int attach_xc3028(u8 addr, struct cx8802_dev *dev) { struct dvb_frontend *fe; struct videobuf_dvb_frontend *fe0 = NULL; struct xc2028_ctrl ctl; struct xc2028_config cfg = { .i2c_adap = &dev->core->i2c_adap, .i2c_addr = addr, .ctrl = &ctl, }; /* Get the first frontend */ fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1); if (!fe0) return -EINVAL; if (!fe0->dvb.frontend) { printk(KERN_ERR "%s/2: dvb frontend not attached. " "Can't attach xc3028\n", dev->core->name); return -EINVAL; } /* * Some xc3028 devices may be hidden by an I2C gate. This is known * to happen with some s5h1409-based devices. * Now that I2C gate is open, sets up xc3028 configuration */ cx88_setup_xc3028(dev->core, &ctl); fe = dvb_attach(xc2028_attach, fe0->dvb.frontend, &cfg); if (!fe) { printk(KERN_ERR "%s/2: xc3028 attach failed\n", dev->core->name); dvb_frontend_detach(fe0->dvb.frontend); dvb_unregister_frontend(fe0->dvb.frontend); fe0->dvb.frontend = NULL; return -EINVAL; } printk(KERN_INFO "%s/2: xc3028 attached\n", dev->core->name); return 0; } static int cx24116_set_ts_param(struct dvb_frontend *fe, int is_punctured) { struct cx8802_dev *dev = fe->dvb->priv; dev->ts_gen_cntrl = 0x2; return 0; } static int stv0900_set_ts_param(struct dvb_frontend *fe, int is_punctured) { struct cx8802_dev *dev = fe->dvb->priv; dev->ts_gen_cntrl = 0; return 0; } static int cx24116_reset_device(struct dvb_frontend *fe) { struct cx8802_dev *dev = fe->dvb->priv; struct cx88_core *core = dev->core; /* Reset the part */ /* Put the cx24116 into reset */ cx_write(MO_SRST_IO, 0); msleep(10); /* Take the cx24116 out of reset */ cx_write(MO_SRST_IO, 1); msleep(10); return 0; } static const struct cx24116_config hauppauge_hvr4000_config = { .demod_address = 0x05, .set_ts_params = cx24116_set_ts_param, .reset_device = cx24116_reset_device, }; static const struct 
cx24116_config tevii_s460_config = { .demod_address = 0x55, .set_ts_params = cx24116_set_ts_param, .reset_device = cx24116_reset_device, }; static int ds3000_set_ts_param(struct dvb_frontend *fe, int is_punctured) { struct cx8802_dev *dev = fe->dvb->priv; dev->ts_gen_cntrl = 4; return 0; } static struct ds3000_config tevii_ds3000_config = { .demod_address = 0x68, .set_ts_params = ds3000_set_ts_param, }; static const struct stv0900_config prof_7301_stv0900_config = { .demod_address = 0x6a, /* demod_mode = 0,*/ .xtal = 27000000, .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ .diseqc_mode = 2,/* 2/3 PWM */ .tun1_maddress = 0,/* 0x60 */ .tun1_adc = 0,/* 2 Vpp */ .path1_mode = 3, .set_ts_params = stv0900_set_ts_param, }; static const struct stb6100_config prof_7301_stb6100_config = { .tuner_address = 0x60, .refclock = 27000000, }; static const struct stv0299_config tevii_tuner_sharp_config = { .demod_address = 0x68, .inittab = sharp_z0194a_inittab, .mclk = 88000000UL, .invert = 1, .skip_reinit = 0, .lock_output = 1, .volt13_op0_op1 = STV0299_VOLT13_OP1, .min_delay_ms = 100, .set_symbol_rate = sharp_z0194a_set_symbol_rate, .set_ts_params = cx24116_set_ts_param, }; static const struct stv0288_config tevii_tuner_earda_config = { .demod_address = 0x68, .min_delay_ms = 100, .set_ts_params = cx24116_set_ts_param, }; static int cx8802_alloc_frontends(struct cx8802_dev *dev) { struct cx88_core *core = dev->core; struct videobuf_dvb_frontend *fe = NULL; int i; mutex_init(&dev->frontends.lock); INIT_LIST_HEAD(&dev->frontends.felist); if (!core->board.num_frontends) return -ENODEV; printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__, core->board.num_frontends); for (i = 1; i <= core->board.num_frontends; i++) { fe = videobuf_dvb_alloc_frontend(&dev->frontends, i); if (!fe) { printk(KERN_ERR "%s() failed to alloc\n", __func__); videobuf_dvb_dealloc_frontends(&dev->frontends); return -ENOMEM; } } return 0; } static const u8 samsung_smt_7020_inittab[] = { 0x01, 0x15, 0x02, 
0x00, 0x03, 0x00, 0x04, 0x7D, 0x05, 0x0F, 0x06, 0x02, 0x07, 0x00, 0x08, 0x60, 0x0A, 0xC2, 0x0B, 0x00, 0x0C, 0x01, 0x0D, 0x81, 0x0E, 0x44, 0x0F, 0x09, 0x10, 0x3C, 0x11, 0x84, 0x12, 0xDA, 0x13, 0x99, 0x14, 0x8D, 0x15, 0xCE, 0x16, 0xE8, 0x17, 0x43, 0x18, 0x1C, 0x19, 0x1B, 0x1A, 0x1D, 0x1C, 0x12, 0x1D, 0x00, 0x1E, 0x00, 0x1F, 0x00, 0x20, 0x00, 0x21, 0x00, 0x22, 0x00, 0x23, 0x00, 0x28, 0x02, 0x29, 0x28, 0x2A, 0x14, 0x2B, 0x0F, 0x2C, 0x09, 0x2D, 0x05, 0x31, 0x1F, 0x32, 0x19, 0x33, 0xFC, 0x34, 0x13, 0xff, 0xff, }; static int samsung_smt_7020_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) { struct cx8802_dev *dev = fe->dvb->priv; u8 buf[4]; u32 div; struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) }; div = params->frequency / 125; buf[0] = (div >> 8) & 0x7f; buf[1] = div & 0xff; buf[2] = 0x84; /* 0xC4 */ buf[3] = 0x00; if (params->frequency < 1500000) buf[3] |= 0x10; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if (i2c_transfer(&dev->core->i2c_adap, &msg, 1) != 1) return -EIO; return 0; } static int samsung_smt_7020_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) { struct cx8802_dev *dev = fe->dvb->priv; struct cx88_core *core = dev->core; cx_set(MO_GP0_IO, 0x0800); switch (tone) { case SEC_TONE_ON: cx_set(MO_GP0_IO, 0x08); break; case SEC_TONE_OFF: cx_clear(MO_GP0_IO, 0x08); break; default: return -EINVAL; } return 0; } static int samsung_smt_7020_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { struct cx8802_dev *dev = fe->dvb->priv; struct cx88_core *core = dev->core; u8 data; struct i2c_msg msg = { .addr = 8, .flags = 0, .buf = &data, .len = sizeof(data) }; cx_set(MO_GP0_IO, 0x8000); switch (voltage) { case SEC_VOLTAGE_OFF: break; case SEC_VOLTAGE_13: data = ISL6421_EN1 | ISL6421_LLC1; cx_clear(MO_GP0_IO, 0x80); break; case SEC_VOLTAGE_18: data = ISL6421_EN1 | ISL6421_LLC1 | ISL6421_VSEL1; cx_clear(MO_GP0_IO, 0x80); break; default: return -EINVAL; }; return 
(i2c_transfer(&dev->core->i2c_adap, &msg, 1) == 1) ? 0 : -EIO; } static int samsung_smt_7020_stv0299_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32 ratio) { u8 aclk = 0; u8 bclk = 0; if (srate < 1500000) { aclk = 0xb7; bclk = 0x47; } else if (srate < 3000000) { aclk = 0xb7; bclk = 0x4b; } else if (srate < 7000000) { aclk = 0xb7; bclk = 0x4f; } else if (srate < 14000000) { aclk = 0xb7; bclk = 0x53; } else if (srate < 30000000) { aclk = 0xb6; bclk = 0x53; } else if (srate < 45000000) { aclk = 0xb4; bclk = 0x51; } stv0299_writereg(fe, 0x13, aclk); stv0299_writereg(fe, 0x14, bclk); stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff); stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff); stv0299_writereg(fe, 0x21, ratio & 0xf0); return 0; } static const struct stv0299_config samsung_stv0299_config = { .demod_address = 0x68, .inittab = samsung_smt_7020_inittab, .mclk = 88000000UL, .invert = 0, .skip_reinit = 0, .lock_output = STV0299_LOCKOUTPUT_LK, .volt13_op0_op1 = STV0299_VOLT13_OP1, .min_delay_ms = 100, .set_symbol_rate = samsung_smt_7020_stv0299_set_symbol_rate, }; static int dvb_register(struct cx8802_dev *dev) { struct cx88_core *core = dev->core; struct videobuf_dvb_frontend *fe0, *fe1 = NULL; int mfe_shared = 0; /* bus not shared by default */ if (0 != core->i2c_rc) { printk(KERN_ERR "%s/2: no i2c-bus available, cannot attach dvb drivers\n", core->name); goto frontend_detach; } /* Get the first frontend */ fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1); if (!fe0) goto frontend_detach; /* multi-frontend gate control is undefined or defaults to fe0 */ dev->frontends.gate = 0; /* Sets the gate control callback to be used by i2c command calls */ core->gate_ctrl = cx88_dvb_gate_ctrl; /* init frontend(s) */ switch (core->boardnr) { case CX88_BOARD_HAUPPAUGE_DVB_T1: fe0->dvb.frontend = dvb_attach(cx22702_attach, &connexant_refboard_config, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x61, &core->i2c_adap, 
DVB_PLL_THOMSON_DTT759X)) goto frontend_detach; } break; case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1: case CX88_BOARD_CONEXANT_DVB_T1: case CX88_BOARD_KWORLD_DVB_T_CX22702: case CX88_BOARD_WINFAST_DTV1000: fe0->dvb.frontend = dvb_attach(cx22702_attach, &connexant_refboard_config, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x60, &core->i2c_adap, DVB_PLL_THOMSON_DTT7579)) goto frontend_detach; } break; case CX88_BOARD_WINFAST_DTV2000H: case CX88_BOARD_WINFAST_DTV2000H_J: case CX88_BOARD_HAUPPAUGE_HVR1100: case CX88_BOARD_HAUPPAUGE_HVR1100LP: case CX88_BOARD_HAUPPAUGE_HVR1300: fe0->dvb.frontend = dvb_attach(cx22702_attach, &hauppauge_hvr_config, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend, &core->i2c_adap, 0x61, TUNER_PHILIPS_FMD1216ME_MK3)) goto frontend_detach; } break; case CX88_BOARD_HAUPPAUGE_HVR3000: /* MFE frontend 1 */ mfe_shared = 1; dev->frontends.gate = 2; /* DVB-S init */ fe0->dvb.frontend = dvb_attach(cx24123_attach, &hauppauge_novas_config, &dev->core->i2c_adap); if (fe0->dvb.frontend) { if (!dvb_attach(isl6421_attach, fe0->dvb.frontend, &dev->core->i2c_adap, 0x08, ISL6421_DCL, 0x00)) goto frontend_detach; } /* MFE frontend 2 */ fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2); if (!fe1) goto frontend_detach; /* DVB-T init */ fe1->dvb.frontend = dvb_attach(cx22702_attach, &hauppauge_hvr_config, &dev->core->i2c_adap); if (fe1->dvb.frontend) { fe1->dvb.frontend->id = 1; if (!dvb_attach(simple_tuner_attach, fe1->dvb.frontend, &dev->core->i2c_adap, 0x61, TUNER_PHILIPS_FMD1216ME_MK3)) goto frontend_detach; } break; case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS: fe0->dvb.frontend = dvb_attach(mt352_attach, &dvico_fusionhdtv, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x60, NULL, DVB_PLL_THOMSON_DTT7579)) goto frontend_detach; break; } /* ZL10353 replaces MT352 on later cards */ 
fe0->dvb.frontend = dvb_attach(zl10353_attach, &dvico_fusionhdtv_plus_v1_1, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x60, NULL, DVB_PLL_THOMSON_DTT7579)) goto frontend_detach; } break; case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL: /* The tin box says DEE1601, but it seems to be DTT7579 * compatible, with a slightly different MT352 AGC gain. */ fe0->dvb.frontend = dvb_attach(mt352_attach, &dvico_fusionhdtv_dual, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x61, NULL, DVB_PLL_THOMSON_DTT7579)) goto frontend_detach; break; } /* ZL10353 replaces MT352 on later cards */ fe0->dvb.frontend = dvb_attach(zl10353_attach, &dvico_fusionhdtv_plus_v1_1, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x61, NULL, DVB_PLL_THOMSON_DTT7579)) goto frontend_detach; } break; case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1: fe0->dvb.frontend = dvb_attach(mt352_attach, &dvico_fusionhdtv, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x61, NULL, DVB_PLL_LG_Z201)) goto frontend_detach; } break; case CX88_BOARD_KWORLD_DVB_T: case CX88_BOARD_DNTV_LIVE_DVB_T: case CX88_BOARD_ADSTECH_DVB_T_PCI: fe0->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_config, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x61, NULL, DVB_PLL_UNKNOWN_1)) goto frontend_detach; } break; case CX88_BOARD_DNTV_LIVE_DVB_T_PRO: #if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE)) /* MT352 is on a secondary I2C bus made from some GPIO lines */ fe0->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config, &dev->vp3054->adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend, &core->i2c_adap, 0x61, TUNER_PHILIPS_FMD1216ME_MK3)) goto frontend_detach; 
} #else printk(KERN_ERR "%s/2: built without vp3054 support\n", core->name); #endif break; case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID: fe0->dvb.frontend = dvb_attach(zl10353_attach, &dvico_fusionhdtv_hybrid, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend, &core->i2c_adap, 0x61, TUNER_THOMSON_FE6600)) goto frontend_detach; } break; case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO: fe0->dvb.frontend = dvb_attach(zl10353_attach, &dvico_fusionhdtv_xc3028, &core->i2c_adap); if (fe0->dvb.frontend == NULL) fe0->dvb.frontend = dvb_attach(mt352_attach, &dvico_fusionhdtv_mt352_xc3028, &core->i2c_adap); /* * On this board, the demod provides the I2C bus pullup. * We must not permit gate_ctrl to be performed, or * the xc3028 cannot communicate on the bus. */ if (fe0->dvb.frontend) fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL; if (attach_xc3028(0x61, dev) < 0) goto frontend_detach; break; case CX88_BOARD_PCHDTV_HD3000: fe0->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend, &core->i2c_adap, 0x61, TUNER_THOMSON_DTT761X)) goto frontend_detach; } break; case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q: dev->ts_gen_cntrl = 0x08; /* Do a hardware reset of chip before using it. */ cx_clear(MO_GP0_IO, 1); mdelay(100); cx_set(MO_GP0_IO, 1); mdelay(200); /* Select RF connector callback */ fusionhdtv_3_gold.pll_rf_set = lgdt330x_pll_rf_set; fe0->dvb.frontend = dvb_attach(lgdt330x_attach, &fusionhdtv_3_gold, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend, &core->i2c_adap, 0x61, TUNER_MICROTUNE_4042FI5)) goto frontend_detach; } break; case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_T: dev->ts_gen_cntrl = 0x08; /* Do a hardware reset of chip before using it. 
*/ cx_clear(MO_GP0_IO, 1); mdelay(100); cx_set(MO_GP0_IO, 9); mdelay(200); fe0->dvb.frontend = dvb_attach(lgdt330x_attach, &fusionhdtv_3_gold, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend, &core->i2c_adap, 0x61, TUNER_THOMSON_DTT761X)) goto frontend_detach; } break; case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD: dev->ts_gen_cntrl = 0x08; /* Do a hardware reset of chip before using it. */ cx_clear(MO_GP0_IO, 1); mdelay(100); cx_set(MO_GP0_IO, 1); mdelay(200); fe0->dvb.frontend = dvb_attach(lgdt330x_attach, &fusionhdtv_5_gold, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend, &core->i2c_adap, 0x61, TUNER_LG_TDVS_H06XF)) goto frontend_detach; if (!dvb_attach(tda9887_attach, fe0->dvb.frontend, &core->i2c_adap, 0x43)) goto frontend_detach; } break; case CX88_BOARD_PCHDTV_HD5500: dev->ts_gen_cntrl = 0x08; /* Do a hardware reset of chip before using it. */ cx_clear(MO_GP0_IO, 1); mdelay(100); cx_set(MO_GP0_IO, 1); mdelay(200); fe0->dvb.frontend = dvb_attach(lgdt330x_attach, &pchdtv_hd5500, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend, &core->i2c_adap, 0x61, TUNER_LG_TDVS_H06XF)) goto frontend_detach; if (!dvb_attach(tda9887_attach, fe0->dvb.frontend, &core->i2c_adap, 0x43)) goto frontend_detach; } break; case CX88_BOARD_ATI_HDTVWONDER: fe0->dvb.frontend = dvb_attach(nxt200x_attach, &ati_hdtvwonder, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend, &core->i2c_adap, 0x61, TUNER_PHILIPS_TUV1236D)) goto frontend_detach; } break; case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1: case CX88_BOARD_HAUPPAUGE_NOVASE2_S1: fe0->dvb.frontend = dvb_attach(cx24123_attach, &hauppauge_novas_config, &core->i2c_adap); if (fe0->dvb.frontend) { if (!dvb_attach(isl6421_attach, fe0->dvb.frontend, &core->i2c_adap, 0x08, ISL6421_DCL, 0x00)) goto frontend_detach; } break; case 
CX88_BOARD_KWORLD_DVBS_100: fe0->dvb.frontend = dvb_attach(cx24123_attach, &kworld_dvbs_100_config, &core->i2c_adap); if (fe0->dvb.frontend) { core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage; fe0->dvb.frontend->ops.set_voltage = kworld_dvbs_100_set_voltage; } break; case CX88_BOARD_GENIATECH_DVBS: fe0->dvb.frontend = dvb_attach(cx24123_attach, &geniatech_dvbs_config, &core->i2c_adap); if (fe0->dvb.frontend) { core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage; fe0->dvb.frontend->ops.set_voltage = geniatech_dvbs_set_voltage; } break; case CX88_BOARD_PINNACLE_PCTV_HD_800i: fe0->dvb.frontend = dvb_attach(s5h1409_attach, &pinnacle_pctv_hd_800i_config, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(xc5000_attach, fe0->dvb.frontend, &core->i2c_adap, &pinnacle_pctv_hd_800i_tuner_config)) goto frontend_detach; } break; case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: fe0->dvb.frontend = dvb_attach(s5h1409_attach, &dvico_hdtv5_pci_nano_config, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { struct dvb_frontend *fe; struct xc2028_config cfg = { .i2c_adap = &core->i2c_adap, .i2c_addr = 0x61, }; static struct xc2028_ctrl ctl = { .fname = XC2028_DEFAULT_FIRMWARE, .max_len = 64, .scode_table = XC3028_FE_OREN538, }; fe = dvb_attach(xc2028_attach, fe0->dvb.frontend, &cfg); if (fe != NULL && fe->ops.tuner_ops.set_config != NULL) fe->ops.tuner_ops.set_config(fe, &ctl); } break; case CX88_BOARD_PINNACLE_HYBRID_PCTV: case CX88_BOARD_WINFAST_DTV1800H: fe0->dvb.frontend = dvb_attach(zl10353_attach, &cx88_pinnacle_hybrid_pctv, &core->i2c_adap); if (fe0->dvb.frontend) { fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL; if (attach_xc3028(0x61, dev) < 0) goto frontend_detach; } break; case CX88_BOARD_GENIATECH_X8000_MT: dev->ts_gen_cntrl = 0x00; fe0->dvb.frontend = dvb_attach(zl10353_attach, &cx88_geniatech_x8000_mt, &core->i2c_adap); if (attach_xc3028(0x61, dev) < 0) goto frontend_detach; break; case CX88_BOARD_KWORLD_ATSC_120: fe0->dvb.frontend = 
dvb_attach(s5h1409_attach, &kworld_atsc_120_config, &core->i2c_adap); if (attach_xc3028(0x61, dev) < 0) goto frontend_detach; break; case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD: fe0->dvb.frontend = dvb_attach(s5h1411_attach, &dvico_fusionhdtv7_config, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(xc5000_attach, fe0->dvb.frontend, &core->i2c_adap, &dvico_fusionhdtv7_tuner_config)) goto frontend_detach; } break; case CX88_BOARD_HAUPPAUGE_HVR4000: /* MFE frontend 1 */ mfe_shared = 1; dev->frontends.gate = 2; /* DVB-S/S2 Init */ fe0->dvb.frontend = dvb_attach(cx24116_attach, &hauppauge_hvr4000_config, &dev->core->i2c_adap); if (fe0->dvb.frontend) { if (!dvb_attach(isl6421_attach, fe0->dvb.frontend, &dev->core->i2c_adap, 0x08, ISL6421_DCL, 0x00)) goto frontend_detach; } /* MFE frontend 2 */ fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2); if (!fe1) goto frontend_detach; /* DVB-T Init */ fe1->dvb.frontend = dvb_attach(cx22702_attach, &hauppauge_hvr_config, &dev->core->i2c_adap); if (fe1->dvb.frontend) { fe1->dvb.frontend->id = 1; if (!dvb_attach(simple_tuner_attach, fe1->dvb.frontend, &dev->core->i2c_adap, 0x61, TUNER_PHILIPS_FMD1216ME_MK3)) goto frontend_detach; } break; case CX88_BOARD_HAUPPAUGE_HVR4000LITE: fe0->dvb.frontend = dvb_attach(cx24116_attach, &hauppauge_hvr4000_config, &dev->core->i2c_adap); if (fe0->dvb.frontend) { if (!dvb_attach(isl6421_attach, fe0->dvb.frontend, &dev->core->i2c_adap, 0x08, ISL6421_DCL, 0x00)) goto frontend_detach; } break; case CX88_BOARD_PROF_6200: case CX88_BOARD_TBS_8910: case CX88_BOARD_TEVII_S420: fe0->dvb.frontend = dvb_attach(stv0299_attach, &tevii_tuner_sharp_config, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x60, &core->i2c_adap, DVB_PLL_OPERA1)) goto frontend_detach; core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage; fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage; } else { fe0->dvb.frontend = dvb_attach(stv0288_attach, 
&tevii_tuner_earda_config, &core->i2c_adap); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(stb6000_attach, fe0->dvb.frontend, 0x61, &core->i2c_adap)) goto frontend_detach; core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage; fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage; } } break; case CX88_BOARD_TEVII_S460: fe0->dvb.frontend = dvb_attach(cx24116_attach, &tevii_s460_config, &core->i2c_adap); if (fe0->dvb.frontend != NULL) fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage; break; case CX88_BOARD_TEVII_S464: fe0->dvb.frontend = dvb_attach(ds3000_attach, &tevii_ds3000_config, &core->i2c_adap); if (fe0->dvb.frontend != NULL) fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage; break; case CX88_BOARD_OMICOM_SS4_PCI: case CX88_BOARD_TBS_8920: case CX88_BOARD_PROF_7300: case CX88_BOARD_SATTRADE_ST4200: fe0->dvb.frontend = dvb_attach(cx24116_attach, &hauppauge_hvr4000_config, &core->i2c_adap); if (fe0->dvb.frontend != NULL) fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage; break; case CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII: fe0->dvb.frontend = dvb_attach(zl10353_attach, &cx88_terratec_cinergy_ht_pci_mkii_config, &core->i2c_adap); if (fe0->dvb.frontend) { fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL; if (attach_xc3028(0x61, dev) < 0) goto frontend_detach; } break; case CX88_BOARD_PROF_7301:{ struct dvb_tuner_ops *tuner_ops = NULL; fe0->dvb.frontend = dvb_attach(stv0900_attach, &prof_7301_stv0900_config, &core->i2c_adap, 0); if (fe0->dvb.frontend != NULL) { if (!dvb_attach(stb6100_attach, fe0->dvb.frontend, &prof_7301_stb6100_config, &core->i2c_adap)) goto frontend_detach; tuner_ops = &fe0->dvb.frontend->ops.tuner_ops; tuner_ops->set_frequency = stb6100_set_freq; tuner_ops->get_frequency = stb6100_get_freq; tuner_ops->set_bandwidth = stb6100_set_bandw; tuner_ops->get_bandwidth = stb6100_get_bandw; core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage; fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage; } 
break; } case CX88_BOARD_SAMSUNG_SMT_7020: dev->ts_gen_cntrl = 0x08; cx_set(MO_GP0_IO, 0x0101); cx_clear(MO_GP0_IO, 0x01); mdelay(100); cx_set(MO_GP0_IO, 0x01); mdelay(200); fe0->dvb.frontend = dvb_attach(stv0299_attach, &samsung_stv0299_config, &dev->core->i2c_adap); if (fe0->dvb.frontend) { fe0->dvb.frontend->ops.tuner_ops.set_params = samsung_smt_7020_tuner_set_params; fe0->dvb.frontend->tuner_priv = &dev->core->i2c_adap; fe0->dvb.frontend->ops.set_voltage = samsung_smt_7020_set_voltage; fe0->dvb.frontend->ops.set_tone = samsung_smt_7020_set_tone; } break; case CX88_BOARD_TWINHAN_VP1027_DVBS: dev->ts_gen_cntrl = 0x00; fe0->dvb.frontend = dvb_attach(mb86a16_attach, &twinhan_vp1027, &core->i2c_adap); if (fe0->dvb.frontend) { core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage; fe0->dvb.frontend->ops.set_voltage = vp1027_set_voltage; } break; default: printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n", core->name); break; } if ( (NULL == fe0->dvb.frontend) || (fe1 && NULL == fe1->dvb.frontend) ) { printk(KERN_ERR "%s/2: frontend initialization failed\n", core->name); goto frontend_detach; } /* define general-purpose callback pointer */ fe0->dvb.frontend->callback = cx88_tuner_callback; /* Ensure all frontends negotiate bus access */ fe0->dvb.frontend->ops.ts_bus_ctrl = cx88_dvb_bus_ctrl; if (fe1) fe1->dvb.frontend->ops.ts_bus_ctrl = cx88_dvb_bus_ctrl; /* Put the analog decoder in standby to keep it quiet */ call_all(core, core, s_power, 0); /* register everything */ return videobuf_dvb_register_bus(&dev->frontends, THIS_MODULE, dev, &dev->pci->dev, adapter_nr, mfe_shared, NULL); frontend_detach: core->gate_ctrl = NULL; videobuf_dvb_dealloc_frontends(&dev->frontends); return -EINVAL; } /* ----------------------------------------------------------- */ /* CX8802 MPEG -> mini driver - We have been given the hardware */ static int cx8802_dvb_advise_acquire(struct cx8802_driver *drv) { struct cx88_core *core = drv->core; int err = 0; 
dprintk( 1, "%s\n", __func__); switch (core->boardnr) { case CX88_BOARD_HAUPPAUGE_HVR1300: /* We arrive here with either the cx23416 or the cx22702 * on the bus. Take the bus from the cx23416 and enable the * cx22702 demod */ /* Toggle reset on cx22702 leaving i2c active */ cx_set(MO_GP0_IO, 0x00000080); udelay(1000); cx_clear(MO_GP0_IO, 0x00000080); udelay(50); cx_set(MO_GP0_IO, 0x00000080); udelay(1000); /* enable the cx22702 pins */ cx_clear(MO_GP0_IO, 0x00000004); udelay(1000); break; case CX88_BOARD_HAUPPAUGE_HVR3000: case CX88_BOARD_HAUPPAUGE_HVR4000: /* Toggle reset on cx22702 leaving i2c active */ cx_set(MO_GP0_IO, 0x00000080); udelay(1000); cx_clear(MO_GP0_IO, 0x00000080); udelay(50); cx_set(MO_GP0_IO, 0x00000080); udelay(1000); switch (core->dvbdev->frontends.active_fe_id) { case 1: /* DVB-S/S2 Enabled */ /* tri-state the cx22702 pins */ cx_set(MO_GP0_IO, 0x00000004); /* Take the cx24116/cx24123 out of reset */ cx_write(MO_SRST_IO, 1); core->dvbdev->ts_gen_cntrl = 0x02; /* Parallel IO */ break; case 2: /* DVB-T Enabled */ /* Put the cx24116/cx24123 into reset */ cx_write(MO_SRST_IO, 0); /* enable the cx22702 pins */ cx_clear(MO_GP0_IO, 0x00000004); core->dvbdev->ts_gen_cntrl = 0x0c; /* Serial IO */ break; } udelay(1000); break; default: err = -ENODEV; } return err; } /* CX8802 MPEG -> mini driver - We no longer have the hardware */ static int cx8802_dvb_advise_release(struct cx8802_driver *drv) { struct cx88_core *core = drv->core; int err = 0; dprintk( 1, "%s\n", __func__); switch (core->boardnr) { case CX88_BOARD_HAUPPAUGE_HVR1300: /* Do Nothing, leave the cx22702 on the bus. 
*/ break; case CX88_BOARD_HAUPPAUGE_HVR3000: case CX88_BOARD_HAUPPAUGE_HVR4000: break; default: err = -ENODEV; } return err; } static int cx8802_dvb_probe(struct cx8802_driver *drv) { struct cx88_core *core = drv->core; struct cx8802_dev *dev = drv->core->dvbdev; int err; struct videobuf_dvb_frontend *fe; int i; dprintk( 1, "%s\n", __func__); dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n", core->boardnr, core->name, core->pci_bus, core->pci_slot); err = -ENODEV; if (!(core->board.mpeg & CX88_MPEG_DVB)) goto fail_core; /* If vp3054 isn't enabled, a stub will just return 0 */ err = vp3054_i2c_probe(dev); if (0 != err) goto fail_core; /* dvb stuff */ printk(KERN_INFO "%s/2: cx2388x based DVB/ATSC card\n", core->name); dev->ts_gen_cntrl = 0x0c; err = cx8802_alloc_frontends(dev); if (err) goto fail_core; err = -ENODEV; for (i = 1; i <= core->board.num_frontends; i++) { fe = videobuf_dvb_get_frontend(&core->dvbdev->frontends, i); if (fe == NULL) { printk(KERN_ERR "%s() failed to get frontend(%d)\n", __func__, i); goto fail_probe; } videobuf_queue_sg_init(&fe->dvb.dvbq, &dvb_qops, &dev->pci->dev, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_TOP, sizeof(struct cx88_buffer), dev, NULL); /* init struct videobuf_dvb */ fe->dvb.name = dev->core->name; } err = dvb_register(dev); if (err) /* frontends/adapter de-allocated in dvb_register */ printk(KERN_ERR "%s/2: dvb_register failed (err = %d)\n", core->name, err); return err; fail_probe: videobuf_dvb_dealloc_frontends(&core->dvbdev->frontends); fail_core: return err; } static int cx8802_dvb_remove(struct cx8802_driver *drv) { struct cx88_core *core = drv->core; struct cx8802_dev *dev = drv->core->dvbdev; dprintk( 1, "%s\n", __func__); videobuf_dvb_unregister_bus(&dev->frontends); vp3054_i2c_remove(dev); core->gate_ctrl = NULL; return 0; } static struct cx8802_driver cx8802_dvb_driver = { .type_id = CX88_MPEG_DVB, .hw_access = CX8802_DRVCTL_SHARED, .probe = cx8802_dvb_probe, .remove = 
cx8802_dvb_remove, .advise_acquire = cx8802_dvb_advise_acquire, .advise_release = cx8802_dvb_advise_release, }; static int __init dvb_init(void) { printk(KERN_INFO "cx88/2: cx2388x dvb driver version %d.%d.%d loaded\n", (CX88_VERSION_CODE >> 16) & 0xff, (CX88_VERSION_CODE >> 8) & 0xff, CX88_VERSION_CODE & 0xff); #ifdef SNAPSHOT printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n", SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100); #endif return cx8802_register_driver(&cx8802_dvb_driver); } static void __exit dvb_fini(void) { cx8802_unregister_driver(&cx8802_dvb_driver); } module_init(dvb_init); module_exit(dvb_fini); /* * Local variables: * c-basic-offset: 8 * compile-command: "make DVB=1" * End: */
gpl-2.0
libcg/android_kernel_samsung_smdk4412_hwc
drivers/infiniband/core/umem.c
3030
7980
/* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/sched.h> #include <linux/hugetlb.h> #include <linux/dma-attrs.h> #include <linux/slab.h> #include "uverbs.h" #define IB_UMEM_MAX_PAGE_CHUNK \ ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \ ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] - \ (void *) &((struct ib_umem_chunk *) 0)->page_list[0])) static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) { struct ib_umem_chunk *chunk, *tmp; int i; list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) { ib_dma_unmap_sg(dev, chunk->page_list, chunk->nents, DMA_BIDIRECTIONAL); for (i = 0; i < chunk->nents; ++i) { struct page *page = sg_page(&chunk->page_list[i]); if (umem->writable && dirty) set_page_dirty_lock(page); put_page(page); } kfree(chunk); } } /** * ib_umem_get - Pin and DMA map userspace memory. * @context: userspace context to pin memory for * @addr: userspace virtual address to start at * @size: length of region to pin * @access: IB_ACCESS_xxx flags for memory being pinned * @dmasync: flush in-flight DMA when the memory region is written */ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, size_t size, int access, int dmasync) { struct ib_umem *umem; struct page **page_list; struct vm_area_struct **vma_list; struct ib_umem_chunk *chunk; unsigned long locked; unsigned long lock_limit; unsigned long cur_base; unsigned long npages; int ret; int off; int i; DEFINE_DMA_ATTRS(attrs); if (dmasync) dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); if (!can_do_mlock()) return ERR_PTR(-EPERM); umem = kmalloc(sizeof *umem, GFP_KERNEL); if (!umem) return ERR_PTR(-ENOMEM); umem->context = context; umem->length = size; umem->offset = addr & ~PAGE_MASK; umem->page_size = PAGE_SIZE; /* * We ask for writable memory if any access flags other than * "remote read" are set. "Local write" and "remote write" * obviously require write access. 
"Remote atomic" can do * things like fetch and add, which will modify memory, and * "MW bind" can change permissions by binding a window. */ umem->writable = !!(access & ~IB_ACCESS_REMOTE_READ); /* We assume the memory is from hugetlb until proved otherwise */ umem->hugetlb = 1; INIT_LIST_HEAD(&umem->chunk_list); page_list = (struct page **) __get_free_page(GFP_KERNEL); if (!page_list) { kfree(umem); return ERR_PTR(-ENOMEM); } /* * if we can't alloc the vma_list, it's not so bad; * just assume the memory is not hugetlb memory */ vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL); if (!vma_list) umem->hugetlb = 0; npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT; down_write(&current->mm->mmap_sem); locked = npages + current->mm->locked_vm; lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { ret = -ENOMEM; goto out; } cur_base = addr & PAGE_MASK; ret = 0; while (npages) { ret = get_user_pages(current, current->mm, cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof (struct page *)), 1, !umem->writable, page_list, vma_list); if (ret < 0) goto out; cur_base += ret * PAGE_SIZE; npages -= ret; off = 0; while (ret) { chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) * min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK), GFP_KERNEL); if (!chunk) { ret = -ENOMEM; goto out; } chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK); sg_init_table(chunk->page_list, chunk->nents); for (i = 0; i < chunk->nents; ++i) { if (vma_list && !is_vm_hugetlb_page(vma_list[i + off])) umem->hugetlb = 0; sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0); } chunk->nmap = ib_dma_map_sg_attrs(context->device, &chunk->page_list[0], chunk->nents, DMA_BIDIRECTIONAL, &attrs); if (chunk->nmap <= 0) { for (i = 0; i < chunk->nents; ++i) put_page(sg_page(&chunk->page_list[i])); kfree(chunk); ret = -ENOMEM; goto out; } ret -= chunk->nents; off += chunk->nents; list_add_tail(&chunk->list, &umem->chunk_list); } 
ret = 0; } out: if (ret < 0) { __ib_umem_release(context->device, umem, 0); kfree(umem); } else current->mm->locked_vm = locked; up_write(&current->mm->mmap_sem); if (vma_list) free_page((unsigned long) vma_list); free_page((unsigned long) page_list); return ret < 0 ? ERR_PTR(ret) : umem; } EXPORT_SYMBOL(ib_umem_get); static void ib_umem_account(struct work_struct *work) { struct ib_umem *umem = container_of(work, struct ib_umem, work); down_write(&umem->mm->mmap_sem); umem->mm->locked_vm -= umem->diff; up_write(&umem->mm->mmap_sem); mmput(umem->mm); kfree(umem); } /** * ib_umem_release - release memory pinned with ib_umem_get * @umem: umem struct to release */ void ib_umem_release(struct ib_umem *umem) { struct ib_ucontext *context = umem->context; struct mm_struct *mm; unsigned long diff; __ib_umem_release(umem->context->device, umem, 1); mm = get_task_mm(current); if (!mm) { kfree(umem); return; } diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; /* * We may be called with the mm's mmap_sem already held. This * can happen when a userspace munmap() is the call that drops * the last reference to our file and calls our release * method. If there are memory regions to destroy, we'll end * up here and not be able to take the mmap_sem. In that case * we defer the vm_locked accounting to the system workqueue. */ if (context->closing) { if (!down_write_trylock(&mm->mmap_sem)) { INIT_WORK(&umem->work, ib_umem_account); umem->mm = mm; umem->diff = diff; queue_work(ib_wq, &umem->work); return; } } else down_write(&mm->mmap_sem); current->mm->locked_vm -= diff; up_write(&mm->mmap_sem); mmput(mm); kfree(umem); } EXPORT_SYMBOL(ib_umem_release); int ib_umem_page_count(struct ib_umem *umem) { struct ib_umem_chunk *chunk; int shift; int i; int n; shift = ilog2(umem->page_size); n = 0; list_for_each_entry(chunk, &umem->chunk_list, list) for (i = 0; i < chunk->nmap; ++i) n += sg_dma_len(&chunk->page_list[i]) >> shift; return n; } EXPORT_SYMBOL(ib_umem_page_count);
gpl-2.0
mqmaker/linux
arch/mn10300/kernel/irq.c
3542
9018
/* MN10300 Arch-specific interrupt handling * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/seq_file.h> #include <linux/cpumask.h> #include <asm/setup.h> #include <asm/serial-regs.h> unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = { [0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7 }; EXPORT_SYMBOL(__mn10300_irq_enabled_epsw); #ifdef CONFIG_SMP static char irq_affinity_online[NR_IRQS] = { [0 ... NR_IRQS - 1] = 0 }; #define NR_IRQ_WORDS ((NR_IRQS + 31) / 32) static unsigned long irq_affinity_request[NR_IRQ_WORDS] = { [0 ... NR_IRQ_WORDS - 1] = 0 }; #endif /* CONFIG_SMP */ atomic_t irq_err_count; /* * MN10300 interrupt controller operations */ static void mn10300_cpupic_ack(struct irq_data *d) { unsigned int irq = d->irq; unsigned long flags; u16 tmp; flags = arch_local_cli_save(); GxICR_u8(irq) = GxICR_DETECT; tmp = GxICR(irq); arch_local_irq_restore(flags); } static void __mask_and_set_icr(unsigned int irq, unsigned int mask, unsigned int set) { unsigned long flags; u16 tmp; flags = arch_local_cli_save(); tmp = GxICR(irq); GxICR(irq) = (tmp & mask) | set; tmp = GxICR(irq); arch_local_irq_restore(flags); } static void mn10300_cpupic_mask(struct irq_data *d) { __mask_and_set_icr(d->irq, GxICR_LEVEL, 0); } static void mn10300_cpupic_mask_ack(struct irq_data *d) { unsigned int irq = d->irq; #ifdef CONFIG_SMP unsigned long flags; u16 tmp; flags = arch_local_cli_save(); if (!test_and_clear_bit(irq, irq_affinity_request)) { tmp = GxICR(irq); GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT; tmp = GxICR(irq); } else { u16 tmp2; tmp = GxICR(irq); 
GxICR(irq) = (tmp & GxICR_LEVEL); tmp2 = GxICR(irq); irq_affinity_online[irq] = cpumask_any_and(d->affinity, cpu_online_mask); CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT; tmp = CROSS_GxICR(irq, irq_affinity_online[irq]); } arch_local_irq_restore(flags); #else /* CONFIG_SMP */ __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT); #endif /* CONFIG_SMP */ } static void mn10300_cpupic_unmask(struct irq_data *d) { __mask_and_set_icr(d->irq, GxICR_LEVEL, GxICR_ENABLE); } static void mn10300_cpupic_unmask_clear(struct irq_data *d) { unsigned int irq = d->irq; /* the MN10300 PIC latches its interrupt request bit, even after the * device has ceased to assert its interrupt line and the interrupt * channel has been disabled in the PIC, so for level-triggered * interrupts we need to clear the request bit when we re-enable */ #ifdef CONFIG_SMP unsigned long flags; u16 tmp; flags = arch_local_cli_save(); if (!test_and_clear_bit(irq, irq_affinity_request)) { tmp = GxICR(irq); GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; tmp = GxICR(irq); } else { tmp = GxICR(irq); irq_affinity_online[irq] = cpumask_any_and(d->affinity, cpu_online_mask); CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; tmp = CROSS_GxICR(irq, irq_affinity_online[irq]); } arch_local_irq_restore(flags); #else /* CONFIG_SMP */ __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT); #endif /* CONFIG_SMP */ } #ifdef CONFIG_SMP static int mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask, bool force) { unsigned long flags; flags = arch_local_cli_save(); set_bit(d->irq, irq_affinity_request); arch_local_irq_restore(flags); return 0; } #endif /* CONFIG_SMP */ /* * MN10300 PIC level-triggered IRQ handling. * * The PIC has no 'ACK' function per se. 
It is possible to clear individual * channel latches, but each latch relatches whether or not the channel is * masked, so we need to clear the latch when we unmask the channel. * * Also for this reason, we don't supply an ack() op (it's unused anyway if * mask_ack() is provided), and mask_ack() just masks. */ static struct irq_chip mn10300_cpu_pic_level = { .name = "cpu_l", .irq_disable = mn10300_cpupic_mask, .irq_enable = mn10300_cpupic_unmask_clear, .irq_ack = NULL, .irq_mask = mn10300_cpupic_mask, .irq_mask_ack = mn10300_cpupic_mask, .irq_unmask = mn10300_cpupic_unmask_clear, #ifdef CONFIG_SMP .irq_set_affinity = mn10300_cpupic_setaffinity, #endif }; /* * MN10300 PIC edge-triggered IRQ handling. * * We use the latch clearing function of the PIC as the 'ACK' function. */ static struct irq_chip mn10300_cpu_pic_edge = { .name = "cpu_e", .irq_disable = mn10300_cpupic_mask, .irq_enable = mn10300_cpupic_unmask, .irq_ack = mn10300_cpupic_ack, .irq_mask = mn10300_cpupic_mask, .irq_mask_ack = mn10300_cpupic_mask_ack, .irq_unmask = mn10300_cpupic_unmask, #ifdef CONFIG_SMP .irq_set_affinity = mn10300_cpupic_setaffinity, #endif }; /* * 'what should we do if we get a hw irq event on an illegal vector'. * each architecture has to answer this themselves. */ void ack_bad_irq(int irq) { printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq); } /* * change the level at which an IRQ executes * - must not be called whilst interrupts are being processed! 
*/ void set_intr_level(int irq, u16 level) { BUG_ON(in_interrupt()); __mask_and_set_icr(irq, GxICR_ENABLE, level); } /* * mark an interrupt to be ACK'd after interrupt handlers have been run rather * than before */ void mn10300_set_lateack_irq_type(int irq) { irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level, handle_level_irq); } /* * initialise the interrupt system */ void __init init_IRQ(void) { int irq; for (irq = 0; irq < NR_IRQS; irq++) if (irq_get_chip(irq) == &no_irq_chip) /* due to the PIC latching interrupt requests, even * when the IRQ is disabled, IRQ_PENDING is superfluous * and we can use handle_level_irq() for edge-triggered * interrupts */ irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge, handle_level_irq); unit_init_IRQ(); } /* * handle normal device IRQs */ asmlinkage void do_IRQ(void) { unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw; unsigned int cpu_id = smp_processor_id(); int irq; sp = current_stack_pointer(); BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN); /* make sure local_irq_enable() doesn't muck up the interrupt priority * setting in EPSW */ old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id]; local_save_flags(epsw); __mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw); irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL; #ifdef CONFIG_MN10300_WD_TIMER __IRQ_STAT(cpu_id, __irq_count)++; #endif irq_enter(); for (;;) { /* ask the interrupt controller for the next IRQ to process * - the result we get depends on EPSW.IM */ irq = IAGR & IAGR_GN; if (!irq) break; local_irq_restore(irq_disabled_epsw); generic_handle_irq(irq >> 2); /* restore IRQ controls for IAGR access */ local_irq_restore(epsw); } __mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw; irq_exit(); } /* * Display interrupt management information through /proc/interrupts */ int arch_show_interrupts(struct seq_file *p, int prec) { #ifdef CONFIG_MN10300_WD_TIMER int j; seq_printf(p, "%*s: ", prec, "NMI"); for (j = 0; j < NR_CPUS; j++) if 
(cpu_online(j)) seq_printf(p, "%10u ", nmi_count(j)); seq_putc(p, '\n'); #endif seq_printf(p, "%*s: ", prec, "ERR"); seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); return 0; } #ifdef CONFIG_HOTPLUG_CPU void migrate_irqs(void) { int irq; unsigned int self, new; unsigned long flags; self = smp_processor_id(); for (irq = 0; irq < NR_IRQS; irq++) { struct irq_data *data = irq_get_irq_data(irq); if (irqd_is_per_cpu(data)) continue; if (cpumask_test_cpu(self, &data->affinity) && !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) { int cpu_id; cpu_id = cpumask_first(cpu_online_mask); cpumask_set_cpu(cpu_id, &data->affinity); } /* We need to operate irq_affinity_online atomically. */ arch_local_cli_save(flags); if (irq_affinity_online[irq] == self) { u16 x, tmp; x = GxICR(irq); GxICR(irq) = x & GxICR_LEVEL; tmp = GxICR(irq); new = cpumask_any_and(&data->affinity, cpu_online_mask); irq_affinity_online[irq] = new; CROSS_GxICR(irq, new) = (x & GxICR_LEVEL) | GxICR_DETECT; tmp = CROSS_GxICR(irq, new); x &= GxICR_LEVEL | GxICR_ENABLE; if (GxICR(irq) & GxICR_REQUEST) x |= GxICR_REQUEST | GxICR_DETECT; CROSS_GxICR(irq, new) = x; tmp = CROSS_GxICR(irq, new); } arch_local_irq_restore(flags); } } #endif /* CONFIG_HOTPLUG_CPU */
gpl-2.0
schlund/2.6.35-marvellous-kernel
drivers/char/hvc_iseries.c
4054
15186
/* * iSeries vio driver interface to hvc_console.c * * This code is based heavily on hvc_vio.c and viocons.c * * Copyright (C) 2006 Stephen Rothwell, IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <stdarg.h> #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/console.h> #include <asm/hvconsole.h> #include <asm/vio.h> #include <asm/prom.h> #include <asm/firmware.h> #include <asm/iseries/vio.h> #include <asm/iseries/hv_call.h> #include <asm/iseries/hv_lp_config.h> #include <asm/iseries/hv_lp_event.h> #include "hvc_console.h" #define VTTY_PORTS 10 static DEFINE_SPINLOCK(consolelock); static DEFINE_SPINLOCK(consoleloglock); static const char hvc_driver_name[] = "hvc_console"; #define IN_BUF_SIZE 200 /* * Our port information. */ static struct port_info { HvLpIndex lp; u64 seq; /* sequence number of last HV send */ u64 ack; /* last ack from HV */ struct hvc_struct *hp; int in_start; int in_end; unsigned char in_buf[IN_BUF_SIZE]; } port_info[VTTY_PORTS] = { [ 0 ... 
VTTY_PORTS - 1 ] = { .lp = HvLpIndexInvalid } }; #define viochar_is_console(pi) ((pi) == &port_info[0]) static struct vio_device_id hvc_driver_table[] __devinitdata = { {"serial", "IBM,iSeries-vty"}, { "", "" } }; MODULE_DEVICE_TABLE(vio, hvc_driver_table); static void hvlog(char *fmt, ...) { int i; unsigned long flags; va_list args; static char buf[256]; spin_lock_irqsave(&consoleloglock, flags); va_start(args, fmt); i = vscnprintf(buf, sizeof(buf) - 1, fmt, args); va_end(args); buf[i++] = '\r'; HvCall_writeLogBuffer(buf, i); spin_unlock_irqrestore(&consoleloglock, flags); } /* * Initialize the common fields in a charLpEvent */ static void init_data_event(struct viocharlpevent *viochar, HvLpIndex lp) { struct HvLpEvent *hev = &viochar->event; memset(viochar, 0, sizeof(struct viocharlpevent)); hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DEFERRED_ACK | HV_LP_EVENT_INT; hev->xType = HvLpEvent_Type_VirtualIo; hev->xSubtype = viomajorsubtype_chario | viochardata; hev->xSourceLp = HvLpConfig_getLpIndex(); hev->xTargetLp = lp; hev->xSizeMinus1 = sizeof(struct viocharlpevent); hev->xSourceInstanceId = viopath_sourceinst(lp); hev->xTargetInstanceId = viopath_targetinst(lp); } static int get_chars(uint32_t vtermno, char *buf, int count) { struct port_info *pi; int n = 0; unsigned long flags; if (vtermno >= VTTY_PORTS) return -EINVAL; if (count == 0) return 0; pi = &port_info[vtermno]; spin_lock_irqsave(&consolelock, flags); if (pi->in_end == 0) goto done; n = pi->in_end - pi->in_start; if (n > count) n = count; memcpy(buf, &pi->in_buf[pi->in_start], n); pi->in_start += n; if (pi->in_start == pi->in_end) { pi->in_start = 0; pi->in_end = 0; } done: spin_unlock_irqrestore(&consolelock, flags); return n; } static int put_chars(uint32_t vtermno, const char *buf, int count) { struct viocharlpevent *viochar; struct port_info *pi; HvLpEvent_Rc hvrc; unsigned long flags; int sent = 0; if (vtermno >= VTTY_PORTS) return -EINVAL; pi = &port_info[vtermno]; 
spin_lock_irqsave(&consolelock, flags); if (viochar_is_console(pi) && !viopath_isactive(pi->lp)) { HvCall_writeLogBuffer(buf, count); sent = count; goto done; } viochar = vio_get_event_buffer(viomajorsubtype_chario); if (viochar == NULL) { hvlog("\n\rviocons: Can't get viochar buffer."); goto done; } while ((count > 0) && ((pi->seq - pi->ack) < VIOCHAR_WINDOW)) { int len; len = (count > VIOCHAR_MAX_DATA) ? VIOCHAR_MAX_DATA : count; if (viochar_is_console(pi)) HvCall_writeLogBuffer(buf, len); init_data_event(viochar, pi->lp); viochar->len = len; viochar->event.xCorrelationToken = pi->seq++; viochar->event.xSizeMinus1 = offsetof(struct viocharlpevent, data) + len; memcpy(viochar->data, buf, len); hvrc = HvCallEvent_signalLpEvent(&viochar->event); if (hvrc) hvlog("\n\rerror sending event! return code %d\n\r", (int)hvrc); sent += len; count -= len; buf += len; } vio_free_event_buffer(viomajorsubtype_chario, viochar); done: spin_unlock_irqrestore(&consolelock, flags); return sent; } static const struct hv_ops hvc_get_put_ops = { .get_chars = get_chars, .put_chars = put_chars, .notifier_add = notifier_add_irq, .notifier_del = notifier_del_irq, .notifier_hangup = notifier_hangup_irq, }; static int __devinit hvc_vio_probe(struct vio_dev *vdev, const struct vio_device_id *id) { struct hvc_struct *hp; struct port_info *pi; /* probed with invalid parameters. 
*/ if (!vdev || !id) return -EPERM; if (vdev->unit_address >= VTTY_PORTS) return -ENODEV; pi = &port_info[vdev->unit_address]; hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops, VIOCHAR_MAX_DATA); if (IS_ERR(hp)) return PTR_ERR(hp); pi->hp = hp; dev_set_drvdata(&vdev->dev, pi); return 0; } static int __devexit hvc_vio_remove(struct vio_dev *vdev) { struct port_info *pi = dev_get_drvdata(&vdev->dev); struct hvc_struct *hp = pi->hp; return hvc_remove(hp); } static struct vio_driver hvc_vio_driver = { .id_table = hvc_driver_table, .probe = hvc_vio_probe, .remove = __devexit_p(hvc_vio_remove), .driver = { .name = hvc_driver_name, .owner = THIS_MODULE, } }; static void hvc_open_event(struct HvLpEvent *event) { unsigned long flags; struct viocharlpevent *cevent = (struct viocharlpevent *)event; u8 port = cevent->virtual_device; struct port_info *pi; int reject = 0; if (hvlpevent_is_ack(event)) { if (port >= VTTY_PORTS) return; spin_lock_irqsave(&consolelock, flags); pi = &port_info[port]; if (event->xRc == HvLpEvent_Rc_Good) { pi->seq = pi->ack = 0; /* * This line allows connections from the primary * partition but once one is connected from the * primary partition nothing short of a reboot * of linux will allow access from the hosting * partition again without a required iSeries fix. 
*/ pi->lp = event->xTargetLp; } spin_unlock_irqrestore(&consolelock, flags); if (event->xRc != HvLpEvent_Rc_Good) printk(KERN_WARNING "hvc: handle_open_event: event->xRc == (%d).\n", event->xRc); if (event->xCorrelationToken != 0) { atomic_t *aptr= (atomic_t *)event->xCorrelationToken; atomic_set(aptr, 1); } else printk(KERN_WARNING "hvc: weird...got open ack without atomic\n"); return; } /* This had better require an ack, otherwise complain */ if (!hvlpevent_need_ack(event)) { printk(KERN_WARNING "hvc: viocharopen without ack bit!\n"); return; } spin_lock_irqsave(&consolelock, flags); /* Make sure this is a good virtual tty */ if (port >= VTTY_PORTS) { event->xRc = HvLpEvent_Rc_SubtypeError; cevent->subtype_result_code = viorc_openRejected; /* * Flag state here since we can't printk while holding * the consolelock spinlock. */ reject = 1; } else { pi = &port_info[port]; if ((pi->lp != HvLpIndexInvalid) && (pi->lp != event->xSourceLp)) { /* * If this is tty is already connected to a different * partition, fail. */ event->xRc = HvLpEvent_Rc_SubtypeError; cevent->subtype_result_code = viorc_openRejected; reject = 2; } else { pi->lp = event->xSourceLp; event->xRc = HvLpEvent_Rc_Good; cevent->subtype_result_code = viorc_good; pi->seq = pi->ack = 0; } } spin_unlock_irqrestore(&consolelock, flags); if (reject == 1) printk(KERN_WARNING "hvc: open rejected: bad virtual tty.\n"); else if (reject == 2) printk(KERN_WARNING "hvc: open rejected: console in exclusive " "use by another partition.\n"); /* Return the acknowledgement */ HvCallEvent_ackLpEvent(event); } /* * Handle a close charLpEvent. This should ONLY be an Interrupt because the * virtual console should never actually issue a close event to the hypervisor * because the virtual console never goes away. A close event coming from the * hypervisor simply means that there are no client consoles connected to the * virtual console. 
*/ static void hvc_close_event(struct HvLpEvent *event) { unsigned long flags; struct viocharlpevent *cevent = (struct viocharlpevent *)event; u8 port = cevent->virtual_device; if (!hvlpevent_is_int(event)) { printk(KERN_WARNING "hvc: got unexpected close acknowledgement\n"); return; } if (port >= VTTY_PORTS) { printk(KERN_WARNING "hvc: close message from invalid virtual device.\n"); return; } /* For closes, just mark the console partition invalid */ spin_lock_irqsave(&consolelock, flags); if (port_info[port].lp == event->xSourceLp) port_info[port].lp = HvLpIndexInvalid; spin_unlock_irqrestore(&consolelock, flags); } static void hvc_data_event(struct HvLpEvent *event) { unsigned long flags; struct viocharlpevent *cevent = (struct viocharlpevent *)event; struct port_info *pi; int n; u8 port = cevent->virtual_device; if (port >= VTTY_PORTS) { printk(KERN_WARNING "hvc: data on invalid virtual device %d\n", port); return; } if (cevent->len == 0) return; /* * Change 05/01/2003 - Ryan Arnold: If a partition other than * the current exclusive partition tries to send us data * events then just drop them on the floor because we don't * want his stinking data. He isn't authorized to receive * data because he wasn't the first one to get the console, * therefore he shouldn't be allowed to send data either. * This will work without an iSeries fix. 
*/ pi = &port_info[port]; if (pi->lp != event->xSourceLp) return; spin_lock_irqsave(&consolelock, flags); n = IN_BUF_SIZE - pi->in_end; if (n > cevent->len) n = cevent->len; if (n > 0) { memcpy(&pi->in_buf[pi->in_end], cevent->data, n); pi->in_end += n; } spin_unlock_irqrestore(&consolelock, flags); if (n == 0) printk(KERN_WARNING "hvc: input buffer overflow\n"); } static void hvc_ack_event(struct HvLpEvent *event) { struct viocharlpevent *cevent = (struct viocharlpevent *)event; unsigned long flags; u8 port = cevent->virtual_device; if (port >= VTTY_PORTS) { printk(KERN_WARNING "hvc: data on invalid virtual device\n"); return; } spin_lock_irqsave(&consolelock, flags); port_info[port].ack = event->xCorrelationToken; spin_unlock_irqrestore(&consolelock, flags); } static void hvc_config_event(struct HvLpEvent *event) { struct viocharlpevent *cevent = (struct viocharlpevent *)event; if (cevent->data[0] == 0x01) printk(KERN_INFO "hvc: window resized to %d: %d: %d: %d\n", cevent->data[1], cevent->data[2], cevent->data[3], cevent->data[4]); else printk(KERN_WARNING "hvc: unknown config event\n"); } static void hvc_handle_event(struct HvLpEvent *event) { int charminor; if (event == NULL) return; charminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK; switch (charminor) { case viocharopen: hvc_open_event(event); break; case viocharclose: hvc_close_event(event); break; case viochardata: hvc_data_event(event); break; case viocharack: hvc_ack_event(event); break; case viocharconfig: hvc_config_event(event); break; default: if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) { event->xRc = HvLpEvent_Rc_InvalidSubtype; HvCallEvent_ackLpEvent(event); } } } static int __init send_open(HvLpIndex remoteLp, void *sem) { return HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo, viomajorsubtype_chario | viocharopen, HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, viopath_sourceinst(remoteLp), viopath_targetinst(remoteLp), (u64)(unsigned long)sem, VIOVERSION 
<< 16, 0, 0, 0, 0); } static int __init hvc_vio_init(void) { atomic_t wait_flag; int rc; if (!firmware_has_feature(FW_FEATURE_ISERIES)) return -EIO; /* +2 for fudge */ rc = viopath_open(HvLpConfig_getPrimaryLpIndex(), viomajorsubtype_chario, VIOCHAR_WINDOW + 2); if (rc) printk(KERN_WARNING "hvc: error opening to primary %d\n", rc); if (viopath_hostLp == HvLpIndexInvalid) vio_set_hostlp(); /* * And if the primary is not the same as the hosting LP, open to the * hosting lp */ if ((viopath_hostLp != HvLpIndexInvalid) && (viopath_hostLp != HvLpConfig_getPrimaryLpIndex())) { printk(KERN_INFO "hvc: open path to hosting (%d)\n", viopath_hostLp); rc = viopath_open(viopath_hostLp, viomajorsubtype_chario, VIOCHAR_WINDOW + 2); /* +2 for fudge */ if (rc) printk(KERN_WARNING "error opening to partition %d: %d\n", viopath_hostLp, rc); } if (vio_setHandler(viomajorsubtype_chario, hvc_handle_event) < 0) printk(KERN_WARNING "hvc: error seting handler for console events!\n"); /* * First, try to open the console to the hosting lp. * Wait on a semaphore for the response. 
*/ atomic_set(&wait_flag, 0); if ((viopath_isactive(viopath_hostLp)) && (send_open(viopath_hostLp, &wait_flag) == 0)) { printk(KERN_INFO "hvc: hosting partition %d\n", viopath_hostLp); while (atomic_read(&wait_flag) == 0) mb(); atomic_set(&wait_flag, 0); } /* * If we don't have an active console, try the primary */ if ((!viopath_isactive(port_info[0].lp)) && (viopath_isactive(HvLpConfig_getPrimaryLpIndex())) && (send_open(HvLpConfig_getPrimaryLpIndex(), &wait_flag) == 0)) { printk(KERN_INFO "hvc: opening console to primary partition\n"); while (atomic_read(&wait_flag) == 0) mb(); } /* Register as a vio device to receive callbacks */ rc = vio_register_driver(&hvc_vio_driver); return rc; } module_init(hvc_vio_init); /* after drivers/char/hvc_console.c */ static void __exit hvc_vio_exit(void) { vio_unregister_driver(&hvc_vio_driver); } module_exit(hvc_vio_exit); /* the device tree order defines our numbering */ static int __init hvc_find_vtys(void) { struct device_node *vty; int num_found = 0; for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL; vty = of_find_node_by_name(vty, "vty")) { const uint32_t *vtermno; /* We have statically defined space for only a certain number * of console adapters. */ if ((num_found >= MAX_NR_HVC_CONSOLES) || (num_found >= VTTY_PORTS)) { of_node_put(vty); break; } vtermno = of_get_property(vty, "reg", NULL); if (!vtermno) continue; if (!of_device_is_compatible(vty, "IBM,iSeries-vty")) continue; if (num_found == 0) add_preferred_console("hvc", 0, NULL); hvc_instantiate(*vtermno, num_found, &hvc_get_put_ops); ++num_found; } return num_found; } console_initcall(hvc_find_vtys);
gpl-2.0
ignacio28/android_kernel_lge_msm8610
net/openvswitch/datapath.c
4310
46582
/* * Copyright (c) 2007-2012 Nicira Networks. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/jhash.h> #include <linux/delay.h> #include <linux/time.h> #include <linux/etherdevice.h> #include <linux/genetlink.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/percpu.h> #include <linux/rcupdate.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/ethtool.h> #include <linux/wait.h> #include <asm/div64.h> #include <linux/highmem.h> #include <linux/netfilter_bridge.h> #include <linux/netfilter_ipv4.h> #include <linux/inetdevice.h> #include <linux/list.h> #include <linux/openvswitch.h> #include <linux/rculist.h> #include <linux/dmi.h> #include <linux/workqueue.h> #include <net/genetlink.h> #include "datapath.h" #include "flow.h" #include "vport-internal_dev.h" /** * DOC: Locking: * * Writes to device state (add/remove datapath, port, set operations on vports, * etc.) are protected by RTNL. * * Writes to other state (flow table modifications, set miscellaneous datapath * parameters, etc.) are protected by genl_mutex. The RTNL lock nests inside * genl_mutex. * * Reads are protected by RCU. 
* * There are a few special cases (mostly stats) that have their own * synchronization but they nest under all of above and don't interact with * each other. */ /* Global list of datapaths to enable dumping them all out. * Protected by genl_mutex. */ static LIST_HEAD(dps); #define REHASH_FLOW_INTERVAL (10 * 60 * HZ) static void rehash_flow_table(struct work_struct *work); static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table); static struct vport *new_vport(const struct vport_parms *); static int queue_gso_packets(int dp_ifindex, struct sk_buff *, const struct dp_upcall_info *); static int queue_userspace_packet(int dp_ifindex, struct sk_buff *, const struct dp_upcall_info *); /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */ static struct datapath *get_dp(int dp_ifindex) { struct datapath *dp = NULL; struct net_device *dev; rcu_read_lock(); dev = dev_get_by_index_rcu(&init_net, dp_ifindex); if (dev) { struct vport *vport = ovs_internal_dev_get_vport(dev); if (vport) dp = vport->dp; } rcu_read_unlock(); return dp; } /* Must be called with rcu_read_lock or RTNL lock. */ const char *ovs_dp_name(const struct datapath *dp) { struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]); return vport->ops->get_name(vport); } static int get_dpifindex(struct datapath *dp) { struct vport *local; int ifindex; rcu_read_lock(); local = rcu_dereference(dp->ports[OVSP_LOCAL]); if (local) ifindex = local->ops->get_ifindex(local); else ifindex = 0; rcu_read_unlock(); return ifindex; } static void destroy_dp_rcu(struct rcu_head *rcu) { struct datapath *dp = container_of(rcu, struct datapath, rcu); ovs_flow_tbl_destroy((__force struct flow_table *)dp->table); free_percpu(dp->stats_percpu); kfree(dp); } /* Called with RTNL lock and genl_lock. 
*/ static struct vport *new_vport(const struct vport_parms *parms) { struct vport *vport; vport = ovs_vport_add(parms); if (!IS_ERR(vport)) { struct datapath *dp = parms->dp; rcu_assign_pointer(dp->ports[parms->port_no], vport); list_add(&vport->node, &dp->port_list); } return vport; } /* Called with RTNL lock. */ void ovs_dp_detach_port(struct vport *p) { ASSERT_RTNL(); /* First drop references to device. */ list_del(&p->node); rcu_assign_pointer(p->dp->ports[p->port_no], NULL); /* Then destroy it. */ ovs_vport_del(p); } /* Must be called with rcu_read_lock. */ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) { struct datapath *dp = p->dp; struct sw_flow *flow; struct dp_stats_percpu *stats; struct sw_flow_key key; u64 *stats_counter; int error; int key_len; stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id()); /* Extract flow from 'skb' into 'key'. */ error = ovs_flow_extract(skb, p->port_no, &key, &key_len); if (unlikely(error)) { kfree_skb(skb); return; } /* Look up flow. */ flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len); if (unlikely(!flow)) { struct dp_upcall_info upcall; upcall.cmd = OVS_PACKET_CMD_MISS; upcall.key = &key; upcall.userdata = NULL; upcall.pid = p->upcall_pid; ovs_dp_upcall(dp, skb, &upcall); consume_skb(skb); stats_counter = &stats->n_missed; goto out; } OVS_CB(skb)->flow = flow; stats_counter = &stats->n_hit; ovs_flow_used(OVS_CB(skb)->flow, skb); ovs_execute_actions(dp, skb); out: /* Update datapath statistics. 
*/ u64_stats_update_begin(&stats->sync); (*stats_counter)++; u64_stats_update_end(&stats->sync); } static struct genl_family dp_packet_genl_family = { .id = GENL_ID_GENERATE, .hdrsize = sizeof(struct ovs_header), .name = OVS_PACKET_FAMILY, .version = OVS_PACKET_VERSION, .maxattr = OVS_PACKET_ATTR_MAX }; int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info) { struct dp_stats_percpu *stats; int dp_ifindex; int err; if (upcall_info->pid == 0) { err = -ENOTCONN; goto err; } dp_ifindex = get_dpifindex(dp); if (!dp_ifindex) { err = -ENODEV; goto err; } if (!skb_is_gso(skb)) err = queue_userspace_packet(dp_ifindex, skb, upcall_info); else err = queue_gso_packets(dp_ifindex, skb, upcall_info); if (err) goto err; return 0; err: stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id()); u64_stats_update_begin(&stats->sync); stats->n_lost++; u64_stats_update_end(&stats->sync); return err; } static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb, const struct dp_upcall_info *upcall_info) { struct dp_upcall_info later_info; struct sw_flow_key later_key; struct sk_buff *segs, *nskb; int err; segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM); if (IS_ERR(skb)) return PTR_ERR(skb); /* Queue all of the segments. */ skb = segs; do { err = queue_userspace_packet(dp_ifindex, skb, upcall_info); if (err) break; if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) { /* The initial flow key extracted by ovs_flow_extract() * in this case is for a first fragment, so we need to * properly mark later fragments. */ later_key = *upcall_info->key; later_key.ip.frag = OVS_FRAG_TYPE_LATER; later_info = *upcall_info; later_info.key = &later_key; upcall_info = &later_info; } } while ((skb = skb->next)); /* Free all of the segments. 
*/ skb = segs; do { nskb = skb->next; if (err) kfree_skb(skb); else consume_skb(skb); } while ((skb = nskb)); return err; } static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb, const struct dp_upcall_info *upcall_info) { struct ovs_header *upcall; struct sk_buff *nskb = NULL; struct sk_buff *user_skb; /* to be queued to userspace */ struct nlattr *nla; unsigned int len; int err; if (vlan_tx_tag_present(skb)) { nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return -ENOMEM; nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb)); if (!nskb) return -ENOMEM; nskb->vlan_tci = 0; skb = nskb; } if (nla_attr_size(skb->len) > USHRT_MAX) { err = -EFBIG; goto out; } len = sizeof(struct ovs_header); len += nla_total_size(skb->len); len += nla_total_size(FLOW_BUFSIZE); if (upcall_info->cmd == OVS_PACKET_CMD_ACTION) len += nla_total_size(8); user_skb = genlmsg_new(len, GFP_ATOMIC); if (!user_skb) { err = -ENOMEM; goto out; } upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd); upcall->dp_ifindex = dp_ifindex; nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); ovs_flow_to_nlattrs(upcall_info->key, user_skb); nla_nest_end(user_skb, nla); if (upcall_info->userdata) nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA, nla_get_u64(upcall_info->userdata)); nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len); skb_copy_and_csum_dev(skb, nla_data(nla)); err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid); out: kfree_skb(nskb); return err; } /* Called with genl_mutex. 
*/ static int flush_flows(int dp_ifindex) { struct flow_table *old_table; struct flow_table *new_table; struct datapath *dp; dp = get_dp(dp_ifindex); if (!dp) return -ENODEV; old_table = genl_dereference(dp->table); new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS); if (!new_table) return -ENOMEM; rcu_assign_pointer(dp->table, new_table); ovs_flow_tbl_deferred_destroy(old_table); return 0; } static int validate_actions(const struct nlattr *attr, const struct sw_flow_key *key, int depth); static int validate_sample(const struct nlattr *attr, const struct sw_flow_key *key, int depth) { const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; const struct nlattr *probability, *actions; const struct nlattr *a; int rem; memset(attrs, 0, sizeof(attrs)); nla_for_each_nested(a, attr, rem) { int type = nla_type(a); if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type]) return -EINVAL; attrs[type] = a; } if (rem) return -EINVAL; probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY]; if (!probability || nla_len(probability) != sizeof(u32)) return -EINVAL; actions = attrs[OVS_SAMPLE_ATTR_ACTIONS]; if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN)) return -EINVAL; return validate_actions(actions, key, depth + 1); } static int validate_tp_port(const struct sw_flow_key *flow_key) { if (flow_key->eth.type == htons(ETH_P_IP)) { if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst) return 0; } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst) return 0; } return -EINVAL; } static int validate_set(const struct nlattr *a, const struct sw_flow_key *flow_key) { const struct nlattr *ovs_key = nla_data(a); int key_type = nla_type(ovs_key); /* There can be only one key in a action */ if (nla_total_size(nla_len(ovs_key)) != nla_len(a)) return -EINVAL; if (key_type > OVS_KEY_ATTR_MAX || nla_len(ovs_key) != ovs_key_lens[key_type]) return -EINVAL; switch (key_type) { const struct ovs_key_ipv4 *ipv4_key; case OVS_KEY_ATTR_PRIORITY: case 
OVS_KEY_ATTR_ETHERNET: break; case OVS_KEY_ATTR_IPV4: if (flow_key->eth.type != htons(ETH_P_IP)) return -EINVAL; if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst) return -EINVAL; ipv4_key = nla_data(ovs_key); if (ipv4_key->ipv4_proto != flow_key->ip.proto) return -EINVAL; if (ipv4_key->ipv4_frag != flow_key->ip.frag) return -EINVAL; break; case OVS_KEY_ATTR_TCP: if (flow_key->ip.proto != IPPROTO_TCP) return -EINVAL; return validate_tp_port(flow_key); case OVS_KEY_ATTR_UDP: if (flow_key->ip.proto != IPPROTO_UDP) return -EINVAL; return validate_tp_port(flow_key); default: return -EINVAL; } return 0; } static int validate_userspace(const struct nlattr *attr) { static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 }, }; struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; int error; error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, attr, userspace_policy); if (error) return error; if (!a[OVS_USERSPACE_ATTR_PID] || !nla_get_u32(a[OVS_USERSPACE_ATTR_PID])) return -EINVAL; return 0; } static int validate_actions(const struct nlattr *attr, const struct sw_flow_key *key, int depth) { const struct nlattr *a; int rem, err; if (depth >= SAMPLE_ACTION_DEPTH) return -EOVERFLOW; nla_for_each_nested(a, attr, rem) { /* Expected argument lengths, (u32)-1 for variable length. 
*/ static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = { [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), [OVS_ACTION_ATTR_POP_VLAN] = 0, [OVS_ACTION_ATTR_SET] = (u32)-1, [OVS_ACTION_ATTR_SAMPLE] = (u32)-1 }; const struct ovs_action_push_vlan *vlan; int type = nla_type(a); if (type > OVS_ACTION_ATTR_MAX || (action_lens[type] != nla_len(a) && action_lens[type] != (u32)-1)) return -EINVAL; switch (type) { case OVS_ACTION_ATTR_UNSPEC: return -EINVAL; case OVS_ACTION_ATTR_USERSPACE: err = validate_userspace(a); if (err) return err; break; case OVS_ACTION_ATTR_OUTPUT: if (nla_get_u32(a) >= DP_MAX_PORTS) return -EINVAL; break; case OVS_ACTION_ATTR_POP_VLAN: break; case OVS_ACTION_ATTR_PUSH_VLAN: vlan = nla_data(a); if (vlan->vlan_tpid != htons(ETH_P_8021Q)) return -EINVAL; if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT))) return -EINVAL; break; case OVS_ACTION_ATTR_SET: err = validate_set(a, key); if (err) return err; break; case OVS_ACTION_ATTR_SAMPLE: err = validate_sample(a, key, depth); if (err) return err; break; default: return -EINVAL; } } if (rem > 0) return -EINVAL; return 0; } static void clear_stats(struct sw_flow *flow) { flow->used = 0; flow->tcp_flags = 0; flow->packet_count = 0; flow->byte_count = 0; } static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) { struct ovs_header *ovs_header = info->userhdr; struct nlattr **a = info->attrs; struct sw_flow_actions *acts; struct sk_buff *packet; struct sw_flow *flow; struct datapath *dp; struct ethhdr *eth; int len; int err; int key_len; err = -EINVAL; if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || !a[OVS_PACKET_ATTR_ACTIONS] || nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN) goto err; len = nla_len(a[OVS_PACKET_ATTR_PACKET]); packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL); err = -ENOMEM; if (!packet) goto err; skb_reserve(packet, NET_IP_ALIGN); 
memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len); skb_reset_mac_header(packet); eth = eth_hdr(packet); /* Normally, setting the skb 'protocol' field would be handled by a * call to eth_type_trans(), but it assumes there's a sending * device, which we may not have. */ if (ntohs(eth->h_proto) >= 1536) packet->protocol = eth->h_proto; else packet->protocol = htons(ETH_P_802_2); /* Build an sw_flow for sending this packet. */ flow = ovs_flow_alloc(); err = PTR_ERR(flow); if (IS_ERR(flow)) goto err_kfree_skb; err = ovs_flow_extract(packet, -1, &flow->key, &key_len); if (err) goto err_flow_free; err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority, &flow->key.phy.in_port, a[OVS_PACKET_ATTR_KEY]); if (err) goto err_flow_free; err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0); if (err) goto err_flow_free; flow->hash = ovs_flow_hash(&flow->key, key_len); acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]); err = PTR_ERR(acts); if (IS_ERR(acts)) goto err_flow_free; rcu_assign_pointer(flow->sf_acts, acts); OVS_CB(packet)->flow = flow; packet->priority = flow->key.phy.priority; rcu_read_lock(); dp = get_dp(ovs_header->dp_ifindex); err = -ENODEV; if (!dp) goto err_unlock; local_bh_disable(); err = ovs_execute_actions(dp, packet); local_bh_enable(); rcu_read_unlock(); ovs_flow_free(flow); return err; err_unlock: rcu_read_unlock(); err_flow_free: ovs_flow_free(flow); err_kfree_skb: kfree_skb(packet); err: return err; } static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = { [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC }, [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, }; static struct genl_ops dp_packet_genl_ops[] = { { .cmd = OVS_PACKET_CMD_EXECUTE, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. 
*/ .policy = packet_policy, .doit = ovs_packet_cmd_execute } }; static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats) { int i; struct flow_table *table = genl_dereference(dp->table); stats->n_flows = ovs_flow_tbl_count(table); stats->n_hit = stats->n_missed = stats->n_lost = 0; for_each_possible_cpu(i) { const struct dp_stats_percpu *percpu_stats; struct dp_stats_percpu local_stats; unsigned int start; percpu_stats = per_cpu_ptr(dp->stats_percpu, i); do { start = u64_stats_fetch_begin_bh(&percpu_stats->sync); local_stats = *percpu_stats; } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start)); stats->n_hit += local_stats.n_hit; stats->n_missed += local_stats.n_missed; stats->n_lost += local_stats.n_lost; } } static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = { [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED }, [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED }, [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG }, }; static struct genl_family dp_flow_genl_family = { .id = GENL_ID_GENERATE, .hdrsize = sizeof(struct ovs_header), .name = OVS_FLOW_FAMILY, .version = OVS_FLOW_VERSION, .maxattr = OVS_FLOW_ATTR_MAX }; static struct genl_multicast_group ovs_dp_flow_multicast_group = { .name = OVS_FLOW_MCGROUP }; /* Called with genl_lock. 
*/ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd) { const int skb_orig_len = skb->len; const struct sw_flow_actions *sf_acts; struct ovs_flow_stats stats; struct ovs_header *ovs_header; struct nlattr *nla; unsigned long used; u8 tcp_flags; int err; sf_acts = rcu_dereference_protected(flow->sf_acts, lockdep_genl_is_held()); ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd); if (!ovs_header) return -EMSGSIZE; ovs_header->dp_ifindex = get_dpifindex(dp); nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY); if (!nla) goto nla_put_failure; err = ovs_flow_to_nlattrs(&flow->key, skb); if (err) goto error; nla_nest_end(skb, nla); spin_lock_bh(&flow->lock); used = flow->used; stats.n_packets = flow->packet_count; stats.n_bytes = flow->byte_count; tcp_flags = flow->tcp_flags; spin_unlock_bh(&flow->lock); if (used) NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)); if (stats.n_packets) NLA_PUT(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats); if (tcp_flags) NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags); /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if * this is the first flow to be dumped into 'skb'. This is unusual for * Netlink but individual action lists can be longer than * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this. * The userspace caller can always fetch the actions separately if it * really wants them. (Most userspace callers in fact don't care.) * * This can only fail for dump operations because the skb is always * properly sized for single flows. 
*/ err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len, sf_acts->actions); if (err < 0 && skb_orig_len) goto error; return genlmsg_end(skb, ovs_header); nla_put_failure: err = -EMSGSIZE; error: genlmsg_cancel(skb, ovs_header); return err; } static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow) { const struct sw_flow_actions *sf_acts; int len; sf_acts = rcu_dereference_protected(flow->sf_acts, lockdep_genl_is_held()); /* OVS_FLOW_ATTR_KEY */ len = nla_total_size(FLOW_BUFSIZE); /* OVS_FLOW_ATTR_ACTIONS */ len += nla_total_size(sf_acts->actions_len); /* OVS_FLOW_ATTR_STATS */ len += nla_total_size(sizeof(struct ovs_flow_stats)); /* OVS_FLOW_ATTR_TCP_FLAGS */ len += nla_total_size(1); /* OVS_FLOW_ATTR_USED */ len += nla_total_size(8); len += NLMSG_ALIGN(sizeof(struct ovs_header)); return genlmsg_new(len, GFP_KERNEL); } static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp, u32 pid, u32 seq, u8 cmd) { struct sk_buff *skb; int retval; skb = ovs_flow_cmd_alloc_info(flow); if (!skb) return ERR_PTR(-ENOMEM); retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd); BUG_ON(retval < 0); return skb; } static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct ovs_header *ovs_header = info->userhdr; struct sw_flow_key key; struct sw_flow *flow; struct sk_buff *reply; struct datapath *dp; struct flow_table *table; int error; int key_len; /* Extract key. */ error = -EINVAL; if (!a[OVS_FLOW_ATTR_KEY]) goto error; error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); if (error) goto error; /* Validate actions. 
*/ if (a[OVS_FLOW_ATTR_ACTIONS]) { error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0); if (error) goto error; } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) { error = -EINVAL; goto error; } dp = get_dp(ovs_header->dp_ifindex); error = -ENODEV; if (!dp) goto error; table = genl_dereference(dp->table); flow = ovs_flow_tbl_lookup(table, &key, key_len); if (!flow) { struct sw_flow_actions *acts; /* Bail out if we're not allowed to create a new flow. */ error = -ENOENT; if (info->genlhdr->cmd == OVS_FLOW_CMD_SET) goto error; /* Expand table, if necessary, to make room. */ if (ovs_flow_tbl_need_to_expand(table)) { struct flow_table *new_table; new_table = ovs_flow_tbl_expand(table); if (!IS_ERR(new_table)) { rcu_assign_pointer(dp->table, new_table); ovs_flow_tbl_deferred_destroy(table); table = genl_dereference(dp->table); } } /* Allocate flow. */ flow = ovs_flow_alloc(); if (IS_ERR(flow)) { error = PTR_ERR(flow); goto error; } flow->key = key; clear_stats(flow); /* Obtain actions. */ acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]); error = PTR_ERR(acts); if (IS_ERR(acts)) goto error_free_flow; rcu_assign_pointer(flow->sf_acts, acts); /* Put flow in bucket. */ flow->hash = ovs_flow_hash(&key, key_len); ovs_flow_tbl_insert(table, flow); reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW); } else { /* We found a matching flow. */ struct sw_flow_actions *old_acts; struct nlattr *acts_attrs; /* Bail out if we're not allowed to modify an existing flow. * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL * because Generic Netlink treats the latter as a dump * request. We also accept NLM_F_EXCL in case that bug ever * gets fixed. */ error = -EEXIST; if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW && info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) goto error; /* Update actions. 
*/ old_acts = rcu_dereference_protected(flow->sf_acts, lockdep_genl_is_held()); acts_attrs = a[OVS_FLOW_ATTR_ACTIONS]; if (acts_attrs && (old_acts->actions_len != nla_len(acts_attrs) || memcmp(old_acts->actions, nla_data(acts_attrs), old_acts->actions_len))) { struct sw_flow_actions *new_acts; new_acts = ovs_flow_actions_alloc(acts_attrs); error = PTR_ERR(new_acts); if (IS_ERR(new_acts)) goto error; rcu_assign_pointer(flow->sf_acts, new_acts); ovs_flow_deferred_free_acts(old_acts); } reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW); /* Clear stats. */ if (a[OVS_FLOW_ATTR_CLEAR]) { spin_lock_bh(&flow->lock); clear_stats(flow); spin_unlock_bh(&flow->lock); } } if (!IS_ERR(reply)) genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL); else netlink_set_err(init_net.genl_sock, 0, ovs_dp_flow_multicast_group.id, PTR_ERR(reply)); return 0; error_free_flow: ovs_flow_free(flow); error: return error; } static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct ovs_header *ovs_header = info->userhdr; struct sw_flow_key key; struct sk_buff *reply; struct sw_flow *flow; struct datapath *dp; struct flow_table *table; int err; int key_len; if (!a[OVS_FLOW_ATTR_KEY]) return -EINVAL; err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); if (err) return err; dp = get_dp(ovs_header->dp_ifindex); if (!dp) return -ENODEV; table = genl_dereference(dp->table); flow = ovs_flow_tbl_lookup(table, &key, key_len); if (!flow) return -ENOENT; reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW); if (IS_ERR(reply)) return PTR_ERR(reply); return genlmsg_reply(reply, info); } static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct ovs_header *ovs_header = info->userhdr; struct sw_flow_key key; struct sk_buff *reply; struct sw_flow *flow; 
struct datapath *dp; struct flow_table *table; int err; int key_len; if (!a[OVS_FLOW_ATTR_KEY]) return flush_flows(ovs_header->dp_ifindex); err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); if (err) return err; dp = get_dp(ovs_header->dp_ifindex); if (!dp) return -ENODEV; table = genl_dereference(dp->table); flow = ovs_flow_tbl_lookup(table, &key, key_len); if (!flow) return -ENOENT; reply = ovs_flow_cmd_alloc_info(flow); if (!reply) return -ENOMEM; ovs_flow_tbl_remove(table, flow); err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid, info->snd_seq, 0, OVS_FLOW_CMD_DEL); BUG_ON(err < 0); ovs_flow_deferred_free(flow); genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL); return 0; } static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); struct datapath *dp; struct flow_table *table; dp = get_dp(ovs_header->dp_ifindex); if (!dp) return -ENODEV; table = genl_dereference(dp->table); for (;;) { struct sw_flow *flow; u32 bucket, obj; bucket = cb->args[0]; obj = cb->args[1]; flow = ovs_flow_tbl_next(table, &bucket, &obj); if (!flow) break; if (ovs_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, OVS_FLOW_CMD_NEW) < 0) break; cb->args[0] = bucket; cb->args[1] = obj; } return skb->len; } static struct genl_ops dp_flow_genl_ops[] = { { .cmd = OVS_FLOW_CMD_NEW, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = flow_policy, .doit = ovs_flow_cmd_new_or_set }, { .cmd = OVS_FLOW_CMD_DEL, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = flow_policy, .doit = ovs_flow_cmd_del }, { .cmd = OVS_FLOW_CMD_GET, .flags = 0, /* OK for unprivileged users. 
*/ .policy = flow_policy, .doit = ovs_flow_cmd_get, .dumpit = ovs_flow_cmd_dump }, { .cmd = OVS_FLOW_CMD_SET, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = flow_policy, .doit = ovs_flow_cmd_new_or_set, }, }; static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = { [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 }, }; static struct genl_family dp_datapath_genl_family = { .id = GENL_ID_GENERATE, .hdrsize = sizeof(struct ovs_header), .name = OVS_DATAPATH_FAMILY, .version = OVS_DATAPATH_VERSION, .maxattr = OVS_DP_ATTR_MAX }; static struct genl_multicast_group ovs_dp_datapath_multicast_group = { .name = OVS_DATAPATH_MCGROUP }; static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd) { struct ovs_header *ovs_header; struct ovs_dp_stats dp_stats; int err; ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family, flags, cmd); if (!ovs_header) goto error; ovs_header->dp_ifindex = get_dpifindex(dp); rcu_read_lock(); err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp)); rcu_read_unlock(); if (err) goto nla_put_failure; get_dp_stats(dp, &dp_stats); NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats); return genlmsg_end(skb, ovs_header); nla_put_failure: genlmsg_cancel(skb, ovs_header); error: return -EMSGSIZE; } static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid, u32 seq, u8 cmd) { struct sk_buff *skb; int retval; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return ERR_PTR(-ENOMEM); retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd); if (retval < 0) { kfree_skb(skb); return ERR_PTR(retval); } return skb; } /* Called with genl_mutex and optionally with RTNL lock also. 
*/ static struct datapath *lookup_datapath(struct ovs_header *ovs_header, struct nlattr *a[OVS_DP_ATTR_MAX + 1]) { struct datapath *dp; if (!a[OVS_DP_ATTR_NAME]) dp = get_dp(ovs_header->dp_ifindex); else { struct vport *vport; rcu_read_lock(); vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME])); dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL; rcu_read_unlock(); } return dp ? dp : ERR_PTR(-ENODEV); } static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct vport_parms parms; struct sk_buff *reply; struct datapath *dp; struct vport *vport; int err; err = -EINVAL; if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID]) goto err; rtnl_lock(); err = -ENODEV; if (!try_module_get(THIS_MODULE)) goto err_unlock_rtnl; err = -ENOMEM; dp = kzalloc(sizeof(*dp), GFP_KERNEL); if (dp == NULL) goto err_put_module; INIT_LIST_HEAD(&dp->port_list); /* Allocate table. */ err = -ENOMEM; rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS)); if (!dp->table) goto err_free_dp; dp->stats_percpu = alloc_percpu(struct dp_stats_percpu); if (!dp->stats_percpu) { err = -ENOMEM; goto err_destroy_table; } /* Set up our datapath device. 
*/ parms.name = nla_data(a[OVS_DP_ATTR_NAME]); parms.type = OVS_VPORT_TYPE_INTERNAL; parms.options = NULL; parms.dp = dp; parms.port_no = OVSP_LOCAL; parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]); vport = new_vport(&parms); if (IS_ERR(vport)) { err = PTR_ERR(vport); if (err == -EBUSY) err = -EEXIST; goto err_destroy_percpu; } reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW); err = PTR_ERR(reply); if (IS_ERR(reply)) goto err_destroy_local_port; list_add_tail(&dp->list_node, &dps); rtnl_unlock(); genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL); return 0; err_destroy_local_port: ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL])); err_destroy_percpu: free_percpu(dp->stats_percpu); err_destroy_table: ovs_flow_tbl_destroy(genl_dereference(dp->table)); err_free_dp: kfree(dp); err_put_module: module_put(THIS_MODULE); err_unlock_rtnl: rtnl_unlock(); err: return err; } static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info) { struct vport *vport, *next_vport; struct sk_buff *reply; struct datapath *dp; int err; rtnl_lock(); dp = lookup_datapath(info->userhdr, info->attrs); err = PTR_ERR(dp); if (IS_ERR(dp)) goto exit_unlock; reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_DEL); err = PTR_ERR(reply); if (IS_ERR(reply)) goto exit_unlock; list_for_each_entry_safe(vport, next_vport, &dp->port_list, node) if (vport->port_no != OVSP_LOCAL) ovs_dp_detach_port(vport); list_del(&dp->list_node); ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL])); /* rtnl_unlock() will wait until all the references to devices that * are pending unregistration have been dropped. We do it here to * ensure that any internal devices (which contain DP pointers) are * fully destroyed before freeing the datapath. 
*/ rtnl_unlock(); call_rcu(&dp->rcu, destroy_dp_rcu); module_put(THIS_MODULE); genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL); return 0; exit_unlock: rtnl_unlock(); return err; } static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *reply; struct datapath *dp; int err; dp = lookup_datapath(info->userhdr, info->attrs); if (IS_ERR(dp)) return PTR_ERR(dp); reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW); if (IS_ERR(reply)) { err = PTR_ERR(reply); netlink_set_err(init_net.genl_sock, 0, ovs_dp_datapath_multicast_group.id, err); return 0; } genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL); return 0; } static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *reply; struct datapath *dp; dp = lookup_datapath(info->userhdr, info->attrs); if (IS_ERR(dp)) return PTR_ERR(dp); reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW); if (IS_ERR(reply)) return PTR_ERR(reply); return genlmsg_reply(reply, info); } static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct datapath *dp; int skip = cb->args[0]; int i = 0; list_for_each_entry(dp, &dps, list_node) { if (i >= skip && ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, OVS_DP_CMD_NEW) < 0) break; i++; } cb->args[0] = i; return skb->len; } static struct genl_ops dp_datapath_genl_ops[] = { { .cmd = OVS_DP_CMD_NEW, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = datapath_policy, .doit = ovs_dp_cmd_new }, { .cmd = OVS_DP_CMD_DEL, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = datapath_policy, .doit = ovs_dp_cmd_del }, { .cmd = OVS_DP_CMD_GET, .flags = 0, /* OK for unprivileged users. 
*/ .policy = datapath_policy, .doit = ovs_dp_cmd_get, .dumpit = ovs_dp_cmd_dump }, { .cmd = OVS_DP_CMD_SET, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = datapath_policy, .doit = ovs_dp_cmd_set, }, }; static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) }, [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 }, [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 }, [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 }, [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED }, }; static struct genl_family dp_vport_genl_family = { .id = GENL_ID_GENERATE, .hdrsize = sizeof(struct ovs_header), .name = OVS_VPORT_FAMILY, .version = OVS_VPORT_VERSION, .maxattr = OVS_VPORT_ATTR_MAX }; struct genl_multicast_group ovs_dp_vport_multicast_group = { .name = OVS_VPORT_MCGROUP }; /* Called with RTNL lock or RCU read lock. */ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd) { struct ovs_header *ovs_header; struct ovs_vport_stats vport_stats; int err; ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family, flags, cmd); if (!ovs_header) return -EMSGSIZE; ovs_header->dp_ifindex = get_dpifindex(vport->dp); NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no); NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type); NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)); NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid); ovs_vport_get_stats(vport, &vport_stats); NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats), &vport_stats); err = ovs_vport_get_options(vport, skb); if (err == -EMSGSIZE) goto error; return genlmsg_end(skb, ovs_header); nla_put_failure: err = -EMSGSIZE; error: genlmsg_cancel(skb, ovs_header); return err; } /* Called with RTNL lock or RCU read lock. 
*/ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid, u32 seq, u8 cmd) { struct sk_buff *skb; int retval; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return ERR_PTR(-ENOMEM); retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd); if (retval < 0) { kfree_skb(skb); return ERR_PTR(retval); } return skb; } /* Called with RTNL lock or RCU read lock. */ static struct vport *lookup_vport(struct ovs_header *ovs_header, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1]) { struct datapath *dp; struct vport *vport; if (a[OVS_VPORT_ATTR_NAME]) { vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME])); if (!vport) return ERR_PTR(-ENODEV); if (ovs_header->dp_ifindex && ovs_header->dp_ifindex != get_dpifindex(vport->dp)) return ERR_PTR(-ENODEV); return vport; } else if (a[OVS_VPORT_ATTR_PORT_NO]) { u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]); if (port_no >= DP_MAX_PORTS) return ERR_PTR(-EFBIG); dp = get_dp(ovs_header->dp_ifindex); if (!dp) return ERR_PTR(-ENODEV); vport = rcu_dereference_rtnl(dp->ports[port_no]); if (!vport) return ERR_PTR(-ENOENT); return vport; } else return ERR_PTR(-EINVAL); } static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct ovs_header *ovs_header = info->userhdr; struct vport_parms parms; struct sk_buff *reply; struct vport *vport; struct datapath *dp; u32 port_no; int err; err = -EINVAL; if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] || !a[OVS_VPORT_ATTR_UPCALL_PID]) goto exit; rtnl_lock(); dp = get_dp(ovs_header->dp_ifindex); err = -ENODEV; if (!dp) goto exit_unlock; if (a[OVS_VPORT_ATTR_PORT_NO]) { port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]); err = -EFBIG; if (port_no >= DP_MAX_PORTS) goto exit_unlock; vport = rtnl_dereference(dp->ports[port_no]); err = -EBUSY; if (vport) goto exit_unlock; } else { for (port_no = 1; ; port_no++) { if (port_no >= DP_MAX_PORTS) { err = -EFBIG; goto exit_unlock; } vport = 
rtnl_dereference(dp->ports[port_no]); if (!vport) break; } } parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]); parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]); parms.options = a[OVS_VPORT_ATTR_OPTIONS]; parms.dp = dp; parms.port_no = port_no; parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); vport = new_vport(&parms); err = PTR_ERR(vport); if (IS_ERR(vport)) goto exit_unlock; reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, OVS_VPORT_CMD_NEW); if (IS_ERR(reply)) { err = PTR_ERR(reply); ovs_dp_detach_port(vport); goto exit_unlock; } genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); exit_unlock: rtnl_unlock(); exit: return err; } static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct sk_buff *reply; struct vport *vport; int err; rtnl_lock(); vport = lookup_vport(info->userhdr, a); err = PTR_ERR(vport); if (IS_ERR(vport)) goto exit_unlock; err = 0; if (a[OVS_VPORT_ATTR_TYPE] && nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) err = -EINVAL; if (!err && a[OVS_VPORT_ATTR_OPTIONS]) err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); if (!err && a[OVS_VPORT_ATTR_UPCALL_PID]) vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, OVS_VPORT_CMD_NEW); if (IS_ERR(reply)) { netlink_set_err(init_net.genl_sock, 0, ovs_dp_vport_multicast_group.id, PTR_ERR(reply)); goto exit_unlock; } genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); exit_unlock: rtnl_unlock(); return err; } static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct sk_buff *reply; struct vport *vport; int err; rtnl_lock(); vport = lookup_vport(info->userhdr, a); err = PTR_ERR(vport); if (IS_ERR(vport)) goto exit_unlock; if (vport->port_no == 
OVSP_LOCAL) { err = -EINVAL; goto exit_unlock; } reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, OVS_VPORT_CMD_DEL); err = PTR_ERR(reply); if (IS_ERR(reply)) goto exit_unlock; ovs_dp_detach_port(vport); genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); exit_unlock: rtnl_unlock(); return err; } static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct ovs_header *ovs_header = info->userhdr; struct sk_buff *reply; struct vport *vport; int err; rcu_read_lock(); vport = lookup_vport(ovs_header, a); err = PTR_ERR(vport); if (IS_ERR(vport)) goto exit_unlock; reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, OVS_VPORT_CMD_NEW); err = PTR_ERR(reply); if (IS_ERR(reply)) goto exit_unlock; rcu_read_unlock(); return genlmsg_reply(reply, info); exit_unlock: rcu_read_unlock(); return err; } static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); struct datapath *dp; u32 port_no; int retval; dp = get_dp(ovs_header->dp_ifindex); if (!dp) return -ENODEV; rcu_read_lock(); for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) { struct vport *vport; vport = rcu_dereference(dp->ports[port_no]); if (!vport) continue; if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, OVS_VPORT_CMD_NEW) < 0) break; } rcu_read_unlock(); cb->args[0] = port_no; retval = skb->len; return retval; } static void rehash_flow_table(struct work_struct *work) { struct datapath *dp; genl_lock(); list_for_each_entry(dp, &dps, list_node) { struct flow_table *old_table = genl_dereference(dp->table); struct flow_table *new_table; new_table = ovs_flow_tbl_rehash(old_table); if (!IS_ERR(new_table)) { rcu_assign_pointer(dp->table, new_table); ovs_flow_tbl_deferred_destroy(old_table); } } genl_unlock(); 
schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL); } static struct genl_ops dp_vport_genl_ops[] = { { .cmd = OVS_VPORT_CMD_NEW, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = vport_policy, .doit = ovs_vport_cmd_new }, { .cmd = OVS_VPORT_CMD_DEL, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = vport_policy, .doit = ovs_vport_cmd_del }, { .cmd = OVS_VPORT_CMD_GET, .flags = 0, /* OK for unprivileged users. */ .policy = vport_policy, .doit = ovs_vport_cmd_get, .dumpit = ovs_vport_cmd_dump }, { .cmd = OVS_VPORT_CMD_SET, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = vport_policy, .doit = ovs_vport_cmd_set, }, }; struct genl_family_and_ops { struct genl_family *family; struct genl_ops *ops; int n_ops; struct genl_multicast_group *group; }; static const struct genl_family_and_ops dp_genl_families[] = { { &dp_datapath_genl_family, dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops), &ovs_dp_datapath_multicast_group }, { &dp_vport_genl_family, dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops), &ovs_dp_vport_multicast_group }, { &dp_flow_genl_family, dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops), &ovs_dp_flow_multicast_group }, { &dp_packet_genl_family, dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops), NULL }, }; static void dp_unregister_genl(int n_families) { int i; for (i = 0; i < n_families; i++) genl_unregister_family(dp_genl_families[i].family); } static int dp_register_genl(void) { int n_registered; int err; int i; n_registered = 0; for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) { const struct genl_family_and_ops *f = &dp_genl_families[i]; err = genl_register_family_with_ops(f->family, f->ops, f->n_ops); if (err) goto error; n_registered++; if (f->group) { err = genl_register_mc_group(f->family, f->group); if (err) goto error; } } return 0; error: dp_unregister_genl(n_registered); return err; } static int __init dp_init(void) { struct sk_buff *dummy_skb; int err; 
BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb)); pr_info("Open vSwitch switching datapath\n"); err = ovs_flow_init(); if (err) goto error; err = ovs_vport_init(); if (err) goto error_flow_exit; err = register_netdevice_notifier(&ovs_dp_device_notifier); if (err) goto error_vport_exit; err = dp_register_genl(); if (err < 0) goto error_unreg_notifier; schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL); return 0; error_unreg_notifier: unregister_netdevice_notifier(&ovs_dp_device_notifier); error_vport_exit: ovs_vport_exit(); error_flow_exit: ovs_flow_exit(); error: return err; } static void dp_cleanup(void) { cancel_delayed_work_sync(&rehash_flow_wq); rcu_barrier(); dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); unregister_netdevice_notifier(&ovs_dp_device_notifier); ovs_vport_exit(); ovs_flow_exit(); } module_init(dp_init); module_exit(dp_cleanup); MODULE_DESCRIPTION("Open vSwitch switching datapath"); MODULE_LICENSE("GPL");
gpl-2.0
geekboxzone/lollipop_kernel
drivers/isdn/hisax/hisax_fcpcipnp.c
4566
25456
/* * Driver for AVM Fritz!PCI, Fritz!PCI v2, Fritz!PnP ISDN cards * * Author Kai Germaschewski * Copyright 2001 by Kai Germaschewski <kai.germaschewski@gmx.de> * 2001 by Karsten Keil <keil@isdn4linux.de> * * based upon Karsten Keil's original avm_pci.c driver * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * Thanks to Wizard Computersysteme GmbH, Bremervoerde and * SoHaNet Technology GmbH, Berlin * for supporting the development of this driver */ /* TODO: * * o POWER PC * o clean up debugging * o tx_skb at PH_DEACTIVATE time */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/isapnp.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <asm/io.h> #include "hisax_fcpcipnp.h" // debugging cruft #define __debug_variable debug #include "hisax_debug.h" #ifdef CONFIG_HISAX_DEBUG static int debug = 0; /* static int hdlcfifosize = 32; */ module_param(debug, int, 0); /* module_param(hdlcfifosize, int, 0); */ #endif MODULE_AUTHOR("Kai Germaschewski <kai.germaschewski@gmx.de>/Karsten Keil <kkeil@suse.de>"); MODULE_DESCRIPTION("AVM Fritz!PCI/PnP ISDN driver"); static struct pci_device_id fcpci_ids[] = { { .vendor = PCI_VENDOR_ID_AVM, .device = PCI_DEVICE_ID_AVM_A1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (unsigned long) "Fritz!Card PCI", }, { .vendor = PCI_VENDOR_ID_AVM, .device = PCI_DEVICE_ID_AVM_A1_V2, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (unsigned long) "Fritz!Card PCI v2" }, {} }; MODULE_DEVICE_TABLE(pci, fcpci_ids); #ifdef CONFIG_PNP static struct pnp_device_id fcpnp_ids[] = { { .id = "AVM0900", .driver_data = (unsigned long) "Fritz!Card PnP", }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp, fcpnp_ids); #endif static int protocol = 2; /* EURO-ISDN Default */ 
module_param(protocol, int, 0); MODULE_LICENSE("GPL"); // ---------------------------------------------------------------------- #define AVM_INDEX 0x04 #define AVM_DATA 0x10 #define AVM_IDX_HDLC_1 0x00 #define AVM_IDX_HDLC_2 0x01 #define AVM_IDX_ISAC_FIFO 0x02 #define AVM_IDX_ISAC_REG_LOW 0x04 #define AVM_IDX_ISAC_REG_HIGH 0x06 #define AVM_STATUS0 0x02 #define AVM_STATUS0_IRQ_ISAC 0x01 #define AVM_STATUS0_IRQ_HDLC 0x02 #define AVM_STATUS0_IRQ_TIMER 0x04 #define AVM_STATUS0_IRQ_MASK 0x07 #define AVM_STATUS0_RESET 0x01 #define AVM_STATUS0_DIS_TIMER 0x02 #define AVM_STATUS0_RES_TIMER 0x04 #define AVM_STATUS0_ENA_IRQ 0x08 #define AVM_STATUS0_TESTBIT 0x10 #define AVM_STATUS1 0x03 #define AVM_STATUS1_ENA_IOM 0x80 #define HDLC_FIFO 0x0 #define HDLC_STATUS 0x4 #define HDLC_CTRL 0x4 #define HDLC_MODE_ITF_FLG 0x01 #define HDLC_MODE_TRANS 0x02 #define HDLC_MODE_CCR_7 0x04 #define HDLC_MODE_CCR_16 0x08 #define HDLC_MODE_TESTLOOP 0x80 #define HDLC_INT_XPR 0x80 #define HDLC_INT_XDU 0x40 #define HDLC_INT_RPR 0x20 #define HDLC_INT_MASK 0xE0 #define HDLC_STAT_RME 0x01 #define HDLC_STAT_RDO 0x10 #define HDLC_STAT_CRCVFRRAB 0x0E #define HDLC_STAT_CRCVFR 0x06 #define HDLC_STAT_RML_MASK 0xff00 #define HDLC_CMD_XRS 0x80 #define HDLC_CMD_XME 0x01 #define HDLC_CMD_RRS 0x20 #define HDLC_CMD_XML_MASK 0xff00 #define AVM_HDLC_FIFO_1 0x10 #define AVM_HDLC_FIFO_2 0x18 #define AVM_HDLC_STATUS_1 0x14 #define AVM_HDLC_STATUS_2 0x1c #define AVM_ISACSX_INDEX 0x04 #define AVM_ISACSX_DATA 0x08 // ---------------------------------------------------------------------- // Fritz!PCI static unsigned char fcpci_read_isac(struct isac *isac, unsigned char offset) { struct fritz_adapter *adapter = isac->priv; unsigned char idx = (offset > 0x2f) ? 
AVM_IDX_ISAC_REG_HIGH : AVM_IDX_ISAC_REG_LOW; unsigned char val; unsigned long flags; spin_lock_irqsave(&adapter->hw_lock, flags); outb(idx, adapter->io + AVM_INDEX); val = inb(adapter->io + AVM_DATA + (offset & 0xf)); spin_unlock_irqrestore(&adapter->hw_lock, flags); DBG(0x1000, " port %#x, value %#x", offset, val); return val; } static void fcpci_write_isac(struct isac *isac, unsigned char offset, unsigned char value) { struct fritz_adapter *adapter = isac->priv; unsigned char idx = (offset > 0x2f) ? AVM_IDX_ISAC_REG_HIGH : AVM_IDX_ISAC_REG_LOW; unsigned long flags; DBG(0x1000, " port %#x, value %#x", offset, value); spin_lock_irqsave(&adapter->hw_lock, flags); outb(idx, adapter->io + AVM_INDEX); outb(value, adapter->io + AVM_DATA + (offset & 0xf)); spin_unlock_irqrestore(&adapter->hw_lock, flags); } static void fcpci_read_isac_fifo(struct isac *isac, unsigned char *data, int size) { struct fritz_adapter *adapter = isac->priv; unsigned long flags; spin_lock_irqsave(&adapter->hw_lock, flags); outb(AVM_IDX_ISAC_FIFO, adapter->io + AVM_INDEX); insb(adapter->io + AVM_DATA, data, size); spin_unlock_irqrestore(&adapter->hw_lock, flags); } static void fcpci_write_isac_fifo(struct isac *isac, unsigned char *data, int size) { struct fritz_adapter *adapter = isac->priv; unsigned long flags; spin_lock_irqsave(&adapter->hw_lock, flags); outb(AVM_IDX_ISAC_FIFO, adapter->io + AVM_INDEX); outsb(adapter->io + AVM_DATA, data, size); spin_unlock_irqrestore(&adapter->hw_lock, flags); } static u32 fcpci_read_hdlc_status(struct fritz_adapter *adapter, int nr) { u32 val; int idx = nr ? AVM_IDX_HDLC_2 : AVM_IDX_HDLC_1; unsigned long flags; spin_lock_irqsave(&adapter->hw_lock, flags); outl(idx, adapter->io + AVM_INDEX); val = inl(adapter->io + AVM_DATA + HDLC_STATUS); spin_unlock_irqrestore(&adapter->hw_lock, flags); return val; } static void __fcpci_write_ctrl(struct fritz_bcs *bcs, int which) { struct fritz_adapter *adapter = bcs->adapter; int idx = bcs->channel ? 
AVM_IDX_HDLC_2 : AVM_IDX_HDLC_1; DBG(0x40, "hdlc %c wr%x ctrl %x", 'A' + bcs->channel, which, bcs->ctrl.ctrl); outl(idx, adapter->io + AVM_INDEX); outl(bcs->ctrl.ctrl, adapter->io + AVM_DATA + HDLC_CTRL); } static void fcpci_write_ctrl(struct fritz_bcs *bcs, int which) { struct fritz_adapter *adapter = bcs->adapter; unsigned long flags; spin_lock_irqsave(&adapter->hw_lock, flags); __fcpci_write_ctrl(bcs, which); spin_unlock_irqrestore(&adapter->hw_lock, flags); } // ---------------------------------------------------------------------- // Fritz!PCI v2 static unsigned char fcpci2_read_isac(struct isac *isac, unsigned char offset) { struct fritz_adapter *adapter = isac->priv; unsigned char val; unsigned long flags; spin_lock_irqsave(&adapter->hw_lock, flags); outl(offset, adapter->io + AVM_ISACSX_INDEX); val = inl(adapter->io + AVM_ISACSX_DATA); spin_unlock_irqrestore(&adapter->hw_lock, flags); DBG(0x1000, " port %#x, value %#x", offset, val); return val; } static void fcpci2_write_isac(struct isac *isac, unsigned char offset, unsigned char value) { struct fritz_adapter *adapter = isac->priv; unsigned long flags; DBG(0x1000, " port %#x, value %#x", offset, value); spin_lock_irqsave(&adapter->hw_lock, flags); outl(offset, adapter->io + AVM_ISACSX_INDEX); outl(value, adapter->io + AVM_ISACSX_DATA); spin_unlock_irqrestore(&adapter->hw_lock, flags); } static void fcpci2_read_isac_fifo(struct isac *isac, unsigned char *data, int size) { struct fritz_adapter *adapter = isac->priv; int i; unsigned long flags; spin_lock_irqsave(&adapter->hw_lock, flags); outl(0, adapter->io + AVM_ISACSX_INDEX); for (i = 0; i < size; i++) data[i] = inl(adapter->io + AVM_ISACSX_DATA); spin_unlock_irqrestore(&adapter->hw_lock, flags); } static void fcpci2_write_isac_fifo(struct isac *isac, unsigned char *data, int size) { struct fritz_adapter *adapter = isac->priv; int i; unsigned long flags; spin_lock_irqsave(&adapter->hw_lock, flags); outl(0, adapter->io + AVM_ISACSX_INDEX); for (i = 0; i < 
size; i++) outl(data[i], adapter->io + AVM_ISACSX_DATA); spin_unlock_irqrestore(&adapter->hw_lock, flags); } static u32 fcpci2_read_hdlc_status(struct fritz_adapter *adapter, int nr) { int offset = nr ? AVM_HDLC_STATUS_2 : AVM_HDLC_STATUS_1; return inl(adapter->io + offset); } static void fcpci2_write_ctrl(struct fritz_bcs *bcs, int which) { struct fritz_adapter *adapter = bcs->adapter; int offset = bcs->channel ? AVM_HDLC_STATUS_2 : AVM_HDLC_STATUS_1; DBG(0x40, "hdlc %c wr%x ctrl %x", 'A' + bcs->channel, which, bcs->ctrl.ctrl); outl(bcs->ctrl.ctrl, adapter->io + offset); } // ---------------------------------------------------------------------- // Fritz!PnP (ISAC access as for Fritz!PCI) static u32 fcpnp_read_hdlc_status(struct fritz_adapter *adapter, int nr) { unsigned char idx = nr ? AVM_IDX_HDLC_2 : AVM_IDX_HDLC_1; u32 val; unsigned long flags; spin_lock_irqsave(&adapter->hw_lock, flags); outb(idx, adapter->io + AVM_INDEX); val = inb(adapter->io + AVM_DATA + HDLC_STATUS); if (val & HDLC_INT_RPR) val |= inb(adapter->io + AVM_DATA + HDLC_STATUS + 1) << 8; spin_unlock_irqrestore(&adapter->hw_lock, flags); return val; } static void __fcpnp_write_ctrl(struct fritz_bcs *bcs, int which) { struct fritz_adapter *adapter = bcs->adapter; unsigned char idx = bcs->channel ? 
AVM_IDX_HDLC_2 : AVM_IDX_HDLC_1; DBG(0x40, "hdlc %c wr%x ctrl %x", 'A' + bcs->channel, which, bcs->ctrl.ctrl); outb(idx, adapter->io + AVM_INDEX); if (which & 4) outb(bcs->ctrl.sr.mode, adapter->io + AVM_DATA + HDLC_STATUS + 2); if (which & 2) outb(bcs->ctrl.sr.xml, adapter->io + AVM_DATA + HDLC_STATUS + 1); if (which & 1) outb(bcs->ctrl.sr.cmd, adapter->io + AVM_DATA + HDLC_STATUS + 0); } static void fcpnp_write_ctrl(struct fritz_bcs *bcs, int which) { struct fritz_adapter *adapter = bcs->adapter; unsigned long flags; spin_lock_irqsave(&adapter->hw_lock, flags); __fcpnp_write_ctrl(bcs, which); spin_unlock_irqrestore(&adapter->hw_lock, flags); } // ---------------------------------------------------------------------- static inline void B_L1L2(struct fritz_bcs *bcs, int pr, void *arg) { struct hisax_if *ifc = (struct hisax_if *) &bcs->b_if; DBG(2, "pr %#x", pr); ifc->l1l2(ifc, pr, arg); } static void hdlc_fill_fifo(struct fritz_bcs *bcs) { struct fritz_adapter *adapter = bcs->adapter; struct sk_buff *skb = bcs->tx_skb; int count; unsigned long flags; unsigned char *p; DBG(0x40, "hdlc_fill_fifo"); BUG_ON(skb->len == 0); bcs->ctrl.sr.cmd &= ~HDLC_CMD_XME; if (bcs->tx_skb->len > bcs->fifo_size) { count = bcs->fifo_size; } else { count = bcs->tx_skb->len; if (bcs->mode != L1_MODE_TRANS) bcs->ctrl.sr.cmd |= HDLC_CMD_XME; } DBG(0x40, "hdlc_fill_fifo %d/%d", count, bcs->tx_skb->len); p = bcs->tx_skb->data; skb_pull(bcs->tx_skb, count); bcs->tx_cnt += count; bcs->ctrl.sr.xml = ((count == bcs->fifo_size) ? 0 : count); switch (adapter->type) { case AVM_FRITZ_PCI: spin_lock_irqsave(&adapter->hw_lock, flags); // sets the correct AVM_INDEX, too __fcpci_write_ctrl(bcs, 3); outsl(adapter->io + AVM_DATA + HDLC_FIFO, p, (count + 3) / 4); spin_unlock_irqrestore(&adapter->hw_lock, flags); break; case AVM_FRITZ_PCIV2: fcpci2_write_ctrl(bcs, 3); outsl(adapter->io + (bcs->channel ? 
AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1), p, (count + 3) / 4); break; case AVM_FRITZ_PNP: spin_lock_irqsave(&adapter->hw_lock, flags); // sets the correct AVM_INDEX, too __fcpnp_write_ctrl(bcs, 3); outsb(adapter->io + AVM_DATA, p, count); spin_unlock_irqrestore(&adapter->hw_lock, flags); break; } } static inline void hdlc_empty_fifo(struct fritz_bcs *bcs, int count) { struct fritz_adapter *adapter = bcs->adapter; unsigned char *p; unsigned char idx = bcs->channel ? AVM_IDX_HDLC_2 : AVM_IDX_HDLC_1; DBG(0x10, "hdlc_empty_fifo %d", count); if (bcs->rcvidx + count > HSCX_BUFMAX) { DBG(0x10, "hdlc_empty_fifo: incoming packet too large"); return; } p = bcs->rcvbuf + bcs->rcvidx; bcs->rcvidx += count; switch (adapter->type) { case AVM_FRITZ_PCI: spin_lock(&adapter->hw_lock); outl(idx, adapter->io + AVM_INDEX); insl(adapter->io + AVM_DATA + HDLC_FIFO, p, (count + 3) / 4); spin_unlock(&adapter->hw_lock); break; case AVM_FRITZ_PCIV2: insl(adapter->io + (bcs->channel ? AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1), p, (count + 3) / 4); break; case AVM_FRITZ_PNP: spin_lock(&adapter->hw_lock); outb(idx, adapter->io + AVM_INDEX); insb(adapter->io + AVM_DATA, p, count); spin_unlock(&adapter->hw_lock); break; } } static inline void hdlc_rpr_irq(struct fritz_bcs *bcs, u32 stat) { struct fritz_adapter *adapter = bcs->adapter; struct sk_buff *skb; int len; if (stat & HDLC_STAT_RDO) { DBG(0x10, "RDO"); bcs->ctrl.sr.xml = 0; bcs->ctrl.sr.cmd |= HDLC_CMD_RRS; adapter->write_ctrl(bcs, 1); bcs->ctrl.sr.cmd &= ~HDLC_CMD_RRS; adapter->write_ctrl(bcs, 1); bcs->rcvidx = 0; return; } len = (stat & HDLC_STAT_RML_MASK) >> 8; if (len == 0) len = bcs->fifo_size; hdlc_empty_fifo(bcs, len); if ((stat & HDLC_STAT_RME) || (bcs->mode == L1_MODE_TRANS)) { if (((stat & HDLC_STAT_CRCVFRRAB) == HDLC_STAT_CRCVFR) || (bcs->mode == L1_MODE_TRANS)) { skb = dev_alloc_skb(bcs->rcvidx); if (!skb) { printk(KERN_WARNING "HDLC: receive out of memory\n"); } else { memcpy(skb_put(skb, bcs->rcvidx), bcs->rcvbuf, bcs->rcvidx); 
DBG_SKB(1, skb); B_L1L2(bcs, PH_DATA | INDICATION, skb); } bcs->rcvidx = 0; } else { DBG(0x10, "ch%d invalid frame %#x", bcs->channel, stat); bcs->rcvidx = 0; } } } static inline void hdlc_xdu_irq(struct fritz_bcs *bcs) { struct fritz_adapter *adapter = bcs->adapter; /* Here we lost an TX interrupt, so * restart transmitting the whole frame. */ bcs->ctrl.sr.xml = 0; bcs->ctrl.sr.cmd |= HDLC_CMD_XRS; adapter->write_ctrl(bcs, 1); bcs->ctrl.sr.cmd &= ~HDLC_CMD_XRS; if (!bcs->tx_skb) { DBG(0x10, "XDU without skb"); adapter->write_ctrl(bcs, 1); return; } /* only hdlc restarts the frame, transparent mode must continue */ if (bcs->mode == L1_MODE_HDLC) { skb_push(bcs->tx_skb, bcs->tx_cnt); bcs->tx_cnt = 0; } } static inline void hdlc_xpr_irq(struct fritz_bcs *bcs) { struct sk_buff *skb; skb = bcs->tx_skb; if (!skb) return; if (skb->len) { hdlc_fill_fifo(bcs); return; } bcs->tx_cnt = 0; bcs->tx_skb = NULL; B_L1L2(bcs, PH_DATA | CONFIRM, (void *)(unsigned long)skb->truesize); dev_kfree_skb_irq(skb); } static void hdlc_irq_one(struct fritz_bcs *bcs, u32 stat) { DBG(0x10, "ch%d stat %#x", bcs->channel, stat); if (stat & HDLC_INT_RPR) { DBG(0x10, "RPR"); hdlc_rpr_irq(bcs, stat); } if (stat & HDLC_INT_XDU) { DBG(0x10, "XDU"); hdlc_xdu_irq(bcs); hdlc_xpr_irq(bcs); return; } if (stat & HDLC_INT_XPR) { DBG(0x10, "XPR"); hdlc_xpr_irq(bcs); } } static inline void hdlc_irq(struct fritz_adapter *adapter) { int nr; u32 stat; for (nr = 0; nr < 2; nr++) { stat = adapter->read_hdlc_status(adapter, nr); DBG(0x10, "HDLC %c stat %#x", 'A' + nr, stat); if (stat & HDLC_INT_MASK) hdlc_irq_one(&adapter->bcs[nr], stat); } } static void modehdlc(struct fritz_bcs *bcs, int mode) { struct fritz_adapter *adapter = bcs->adapter; DBG(0x40, "hdlc %c mode %d --> %d", 'A' + bcs->channel, bcs->mode, mode); if (bcs->mode == mode) return; bcs->fifo_size = 32; bcs->ctrl.ctrl = 0; bcs->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS; switch (mode) { case L1_MODE_NULL: bcs->ctrl.sr.mode = HDLC_MODE_TRANS; 
adapter->write_ctrl(bcs, 5); break; case L1_MODE_TRANS: case L1_MODE_HDLC: bcs->rcvidx = 0; bcs->tx_cnt = 0; bcs->tx_skb = NULL; if (mode == L1_MODE_TRANS) { bcs->ctrl.sr.mode = HDLC_MODE_TRANS; } else { bcs->ctrl.sr.mode = HDLC_MODE_ITF_FLG; } adapter->write_ctrl(bcs, 5); bcs->ctrl.sr.cmd = HDLC_CMD_XRS; adapter->write_ctrl(bcs, 1); bcs->ctrl.sr.cmd = 0; break; } bcs->mode = mode; } static void fritz_b_l2l1(struct hisax_if *ifc, int pr, void *arg) { struct fritz_bcs *bcs = ifc->priv; struct sk_buff *skb = arg; int mode; DBG(0x10, "pr %#x", pr); switch (pr) { case PH_DATA | REQUEST: BUG_ON(bcs->tx_skb); bcs->tx_skb = skb; DBG_SKB(1, skb); hdlc_fill_fifo(bcs); break; case PH_ACTIVATE | REQUEST: mode = (long) arg; DBG(4, "B%d,PH_ACTIVATE_REQUEST %d", bcs->channel + 1, mode); modehdlc(bcs, mode); B_L1L2(bcs, PH_ACTIVATE | INDICATION, NULL); break; case PH_DEACTIVATE | REQUEST: DBG(4, "B%d,PH_DEACTIVATE_REQUEST", bcs->channel + 1); modehdlc(bcs, L1_MODE_NULL); B_L1L2(bcs, PH_DEACTIVATE | INDICATION, NULL); break; } } // ---------------------------------------------------------------------- static irqreturn_t fcpci2_irq(int intno, void *dev) { struct fritz_adapter *adapter = dev; unsigned char val; val = inb(adapter->io + AVM_STATUS0); if (!(val & AVM_STATUS0_IRQ_MASK)) /* hopefully a shared IRQ reqest */ return IRQ_NONE; DBG(2, "STATUS0 %#x", val); if (val & AVM_STATUS0_IRQ_ISAC) isacsx_irq(&adapter->isac); if (val & AVM_STATUS0_IRQ_HDLC) hdlc_irq(adapter); if (val & AVM_STATUS0_IRQ_ISAC) isacsx_irq(&adapter->isac); return IRQ_HANDLED; } static irqreturn_t fcpci_irq(int intno, void *dev) { struct fritz_adapter *adapter = dev; unsigned char sval; sval = inb(adapter->io + 2); if ((sval & AVM_STATUS0_IRQ_MASK) == AVM_STATUS0_IRQ_MASK) /* possibly a shared IRQ reqest */ return IRQ_NONE; DBG(2, "sval %#x", sval); if (!(sval & AVM_STATUS0_IRQ_ISAC)) isac_irq(&adapter->isac); if (!(sval & AVM_STATUS0_IRQ_HDLC)) hdlc_irq(adapter); return IRQ_HANDLED; } // 
---------------------------------------------------------------------- static inline void fcpci2_init(struct fritz_adapter *adapter) { outb(AVM_STATUS0_RES_TIMER, adapter->io + AVM_STATUS0); outb(AVM_STATUS0_ENA_IRQ, adapter->io + AVM_STATUS0); } static inline void fcpci_init(struct fritz_adapter *adapter) { outb(AVM_STATUS0_DIS_TIMER | AVM_STATUS0_RES_TIMER | AVM_STATUS0_ENA_IRQ, adapter->io + AVM_STATUS0); outb(AVM_STATUS1_ENA_IOM | adapter->irq, adapter->io + AVM_STATUS1); mdelay(10); } // ---------------------------------------------------------------------- static int fcpcipnp_setup(struct fritz_adapter *adapter) { u32 val = 0; int retval; DBG(1, ""); isac_init(&adapter->isac); // FIXME is this okay now retval = -EBUSY; if (!request_region(adapter->io, 32, "fcpcipnp")) goto err; switch (adapter->type) { case AVM_FRITZ_PCIV2: case AVM_FRITZ_PCI: val = inl(adapter->io); break; case AVM_FRITZ_PNP: val = inb(adapter->io); val |= inb(adapter->io + 1) << 8; break; } DBG(1, "stat %#x Class %X Rev %d", val, val & 0xff, (val >> 8) & 0xff); spin_lock_init(&adapter->hw_lock); adapter->isac.priv = adapter; switch (adapter->type) { case AVM_FRITZ_PCIV2: adapter->isac.read_isac = &fcpci2_read_isac; adapter->isac.write_isac = &fcpci2_write_isac; adapter->isac.read_isac_fifo = &fcpci2_read_isac_fifo; adapter->isac.write_isac_fifo = &fcpci2_write_isac_fifo; adapter->read_hdlc_status = &fcpci2_read_hdlc_status; adapter->write_ctrl = &fcpci2_write_ctrl; break; case AVM_FRITZ_PCI: adapter->isac.read_isac = &fcpci_read_isac; adapter->isac.write_isac = &fcpci_write_isac; adapter->isac.read_isac_fifo = &fcpci_read_isac_fifo; adapter->isac.write_isac_fifo = &fcpci_write_isac_fifo; adapter->read_hdlc_status = &fcpci_read_hdlc_status; adapter->write_ctrl = &fcpci_write_ctrl; break; case AVM_FRITZ_PNP: adapter->isac.read_isac = &fcpci_read_isac; adapter->isac.write_isac = &fcpci_write_isac; adapter->isac.read_isac_fifo = &fcpci_read_isac_fifo; adapter->isac.write_isac_fifo = 
&fcpci_write_isac_fifo; adapter->read_hdlc_status = &fcpnp_read_hdlc_status; adapter->write_ctrl = &fcpnp_write_ctrl; break; } // Reset outb(0, adapter->io + AVM_STATUS0); mdelay(10); outb(AVM_STATUS0_RESET, adapter->io + AVM_STATUS0); mdelay(10); outb(0, adapter->io + AVM_STATUS0); mdelay(10); switch (adapter->type) { case AVM_FRITZ_PCIV2: retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED, "fcpcipnp", adapter); break; case AVM_FRITZ_PCI: retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED, "fcpcipnp", adapter); break; case AVM_FRITZ_PNP: retval = request_irq(adapter->irq, fcpci_irq, 0, "fcpcipnp", adapter); break; } if (retval) goto err_region; switch (adapter->type) { case AVM_FRITZ_PCIV2: fcpci2_init(adapter); isacsx_setup(&adapter->isac); break; case AVM_FRITZ_PCI: case AVM_FRITZ_PNP: fcpci_init(adapter); isac_setup(&adapter->isac); break; } val = adapter->read_hdlc_status(adapter, 0); DBG(0x20, "HDLC A STA %x", val); val = adapter->read_hdlc_status(adapter, 1); DBG(0x20, "HDLC B STA %x", val); adapter->bcs[0].mode = -1; adapter->bcs[1].mode = -1; modehdlc(&adapter->bcs[0], L1_MODE_NULL); modehdlc(&adapter->bcs[1], L1_MODE_NULL); return 0; err_region: release_region(adapter->io, 32); err: return retval; } static void fcpcipnp_release(struct fritz_adapter *adapter) { DBG(1, ""); outb(0, adapter->io + AVM_STATUS0); free_irq(adapter->irq, adapter); release_region(adapter->io, 32); } // ---------------------------------------------------------------------- static struct fritz_adapter *new_adapter(void) { struct fritz_adapter *adapter; struct hisax_b_if *b_if[2]; int i; adapter = kzalloc(sizeof(struct fritz_adapter), GFP_KERNEL); if (!adapter) return NULL; adapter->isac.hisax_d_if.owner = THIS_MODULE; adapter->isac.hisax_d_if.ifc.priv = &adapter->isac; adapter->isac.hisax_d_if.ifc.l2l1 = isac_d_l2l1; for (i = 0; i < 2; i++) { adapter->bcs[i].adapter = adapter; adapter->bcs[i].channel = i; adapter->bcs[i].b_if.ifc.priv = &adapter->bcs[i]; 
adapter->bcs[i].b_if.ifc.l2l1 = fritz_b_l2l1; } for (i = 0; i < 2; i++) b_if[i] = &adapter->bcs[i].b_if; if (hisax_register(&adapter->isac.hisax_d_if, b_if, "fcpcipnp", protocol) != 0) { kfree(adapter); adapter = NULL; } return adapter; } static void delete_adapter(struct fritz_adapter *adapter) { hisax_unregister(&adapter->isac.hisax_d_if); kfree(adapter); } static int fcpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct fritz_adapter *adapter; int retval; retval = -ENOMEM; adapter = new_adapter(); if (!adapter) goto err; pci_set_drvdata(pdev, adapter); if (pdev->device == PCI_DEVICE_ID_AVM_A1_V2) adapter->type = AVM_FRITZ_PCIV2; else adapter->type = AVM_FRITZ_PCI; retval = pci_enable_device(pdev); if (retval) goto err_free; adapter->io = pci_resource_start(pdev, 1); adapter->irq = pdev->irq; printk(KERN_INFO "hisax_fcpcipnp: found adapter %s at %s\n", (char *) ent->driver_data, pci_name(pdev)); retval = fcpcipnp_setup(adapter); if (retval) goto err_free; return 0; err_free: delete_adapter(adapter); err: return retval; } #ifdef CONFIG_PNP static int fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) { struct fritz_adapter *adapter; int retval; if (!pdev) return (-ENODEV); retval = -ENOMEM; adapter = new_adapter(); if (!adapter) goto err; pnp_set_drvdata(pdev, adapter); adapter->type = AVM_FRITZ_PNP; pnp_disable_dev(pdev); retval = pnp_activate_dev(pdev); if (retval < 0) { printk(KERN_WARNING "%s: pnp_activate_dev(%s) ret(%d)\n", __func__, (char *)dev_id->driver_data, retval); goto err_free; } adapter->io = pnp_port_start(pdev, 0); adapter->irq = pnp_irq(pdev, 0); printk(KERN_INFO "hisax_fcpcipnp: found adapter %s at IO %#x irq %d\n", (char *) dev_id->driver_data, adapter->io, adapter->irq); retval = fcpcipnp_setup(adapter); if (retval) goto err_free; return 0; err_free: delete_adapter(adapter); err: return retval; } static void fcpnp_remove(struct pnp_dev *pdev) { struct fritz_adapter *adapter = pnp_get_drvdata(pdev); if 
(adapter) { fcpcipnp_release(adapter); delete_adapter(adapter); } pnp_disable_dev(pdev); } static struct pnp_driver fcpnp_driver = { .name = "fcpnp", .probe = fcpnp_probe, .remove = fcpnp_remove, .id_table = fcpnp_ids, }; #endif static void fcpci_remove(struct pci_dev *pdev) { struct fritz_adapter *adapter = pci_get_drvdata(pdev); fcpcipnp_release(adapter); pci_disable_device(pdev); delete_adapter(adapter); } static struct pci_driver fcpci_driver = { .name = "fcpci", .probe = fcpci_probe, .remove = fcpci_remove, .id_table = fcpci_ids, }; static int __init hisax_fcpcipnp_init(void) { int retval; printk(KERN_INFO "hisax_fcpcipnp: Fritz!Card PCI/PCIv2/PnP ISDN driver v0.0.1\n"); retval = pci_register_driver(&fcpci_driver); if (retval) return retval; #ifdef CONFIG_PNP retval = pnp_register_driver(&fcpnp_driver); if (retval < 0) { pci_unregister_driver(&fcpci_driver); return retval; } #endif return 0; } static void __exit hisax_fcpcipnp_exit(void) { #ifdef CONFIG_PNP pnp_unregister_driver(&fcpnp_driver); #endif pci_unregister_driver(&fcpci_driver); } module_init(hisax_fcpcipnp_init); module_exit(hisax_fcpcipnp_exit);
gpl-2.0
Nibsalot/AK-Mako
arch/arm/mach-s3c64xx/mach-real6410.c
4822
7686
/* linux/arch/arm/mach-s3c64xx/mach-real6410.c
 *
 * Board support for the REAL6410 (Samsung S3C6410 SoC): static platform
 * device and pin configuration registered at boot.  The only runtime
 * logic is parsing of the "real6410=" kernel command-line feature string,
 * which selects one of the two known LCD panels.
 *
 * Copyright 2010 Darius Augulis <augulis.darius@gmail.com>
 * Copyright 2008 Openmoko, Inc.
 * Copyright 2008 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *	http://armlinux.simtec.co.uk/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/dm9000.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/types.h>

#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include <mach/map.h>
#include <mach/regs-gpio.h>
#include <mach/regs-modem.h>
#include <mach/regs-srom.h>

#include <plat/adc.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/fb.h>
#include <plat/nand.h>
#include <plat/regs-serial.h>
#include <plat/ts.h>
#include <plat/regs-fb-v4.h>

#include <video/platform_lcd.h>

#include "common.h"

/* Common UART line settings shared by all four serial ports: 8N2, FIFO on. */
#define UCON S3C2410_UCON_DEFAULT
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)

/* All four on-chip UARTs are enabled with identical configuration. */
static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = {
	[0] = {
		.hwport	= 0,
		.flags	= 0,
		.ucon	= UCON,
		.ulcon	= ULCON,
		.ufcon	= UFCON,
	},
	[1] = {
		.hwport	= 1,
		.flags	= 0,
		.ucon	= UCON,
		.ulcon	= ULCON,
		.ufcon	= UFCON,
	},
	[2] = {
		.hwport	= 2,
		.flags	= 0,
		.ucon	= UCON,
		.ulcon	= ULCON,
		.ufcon	= UFCON,
	},
	[3] = {
		.hwport	= 3,
		.flags	= 0,
		.ucon	= UCON,
		.ulcon	= ULCON,
		.ufcon	= UFCON,
	},
};

/* DM9000AEP 10/100 ethernet controller */
static struct resource real6410_dm9k_resource[] = {
	/* address port, data port, then the interrupt line (EINT7) */
	[0] = {
		.start	= S3C64XX_PA_XM0CSN1,
		.end	= S3C64XX_PA_XM0CSN1 + 1,
		.flags	= IORESOURCE_MEM
	},
	[1] = {
		.start	= S3C64XX_PA_XM0CSN1 + 4,
		.end	= S3C64XX_PA_XM0CSN1 + 5,
		.flags	= IORESOURCE_MEM
	},
	[2] = {
		.start	= S3C_EINT(7),
		.end	= S3C_EINT(7),
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL
	}
};

/* The chip is wired for a 16-bit bus and has no external EEPROM. */
static struct dm9000_plat_data real6410_dm9k_pdata = {
	.flags		= (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
};

static struct platform_device real6410_device_eth = {
	.name		= "dm9000",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(real6410_dm9k_resource),
	.resource	= real6410_dm9k_resource,
	.dev		= {
		.platform_data	= &real6410_dm9k_pdata,
	},
};

/*
 * The two LCD panels this board ships with; the index into this array is
 * chosen by the "real6410=" feature string (see real6410_parse_features).
 */
static struct s3c_fb_pd_win real6410_fb_win[] = {
	{
		.win_mode	= {	/* 4.3" 480x272 */
			.left_margin	= 3,
			.right_margin	= 2,
			.upper_margin	= 1,
			.lower_margin	= 1,
			.hsync_len	= 40,
			.vsync_len	= 1,
			.xres		= 480,
			.yres		= 272,
		},
		.max_bpp	= 32,
		.default_bpp	= 16,
	}, {
		.win_mode	= {	/* 7.0" 800x480 */
			.left_margin	= 8,
			.right_margin	= 13,
			.upper_margin	= 7,
			.lower_margin	= 5,
			.hsync_len	= 3,
			.vsync_len	= 1,
			.xres		= 800,
			.yres		= 480,
		},
		.max_bpp	= 32,
		.default_bpp	= 16,
	},
};

/* win[0] is re-pointed at boot once the feature string has been parsed. */
static struct s3c_fb_platdata real6410_lcd_pdata __initdata = {
	.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
	.win[0]		= &real6410_fb_win[0],
	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
};

/* NAND layout: 1M u-boot, 2M kernel, remainder rootfs. */
static struct mtd_partition real6410_nand_part[] = {
	[0] = {
		.name	= "uboot",
		.size	= SZ_1M,
		.offset	= 0,
	},
	[1] = {
		.name	= "kernel",
		.size	= SZ_2M,
		.offset	= SZ_1M,
	},
	[2] = {
		.name	= "rootfs",
		.size	= MTDPART_SIZ_FULL,
		.offset	= SZ_1M + SZ_2M,
	},
};

static struct s3c2410_nand_set real6410_nand_sets[] = {
	[0] = {
		.name		= "nand",
		.nr_chips	= 1,
		.nr_partitions	= ARRAY_SIZE(real6410_nand_part),
		.partitions	= real6410_nand_part,
	},
};

/* NAND controller timings (in ns) for the on-board flash chip. */
static struct s3c2410_platform_nand real6410_nand_info = {
	.tacls		= 25,
	.twrph0		= 55,
	.twrph1		= 40,
	.nr_sets	= ARRAY_SIZE(real6410_nand_sets),
	.sets		= real6410_nand_sets,
};

static struct platform_device *real6410_devices[] __initdata = {
	&real6410_device_eth,
	&s3c_device_hsmmc0,
	&s3c_device_hsmmc1,
	&s3c_device_fb,
	&s3c_device_nand,
	&s3c_device_adc,
	&s3c_device_ts,
	&s3c_device_ohci,
};

/*
 * Early I/O setup: map SoC peripherals, start clocks from the 12MHz
 * crystal, register the UARTs, and route the display pins to the RGB
 * interface (instead of the modem bypass path).
 */
static void __init real6410_map_io(void)
{
	u32 tmp;

	s3c64xx_init_io(NULL, 0);
	s3c24xx_init_clocks(12000000);
	s3c24xx_init_uarts(real6410_uartcfgs, ARRAY_SIZE(real6410_uartcfgs));

	/* set the LCD type */
	tmp = __raw_readl(S3C64XX_SPCON);
	tmp &= ~S3C64XX_SPCON_LCD_SEL_MASK;
	tmp |= S3C64XX_SPCON_LCD_SEL_RGB;
	__raw_writel(tmp, S3C64XX_SPCON);

	/* remove the LCD bypass */
	tmp = __raw_readl(S3C64XX_MODEM_MIFPCON);
	tmp &= ~MIFPCON_LCD_BYPASS;
	__raw_writel(tmp, S3C64XX_MODEM_MIFPCON);
}

/*
 * real6410_features string
 *
 * 0-9 LCD configuration
 *
 */
static char real6410_features_str[12] __initdata = "0";

/* Capture the "real6410=" command-line option; always accepts the option. */
static int __init real6410_features_setup(char *str)
{
	if (str)
		strlcpy(real6410_features_str, str,
			sizeof(real6410_features_str));
	return 1;
}

__setup("real6410=", real6410_features_setup);

#define FEATURE_SCREEN (1 << 0)

/* Result of feature-string parsing: which options were seen and the
 * selected LCD panel index into real6410_fb_win[]. */
struct real6410_features_t {
	int	done;
	int	lcd_index;
};

/*
 * Walk @features_str character by character and fill in @features.
 * A digit selects an LCD mode; only the first digit is honoured, and
 * out-of-range digits are reported but leave the default (index 0).
 */
static void real6410_parse_features(
		struct real6410_features_t *features,
		const char *features_str)
{
	const char *fp = features_str;

	features->done = 0;
	features->lcd_index = 0;

	while (*fp) {
		char f = *fp++;

		switch (f) {
		case '0'...'9':	/* tft screen */
			if (features->done & FEATURE_SCREEN) {
				printk(KERN_INFO "REAL6410: '%c' ignored, "
					"screen type already set\n", f);
			} else {
				int li = f - '0';
				if (li >= ARRAY_SIZE(real6410_fb_win))
					printk(KERN_INFO "REAL6410: '%c' out "
						"of range LCD mode\n", f);
				else {
					features->lcd_index = li;
				}
			}
			/* mark the screen option as consumed either way */
			features->done |= FEATURE_SCREEN;
			break;
		}
	}
}

/*
 * Late init: apply the parsed feature string, hand platform data to the
 * fb/NAND/touchscreen drivers, program the SROM controller for the
 * DM9000 on nCS1, and register every platform device.
 */
static void __init real6410_machine_init(void)
{
	u32 cs1;
	struct real6410_features_t features = { 0 };

	printk(KERN_INFO "REAL6410: Option string real6410=%s\n",
			real6410_features_str);

	/* Parse the feature string */
	real6410_parse_features(&features, real6410_features_str);

	real6410_lcd_pdata.win[0] = &real6410_fb_win[features.lcd_index];
	printk(KERN_INFO "REAL6410: selected LCD display is %dx%d\n",
		real6410_lcd_pdata.win[0]->win_mode.xres,
		real6410_lcd_pdata.win[0]->win_mode.yres);

	s3c_fb_set_platdata(&real6410_lcd_pdata);
	s3c_nand_set_platdata(&real6410_nand_info);
	s3c24xx_ts_set_platdata(NULL);

	/* configure nCS1 width to 16 bits */
	cs1 = __raw_readl(S3C64XX_SROM_BW) &
		~(S3C64XX_SROM_BW__CS_MASK << S3C64XX_SROM_BW__NCS1__SHIFT);
	cs1 |= ((1 << S3C64XX_SROM_BW__DATAWIDTH__SHIFT) |
		(1 << S3C64XX_SROM_BW__WAITENABLE__SHIFT) |
		(1 << S3C64XX_SROM_BW__BYTEENABLE__SHIFT)) <<
		S3C64XX_SROM_BW__NCS1__SHIFT;
	__raw_writel(cs1, S3C64XX_SROM_BW);

	/* set timing for nCS1 suitable for ethernet chip */
	__raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) |
		(6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
		(4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
		(1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
		(13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
		(4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
		(0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);

	/* NOTE(review): return value unchecked — presumably the pin is
	 * always free this early in boot; confirm before relying on it. */
	gpio_request(S3C64XX_GPF(15), "LCD power");

	platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices));
}

MACHINE_START(REAL6410, "REAL6410")
	/* Maintainer: Darius Augulis <augulis.darius@gmail.com> */
	.atag_offset	= 0x100,
	.init_irq	= s3c6410_init_irq,
	.handle_irq	= vic_handle_irq,
	.map_io		= real6410_map_io,
	.init_machine	= real6410_machine_init,
	.timer		= &s3c24xx_timer,
	.restart	= s3c64xx_restart,
MACHINE_END
gpl-2.0
XirXes/pyramid-3.4.10
drivers/scsi/fcoe/fcoe_transport.c
4822
21580
/* * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained at www.Open-FCoE.org */ #include <linux/types.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/errno.h> #include <linux/crc32.h> #include <scsi/libfcoe.h> #include "libfcoe.h" MODULE_AUTHOR("Open-FCoE.org"); MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs"); MODULE_LICENSE("GPL v2"); static int fcoe_transport_create(const char *, struct kernel_param *); static int fcoe_transport_destroy(const char *, struct kernel_param *); static int fcoe_transport_show(char *buffer, const struct kernel_param *kp); static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device); static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device); static int fcoe_transport_enable(const char *, struct kernel_param *); static int fcoe_transport_disable(const char *, struct kernel_param *); static int libfcoe_device_notification(struct notifier_block *notifier, ulong event, void *ptr); static LIST_HEAD(fcoe_transports); static DEFINE_MUTEX(ft_mutex); static LIST_HEAD(fcoe_netdevs); static DEFINE_MUTEX(fn_mutex); unsigned int libfcoe_debug_logging; module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); 
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");

/*
 * Sysfs-style module parameters: each "write" to one of these parameters
 * invokes the matching fcoe_transport_* handler with the interface name
 * as the string argument; "show" is read-only.
 */
module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR);
__MODULE_PARM_TYPE(show, "string");
MODULE_PARM_DESC(show, " Show attached FCoE transports");

module_param_call(create, fcoe_transport_create, NULL,
		  (void *)FIP_MODE_FABRIC, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");

module_param_call(create_vn2vn, fcoe_transport_create, NULL,
		  (void *)FIP_MODE_VN2VN, S_IWUSR);
__MODULE_PARM_TYPE(create_vn2vn, "string");
MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
		 "on an Ethernet interface");

module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(destroy, "string");
MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");

module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(enable, "string");
MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");

module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(disable, "string");
MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");

/* notification function for packets from net device */
static struct notifier_block libfcoe_notifier = {
	.notifier_call = libfcoe_device_notification,
};

/**
 * __fcoe_get_lesb() - Fill a Link Error Status Block for a local port
 * @lport:	The local port whose per-CPU statistics are summed
 * @fc_lesb:	The LESB to fill in (treated as a struct fcoe_fc_els_lesb)
 * @netdev:	The net device whose CRC error count supplies lesb_fcs_error
 *
 * Sums LinkFailureCount, VLinkFailureCount and MissDiscAdvCount across all
 * possible CPUs and stores them (network byte order) along with the
 * netdev's rx_crc_errors counter.  All other LESB fields are zeroed.
 */
void __fcoe_get_lesb(struct fc_lport *lport,
		     struct fc_els_lesb *fc_lesb,
		     struct net_device *netdev)
{
	unsigned int cpu;
	u32 lfc, vlfc, mdac;
	struct fcoe_dev_stats *devst;
	struct fcoe_fc_els_lesb *lesb;
	struct rtnl_link_stats64 temp;

	lfc = 0;
	vlfc = 0;
	mdac = 0;
	lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
	memset(lesb, 0, sizeof(*lesb));
	for_each_possible_cpu(cpu) {
		devst = per_cpu_ptr(lport->dev_stats, cpu);
		lfc += devst->LinkFailureCount;
		vlfc += devst->VLinkFailureCount;
		mdac += devst->MissDiscAdvCount;
	}
	lesb->lesb_link_fail = htonl(lfc);
	lesb->lesb_vlink_fail = htonl(vlfc);
	lesb->lesb_miss_fka = htonl(mdac);
	lesb->lesb_fcs_error =
		htonl(dev_get_stats(netdev, &temp)->rx_crc_errors);
}
EXPORT_SYMBOL_GPL(__fcoe_get_lesb);

/**
 * fcoe_wwn_to_str() - Format a WWN as a 16-hex-digit string
 * @wwn:	The world wide name to format
 * @buf:	Output buffer
 * @len:	Size of @buf (needs at least 17 bytes for the full string;
 *		snprintf truncates safely otherwise)
 */
void fcoe_wwn_to_str(u64 wwn, char *buf, int len)
{
	u8 wwpn[8];

	u64_to_wwn(wwn, wwpn);
	snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
		 wwpn[0], wwpn[1], wwpn[2], wwpn[3],
		 wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
}
EXPORT_SYMBOL_GPL(fcoe_wwn_to_str);

/**
 * fcoe_validate_vport_create() - Validate a vport before creating it
 * @vport: NPIV port to be created
 *
 * This routine is meant to add validation for a vport before creating it
 * via fcoe_vport_create().
 * Current validations are:
 *	- WWPN supplied is unique for given lport
 *
 * Returns: 0 when the WWPN is usable, -EINVAL when it collides with the
 * base port's WWPN or with an existing vport.  Holds lp_mutex while
 * scanning the vport list.
 */
int fcoe_validate_vport_create(struct fc_vport *vport)
{
	struct Scsi_Host *shost = vport_to_shost(vport);
	struct fc_lport *n_port = shost_priv(shost);
	struct fc_lport *vn_port;
	int rc = 0;
	char buf[32];

	mutex_lock(&n_port->lp_mutex);

	fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
	/* Check if the wwpn is not same as that of the lport */
	if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
		LIBFCOE_TRANSPORT_DBG("vport WWPN 0x%s is same as that of the "
				      "base port WWPN\n", buf);
		rc = -EINVAL;
		goto out;
	}

	/* Check if there is any existing vport with same wwpn */
	list_for_each_entry(vn_port, &n_port->vports, list) {
		if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
			LIBFCOE_TRANSPORT_DBG("vport with given WWPN 0x%s "
					      "already exists\n", buf);
			rc = -EINVAL;
			break;
		}
	}
out:
	mutex_unlock(&n_port->lp_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(fcoe_validate_vport_create);

/**
 * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
 * @netdev: the associated net device
 * @wwn: the output WWN
 * @type: the type of WWN (WWPN or WWNN)
 *
 * Returns: 0 for success, -EINVAL when the driver provides no
 * ndo_fcoe_get_wwn callback (or whatever error the callback returns).
 */
int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	const struct net_device_ops *ops = netdev->netdev_ops;

	if (ops->ndo_fcoe_get_wwn)
		return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fcoe_get_wwn);
/** * fcoe_fc_crc() - Calculates the CRC for a given frame * @fp: The frame to be checksumed * * This uses crc32() routine to calculate the CRC for a frame * * Return: The 32 bit CRC value */ u32 fcoe_fc_crc(struct fc_frame *fp) { struct sk_buff *skb = fp_skb(fp); struct skb_frag_struct *frag; unsigned char *data; unsigned long off, len, clen; u32 crc; unsigned i; crc = crc32(~0, skb->data, skb_headlen(skb)); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; off = frag->page_offset; len = skb_frag_size(frag); while (len > 0) { clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); data = kmap_atomic( skb_frag_page(frag) + (off >> PAGE_SHIFT)); crc = crc32(crc, data + (off & ~PAGE_MASK), clen); kunmap_atomic(data); off += clen; len -= clen; } } return crc; } EXPORT_SYMBOL_GPL(fcoe_fc_crc); /** * fcoe_start_io() - Start FCoE I/O * @skb: The packet to be transmitted * * This routine is called from the net device to start transmitting * FCoE packets. * * Returns: 0 for success */ int fcoe_start_io(struct sk_buff *skb) { struct sk_buff *nskb; int rc; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return -ENOMEM; rc = dev_queue_xmit(nskb); if (rc != 0) return rc; kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(fcoe_start_io); /** * fcoe_clean_pending_queue() - Dequeue a skb and free it * @lport: The local port to dequeue a skb on */ void fcoe_clean_pending_queue(struct fc_lport *lport) { struct fcoe_port *port = lport_priv(lport); struct sk_buff *skb; spin_lock_bh(&port->fcoe_pending_queue.lock); while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) { spin_unlock_bh(&port->fcoe_pending_queue.lock); kfree_skb(skb); spin_lock_bh(&port->fcoe_pending_queue.lock); } spin_unlock_bh(&port->fcoe_pending_queue.lock); } EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue); /** * fcoe_check_wait_queue() - Attempt to clear the transmit backlog * @lport: The local port whose backlog is to be cleared * * This empties the wait_queue, dequeues the head of the 
wait_queue queue * and calls fcoe_start_io() for each packet. If all skb have been * transmitted it returns the qlen. If an error occurs it restores * wait_queue (to try again later) and returns -1. * * The wait_queue is used when the skb transmit fails. The failed skb * will go in the wait_queue which will be emptied by the timer function or * by the next skb transmit. */ void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb) { struct fcoe_port *port = lport_priv(lport); int rc; spin_lock_bh(&port->fcoe_pending_queue.lock); if (skb) __skb_queue_tail(&port->fcoe_pending_queue, skb); if (port->fcoe_pending_queue_active) goto out; port->fcoe_pending_queue_active = 1; while (port->fcoe_pending_queue.qlen) { /* keep qlen > 0 until fcoe_start_io succeeds */ port->fcoe_pending_queue.qlen++; skb = __skb_dequeue(&port->fcoe_pending_queue); spin_unlock_bh(&port->fcoe_pending_queue.lock); rc = fcoe_start_io(skb); spin_lock_bh(&port->fcoe_pending_queue.lock); if (rc) { __skb_queue_head(&port->fcoe_pending_queue, skb); /* undo temporary increment above */ port->fcoe_pending_queue.qlen--; break; } /* undo temporary increment above */ port->fcoe_pending_queue.qlen--; } if (port->fcoe_pending_queue.qlen < port->min_queue_depth) lport->qfull = 0; if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) mod_timer(&port->timer, jiffies + 2); port->fcoe_pending_queue_active = 0; out: if (port->fcoe_pending_queue.qlen > port->max_queue_depth) lport->qfull = 1; spin_unlock_bh(&port->fcoe_pending_queue.lock); } EXPORT_SYMBOL_GPL(fcoe_check_wait_queue); /** * fcoe_queue_timer() - The fcoe queue timer * @lport: The local port * * Calls fcoe_check_wait_queue on timeout */ void fcoe_queue_timer(ulong lport) { fcoe_check_wait_queue((struct fc_lport *)lport, NULL); } EXPORT_SYMBOL_GPL(fcoe_queue_timer); /** * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC * @skb: The packet to be transmitted * @tlen: The total length of the trailer * @fps: 
The fcoe context * * This routine allocates a page for frame trailers. The page is re-used if * there is enough room left on it for the current trailer. If there isn't * enough buffer left a new page is allocated for the trailer. Reference to * the page from this function as well as the skbs using the page fragments * ensure that the page is freed at the appropriate time. * * Returns: 0 for success */ int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen, struct fcoe_percpu_s *fps) { struct page *page; page = fps->crc_eof_page; if (!page) { page = alloc_page(GFP_ATOMIC); if (!page) return -ENOMEM; fps->crc_eof_page = page; fps->crc_eof_offset = 0; } get_page(page); skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, fps->crc_eof_offset, tlen); skb->len += tlen; skb->data_len += tlen; skb->truesize += tlen; fps->crc_eof_offset += sizeof(struct fcoe_crc_eof); if (fps->crc_eof_offset >= PAGE_SIZE) { fps->crc_eof_page = NULL; fps->crc_eof_offset = 0; put_page(page); } return 0; } EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof); /** * fcoe_transport_lookup - find an fcoe transport that matches a netdev * @netdev: The netdev to look for from all attached transports * * Returns : ptr to the fcoe transport that supports this netdev or NULL * if not found. 
* * The ft_mutex should be held when this is called */ static struct fcoe_transport *fcoe_transport_lookup(struct net_device *netdev) { struct fcoe_transport *ft = NULL; list_for_each_entry(ft, &fcoe_transports, list) if (ft->match && ft->match(netdev)) return ft; return NULL; } /** * fcoe_transport_attach - Attaches an FCoE transport * @ft: The fcoe transport to be attached * * Returns : 0 for success */ int fcoe_transport_attach(struct fcoe_transport *ft) { int rc = 0; mutex_lock(&ft_mutex); if (ft->attached) { LIBFCOE_TRANSPORT_DBG("transport %s already attached\n", ft->name); rc = -EEXIST; goto out_attach; } /* Add default transport to the tail */ if (strcmp(ft->name, FCOE_TRANSPORT_DEFAULT)) list_add(&ft->list, &fcoe_transports); else list_add_tail(&ft->list, &fcoe_transports); ft->attached = true; LIBFCOE_TRANSPORT_DBG("attaching transport %s\n", ft->name); out_attach: mutex_unlock(&ft_mutex); return rc; } EXPORT_SYMBOL(fcoe_transport_attach); /** * fcoe_transport_detach - Detaches an FCoE transport * @ft: The fcoe transport to be attached * * Returns : 0 for success */ int fcoe_transport_detach(struct fcoe_transport *ft) { int rc = 0; struct fcoe_netdev_mapping *nm = NULL, *tmp; mutex_lock(&ft_mutex); if (!ft->attached) { LIBFCOE_TRANSPORT_DBG("transport %s already detached\n", ft->name); rc = -ENODEV; goto out_attach; } /* remove netdev mapping for this transport as it is going away */ mutex_lock(&fn_mutex); list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) { if (nm->ft == ft) { LIBFCOE_TRANSPORT_DBG("transport %s going away, " "remove its netdev mapping for %s\n", ft->name, nm->netdev->name); list_del(&nm->list); kfree(nm); } } mutex_unlock(&fn_mutex); list_del(&ft->list); ft->attached = false; LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name); out_attach: mutex_unlock(&ft_mutex); return rc; } EXPORT_SYMBOL(fcoe_transport_detach); static int fcoe_transport_show(char *buffer, const struct kernel_param *kp) { int i, j; struct fcoe_transport 
*ft = NULL; i = j = sprintf(buffer, "Attached FCoE transports:"); mutex_lock(&ft_mutex); list_for_each_entry(ft, &fcoe_transports, list) { if (i >= PAGE_SIZE - IFNAMSIZ) break; i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name); } mutex_unlock(&ft_mutex); if (i == j) i += snprintf(&buffer[i], IFNAMSIZ, "none"); return i; } static int __init fcoe_transport_init(void) { register_netdevice_notifier(&libfcoe_notifier); return 0; } static int __exit fcoe_transport_exit(void) { struct fcoe_transport *ft; unregister_netdevice_notifier(&libfcoe_notifier); mutex_lock(&ft_mutex); list_for_each_entry(ft, &fcoe_transports, list) printk(KERN_ERR "FCoE transport %s is still attached!\n", ft->name); mutex_unlock(&ft_mutex); return 0; } static int fcoe_add_netdev_mapping(struct net_device *netdev, struct fcoe_transport *ft) { struct fcoe_netdev_mapping *nm; nm = kmalloc(sizeof(*nm), GFP_KERNEL); if (!nm) { printk(KERN_ERR "Unable to allocate netdev_mapping"); return -ENOMEM; } nm->netdev = netdev; nm->ft = ft; mutex_lock(&fn_mutex); list_add(&nm->list, &fcoe_netdevs); mutex_unlock(&fn_mutex); return 0; } static void fcoe_del_netdev_mapping(struct net_device *netdev) { struct fcoe_netdev_mapping *nm = NULL, *tmp; mutex_lock(&fn_mutex); list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) { if (nm->netdev == netdev) { list_del(&nm->list); kfree(nm); mutex_unlock(&fn_mutex); return; } } mutex_unlock(&fn_mutex); } /** * fcoe_netdev_map_lookup - find the fcoe transport that matches the netdev on which * it was created * * Returns : ptr to the fcoe transport that supports this netdev or NULL * if not found. 
* * The ft_mutex should be held when this is called */ static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev) { struct fcoe_transport *ft = NULL; struct fcoe_netdev_mapping *nm; mutex_lock(&fn_mutex); list_for_each_entry(nm, &fcoe_netdevs, list) { if (netdev == nm->netdev) { ft = nm->ft; mutex_unlock(&fn_mutex); return ft; } } mutex_unlock(&fn_mutex); return NULL; } /** * fcoe_if_to_netdev() - Parse a name buffer to get a net device * @buffer: The name of the net device * * Returns: NULL or a ptr to net_device */ static struct net_device *fcoe_if_to_netdev(const char *buffer) { char *cp; char ifname[IFNAMSIZ + 2]; if (buffer) { strlcpy(ifname, buffer, IFNAMSIZ); cp = ifname + strlen(ifname); while (--cp >= ifname && *cp == '\n') *cp = '\0'; return dev_get_by_name(&init_net, ifname); } return NULL; } /** * libfcoe_device_notification() - Handler for net device events * @notifier: The context of the notification * @event: The type of event * @ptr: The net device that the event was on * * This function is called by the Ethernet driver in case of link change event. * * Returns: 0 for success */ static int libfcoe_device_notification(struct notifier_block *notifier, ulong event, void *ptr) { struct net_device *netdev = ptr; switch (event) { case NETDEV_UNREGISTER: LIBFCOE_TRANSPORT_DBG("NETDEV_UNREGISTER %s\n", netdev->name); fcoe_del_netdev_mapping(netdev); break; } return NOTIFY_OK; } /** * fcoe_transport_create() - Create a fcoe interface * @buffer: The name of the Ethernet interface to create on * @kp: The associated kernel param * * Called from sysfs. This holds the ft_mutex while calling the * registered fcoe transport's create function. 
* * Returns: 0 for success */ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; enum fip_state fip_mode = (enum fip_state)(long)kp->arg; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buffer); if (!netdev) { LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer); goto out_nodev; } ft = fcoe_netdev_map_lookup(netdev); if (ft) { LIBFCOE_TRANSPORT_DBG("transport %s already has existing " "FCoE instance on %s.\n", ft->name, netdev->name); rc = -EEXIST; goto out_putdev; } ft = fcoe_transport_lookup(netdev); if (!ft) { LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", netdev->name); goto out_putdev; } rc = fcoe_add_netdev_mapping(netdev, ft); if (rc) { LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping " "for FCoE transport %s for %s.\n", ft->name, netdev->name); goto out_putdev; } /* pass to transport create */ rc = ft->create ? ft->create(netdev, fip_mode) : -ENODEV; if (rc) fcoe_del_netdev_mapping(netdev); LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n", ft->name, (rc) ? "failed" : "succeeded", netdev->name); out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); return rc; } /** * fcoe_transport_destroy() - Destroy a FCoE interface * @buffer: The name of the Ethernet interface to be destroyed * @kp: The associated kernel parameter * * Called from sysfs. This holds the ft_mutex while calling the * registered fcoe transport's destroy function. 
* * Returns: 0 for success */ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buffer); if (!netdev) { LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer); goto out_nodev; } ft = fcoe_netdev_map_lookup(netdev); if (!ft) { LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", netdev->name); goto out_putdev; } /* pass to transport destroy */ rc = ft->destroy ? ft->destroy(netdev) : -ENODEV; fcoe_del_netdev_mapping(netdev); LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n", ft->name, (rc) ? "failed" : "succeeded", netdev->name); out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); return rc; } /** * fcoe_transport_disable() - Disables a FCoE interface * @buffer: The name of the Ethernet interface to be disabled * @kp: The associated kernel parameter * * Called from sysfs. * * Returns: 0 for success */ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buffer); if (!netdev) goto out_nodev; ft = fcoe_netdev_map_lookup(netdev); if (!ft) goto out_putdev; rc = ft->disable ? ft->disable(netdev) : -ENODEV; out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); if (rc == -ERESTARTSYS) return restart_syscall(); else return rc; } /** * fcoe_transport_enable() - Enables a FCoE interface * @buffer: The name of the Ethernet interface to be enabled * @kp: The associated kernel parameter * * Called from sysfs. 
* * Returns: 0 for success */ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buffer); if (!netdev) goto out_nodev; ft = fcoe_netdev_map_lookup(netdev); if (!ft) goto out_putdev; rc = ft->enable ? ft->enable(netdev) : -ENODEV; out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); return rc; } /** * libfcoe_init() - Initialization routine for libfcoe.ko */ static int __init libfcoe_init(void) { fcoe_transport_init(); return 0; } module_init(libfcoe_init); /** * libfcoe_exit() - Tear down libfcoe.ko */ static void __exit libfcoe_exit(void) { fcoe_transport_exit(); } module_exit(libfcoe_exit);
gpl-2.0
Flyhalf205/android_kernel_htc_t6
drivers/pcmcia/cs.c
5078
21505
/* * cs.c -- Kernel Card Services - core services * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * * (C) 1999 David A. Hinds */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/major.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/device.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <asm/irq.h> #include <pcmcia/ss.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include "cs_internal.h" /* Module parameters */ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); MODULE_DESCRIPTION("Linux Kernel Card Services"); MODULE_LICENSE("GPL"); #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444) INT_MODULE_PARM(setup_delay, 10); /* centiseconds */ INT_MODULE_PARM(resume_delay, 20); /* centiseconds */ INT_MODULE_PARM(shutdown_delay, 3); /* centiseconds */ INT_MODULE_PARM(vcc_settle, 40); /* centiseconds */ INT_MODULE_PARM(reset_time, 10); /* usecs */ INT_MODULE_PARM(unreset_delay, 10); /* centiseconds */ INT_MODULE_PARM(unreset_check, 10); /* centiseconds */ INT_MODULE_PARM(unreset_limit, 30); /* unreset_check's */ /* Access speed for attribute memory windows */ INT_MODULE_PARM(cis_speed, 300); /* ns */ socket_state_t dead_socket = { .csc_mask = SS_DETECT, }; EXPORT_SYMBOL(dead_socket); /* List of all sockets, protected by a rwsem */ LIST_HEAD(pcmcia_socket_list); EXPORT_SYMBOL(pcmcia_socket_list); 
DECLARE_RWSEM(pcmcia_socket_list_rwsem); EXPORT_SYMBOL(pcmcia_socket_list_rwsem); struct pcmcia_socket *pcmcia_get_socket(struct pcmcia_socket *skt) { struct device *dev = get_device(&skt->dev); if (!dev) return NULL; return dev_get_drvdata(dev); } EXPORT_SYMBOL(pcmcia_get_socket); void pcmcia_put_socket(struct pcmcia_socket *skt) { put_device(&skt->dev); } EXPORT_SYMBOL(pcmcia_put_socket); static void pcmcia_release_socket(struct device *dev) { struct pcmcia_socket *socket = dev_get_drvdata(dev); complete(&socket->socket_released); } static int pccardd(void *__skt); /** * pcmcia_register_socket - add a new pcmcia socket device * @socket: the &socket to register */ int pcmcia_register_socket(struct pcmcia_socket *socket) { struct task_struct *tsk; int ret; if (!socket || !socket->ops || !socket->dev.parent || !socket->resource_ops) return -EINVAL; dev_dbg(&socket->dev, "pcmcia_register_socket(0x%p)\n", socket->ops); /* try to obtain a socket number [yes, it gets ugly if we * register more than 2^sizeof(unsigned int) pcmcia * sockets... but the socket number is deprecated * anyways, so I don't care] */ down_write(&pcmcia_socket_list_rwsem); if (list_empty(&pcmcia_socket_list)) socket->sock = 0; else { unsigned int found, i = 1; struct pcmcia_socket *tmp; do { found = 1; list_for_each_entry(tmp, &pcmcia_socket_list, socket_list) { if (tmp->sock == i) found = 0; } i++; } while (!found); socket->sock = i - 1; } list_add_tail(&socket->socket_list, &pcmcia_socket_list); up_write(&pcmcia_socket_list_rwsem); #ifndef CONFIG_CARDBUS /* * If we do not support Cardbus, ensure that * the Cardbus socket capability is disabled. 
*/ socket->features &= ~SS_CAP_CARDBUS; #endif /* set proper values in socket->dev */ dev_set_drvdata(&socket->dev, socket); socket->dev.class = &pcmcia_socket_class; dev_set_name(&socket->dev, "pcmcia_socket%u", socket->sock); /* base address = 0, map = 0 */ socket->cis_mem.flags = 0; socket->cis_mem.speed = cis_speed; INIT_LIST_HEAD(&socket->cis_cache); init_completion(&socket->socket_released); init_completion(&socket->thread_done); mutex_init(&socket->skt_mutex); mutex_init(&socket->ops_mutex); spin_lock_init(&socket->thread_lock); if (socket->resource_ops->init) { mutex_lock(&socket->ops_mutex); ret = socket->resource_ops->init(socket); mutex_unlock(&socket->ops_mutex); if (ret) goto err; } tsk = kthread_run(pccardd, socket, "pccardd"); if (IS_ERR(tsk)) { ret = PTR_ERR(tsk); goto err; } wait_for_completion(&socket->thread_done); if (!socket->thread) { dev_printk(KERN_WARNING, &socket->dev, "PCMCIA: warning: socket thread did not start\n"); return -EIO; } pcmcia_parse_events(socket, SS_DETECT); /* * Let's try to get the PCMCIA module for 16-bit PCMCIA support. * If it fails, it doesn't matter -- we still have 32-bit CardBus * support to offer, so this is not a failure mode. 
*/ request_module_nowait("pcmcia"); return 0; err: down_write(&pcmcia_socket_list_rwsem); list_del(&socket->socket_list); up_write(&pcmcia_socket_list_rwsem); return ret; } /* pcmcia_register_socket */ EXPORT_SYMBOL(pcmcia_register_socket); /** * pcmcia_unregister_socket - remove a pcmcia socket device * @socket: the &socket to unregister */ void pcmcia_unregister_socket(struct pcmcia_socket *socket) { if (!socket) return; dev_dbg(&socket->dev, "pcmcia_unregister_socket(0x%p)\n", socket->ops); if (socket->thread) kthread_stop(socket->thread); /* remove from our own list */ down_write(&pcmcia_socket_list_rwsem); list_del(&socket->socket_list); up_write(&pcmcia_socket_list_rwsem); /* wait for sysfs to drop all references */ if (socket->resource_ops->exit) { mutex_lock(&socket->ops_mutex); socket->resource_ops->exit(socket); mutex_unlock(&socket->ops_mutex); } wait_for_completion(&socket->socket_released); } /* pcmcia_unregister_socket */ EXPORT_SYMBOL(pcmcia_unregister_socket); struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr) { struct pcmcia_socket *s; down_read(&pcmcia_socket_list_rwsem); list_for_each_entry(s, &pcmcia_socket_list, socket_list) if (s->sock == nr) { up_read(&pcmcia_socket_list_rwsem); return s; } up_read(&pcmcia_socket_list_rwsem); return NULL; } EXPORT_SYMBOL(pcmcia_get_socket_by_nr); static int socket_reset(struct pcmcia_socket *skt) { int status, i; dev_dbg(&skt->dev, "reset\n"); skt->socket.flags |= SS_OUTPUT_ENA | SS_RESET; skt->ops->set_socket(skt, &skt->socket); udelay((long)reset_time); skt->socket.flags &= ~SS_RESET; skt->ops->set_socket(skt, &skt->socket); msleep(unreset_delay * 10); for (i = 0; i < unreset_limit; i++) { skt->ops->get_status(skt, &status); if (!(status & SS_DETECT)) return -ENODEV; if (status & SS_READY) return 0; msleep(unreset_check * 10); } dev_printk(KERN_ERR, &skt->dev, "time out after reset.\n"); return -ETIMEDOUT; } /* * socket_setup() and socket_shutdown() are called by the main event handler * when 
card insertion and removal events are received. * socket_setup() turns on socket power and resets the socket, in two stages. * socket_shutdown() unconfigures a socket and turns off socket power. */ static void socket_shutdown(struct pcmcia_socket *s) { int status; dev_dbg(&s->dev, "shutdown\n"); if (s->callback) s->callback->remove(s); mutex_lock(&s->ops_mutex); s->state &= SOCKET_INUSE | SOCKET_PRESENT; msleep(shutdown_delay * 10); s->state &= SOCKET_INUSE; /* Blank out the socket state */ s->socket = dead_socket; s->ops->init(s); s->ops->set_socket(s, &s->socket); s->lock_count = 0; kfree(s->fake_cis); s->fake_cis = NULL; s->functions = 0; /* From here on we can be sure that only we (that is, the * pccardd thread) accesses this socket, and all (16-bit) * PCMCIA interactions are gone. Therefore, release * ops_mutex so that we don't get a sysfs-related lockdep * warning. */ mutex_unlock(&s->ops_mutex); #ifdef CONFIG_CARDBUS cb_free(s); #endif /* give socket some time to power down */ msleep(100); s->ops->get_status(s, &status); if (status & SS_POWERON) { dev_printk(KERN_ERR, &s->dev, "*** DANGER *** unable to remove socket power\n"); } s->state &= ~SOCKET_INUSE; } static int socket_setup(struct pcmcia_socket *skt, int initial_delay) { int status, i; dev_dbg(&skt->dev, "setup\n"); skt->ops->get_status(skt, &status); if (!(status & SS_DETECT)) return -ENODEV; msleep(initial_delay * 10); for (i = 0; i < 100; i++) { skt->ops->get_status(skt, &status); if (!(status & SS_DETECT)) return -ENODEV; if (!(status & SS_PENDING)) break; msleep(100); } if (status & SS_PENDING) { dev_printk(KERN_ERR, &skt->dev, "voltage interrogation timed out.\n"); return -ETIMEDOUT; } if (status & SS_CARDBUS) { if (!(skt->features & SS_CAP_CARDBUS)) { dev_printk(KERN_ERR, &skt->dev, "cardbus cards are not supported.\n"); return -EINVAL; } skt->state |= SOCKET_CARDBUS; } else skt->state &= ~SOCKET_CARDBUS; /* * Decode the card voltage requirements, and apply power to the card. 
*/ if (status & SS_3VCARD) skt->socket.Vcc = skt->socket.Vpp = 33; else if (!(status & SS_XVCARD)) skt->socket.Vcc = skt->socket.Vpp = 50; else { dev_printk(KERN_ERR, &skt->dev, "unsupported voltage key.\n"); return -EIO; } if (skt->power_hook) skt->power_hook(skt, HOOK_POWER_PRE); skt->socket.flags = 0; skt->ops->set_socket(skt, &skt->socket); /* * Wait "vcc_settle" for the supply to stabilise. */ msleep(vcc_settle * 10); skt->ops->get_status(skt, &status); if (!(status & SS_POWERON)) { dev_printk(KERN_ERR, &skt->dev, "unable to apply power.\n"); return -EIO; } status = socket_reset(skt); if (skt->power_hook) skt->power_hook(skt, HOOK_POWER_POST); return status; } /* * Handle card insertion. Setup the socket, reset the card, * and then tell the rest of PCMCIA that a card is present. */ static int socket_insert(struct pcmcia_socket *skt) { int ret; dev_dbg(&skt->dev, "insert\n"); mutex_lock(&skt->ops_mutex); if (skt->state & SOCKET_INUSE) { mutex_unlock(&skt->ops_mutex); return -EINVAL; } skt->state |= SOCKET_INUSE; ret = socket_setup(skt, setup_delay); if (ret == 0) { skt->state |= SOCKET_PRESENT; dev_printk(KERN_NOTICE, &skt->dev, "pccard: %s card inserted into slot %d\n", (skt->state & SOCKET_CARDBUS) ? 
"CardBus" : "PCMCIA", skt->sock); #ifdef CONFIG_CARDBUS if (skt->state & SOCKET_CARDBUS) { cb_alloc(skt); skt->state |= SOCKET_CARDBUS_CONFIG; } #endif dev_dbg(&skt->dev, "insert done\n"); mutex_unlock(&skt->ops_mutex); if (!(skt->state & SOCKET_CARDBUS) && (skt->callback)) skt->callback->add(skt); } else { mutex_unlock(&skt->ops_mutex); socket_shutdown(skt); } return ret; } static int socket_suspend(struct pcmcia_socket *skt) { if (skt->state & SOCKET_SUSPEND) return -EBUSY; mutex_lock(&skt->ops_mutex); skt->suspended_state = skt->state; skt->socket = dead_socket; skt->ops->set_socket(skt, &skt->socket); if (skt->ops->suspend) skt->ops->suspend(skt); skt->state |= SOCKET_SUSPEND; mutex_unlock(&skt->ops_mutex); return 0; } static int socket_early_resume(struct pcmcia_socket *skt) { mutex_lock(&skt->ops_mutex); skt->socket = dead_socket; skt->ops->init(skt); skt->ops->set_socket(skt, &skt->socket); if (skt->state & SOCKET_PRESENT) skt->resume_status = socket_setup(skt, resume_delay); mutex_unlock(&skt->ops_mutex); return 0; } static int socket_late_resume(struct pcmcia_socket *skt) { int ret; mutex_lock(&skt->ops_mutex); skt->state &= ~SOCKET_SUSPEND; mutex_unlock(&skt->ops_mutex); if (!(skt->state & SOCKET_PRESENT)) { ret = socket_insert(skt); if (ret == -ENODEV) ret = 0; return ret; } if (skt->resume_status) { socket_shutdown(skt); return 0; } if (skt->suspended_state != skt->state) { dev_dbg(&skt->dev, "suspend state 0x%x != resume state 0x%x\n", skt->suspended_state, skt->state); socket_shutdown(skt); return socket_insert(skt); } #ifdef CONFIG_CARDBUS if (skt->state & SOCKET_CARDBUS) { /* We can't be sure the CardBus card is the same * as the one previously inserted. Therefore, remove * and re-add... */ cb_free(skt); cb_alloc(skt); return 0; } #endif if (!(skt->state & SOCKET_CARDBUS) && (skt->callback)) skt->callback->early_resume(skt); return 0; } /* * Resume a socket. If a card is present, verify its CIS against * our cached copy. 
If they are different, the card has been * replaced, and we need to tell the drivers. */ static int socket_resume(struct pcmcia_socket *skt) { if (!(skt->state & SOCKET_SUSPEND)) return -EBUSY; socket_early_resume(skt); return socket_late_resume(skt); } static void socket_remove(struct pcmcia_socket *skt) { dev_printk(KERN_NOTICE, &skt->dev, "pccard: card ejected from slot %d\n", skt->sock); socket_shutdown(skt); } /* * Process a socket card detect status change. * * If we don't have a card already present, delay the detect event for * about 20ms (to be on the safe side) before reading the socket status. * * Some i82365-based systems send multiple SS_DETECT events during card * insertion, and the "card present" status bit seems to bounce. This * will probably be true with GPIO-based card detection systems after * the product has aged. */ static void socket_detect_change(struct pcmcia_socket *skt) { if (!(skt->state & SOCKET_SUSPEND)) { int status; if (!(skt->state & SOCKET_PRESENT)) msleep(20); skt->ops->get_status(skt, &status); if ((skt->state & SOCKET_PRESENT) && !(status & SS_DETECT)) socket_remove(skt); if (!(skt->state & SOCKET_PRESENT) && (status & SS_DETECT)) socket_insert(skt); } } static int pccardd(void *__skt) { struct pcmcia_socket *skt = __skt; int ret; skt->thread = current; skt->socket = dead_socket; skt->ops->init(skt); skt->ops->set_socket(skt, &skt->socket); /* register with the device core */ ret = device_register(&skt->dev); if (ret) { dev_printk(KERN_WARNING, &skt->dev, "PCMCIA: unable to register socket\n"); skt->thread = NULL; complete(&skt->thread_done); return 0; } ret = pccard_sysfs_add_socket(&skt->dev); if (ret) dev_warn(&skt->dev, "err %d adding socket attributes\n", ret); complete(&skt->thread_done); /* wait for userspace to catch up */ msleep(250); set_freezable(); for (;;) { unsigned long flags; unsigned int events; unsigned int sysfs_events; set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&skt->thread_lock, flags); events 
= skt->thread_events; skt->thread_events = 0; sysfs_events = skt->sysfs_events; skt->sysfs_events = 0; spin_unlock_irqrestore(&skt->thread_lock, flags); mutex_lock(&skt->skt_mutex); if (events & SS_DETECT) socket_detect_change(skt); if (sysfs_events) { if (sysfs_events & PCMCIA_UEVENT_EJECT) socket_remove(skt); if (sysfs_events & PCMCIA_UEVENT_INSERT) socket_insert(skt); if ((sysfs_events & PCMCIA_UEVENT_SUSPEND) && !(skt->state & SOCKET_CARDBUS)) { if (skt->callback) ret = skt->callback->suspend(skt); else ret = 0; if (!ret) { socket_suspend(skt); msleep(100); } } if ((sysfs_events & PCMCIA_UEVENT_RESUME) && !(skt->state & SOCKET_CARDBUS)) { ret = socket_resume(skt); if (!ret && skt->callback) skt->callback->resume(skt); } if ((sysfs_events & PCMCIA_UEVENT_REQUERY) && !(skt->state & SOCKET_CARDBUS)) { if (!ret && skt->callback) skt->callback->requery(skt); } } mutex_unlock(&skt->skt_mutex); if (events || sysfs_events) continue; if (kthread_should_stop()) break; schedule(); try_to_freeze(); } /* make sure we are running before we exit */ set_current_state(TASK_RUNNING); /* shut down socket, if a device is still present */ if (skt->state & SOCKET_PRESENT) { mutex_lock(&skt->skt_mutex); socket_remove(skt); mutex_unlock(&skt->skt_mutex); } /* remove from the device core */ pccard_sysfs_remove_socket(&skt->dev); device_unregister(&skt->dev); return 0; } /* * Yenta (at least) probes interrupts before registering the socket and * starting the handler thread. 
*/ void pcmcia_parse_events(struct pcmcia_socket *s, u_int events) { unsigned long flags; dev_dbg(&s->dev, "parse_events: events %08x\n", events); if (s->thread) { spin_lock_irqsave(&s->thread_lock, flags); s->thread_events |= events; spin_unlock_irqrestore(&s->thread_lock, flags); wake_up_process(s->thread); } } /* pcmcia_parse_events */ EXPORT_SYMBOL(pcmcia_parse_events); /** * pcmcia_parse_uevents() - tell pccardd to issue manual commands * @s: the PCMCIA socket we wan't to command * @events: events to pass to pccardd * * userspace-issued insert, eject, suspend and resume commands must be * handled by pccardd to avoid any sysfs-related deadlocks. Valid events * are PCMCIA_UEVENT_EJECT (for eject), PCMCIA_UEVENT__INSERT (for insert), * PCMCIA_UEVENT_RESUME (for resume), PCMCIA_UEVENT_SUSPEND (for suspend) * and PCMCIA_UEVENT_REQUERY (for re-querying the PCMCIA card). */ void pcmcia_parse_uevents(struct pcmcia_socket *s, u_int events) { unsigned long flags; dev_dbg(&s->dev, "parse_uevents: events %08x\n", events); if (s->thread) { spin_lock_irqsave(&s->thread_lock, flags); s->sysfs_events |= events; spin_unlock_irqrestore(&s->thread_lock, flags); wake_up_process(s->thread); } } EXPORT_SYMBOL(pcmcia_parse_uevents); /* register pcmcia_callback */ int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c) { int ret = 0; /* s->skt_mutex also protects s->callback */ mutex_lock(&s->skt_mutex); if (c) { /* registration */ if (s->callback) { ret = -EBUSY; goto err; } s->callback = c; if ((s->state & (SOCKET_PRESENT|SOCKET_CARDBUS)) == SOCKET_PRESENT) s->callback->add(s); } else s->callback = NULL; err: mutex_unlock(&s->skt_mutex); return ret; } EXPORT_SYMBOL(pccard_register_pcmcia); /* I'm not sure which "reset" function this is supposed to use, * but for now, it uses the low-level interface's reset, not the * CIS register. 
*/ int pcmcia_reset_card(struct pcmcia_socket *skt) { int ret; dev_dbg(&skt->dev, "resetting socket\n"); mutex_lock(&skt->skt_mutex); do { if (!(skt->state & SOCKET_PRESENT)) { dev_dbg(&skt->dev, "can't reset, not present\n"); ret = -ENODEV; break; } if (skt->state & SOCKET_SUSPEND) { dev_dbg(&skt->dev, "can't reset, suspended\n"); ret = -EBUSY; break; } if (skt->state & SOCKET_CARDBUS) { dev_dbg(&skt->dev, "can't reset, is cardbus\n"); ret = -EPERM; break; } if (skt->callback) skt->callback->suspend(skt); mutex_lock(&skt->ops_mutex); ret = socket_reset(skt); mutex_unlock(&skt->ops_mutex); if ((ret == 0) && (skt->callback)) skt->callback->resume(skt); ret = 0; } while (0); mutex_unlock(&skt->skt_mutex); return ret; } /* reset_card */ EXPORT_SYMBOL(pcmcia_reset_card); static int pcmcia_socket_uevent(struct device *dev, struct kobj_uevent_env *env) { struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev); if (add_uevent_var(env, "SOCKET_NO=%u", s->sock)) return -ENOMEM; return 0; } static struct completion pcmcia_unload; static void pcmcia_release_socket_class(struct class *data) { complete(&pcmcia_unload); } #ifdef CONFIG_PM static int __pcmcia_pm_op(struct device *dev, int (*callback) (struct pcmcia_socket *skt)) { struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev); int ret; mutex_lock(&s->skt_mutex); ret = callback(s); mutex_unlock(&s->skt_mutex); return ret; } static int pcmcia_socket_dev_suspend_noirq(struct device *dev) { return __pcmcia_pm_op(dev, socket_suspend); } static int pcmcia_socket_dev_resume_noirq(struct device *dev) { return __pcmcia_pm_op(dev, socket_early_resume); } static int __used pcmcia_socket_dev_resume(struct device *dev) { return __pcmcia_pm_op(dev, socket_late_resume); } static const struct dev_pm_ops pcmcia_socket_pm_ops = { /* dev_resume may be called with IRQs enabled */ SET_SYSTEM_SLEEP_PM_OPS(NULL, pcmcia_socket_dev_resume) /* late suspend must be called with IRQs disabled */ .suspend_noirq = 
pcmcia_socket_dev_suspend_noirq, .freeze_noirq = pcmcia_socket_dev_suspend_noirq, .poweroff_noirq = pcmcia_socket_dev_suspend_noirq, /* early resume must be called with IRQs disabled */ .resume_noirq = pcmcia_socket_dev_resume_noirq, .thaw_noirq = pcmcia_socket_dev_resume_noirq, .restore_noirq = pcmcia_socket_dev_resume_noirq, }; #define PCMCIA_SOCKET_CLASS_PM_OPS (&pcmcia_socket_pm_ops) #else /* CONFIG_PM */ #define PCMCIA_SOCKET_CLASS_PM_OPS NULL #endif /* CONFIG_PM */ struct class pcmcia_socket_class = { .name = "pcmcia_socket", .dev_uevent = pcmcia_socket_uevent, .dev_release = pcmcia_release_socket, .class_release = pcmcia_release_socket_class, .pm = PCMCIA_SOCKET_CLASS_PM_OPS, }; EXPORT_SYMBOL(pcmcia_socket_class); static int __init init_pcmcia_cs(void) { init_completion(&pcmcia_unload); return class_register(&pcmcia_socket_class); } static void __exit exit_pcmcia_cs(void) { class_unregister(&pcmcia_socket_class); wait_for_completion(&pcmcia_unload); } subsys_initcall(init_pcmcia_cs); module_exit(exit_pcmcia_cs);
gpl-2.0
emceethemouth/kernel
arch/m68k/kernel/m68k_ksyms.c
8918
1061
#include <linux/module.h> asmlinkage long long __ashldi3 (long long, int); asmlinkage long long __ashrdi3 (long long, int); asmlinkage long long __lshrdi3 (long long, int); asmlinkage long long __muldi3 (long long, long long); /* The following are special because they're not called explicitly (the C compiler generates them). Fortunately, their interface isn't gonna change any time soon now, so it's OK to leave it out of version control. */ EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__muldi3); #if defined(CONFIG_CPU_HAS_NO_MULDIV64) /* * Simpler 68k and ColdFire parts also need a few other gcc functions. */ extern long long __divsi3(long long, long long); extern long long __modsi3(long long, long long); extern long long __mulsi3(long long, long long); extern long long __udivsi3(long long, long long); extern long long __umodsi3(long long, long long); EXPORT_SYMBOL(__divsi3); EXPORT_SYMBOL(__modsi3); EXPORT_SYMBOL(__mulsi3); EXPORT_SYMBOL(__udivsi3); EXPORT_SYMBOL(__umodsi3); #endif
gpl-2.0
SimpleAOSP-Kernel/kernel_flounder
drivers/net/wireless/b43legacy/debugfs.c
9174
12066
/* Broadcom B43legacy wireless driver debugfs driver debugging code Copyright (c) 2005-2007 Michael Buesch <m@bues.ch> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/mutex.h> #include "b43legacy.h" #include "main.h" #include "debugfs.h" #include "dma.h" #include "pio.h" #include "xmit.h" /* The root directory. */ static struct dentry *rootdir; struct b43legacy_debugfs_fops { ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize); int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count); struct file_operations fops; /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */ size_t file_struct_offset; /* Take wl->irq_lock before calling read/write? */ bool take_irqlock; }; static inline struct b43legacy_dfs_file * fops_to_dfs_file(struct b43legacy_wldev *dev, const struct b43legacy_debugfs_fops *dfops) { void *p; p = dev->dfsentry; p += dfops->file_struct_offset; return p; } #define fappend(fmt, x...) 
\ do { \ if (bufsize - count) \ count += snprintf(buf + count, \ bufsize - count, \ fmt , ##x); \ else \ printk(KERN_ERR "b43legacy: fappend overflow\n"); \ } while (0) /* wl->irq_lock is locked */ static ssize_t tsf_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) { ssize_t count = 0; u64 tsf; b43legacy_tsf_read(dev, &tsf); fappend("0x%08x%08x\n", (unsigned int)((tsf & 0xFFFFFFFF00000000ULL) >> 32), (unsigned int)(tsf & 0xFFFFFFFFULL)); return count; } /* wl->irq_lock is locked */ static int tsf_write_file(struct b43legacy_wldev *dev, const char *buf, size_t count) { u64 tsf; if (sscanf(buf, "%llu", (unsigned long long *)(&tsf)) != 1) return -EINVAL; b43legacy_tsf_write(dev, tsf); return 0; } /* wl->irq_lock is locked */ static ssize_t ucode_regs_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) { ssize_t count = 0; int i; for (i = 0; i < 64; i++) { fappend("r%d = 0x%04x\n", i, b43legacy_shm_read16(dev, B43legacy_SHM_WIRELESS, i)); } return count; } /* wl->irq_lock is locked */ static ssize_t shm_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) { ssize_t count = 0; int i; u16 tmp; __le16 *le16buf = (__le16 *)buf; for (i = 0; i < 0x1000; i++) { if (bufsize < sizeof(tmp)) break; tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 2 * i); le16buf[i] = cpu_to_le16(tmp); count += sizeof(tmp); bufsize -= sizeof(tmp); } return count; } static ssize_t txstat_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) { struct b43legacy_txstatus_log *log = &dev->dfsentry->txstatlog; ssize_t count = 0; unsigned long flags; int i, idx; struct b43legacy_txstatus *stat; spin_lock_irqsave(&log->lock, flags); if (log->end < 0) { fappend("Nothing transmitted, yet\n"); goto out_unlock; } fappend("b43legacy TX status reports:\n\n" "index | cookie | seq | phy_stat | frame_count | " "rts_count | supp_reason | pm_indicated | " "intermediate | for_ampdu | acked\n" "---\n"); i = log->end + 1; idx = 0; while (1) { if (i == 
B43legacy_NR_LOGGED_TXSTATUS) i = 0; stat = &(log->log[i]); if (stat->cookie) { fappend("%03d | " "0x%04X | 0x%04X | 0x%02X | " "0x%X | 0x%X | " "%u | %u | " "%u | %u | %u\n", idx, stat->cookie, stat->seq, stat->phy_stat, stat->frame_count, stat->rts_count, stat->supp_reason, stat->pm_indicated, stat->intermediate, stat->for_ampdu, stat->acked); idx++; } if (i == log->end) break; i++; } out_unlock: spin_unlock_irqrestore(&log->lock, flags); return count; } /* wl->irq_lock is locked */ static int restart_write_file(struct b43legacy_wldev *dev, const char *buf, size_t count) { int err = 0; if (count > 0 && buf[0] == '1') { b43legacy_controller_restart(dev, "manually restarted"); } else err = -EINVAL; return err; } #undef fappend static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct b43legacy_wldev *dev; struct b43legacy_debugfs_fops *dfops; struct b43legacy_dfs_file *dfile; ssize_t uninitialized_var(ret); char *buf; const size_t bufsize = 1024 * 16; /* 16 KiB buffer */ const size_t buforder = get_order(bufsize); int err = 0; if (!count) return 0; dev = file->private_data; if (!dev) return -ENODEV; mutex_lock(&dev->wl->mutex); if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { err = -ENODEV; goto out_unlock; } dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops); if (!dfops->read) { err = -ENOSYS; goto out_unlock; } dfile = fops_to_dfs_file(dev, dfops); if (!dfile->buffer) { buf = (char *)__get_free_pages(GFP_KERNEL, buforder); if (!buf) { err = -ENOMEM; goto out_unlock; } memset(buf, 0, bufsize); if (dfops->take_irqlock) { spin_lock_irq(&dev->wl->irq_lock); ret = dfops->read(dev, buf, bufsize); spin_unlock_irq(&dev->wl->irq_lock); } else ret = dfops->read(dev, buf, bufsize); if (ret <= 0) { free_pages((unsigned long)buf, buforder); err = ret; goto out_unlock; } dfile->data_len = ret; dfile->buffer = buf; } ret = simple_read_from_buffer(userbuf, count, ppos, dfile->buffer, 
dfile->data_len); if (*ppos >= dfile->data_len) { free_pages((unsigned long)dfile->buffer, buforder); dfile->buffer = NULL; dfile->data_len = 0; } out_unlock: mutex_unlock(&dev->wl->mutex); return err ? err : ret; } static ssize_t b43legacy_debugfs_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct b43legacy_wldev *dev; struct b43legacy_debugfs_fops *dfops; char *buf; int err = 0; if (!count) return 0; if (count > PAGE_SIZE) return -E2BIG; dev = file->private_data; if (!dev) return -ENODEV; mutex_lock(&dev->wl->mutex); if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { err = -ENODEV; goto out_unlock; } dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops); if (!dfops->write) { err = -ENOSYS; goto out_unlock; } buf = (char *)get_zeroed_page(GFP_KERNEL); if (!buf) { err = -ENOMEM; goto out_unlock; } if (copy_from_user(buf, userbuf, count)) { err = -EFAULT; goto out_freepage; } if (dfops->take_irqlock) { spin_lock_irq(&dev->wl->irq_lock); err = dfops->write(dev, buf, count); spin_unlock_irq(&dev->wl->irq_lock); } else err = dfops->write(dev, buf, count); if (err) goto out_freepage; out_freepage: free_page((unsigned long)buf); out_unlock: mutex_unlock(&dev->wl->mutex); return err ? 
err : count; } #define B43legacy_DEBUGFS_FOPS(name, _read, _write, _take_irqlock) \ static struct b43legacy_debugfs_fops fops_##name = { \ .read = _read, \ .write = _write, \ .fops = { \ .open = simple_open, \ .read = b43legacy_debugfs_read, \ .write = b43legacy_debugfs_write, \ .llseek = generic_file_llseek, \ }, \ .file_struct_offset = offsetof(struct b43legacy_dfsentry, \ file_##name), \ .take_irqlock = _take_irqlock, \ } B43legacy_DEBUGFS_FOPS(tsf, tsf_read_file, tsf_write_file, 1); B43legacy_DEBUGFS_FOPS(ucode_regs, ucode_regs_read_file, NULL, 1); B43legacy_DEBUGFS_FOPS(shm, shm_read_file, NULL, 1); B43legacy_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0); B43legacy_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1); int b43legacy_debug(struct b43legacy_wldev *dev, enum b43legacy_dyndbg feature) { return !!(dev->dfsentry && dev->dfsentry->dyn_debug[feature]); } static void b43legacy_remove_dynamic_debug(struct b43legacy_wldev *dev) { struct b43legacy_dfsentry *e = dev->dfsentry; int i; for (i = 0; i < __B43legacy_NR_DYNDBG; i++) debugfs_remove(e->dyn_debug_dentries[i]); } static void b43legacy_add_dynamic_debug(struct b43legacy_wldev *dev) { struct b43legacy_dfsentry *e = dev->dfsentry; struct dentry *d; #define add_dyn_dbg(name, id, initstate) do { \ e->dyn_debug[id] = (initstate); \ d = debugfs_create_bool(name, 0600, e->subdir, \ &(e->dyn_debug[id])); \ if (!IS_ERR(d)) \ e->dyn_debug_dentries[id] = d; \ } while (0) add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, 0); add_dyn_dbg("debug_dmaoverflow", B43legacy_DBG_DMAOVERFLOW, 0); add_dyn_dbg("debug_dmaverbose", B43legacy_DBG_DMAVERBOSE, 0); add_dyn_dbg("debug_pwork_fast", B43legacy_DBG_PWORK_FAST, 0); add_dyn_dbg("debug_pwork_stop", B43legacy_DBG_PWORK_STOP, 0); #undef add_dyn_dbg } void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev) { struct b43legacy_dfsentry *e; struct b43legacy_txstatus_log *log; char devdir[16]; B43legacy_WARN_ON(!dev); e = kzalloc(sizeof(*e), GFP_KERNEL); if (!e) { 
b43legacyerr(dev->wl, "debugfs: add device OOM\n"); return; } e->dev = dev; log = &e->txstatlog; log->log = kcalloc(B43legacy_NR_LOGGED_TXSTATUS, sizeof(struct b43legacy_txstatus), GFP_KERNEL); if (!log->log) { b43legacyerr(dev->wl, "debugfs: add device txstatus OOM\n"); kfree(e); return; } log->end = -1; spin_lock_init(&log->lock); dev->dfsentry = e; snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy)); e->subdir = debugfs_create_dir(devdir, rootdir); if (!e->subdir || IS_ERR(e->subdir)) { if (e->subdir == ERR_PTR(-ENODEV)) { b43legacydbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not " "enabled in kernel config\n"); } else { b43legacyerr(dev->wl, "debugfs: cannot create %s directory\n", devdir); } dev->dfsentry = NULL; kfree(log->log); kfree(e); return; } #define ADD_FILE(name, mode) \ do { \ struct dentry *d; \ d = debugfs_create_file(__stringify(name), \ mode, e->subdir, dev, \ &fops_##name.fops); \ e->file_##name.dentry = NULL; \ if (!IS_ERR(d)) \ e->file_##name.dentry = d; \ } while (0) ADD_FILE(tsf, 0600); ADD_FILE(ucode_regs, 0400); ADD_FILE(shm, 0400); ADD_FILE(txstat, 0400); ADD_FILE(restart, 0200); #undef ADD_FILE b43legacy_add_dynamic_debug(dev); } void b43legacy_debugfs_remove_device(struct b43legacy_wldev *dev) { struct b43legacy_dfsentry *e; if (!dev) return; e = dev->dfsentry; if (!e) return; b43legacy_remove_dynamic_debug(dev); debugfs_remove(e->file_tsf.dentry); debugfs_remove(e->file_ucode_regs.dentry); debugfs_remove(e->file_shm.dentry); debugfs_remove(e->file_txstat.dentry); debugfs_remove(e->file_restart.dentry); debugfs_remove(e->subdir); kfree(e->txstatlog.log); kfree(e); } void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev, const struct b43legacy_txstatus *status) { struct b43legacy_dfsentry *e = dev->dfsentry; struct b43legacy_txstatus_log *log; struct b43legacy_txstatus *cur; int i; if (!e) return; log = &e->txstatlog; B43legacy_WARN_ON(!irqs_disabled()); spin_lock(&log->lock); i = log->end + 1; if (i == 
B43legacy_NR_LOGGED_TXSTATUS) i = 0; log->end = i; cur = &(log->log[i]); memcpy(cur, status, sizeof(*cur)); spin_unlock(&log->lock); } void b43legacy_debugfs_init(void) { rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); if (IS_ERR(rootdir)) rootdir = NULL; } void b43legacy_debugfs_exit(void) { debugfs_remove(rootdir); }
gpl-2.0
junkie2100/android_kernel_zte_quantum
arch/h8300/boot/compressed/misc.c
11478
4342
/* * arch/h8300/boot/compressed/misc.c * * This is a collection of several routines from gzip-1.0.3 * adapted for Linux. * * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 * * Adapted for h8300 by Yoshinori Sato 2006 */ #include <asm/uaccess.h> /* * gzip declarations */ #define OF(args) args #define STATIC static #undef memset #undef memcpy #define memzero(s, n) memset ((s), 0, (n)) typedef unsigned char uch; typedef unsigned short ush; typedef unsigned long ulg; #define WSIZE 0x8000 /* Window size must be at least 32k, */ /* and a power of two */ static uch *inbuf; /* input buffer */ static uch window[WSIZE]; /* Sliding window buffer */ static unsigned insize = 0; /* valid bytes in inbuf */ static unsigned inptr = 0; /* index of next byte to be processed in inbuf */ static unsigned outcnt = 0; /* bytes in output buffer */ /* gzip flag byte */ #define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ #define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ #define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ #define ORIG_NAME 0x08 /* bit 3 set: original file name present */ #define COMMENT 0x10 /* bit 4 set: file comment present */ #define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ #define RESERVED 0xC0 /* bit 6,7: reserved */ #define get_byte() (inptr < insize ? 
inbuf[inptr++] : fill_inbuf()) /* Diagnostic functions */ #ifdef DEBUG # define Assert(cond,msg) {if(!(cond)) error(msg);} # define Trace(x) fprintf x # define Tracev(x) {if (verbose) fprintf x ;} # define Tracevv(x) {if (verbose>1) fprintf x ;} # define Tracec(c,x) {if (verbose && (c)) fprintf x ;} # define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} #else # define Assert(cond,msg) # define Trace(x) # define Tracev(x) # define Tracevv(x) # define Tracec(c,x) # define Tracecv(c,x) #endif static int fill_inbuf(void); static void flush_window(void); static void error(char *m); extern char input_data[]; extern int input_len; static long bytes_out = 0; static uch *output_data; static unsigned long output_ptr = 0; static void error(char *m); int puts(const char *); extern int _text; /* Defined in vmlinux.lds.S */ extern int _end; static unsigned long free_mem_ptr; static unsigned long free_mem_end_ptr; #define HEAP_SIZE 0x10000 #include "../../../../lib/inflate.c" #define SCR *((volatile unsigned char *)0xffff8a) #define TDR *((volatile unsigned char *)0xffff8b) #define SSR *((volatile unsigned char *)0xffff8c) int puts(const char *s) { return 0; } void* memset(void* s, int c, size_t n) { int i; char *ss = (char*)s; for (i=0;i<n;i++) ss[i] = c; return s; } void* memcpy(void* __dest, __const void* __src, size_t __n) { int i; char *d = (char *)__dest, *s = (char *)__src; for (i=0;i<__n;i++) d[i] = s[i]; return __dest; } /* =========================================================================== * Fill the input buffer. This is called only when the buffer is empty * and at least one byte is really needed. */ static int fill_inbuf(void) { if (insize != 0) { error("ran out of input data"); } inbuf = input_data; insize = input_len; inptr = 1; return inbuf[0]; } /* =========================================================================== * Write the output window window[0..outcnt-1] and update crc and bytes_out. * (Used for the decompressed data only.) 
*/ static void flush_window(void) { ulg c = crc; /* temporary variable */ unsigned n; uch *in, *out, ch; in = window; out = &output_data[output_ptr]; for (n = 0; n < outcnt; n++) { ch = *out++ = *in++; c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); } crc = c; bytes_out += (ulg)outcnt; output_ptr += (ulg)outcnt; outcnt = 0; } static void error(char *x) { puts("\n\n"); puts(x); puts("\n\n -- System halted"); while(1); /* Halt */ } #define STACK_SIZE (4096) long user_stack [STACK_SIZE]; long* stack_start = &user_stack[STACK_SIZE]; void decompress_kernel(void) { output_data = 0; output_ptr = (unsigned long)0x400000; free_mem_ptr = (unsigned long)&_end; free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; makecrc(); puts("Uncompressing Linux... "); gunzip(); puts("Ok, booting the kernel.\n"); }
gpl-2.0
AndreiLux/ref-3.10
fs/minix/itree_common.c
14550
7854
/* Generic part */ typedef struct { block_t *p; block_t key; struct buffer_head *bh; } Indirect; static DEFINE_RWLOCK(pointers_lock); static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v) { p->key = *(p->p = v); p->bh = bh; } static inline int verify_chain(Indirect *from, Indirect *to) { while (from <= to && from->key == *from->p) from++; return (from > to); } static inline block_t *block_end(struct buffer_head *bh) { return (block_t *)((char*)bh->b_data + bh->b_size); } static inline Indirect *get_branch(struct inode *inode, int depth, int *offsets, Indirect chain[DEPTH], int *err) { struct super_block *sb = inode->i_sb; Indirect *p = chain; struct buffer_head *bh; *err = 0; /* i_data is not going away, no lock needed */ add_chain (chain, NULL, i_data(inode) + *offsets); if (!p->key) goto no_block; while (--depth) { bh = sb_bread(sb, block_to_cpu(p->key)); if (!bh) goto failure; read_lock(&pointers_lock); if (!verify_chain(chain, p)) goto changed; add_chain(++p, bh, (block_t *)bh->b_data + *++offsets); read_unlock(&pointers_lock); if (!p->key) goto no_block; } return NULL; changed: read_unlock(&pointers_lock); brelse(bh); *err = -EAGAIN; goto no_block; failure: *err = -EIO; no_block: return p; } static int alloc_branch(struct inode *inode, int num, int *offsets, Indirect *branch) { int n = 0; int i; int parent = minix_new_block(inode); branch[0].key = cpu_to_block(parent); if (parent) for (n = 1; n < num; n++) { struct buffer_head *bh; /* Allocate the next block */ int nr = minix_new_block(inode); if (!nr) break; branch[n].key = cpu_to_block(nr); bh = sb_getblk(inode->i_sb, parent); lock_buffer(bh); memset(bh->b_data, 0, bh->b_size); branch[n].bh = bh; branch[n].p = (block_t*) bh->b_data + offsets[n]; *branch[n].p = branch[n].key; set_buffer_uptodate(bh); unlock_buffer(bh); mark_buffer_dirty_inode(bh, inode); parent = nr; } if (n == num) return 0; /* Allocation failed, free what we already allocated */ for (i = 1; i < n; i++) 
bforget(branch[i].bh); for (i = 0; i < n; i++) minix_free_block(inode, block_to_cpu(branch[i].key)); return -ENOSPC; } static inline int splice_branch(struct inode *inode, Indirect chain[DEPTH], Indirect *where, int num) { int i; write_lock(&pointers_lock); /* Verify that place we are splicing to is still there and vacant */ if (!verify_chain(chain, where-1) || *where->p) goto changed; *where->p = where->key; write_unlock(&pointers_lock); /* We are done with atomic stuff, now do the rest of housekeeping */ inode->i_ctime = CURRENT_TIME_SEC; /* had we spliced it onto indirect block? */ if (where->bh) mark_buffer_dirty_inode(where->bh, inode); mark_inode_dirty(inode); return 0; changed: write_unlock(&pointers_lock); for (i = 1; i < num; i++) bforget(where[i].bh); for (i = 0; i < num; i++) minix_free_block(inode, block_to_cpu(where[i].key)); return -EAGAIN; } static inline int get_block(struct inode * inode, sector_t block, struct buffer_head *bh, int create) { int err = -EIO; int offsets[DEPTH]; Indirect chain[DEPTH]; Indirect *partial; int left; int depth = block_to_path(inode, block, offsets); if (depth == 0) goto out; reread: partial = get_branch(inode, depth, offsets, chain, &err); /* Simplest case - block found, no allocation needed */ if (!partial) { got_it: map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key)); /* Clean up and exit */ partial = chain+depth-1; /* the whole chain */ goto cleanup; } /* Next simple case - plain lookup or failed read of indirect block */ if (!create || err == -EIO) { cleanup: while (partial > chain) { brelse(partial->bh); partial--; } out: return err; } /* * Indirect block might be removed by truncate while we were * reading it. Handling of that case (forget what we've got and * reread) is taken out of the main path. 
*/ if (err == -EAGAIN) goto changed; left = (chain + depth) - partial; err = alloc_branch(inode, left, offsets+(partial-chain), partial); if (err) goto cleanup; if (splice_branch(inode, chain, partial, left) < 0) goto changed; set_buffer_new(bh); goto got_it; changed: while (partial > chain) { brelse(partial->bh); partial--; } goto reread; } static inline int all_zeroes(block_t *p, block_t *q) { while (p < q) if (*p++) return 0; return 1; } static Indirect *find_shared(struct inode *inode, int depth, int offsets[DEPTH], Indirect chain[DEPTH], block_t *top) { Indirect *partial, *p; int k, err; *top = 0; for (k = depth; k > 1 && !offsets[k-1]; k--) ; partial = get_branch(inode, k, offsets, chain, &err); write_lock(&pointers_lock); if (!partial) partial = chain + k-1; if (!partial->key && *partial->p) { write_unlock(&pointers_lock); goto no_top; } for (p=partial;p>chain && all_zeroes((block_t*)p->bh->b_data,p->p);p--) ; if (p == chain + k - 1 && p > chain) { p->p--; } else { *top = *p->p; *p->p = 0; } write_unlock(&pointers_lock); while(partial > p) { brelse(partial->bh); partial--; } no_top: return partial; } static inline void free_data(struct inode *inode, block_t *p, block_t *q) { unsigned long nr; for ( ; p < q ; p++) { nr = block_to_cpu(*p); if (nr) { *p = 0; minix_free_block(inode, nr); } } } static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth) { struct buffer_head * bh; unsigned long nr; if (depth--) { for ( ; p < q ; p++) { nr = block_to_cpu(*p); if (!nr) continue; *p = 0; bh = sb_bread(inode->i_sb, nr); if (!bh) continue; free_branches(inode, (block_t*)bh->b_data, block_end(bh), depth); bforget(bh); minix_free_block(inode, nr); mark_inode_dirty(inode); } } else free_data(inode, p, q); } static inline void truncate (struct inode * inode) { struct super_block *sb = inode->i_sb; block_t *idata = i_data(inode); int offsets[DEPTH]; Indirect chain[DEPTH]; Indirect *partial; block_t nr = 0; int n; int first_whole; long iblock; iblock = 
(inode->i_size + sb->s_blocksize -1) >> sb->s_blocksize_bits; block_truncate_page(inode->i_mapping, inode->i_size, get_block); n = block_to_path(inode, iblock, offsets); if (!n) return; if (n == 1) { free_data(inode, idata+offsets[0], idata + DIRECT); first_whole = 0; goto do_indirects; } first_whole = offsets[0] + 1 - DIRECT; partial = find_shared(inode, n, offsets, chain, &nr); if (nr) { if (partial == chain) mark_inode_dirty(inode); else mark_buffer_dirty_inode(partial->bh, inode); free_branches(inode, &nr, &nr+1, (chain+n-1) - partial); } /* Clear the ends of indirect blocks on the shared branch */ while (partial > chain) { free_branches(inode, partial->p + 1, block_end(partial->bh), (chain+n-1) - partial); mark_buffer_dirty_inode(partial->bh, inode); brelse (partial->bh); partial--; } do_indirects: /* Kill the remaining (whole) subtrees */ while (first_whole < DEPTH-1) { nr = idata[DIRECT+first_whole]; if (nr) { idata[DIRECT+first_whole] = 0; mark_inode_dirty(inode); free_branches(inode, &nr, &nr+1, first_whole+1); } first_whole++; } inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); } static inline unsigned nblocks(loff_t size, struct super_block *sb) { int k = sb->s_blocksize_bits - 10; unsigned blocks, res, direct = DIRECT, i = DEPTH; blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k); res = blocks; while (--i && blocks > direct) { blocks -= direct; blocks += sb->s_blocksize/sizeof(block_t) - 1; blocks /= sb->s_blocksize/sizeof(block_t); res += blocks; direct = 1; } return res; }
gpl-2.0
gianmarcorev/rpi_linux
drivers/extcon/extcon.c
215
28908
/* * drivers/extcon/extcon_class.c * * External connector (extcon) class driver * * Copyright (C) 2012 Samsung Electronics * Author: Donggeun Kim <dg77.kim@samsung.com> * Author: MyungJoo Ham <myungjoo.ham@samsung.com> * * based on android/drivers/switch/switch_class.c * Copyright (C) 2008 Google, Inc. * Author: Mike Lockwood <lockwood@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/err.h> #include <linux/extcon.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/sysfs.h> /* * extcon_cable_name suggests the standard cable names for commonly used * cable types. * * However, please do not use extcon_cable_name directly for extcon_dev * struct's supported_cable pointer unless your device really supports * every single port-type of the following cable names. Please choose cable * names that are actually used in your extcon device. 
*/ const char extcon_cable_name[][CABLE_NAME_MAX + 1] = { [EXTCON_USB] = "USB", [EXTCON_USB_HOST] = "USB-Host", [EXTCON_TA] = "TA", [EXTCON_FAST_CHARGER] = "Fast-charger", [EXTCON_SLOW_CHARGER] = "Slow-charger", [EXTCON_CHARGE_DOWNSTREAM] = "Charge-downstream", [EXTCON_HDMI] = "HDMI", [EXTCON_MHL] = "MHL", [EXTCON_DVI] = "DVI", [EXTCON_VGA] = "VGA", [EXTCON_DOCK] = "Dock", [EXTCON_LINE_IN] = "Line-in", [EXTCON_LINE_OUT] = "Line-out", [EXTCON_MIC_IN] = "Microphone", [EXTCON_HEADPHONE_OUT] = "Headphone", [EXTCON_SPDIF_IN] = "SPDIF-in", [EXTCON_SPDIF_OUT] = "SPDIF-out", [EXTCON_VIDEO_IN] = "Video-in", [EXTCON_VIDEO_OUT] = "Video-out", [EXTCON_MECHANICAL] = "Mechanical", }; static struct class *extcon_class; #if defined(CONFIG_ANDROID) static struct class_compat *switch_class; #endif /* CONFIG_ANDROID */ static LIST_HEAD(extcon_dev_list); static DEFINE_MUTEX(extcon_dev_list_lock); /** * check_mutually_exclusive - Check if new_state violates mutually_exclusive * condition. * @edev: the extcon device * @new_state: new cable attach status for @edev * * Returns 0 if nothing violates. Returns the index + 1 for the first * violated condition. 
*/ static int check_mutually_exclusive(struct extcon_dev *edev, u32 new_state) { int i = 0; if (!edev->mutually_exclusive) return 0; for (i = 0; edev->mutually_exclusive[i]; i++) { int weight; u32 correspondants = new_state & edev->mutually_exclusive[i]; /* calculate the total number of bits set */ weight = hweight32(correspondants); if (weight > 1) return i + 1; } return 0; } static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { int i, count = 0; struct extcon_dev *edev = dev_get_drvdata(dev); if (edev->print_state) { int ret = edev->print_state(edev, buf); if (ret >= 0) return ret; /* Use default if failed */ } if (edev->max_supported == 0) return sprintf(buf, "%u\n", edev->state); for (i = 0; i < SUPPORTED_CABLE_MAX; i++) { if (!edev->supported_cable[i]) break; count += sprintf(buf + count, "%s=%d\n", edev->supported_cable[i], !!(edev->state & (1 << i))); } return count; } static ssize_t state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u32 state; ssize_t ret = 0; struct extcon_dev *edev = dev_get_drvdata(dev); ret = sscanf(buf, "0x%x", &state); if (ret == 0) ret = -EINVAL; else ret = extcon_set_state(edev, state); if (ret < 0) return ret; return count; } static DEVICE_ATTR_RW(state); static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct extcon_dev *edev = dev_get_drvdata(dev); /* Optional callback given by the user */ if (edev->print_name) { int ret = edev->print_name(edev, buf); if (ret >= 0) return ret; } return sprintf(buf, "%s\n", dev_name(&edev->dev)); } static DEVICE_ATTR_RO(name); static ssize_t cable_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct extcon_cable *cable = container_of(attr, struct extcon_cable, attr_name); return sprintf(buf, "%s\n", cable->edev->supported_cable[cable->cable_index]); } static ssize_t cable_state_show(struct device *dev, struct device_attribute *attr, char *buf) { 
struct extcon_cable *cable = container_of(attr, struct extcon_cable, attr_state); return sprintf(buf, "%d\n", extcon_get_cable_state_(cable->edev, cable->cable_index)); } /** * extcon_update_state() - Update the cable attach states of the extcon device * only for the masked bits. * @edev: the extcon device * @mask: the bit mask to designate updated bits. * @state: new cable attach status for @edev * * Changing the state sends uevent with environment variable containing * the name of extcon device (envp[0]) and the state output (envp[1]). * Tizen uses this format for extcon device to get events from ports. * Android uses this format as well. * * Note that the notifier provides which bits are changed in the state * variable with the val parameter (second) to the callback. */ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state) { char name_buf[120]; char state_buf[120]; char *prop_buf; char *envp[3]; int env_offset = 0; int length; unsigned long flags; spin_lock_irqsave(&edev->lock, flags); if (edev->state != ((edev->state & ~mask) | (state & mask))) { u32 old_state = edev->state; if (check_mutually_exclusive(edev, (edev->state & ~mask) | (state & mask))) { spin_unlock_irqrestore(&edev->lock, flags); return -EPERM; } edev->state &= ~mask; edev->state |= state & mask; raw_notifier_call_chain(&edev->nh, old_state, edev); /* This could be in interrupt handler */ prop_buf = (char *)get_zeroed_page(GFP_ATOMIC); if (prop_buf) { length = name_show(&edev->dev, NULL, prop_buf); if (length > 0) { if (prop_buf[length - 1] == '\n') prop_buf[length - 1] = 0; snprintf(name_buf, sizeof(name_buf), "NAME=%s", prop_buf); envp[env_offset++] = name_buf; } length = state_show(&edev->dev, NULL, prop_buf); if (length > 0) { if (prop_buf[length - 1] == '\n') prop_buf[length - 1] = 0; snprintf(state_buf, sizeof(state_buf), "STATE=%s", prop_buf); envp[env_offset++] = state_buf; } envp[env_offset] = NULL; /* Unlock early before uevent */ spin_unlock_irqrestore(&edev->lock, 
flags); kobject_uevent_env(&edev->dev.kobj, KOBJ_CHANGE, envp); free_page((unsigned long)prop_buf); } else { /* Unlock early before uevent */ spin_unlock_irqrestore(&edev->lock, flags); dev_err(&edev->dev, "out of memory in extcon_set_state\n"); kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE); } } else { /* No changes */ spin_unlock_irqrestore(&edev->lock, flags); } return 0; } EXPORT_SYMBOL_GPL(extcon_update_state); /** * extcon_set_state() - Set the cable attach states of the extcon device. * @edev: the extcon device * @state: new cable attach status for @edev * * Note that notifier provides which bits are changed in the state * variable with the val parameter (second) to the callback. */ int extcon_set_state(struct extcon_dev *edev, u32 state) { return extcon_update_state(edev, 0xffffffff, state); } EXPORT_SYMBOL_GPL(extcon_set_state); /** * extcon_find_cable_index() - Get the cable index based on the cable name. * @edev: the extcon device that has the cable. * @cable_name: cable name to be searched. * * Note that accessing a cable state based on cable_index is faster than * cable_name because using cable_name induces a loop with strncmp(). * Thus, when get/set_cable_state is repeatedly used, using cable_index * is recommended. */ int extcon_find_cable_index(struct extcon_dev *edev, const char *cable_name) { int i; if (edev->supported_cable) { for (i = 0; edev->supported_cable[i]; i++) { if (!strncmp(edev->supported_cable[i], cable_name, CABLE_NAME_MAX)) return i; } } return -EINVAL; } EXPORT_SYMBOL_GPL(extcon_find_cable_index); /** * extcon_get_cable_state_() - Get the status of a specific cable. * @edev: the extcon device that has the cable. * @index: cable index that can be retrieved by extcon_find_cable_index(). 
*/ int extcon_get_cable_state_(struct extcon_dev *edev, int index) { if (index < 0 || (edev->max_supported && edev->max_supported <= index)) return -EINVAL; return !!(edev->state & (1 << index)); } EXPORT_SYMBOL_GPL(extcon_get_cable_state_); /** * extcon_get_cable_state() - Get the status of a specific cable. * @edev: the extcon device that has the cable. * @cable_name: cable name. * * Note that this is slower than extcon_get_cable_state_. */ int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name) { return extcon_get_cable_state_(edev, extcon_find_cable_index (edev, cable_name)); } EXPORT_SYMBOL_GPL(extcon_get_cable_state); /** * extcon_set_cable_state_() - Set the status of a specific cable. * @edev: the extcon device that has the cable. * @index: cable index that can be retrieved by * extcon_find_cable_index(). * @cable_state: the new cable status. The default semantics is * true: attached / false: detached. */ int extcon_set_cable_state_(struct extcon_dev *edev, int index, bool cable_state) { u32 state; if (index < 0 || (edev->max_supported && edev->max_supported <= index)) return -EINVAL; state = cable_state ? (1 << index) : 0; return extcon_update_state(edev, 1 << index, state); } EXPORT_SYMBOL_GPL(extcon_set_cable_state_); /** * extcon_set_cable_state() - Set the status of a specific cable. * @edev: the extcon device that has the cable. * @cable_name: cable name. * @cable_state: the new cable status. The default semantics is * true: attached / false: detached. * * Note that this is slower than extcon_set_cable_state_. 
*/ int extcon_set_cable_state(struct extcon_dev *edev, const char *cable_name, bool cable_state) { return extcon_set_cable_state_(edev, extcon_find_cable_index (edev, cable_name), cable_state); } EXPORT_SYMBOL_GPL(extcon_set_cable_state); /** * extcon_get_extcon_dev() - Get the extcon device instance from the name * @extcon_name: The extcon name provided with extcon_dev_register() */ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) { struct extcon_dev *sd; mutex_lock(&extcon_dev_list_lock); list_for_each_entry(sd, &extcon_dev_list, entry) { if (!strcmp(sd->name, extcon_name)) goto out; } sd = NULL; out: mutex_unlock(&extcon_dev_list_lock); return sd; } EXPORT_SYMBOL_GPL(extcon_get_extcon_dev); static int _call_per_cable(struct notifier_block *nb, unsigned long val, void *ptr) { struct extcon_specific_cable_nb *obj = container_of(nb, struct extcon_specific_cable_nb, internal_nb); struct extcon_dev *edev = ptr; if ((val & (1 << obj->cable_index)) != (edev->state & (1 << obj->cable_index))) { bool cable_state = true; obj->previous_value = val; if (val & (1 << obj->cable_index)) cable_state = false; return obj->user_nb->notifier_call(obj->user_nb, cable_state, ptr); } return NOTIFY_OK; } /** * extcon_register_interest() - Register a notifier for a state change of a * specific cable, not an entier set of cables of a * extcon device. * @obj: an empty extcon_specific_cable_nb object to be returned. * @extcon_name: the name of extcon device. * if NULL, extcon_register_interest will register * every cable with the target cable_name given. * @cable_name: the target cable name. * @nb: the notifier block to get notified. * * Provide an empty extcon_specific_cable_nb. extcon_register_interest() sets * the struct for you. * * extcon_register_interest is a helper function for those who want to get * notification for a single specific cable's status change. 
If a user wants * to get notification for any changes of all cables of a extcon device, * he/she should use the general extcon_register_notifier(). * * Note that the second parameter given to the callback of nb (val) is * "old_state", not the current state. The current state can be retrieved * by looking at the third pameter (edev pointer)'s state value. */ int extcon_register_interest(struct extcon_specific_cable_nb *obj, const char *extcon_name, const char *cable_name, struct notifier_block *nb) { unsigned long flags; int ret; if (!obj || !cable_name || !nb) return -EINVAL; if (extcon_name) { obj->edev = extcon_get_extcon_dev(extcon_name); if (!obj->edev) return -ENODEV; obj->cable_index = extcon_find_cable_index(obj->edev, cable_name); if (obj->cable_index < 0) return obj->cable_index; obj->user_nb = nb; obj->internal_nb.notifier_call = _call_per_cable; spin_lock_irqsave(&obj->edev->lock, flags); ret = raw_notifier_chain_register(&obj->edev->nh, &obj->internal_nb); spin_unlock_irqrestore(&obj->edev->lock, flags); return ret; } else { struct class_dev_iter iter; struct extcon_dev *extd; struct device *dev; if (!extcon_class) return -ENODEV; class_dev_iter_init(&iter, extcon_class, NULL, NULL); while ((dev = class_dev_iter_next(&iter))) { extd = dev_get_drvdata(dev); if (extcon_find_cable_index(extd, cable_name) < 0) continue; class_dev_iter_exit(&iter); return extcon_register_interest(obj, extd->name, cable_name, nb); } return -ENODEV; } } EXPORT_SYMBOL_GPL(extcon_register_interest); /** * extcon_unregister_interest() - Unregister the notifier registered by * extcon_register_interest(). * @obj: the extcon_specific_cable_nb object returned by * extcon_register_interest(). 
*/ int extcon_unregister_interest(struct extcon_specific_cable_nb *obj) { unsigned long flags; int ret; if (!obj) return -EINVAL; spin_lock_irqsave(&obj->edev->lock, flags); ret = raw_notifier_chain_unregister(&obj->edev->nh, &obj->internal_nb); spin_unlock_irqrestore(&obj->edev->lock, flags); return ret; } EXPORT_SYMBOL_GPL(extcon_unregister_interest); /** * extcon_register_notifier() - Register a notifiee to get notified by * any attach status changes from the extcon. * @edev: the extcon device. * @nb: a notifier block to be registered. * * Note that the second parameter given to the callback of nb (val) is * "old_state", not the current state. The current state can be retrieved * by looking at the third pameter (edev pointer)'s state value. */ int extcon_register_notifier(struct extcon_dev *edev, struct notifier_block *nb) { unsigned long flags; int ret; spin_lock_irqsave(&edev->lock, flags); ret = raw_notifier_chain_register(&edev->nh, nb); spin_unlock_irqrestore(&edev->lock, flags); return ret; } EXPORT_SYMBOL_GPL(extcon_register_notifier); /** * extcon_unregister_notifier() - Unregister a notifiee from the extcon device. * @edev: the extcon device. * @nb: a registered notifier block to be unregistered. 
*/ int extcon_unregister_notifier(struct extcon_dev *edev, struct notifier_block *nb) { unsigned long flags; int ret; spin_lock_irqsave(&edev->lock, flags); ret = raw_notifier_chain_unregister(&edev->nh, nb); spin_unlock_irqrestore(&edev->lock, flags); return ret; } EXPORT_SYMBOL_GPL(extcon_unregister_notifier); static struct attribute *extcon_attrs[] = { &dev_attr_state.attr, &dev_attr_name.attr, NULL, }; ATTRIBUTE_GROUPS(extcon); static int create_extcon_class(void) { if (!extcon_class) { extcon_class = class_create(THIS_MODULE, "extcon"); if (IS_ERR(extcon_class)) return PTR_ERR(extcon_class); extcon_class->dev_groups = extcon_groups; #if defined(CONFIG_ANDROID) switch_class = class_compat_register("switch"); if (WARN(!switch_class, "cannot allocate")) return -ENOMEM; #endif /* CONFIG_ANDROID */ } return 0; } static void extcon_dev_release(struct device *dev) { } static const char *muex_name = "mutually_exclusive"; static void dummy_sysfs_dev_release(struct device *dev) { } /* * extcon_dev_allocate() - Allocate the memory of extcon device. * @supported_cable: Array of supported cable names ending with NULL. * If supported_cable is NULL, cable name related APIs * are disabled. * * This function allocates the memory for extcon device without allocating * memory in each extcon provider driver and initialize default setting for * extcon device. * * Return the pointer of extcon device if success or ERR_PTR(err) if fail */ struct extcon_dev *extcon_dev_allocate(const char **supported_cable) { struct extcon_dev *edev; edev = kzalloc(sizeof(*edev), GFP_KERNEL); if (!edev) return ERR_PTR(-ENOMEM); edev->max_supported = 0; edev->supported_cable = supported_cable; return edev; } /* * extcon_dev_free() - Free the memory of extcon device. 
* @edev: the extcon device to free */ void extcon_dev_free(struct extcon_dev *edev) { kfree(edev); } EXPORT_SYMBOL_GPL(extcon_dev_free); static int devm_extcon_dev_match(struct device *dev, void *res, void *data) { struct extcon_dev **r = res; if (WARN_ON(!r || !*r)) return 0; return *r == data; } static void devm_extcon_dev_release(struct device *dev, void *res) { extcon_dev_free(*(struct extcon_dev **)res); } /** * devm_extcon_dev_allocate - Allocate managed extcon device * @dev: device owning the extcon device being created * @supported_cable: Array of supported cable names ending with NULL. * If supported_cable is NULL, cable name related APIs * are disabled. * * This function manages automatically the memory of extcon device using device * resource management and simplify the control of freeing the memory of extcon * device. * * Returns the pointer memory of allocated extcon_dev if success * or ERR_PTR(err) if fail */ struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, const char **supported_cable) { struct extcon_dev **ptr, *edev; ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); edev = extcon_dev_allocate(supported_cable); if (IS_ERR(edev)) { devres_free(ptr); return edev; } edev->dev.parent = dev; *ptr = edev; devres_add(dev, ptr); return edev; } EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate); void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev) { WARN_ON(devres_release(dev, devm_extcon_dev_release, devm_extcon_dev_match, edev)); } EXPORT_SYMBOL_GPL(devm_extcon_dev_free); /** * extcon_dev_register() - Register a new extcon device * @edev : the new extcon device (should be allocated before calling) * * Among the members of edev struct, please set the "user initializing data" * in any case and set the "optional callbacks" if required. However, please * do not set the values of "internal data", which are initialized by * this function. 
*/ int extcon_dev_register(struct extcon_dev *edev) { int ret, index = 0; if (!extcon_class) { ret = create_extcon_class(); if (ret < 0) return ret; } if (edev->supported_cable) { /* Get size of array */ for (index = 0; edev->supported_cable[index]; index++) ; edev->max_supported = index; } else { edev->max_supported = 0; } if (index > SUPPORTED_CABLE_MAX) { dev_err(&edev->dev, "extcon: maximum number of supported cables exceeded.\n"); return -EINVAL; } edev->dev.class = extcon_class; edev->dev.release = extcon_dev_release; edev->name = edev->name ? edev->name : dev_name(edev->dev.parent); if (IS_ERR_OR_NULL(edev->name)) { dev_err(&edev->dev, "extcon device name is null\n"); return -EINVAL; } dev_set_name(&edev->dev, "%s", edev->name); if (edev->max_supported) { char buf[10]; char *str; struct extcon_cable *cable; edev->cables = kzalloc(sizeof(struct extcon_cable) * edev->max_supported, GFP_KERNEL); if (!edev->cables) { ret = -ENOMEM; goto err_sysfs_alloc; } for (index = 0; index < edev->max_supported; index++) { cable = &edev->cables[index]; snprintf(buf, 10, "cable.%d", index); str = kzalloc(sizeof(char) * (strlen(buf) + 1), GFP_KERNEL); if (!str) { for (index--; index >= 0; index--) { cable = &edev->cables[index]; kfree(cable->attr_g.name); } ret = -ENOMEM; goto err_alloc_cables; } strcpy(str, buf); cable->edev = edev; cable->cable_index = index; cable->attrs[0] = &cable->attr_name.attr; cable->attrs[1] = &cable->attr_state.attr; cable->attrs[2] = NULL; cable->attr_g.name = str; cable->attr_g.attrs = cable->attrs; sysfs_attr_init(&cable->attr_name.attr); cable->attr_name.attr.name = "name"; cable->attr_name.attr.mode = 0444; cable->attr_name.show = cable_name_show; sysfs_attr_init(&cable->attr_state.attr); cable->attr_state.attr.name = "state"; cable->attr_state.attr.mode = 0444; cable->attr_state.show = cable_state_show; } } if (edev->max_supported && edev->mutually_exclusive) { char buf[80]; char *name; /* Count the size of mutually_exclusive array */ for 
(index = 0; edev->mutually_exclusive[index]; index++) ; edev->attrs_muex = kzalloc(sizeof(struct attribute *) * (index + 1), GFP_KERNEL); if (!edev->attrs_muex) { ret = -ENOMEM; goto err_muex; } edev->d_attrs_muex = kzalloc(sizeof(struct device_attribute) * index, GFP_KERNEL); if (!edev->d_attrs_muex) { ret = -ENOMEM; kfree(edev->attrs_muex); goto err_muex; } for (index = 0; edev->mutually_exclusive[index]; index++) { sprintf(buf, "0x%x", edev->mutually_exclusive[index]); name = kzalloc(sizeof(char) * (strlen(buf) + 1), GFP_KERNEL); if (!name) { for (index--; index >= 0; index--) { kfree(edev->d_attrs_muex[index].attr. name); } kfree(edev->d_attrs_muex); kfree(edev->attrs_muex); ret = -ENOMEM; goto err_muex; } strcpy(name, buf); sysfs_attr_init(&edev->d_attrs_muex[index].attr); edev->d_attrs_muex[index].attr.name = name; edev->d_attrs_muex[index].attr.mode = 0000; edev->attrs_muex[index] = &edev->d_attrs_muex[index] .attr; } edev->attr_g_muex.name = muex_name; edev->attr_g_muex.attrs = edev->attrs_muex; } if (edev->max_supported) { edev->extcon_dev_type.groups = kzalloc(sizeof(struct attribute_group *) * (edev->max_supported + 2), GFP_KERNEL); if (!edev->extcon_dev_type.groups) { ret = -ENOMEM; goto err_alloc_groups; } edev->extcon_dev_type.name = dev_name(&edev->dev); edev->extcon_dev_type.release = dummy_sysfs_dev_release; for (index = 0; index < edev->max_supported; index++) edev->extcon_dev_type.groups[index] = &edev->cables[index].attr_g; if (edev->mutually_exclusive) edev->extcon_dev_type.groups[index] = &edev->attr_g_muex; edev->dev.type = &edev->extcon_dev_type; } ret = device_register(&edev->dev); if (ret) { put_device(&edev->dev); goto err_dev; } #if defined(CONFIG_ANDROID) if (switch_class) ret = class_compat_create_link(switch_class, &edev->dev, NULL); #endif /* CONFIG_ANDROID */ spin_lock_init(&edev->lock); RAW_INIT_NOTIFIER_HEAD(&edev->nh); dev_set_drvdata(&edev->dev, edev); edev->state = 0; mutex_lock(&extcon_dev_list_lock); list_add(&edev->entry, 
&extcon_dev_list); mutex_unlock(&extcon_dev_list_lock); return 0; err_dev: if (edev->max_supported) kfree(edev->extcon_dev_type.groups); err_alloc_groups: if (edev->max_supported && edev->mutually_exclusive) { for (index = 0; edev->mutually_exclusive[index]; index++) kfree(edev->d_attrs_muex[index].attr.name); kfree(edev->d_attrs_muex); kfree(edev->attrs_muex); } err_muex: for (index = 0; index < edev->max_supported; index++) kfree(edev->cables[index].attr_g.name); err_alloc_cables: if (edev->max_supported) kfree(edev->cables); err_sysfs_alloc: return ret; } EXPORT_SYMBOL_GPL(extcon_dev_register); /** * extcon_dev_unregister() - Unregister the extcon device. * @edev: the extcon device instance to be unregistered. * * Note that this does not call kfree(edev) because edev was not allocated * by this class. */ void extcon_dev_unregister(struct extcon_dev *edev) { int index; mutex_lock(&extcon_dev_list_lock); list_del(&edev->entry); mutex_unlock(&extcon_dev_list_lock); if (IS_ERR_OR_NULL(get_device(&edev->dev))) { dev_err(&edev->dev, "Failed to unregister extcon_dev (%s)\n", dev_name(&edev->dev)); return; } device_unregister(&edev->dev); if (edev->mutually_exclusive && edev->max_supported) { for (index = 0; edev->mutually_exclusive[index]; index++) kfree(edev->d_attrs_muex[index].attr.name); kfree(edev->d_attrs_muex); kfree(edev->attrs_muex); } for (index = 0; index < edev->max_supported; index++) kfree(edev->cables[index].attr_g.name); if (edev->max_supported) { kfree(edev->extcon_dev_type.groups); kfree(edev->cables); } #if defined(CONFIG_ANDROID) if (switch_class) class_compat_remove_link(switch_class, &edev->dev, NULL); #endif put_device(&edev->dev); } EXPORT_SYMBOL_GPL(extcon_dev_unregister); static void devm_extcon_dev_unreg(struct device *dev, void *res) { extcon_dev_unregister(*(struct extcon_dev **)res); } /** * devm_extcon_dev_register() - Resource-managed extcon_dev_register() * @dev: device to allocate extcon device * @edev: the new extcon device to 
register * * Managed extcon_dev_register() function. If extcon device is attached with * this function, that extcon device is automatically unregistered on driver * detach. Internally this function calls extcon_dev_register() function. * To get more information, refer that function. * * If extcon device is registered with this function and the device needs to be * unregistered separately, devm_extcon_dev_unregister() should be used. * * Returns 0 if success or negaive error number if failure. */ int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev) { struct extcon_dev **ptr; int ret; ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; ret = extcon_dev_register(edev); if (ret) { devres_free(ptr); return ret; } *ptr = edev; devres_add(dev, ptr); return 0; } EXPORT_SYMBOL_GPL(devm_extcon_dev_register); /** * devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister() * @dev: device the extcon belongs to * @edev: the extcon device to unregister * * Unregister extcon device that is registered with devm_extcon_dev_register() * function. 
*/ void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev) { WARN_ON(devres_release(dev, devm_extcon_dev_unreg, devm_extcon_dev_match, edev)); } EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister); #ifdef CONFIG_OF /* * extcon_get_edev_by_phandle - Get the extcon device from devicetree * @dev - instance to the given device * @index - index into list of extcon_dev * * return the instance of extcon device */ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index) { struct device_node *node; struct extcon_dev *edev; if (!dev->of_node) { dev_err(dev, "device does not have a device node entry\n"); return ERR_PTR(-EINVAL); } node = of_parse_phandle(dev->of_node, "extcon", index); if (!node) { dev_err(dev, "failed to get phandle in %s node\n", dev->of_node->full_name); return ERR_PTR(-ENODEV); } mutex_lock(&extcon_dev_list_lock); list_for_each_entry(edev, &extcon_dev_list, entry) { if (edev->dev.parent && edev->dev.parent->of_node == node) { mutex_unlock(&extcon_dev_list_lock); return edev; } } mutex_unlock(&extcon_dev_list_lock); return ERR_PTR(-EPROBE_DEFER); } #else struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index) { return ERR_PTR(-ENOSYS); } #endif /* CONFIG_OF */ EXPORT_SYMBOL_GPL(extcon_get_edev_by_phandle); static int __init extcon_class_init(void) { return create_extcon_class(); } module_init(extcon_class_init); static void __exit extcon_class_exit(void) { #if defined(CONFIG_ANDROID) class_compat_unregister(switch_class); #endif class_destroy(extcon_class); } module_exit(extcon_class_exit); MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>"); MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); MODULE_DESCRIPTION("External connector (extcon) class driver"); MODULE_LICENSE("GPL");
gpl-2.0
ardatdat/archos-kernel
drivers/s390/net/qeth_core_mpc.c
215
8975
/* * drivers/s390/net/qeth_core_mpc.c * * Copyright IBM Corp. 2007 * Author(s): Frank Pavlic <fpavlic@de.ibm.com>, * Thomas Spatzier <tspat@de.ibm.com>, * Frank Blaschka <frank.blaschka@de.ibm.com> */ #include <linux/module.h> #include <asm/cio.h> #include "qeth_core_mpc.h" unsigned char IDX_ACTIVATE_READ[] = { 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1, 0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00, 0x00, 0x00 }; unsigned char IDX_ACTIVATE_WRITE[] = { 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1, 0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00, 0x00, 0x00 }; unsigned char CM_ENABLE[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x63, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x23, 0x00, 0x00, 0x23, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x40, 0x00, 0x0c, 0x41, 0x02, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x04, 0x01, 0x7e, 0x04, 0x05, 0x00, 0x01, 0x01, 0x0f, 0x00, 0x0c, 0x04, 0x02, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; unsigned char CM_SETUP[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x24, 0x00, 0x00, 0x24, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x40, 0x00, 0x0c, 0x41, 0x04, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x04, 0x04, 0x05, 0x00, 0x01, 0x01, 0x11, 0x00, 0x09, 0x04, 0x05, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 
0x04, 0x06, 0xc8, 0x00 }; unsigned char ULP_ENABLE[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6b, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x2b, 0x00, 0x00, 0x2b, 0x05, 0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x40, 0x00, 0x0c, 0x41, 0x02, 0x00, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x04, 0x01, 0x03, 0x04, 0x05, 0x00, 0x01, 0x01, 0x12, 0x00, 0x14, 0x04, 0x0a, 0x00, 0x20, 0x00, 0x00, 0xff, 0xff, 0x00, 0x08, 0xc8, 0xe8, 0xc4, 0xf1, 0xc7, 0xf1, 0x00, 0x00 }; unsigned char ULP_SETUP[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6c, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x24, 0x00, 0x2c, 0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x40, 0x00, 0x0c, 0x41, 0x04, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x04, 0x04, 0x05, 0x00, 0x01, 0x01, 0x14, 0x00, 0x09, 0x04, 0x05, 0x05, 0x30, 0x01, 0x00, 0x00, 0x00, 0x06, 0x04, 0x06, 0x40, 0x00, 0x00, 0x08, 0x04, 0x0b, 0x00, 0x00, 0x00, 0x00 }; unsigned char DM_ACT[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x55, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x24, 0x00, 0x15, 0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x40, 0x00, 0x0c, 0x43, 0x60, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x04, 0x04, 0x05, 0x40, 0x01, 0x01, 0x00 }; unsigned char IPA_PDU_HEADER[] = { 0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 
0x77, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) / 256, (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) % 256, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, sizeof(struct qeth_ipa_cmd) / 256, sizeof(struct qeth_ipa_cmd) % 256, 0x00, sizeof(struct qeth_ipa_cmd) / 256, sizeof(struct qeth_ipa_cmd) % 256, 0x05, 0x77, 0x77, 0x77, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, sizeof(struct qeth_ipa_cmd) / 256, sizeof(struct qeth_ipa_cmd) % 256, 0x00, 0x00, 0x00, 0x40, }; EXPORT_SYMBOL_GPL(IPA_PDU_HEADER); unsigned char WRITE_CCW[] = { 0x01, CCW_FLAG_SLI, 0, 0, 0, 0, 0, 0 }; unsigned char READ_CCW[] = { 0x02, CCW_FLAG_SLI, 0, 0, 0, 0, 0, 0 }; struct ipa_rc_msg { enum qeth_ipa_return_codes rc; char *msg; }; static struct ipa_rc_msg qeth_ipa_rc_msg[] = { {IPA_RC_SUCCESS, "success"}, {IPA_RC_NOTSUPP, "Command not supported"}, {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"}, {IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"}, {IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"}, {IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"}, {IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"}, {IPA_RC_UNREGISTERED_ADDR, "Address not registered"}, {IPA_RC_NO_ID_AVAILABLE, "No identifiers available"}, {IPA_RC_ID_NOT_FOUND, "Identifier not found"}, {IPA_RC_INVALID_IP_VERSION, "IP version incorrect"}, {IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"}, {IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"}, {IPA_RC_L2_DUP_MAC, "Duplicate MAC address"}, {IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"}, {IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"}, {IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"}, {IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"}, {IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"}, {IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"}, {IPA_RC_L2_VLAN_ID_NOT_FOUND, 
"L2 vlan id not found"}, {IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"}, {IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"}, {IPA_RC_INVALID_LANTYPE, "Invalid LAN type"}, {IPA_RC_INVALID_LANNUM, "Invalid LAN num"}, {IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"}, {IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"}, {IPA_RC_LAN_PORT_STATE_ERROR, "LAN port state error"}, {IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"}, {IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"}, {IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"}, {IPA_RC_MC_ADDR_NOT_FOUND, "Multicast address not found"}, {IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"}, {IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"}, {IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"}, {IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"}, {IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"}, {IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"}, {IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"}, {IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"}, {IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"}, {IPA_RC_FFFF, "Unknown Error"} }; char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc) { int x = 0; qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) / sizeof(struct ipa_rc_msg) - 1].rc = rc; while (qeth_ipa_rc_msg[x].rc != rc) x++; return qeth_ipa_rc_msg[x].msg; } struct ipa_cmd_names { enum qeth_ipa_cmds cmd; char *name; }; static struct ipa_cmd_names qeth_ipa_cmd_names[] = { {IPA_CMD_STARTLAN, "startlan"}, {IPA_CMD_STOPLAN, "stoplan"}, {IPA_CMD_SETVMAC, "setvmac"}, {IPA_CMD_DELVMAC, "delvmac"}, {IPA_CMD_SETGMAC, "setgmac"}, {IPA_CMD_DELGMAC, "delgmac"}, {IPA_CMD_SETVLAN, "setvlan"}, {IPA_CMD_DELVLAN, "delvlan"}, {IPA_CMD_SETCCID, "setccid"}, {IPA_CMD_DELCCID, "delccid"}, {IPA_CMD_MODCCID, "modccid"}, {IPA_CMD_SETIP, "setip"}, {IPA_CMD_QIPASSIST, "qipassist"}, {IPA_CMD_SETASSPARMS, "setassparms"}, 
{IPA_CMD_SETIPM, "setipm"}, {IPA_CMD_DELIPM, "delipm"}, {IPA_CMD_SETRTG, "setrtg"}, {IPA_CMD_DELIP, "delip"}, {IPA_CMD_SETADAPTERPARMS, "setadapterparms"}, {IPA_CMD_SET_DIAG_ASS, "set_diag_ass"}, {IPA_CMD_CREATE_ADDR, "create_addr"}, {IPA_CMD_DESTROY_ADDR, "destroy_addr"}, {IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"}, {IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"}, {IPA_CMD_UNKNOWN, "unknown"}, }; char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd) { int x = 0; qeth_ipa_cmd_names[ sizeof(qeth_ipa_cmd_names) / sizeof(struct ipa_cmd_names)-1].cmd = cmd; while (qeth_ipa_cmd_names[x].cmd != cmd) x++; return qeth_ipa_cmd_names[x].name; }
gpl-2.0
lexi6725/u-boot-2012-10
drivers/serial/serial_lh7a40x.c
215
3869
/* * (C) Copyright 2002 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <common.h> #include <lh7a40x.h> DECLARE_GLOBAL_DATA_PTR; #if defined(CONFIG_CONSOLE_UART1) # define UART_CONSOLE 1 #elif defined(CONFIG_CONSOLE_UART2) # define UART_CONSOLE 2 #elif defined(CONFIG_CONSOLE_UART3) # define UART_CONSOLE 3 #else # error "No console configured ... " #endif void serial_setbrg (void) { lh7a40x_uart_t* uart = LH7A40X_UART_PTR(UART_CONSOLE); int i; unsigned int reg = 0; /* * userguide 15.1.2.4 * * BAUDDIV is (UART_REF_FREQ/(16 X BAUD))-1 * * UART_REF_FREQ = external system clock input / 2 (Hz) * BAUD is desired baudrate (bits/s) * * NOTE: we add (divisor/2) to numerator to round for * more precision */ reg = (((get_PLLCLK()/2) + ((16*gd->baudrate)/2)) / (16 * gd->baudrate)) - 1; uart->brcon = reg; for (i = 0; i < 100; i++); } /* * Initialise the serial port with the given baudrate. The settings * are always 8 data bits, no parity, 1 stop bit, no start bits. 
* */ int serial_init (void) { lh7a40x_uart_t* uart = LH7A40X_UART_PTR(UART_CONSOLE); /* UART must be enabled before writing to any config registers */ uart->con |= (UART_EN); #ifdef CONFIG_CONSOLE_UART1 /* infrared disabled */ uart->con |= UART_SIRD; #endif /* loopback disabled */ uart->con &= ~(UART_LBE); /* modem lines and tx/rx polarities */ uart->con &= ~(UART_MXP | UART_TXP | UART_RXP); /* FIFO enable, N81 */ uart->fcon = (UART_WLEN_8 | UART_FEN | UART_STP2_1); /* set baudrate */ serial_setbrg (); /* enable rx interrupt */ uart->inten |= UART_RI; return (0); } /* * Read a single byte from the serial port. Returns 1 on success, 0 * otherwise. When the function is succesfull, the character read is * written into its argument c. */ int serial_getc (void) { lh7a40x_uart_t* uart = LH7A40X_UART_PTR(UART_CONSOLE); /* wait for character to arrive */ while (uart->status & UART_RXFE); return(uart->data & 0xff); } #ifdef CONFIG_HWFLOW static int hwflow = 0; /* turned off by default */ int hwflow_onoff(int on) { switch(on) { case 0: default: break; /* return current */ case 1: hwflow = 1; /* turn on */ break; case -1: hwflow = 0; /* turn off */ break; } return hwflow; } #endif #ifdef CONFIG_MODEM_SUPPORT static int be_quiet = 0; void disable_putc(void) { be_quiet = 1; } void enable_putc(void) { be_quiet = 0; } #endif /* * Output a single byte to the serial port. 
*/ void serial_putc (const char c) { lh7a40x_uart_t* uart = LH7A40X_UART_PTR(UART_CONSOLE); #ifdef CONFIG_MODEM_SUPPORT if (be_quiet) return; #endif /* wait for room in the tx FIFO */ while (!(uart->status & UART_TXFE)); #ifdef CONFIG_HWFLOW /* Wait for CTS up */ while(hwflow && !(uart->status & UART_CTS)); #endif uart->data = c; /* If \n, also do \r */ if (c == '\n') serial_putc ('\r'); } /* * Test whether a character is in the RX buffer */ int serial_tstc (void) { lh7a40x_uart_t* uart = LH7A40X_UART_PTR(UART_CONSOLE); return(!(uart->status & UART_RXFE)); } void serial_puts (const char *s) { while (*s) { serial_putc (*s++); } }
gpl-2.0
tytung/android_kernel_htcleo-2.6.32
drivers/rtc/rtc-ds1305.c
471
21994
/* * rtc-ds1305.c -- driver for DS1305 and DS1306 SPI RTC chips * * Copyright (C) 2008 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/bcd.h> #include <linux/rtc.h> #include <linux/workqueue.h> #include <linux/spi/spi.h> #include <linux/spi/ds1305.h> /* * Registers ... mask DS1305_WRITE into register address to write, * otherwise you're reading it. All non-bitmask values are BCD. */ #define DS1305_WRITE 0x80 /* RTC date/time ... the main special cases are that we: * - Need fancy "hours" encoding in 12hour mode * - Don't rely on the "day-of-week" field (or tm_wday) * - Are a 21st-century clock (2000 <= year < 2100) */ #define DS1305_RTC_LEN 7 /* bytes for RTC regs */ #define DS1305_SEC 0x00 /* register addresses */ #define DS1305_MIN 0x01 #define DS1305_HOUR 0x02 # define DS1305_HR_12 0x40 /* set == 12 hr mode */ # define DS1305_HR_PM 0x20 /* set == PM (12hr mode) */ #define DS1305_WDAY 0x03 #define DS1305_MDAY 0x04 #define DS1305_MON 0x05 #define DS1305_YEAR 0x06 /* The two alarms have only sec/min/hour/wday fields (ALM_LEN). * DS1305_ALM_DISABLE disables a match field (some combos are bad). * * NOTE that since we don't use WDAY, we limit ourselves to alarms * only one day into the future (vs potentially up to a week). * * NOTE ALSO that while we could generate once-a-second IRQs (UIE), we * don't currently support them. 
We'd either need to do it only when * no alarm is pending (not the standard model), or to use the second * alarm (implying that this is a DS1305 not DS1306, *and* that either * it's wired up a second IRQ we know, or that INTCN is set) */ #define DS1305_ALM_LEN 4 /* bytes for ALM regs */ #define DS1305_ALM_DISABLE 0x80 #define DS1305_ALM0(r) (0x07 + (r)) /* register addresses */ #define DS1305_ALM1(r) (0x0b + (r)) /* three control registers */ #define DS1305_CONTROL_LEN 3 /* bytes of control regs */ #define DS1305_CONTROL 0x0f /* register addresses */ # define DS1305_nEOSC 0x80 /* low enables oscillator */ # define DS1305_WP 0x40 /* write protect */ # define DS1305_INTCN 0x04 /* clear == only int0 used */ # define DS1306_1HZ 0x04 /* enable 1Hz output */ # define DS1305_AEI1 0x02 /* enable ALM1 IRQ */ # define DS1305_AEI0 0x01 /* enable ALM0 IRQ */ #define DS1305_STATUS 0x10 /* status has just AEIx bits, mirrored as IRQFx */ #define DS1305_TRICKLE 0x11 /* trickle bits are defined in <linux/spi/ds1305.h> */ /* a bunch of NVRAM */ #define DS1305_NVRAM_LEN 96 /* bytes of NVRAM */ #define DS1305_NVRAM 0x20 /* register addresses */ struct ds1305 { struct spi_device *spi; struct rtc_device *rtc; struct work_struct work; unsigned long flags; #define FLAG_EXITING 0 bool hr12; u8 ctrl[DS1305_CONTROL_LEN]; }; /*----------------------------------------------------------------------*/ /* * Utilities ... tolerate 12-hour AM/PM notation in case of non-Linux * software (like a bootloader) which may require it. 
*/ static unsigned bcd2hour(u8 bcd) { if (bcd & DS1305_HR_12) { unsigned hour = 0; bcd &= ~DS1305_HR_12; if (bcd & DS1305_HR_PM) { hour = 12; bcd &= ~DS1305_HR_PM; } hour += bcd2bin(bcd); return hour - 1; } return bcd2bin(bcd); } static u8 hour2bcd(bool hr12, int hour) { if (hr12) { hour++; if (hour <= 12) return DS1305_HR_12 | bin2bcd(hour); hour -= 12; return DS1305_HR_12 | DS1305_HR_PM | bin2bcd(hour); } return bin2bcd(hour); } /*----------------------------------------------------------------------*/ /* * Interface to RTC framework */ #ifdef CONFIG_RTC_INTF_DEV /* * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl) */ static int ds1305_ioctl(struct device *dev, unsigned cmd, unsigned long arg) { struct ds1305 *ds1305 = dev_get_drvdata(dev); u8 buf[2]; int status = -ENOIOCTLCMD; buf[0] = DS1305_WRITE | DS1305_CONTROL; buf[1] = ds1305->ctrl[0]; switch (cmd) { case RTC_AIE_OFF: status = 0; if (!(buf[1] & DS1305_AEI0)) goto done; buf[1] &= ~DS1305_AEI0; break; case RTC_AIE_ON: status = 0; if (ds1305->ctrl[0] & DS1305_AEI0) goto done; buf[1] |= DS1305_AEI0; break; } if (status == 0) { status = spi_write_then_read(ds1305->spi, buf, sizeof buf, NULL, 0); if (status >= 0) ds1305->ctrl[0] = buf[1]; } done: return status; } #else #define ds1305_ioctl NULL #endif /* * Get/set of date and time is pretty normal. 
*/ static int ds1305_get_time(struct device *dev, struct rtc_time *time) { struct ds1305 *ds1305 = dev_get_drvdata(dev); u8 addr = DS1305_SEC; u8 buf[DS1305_RTC_LEN]; int status; /* Use write-then-read to get all the date/time registers * since dma from stack is nonportable */ status = spi_write_then_read(ds1305->spi, &addr, sizeof addr, buf, sizeof buf); if (status < 0) return status; dev_vdbg(dev, "%s: %02x %02x %02x, %02x %02x %02x %02x\n", "read", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); /* Decode the registers */ time->tm_sec = bcd2bin(buf[DS1305_SEC]); time->tm_min = bcd2bin(buf[DS1305_MIN]); time->tm_hour = bcd2hour(buf[DS1305_HOUR]); time->tm_wday = buf[DS1305_WDAY] - 1; time->tm_mday = bcd2bin(buf[DS1305_MDAY]); time->tm_mon = bcd2bin(buf[DS1305_MON]) - 1; time->tm_year = bcd2bin(buf[DS1305_YEAR]) + 100; dev_vdbg(dev, "%s secs=%d, mins=%d, " "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n", "read", time->tm_sec, time->tm_min, time->tm_hour, time->tm_mday, time->tm_mon, time->tm_year, time->tm_wday); /* Time may not be set */ return rtc_valid_tm(time); } static int ds1305_set_time(struct device *dev, struct rtc_time *time) { struct ds1305 *ds1305 = dev_get_drvdata(dev); u8 buf[1 + DS1305_RTC_LEN]; u8 *bp = buf; dev_vdbg(dev, "%s secs=%d, mins=%d, " "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n", "write", time->tm_sec, time->tm_min, time->tm_hour, time->tm_mday, time->tm_mon, time->tm_year, time->tm_wday); /* Write registers starting at the first time/date address. */ *bp++ = DS1305_WRITE | DS1305_SEC; *bp++ = bin2bcd(time->tm_sec); *bp++ = bin2bcd(time->tm_min); *bp++ = hour2bcd(ds1305->hr12, time->tm_hour); *bp++ = (time->tm_wday < 7) ? 
(time->tm_wday + 1) : 1; *bp++ = bin2bcd(time->tm_mday); *bp++ = bin2bcd(time->tm_mon + 1); *bp++ = bin2bcd(time->tm_year - 100); dev_dbg(dev, "%s: %02x %02x %02x, %02x %02x %02x %02x\n", "write", buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); /* use write-then-read since dma from stack is nonportable */ return spi_write_then_read(ds1305->spi, buf, sizeof buf, NULL, 0); } /* * Get/set of alarm is a bit funky: * * - First there's the inherent raciness of getting the (partitioned) * status of an alarm that could trigger while we're reading parts * of that status. * * - Second there's its limited range (we could increase it a bit by * relying on WDAY), which means it will easily roll over. * * - Third there's the choice of two alarms and alarm signals. * Here we use ALM0 and expect that nINT0 (open drain) is used; * that's the only real option for DS1306 runtime alarms, and is * natural on DS1305. * * - Fourth, there's also ALM1, and a second interrupt signal: * + On DS1305 ALM1 uses nINT1 (when INTCN=1) else nINT0; * + On DS1306 ALM1 only uses INT1 (an active high pulse) * and it won't work when VCC1 is active. * * So to be most general, we should probably set both alarms to the * same value, letting ALM1 be the wakeup event source on DS1306 * and handling several wiring options on DS1305. * * - Fifth, we support the polled mode (as well as possible; why not?) * even when no interrupt line is wired to an IRQ. */ /* * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl) */ static int ds1305_get_alarm(struct device *dev, struct rtc_wkalrm *alm) { struct ds1305 *ds1305 = dev_get_drvdata(dev); struct spi_device *spi = ds1305->spi; u8 addr; int status; u8 buf[DS1305_ALM_LEN]; /* Refresh control register cache BEFORE reading ALM0 registers, * since reading alarm registers acks any pending IRQ. That * makes returning "pending" status a bit of a lie, but that bit * of EFI status is at best fragile anyway (given IRQ handlers). 
*/ addr = DS1305_CONTROL; status = spi_write_then_read(spi, &addr, sizeof addr, ds1305->ctrl, sizeof ds1305->ctrl); if (status < 0) return status; alm->enabled = !!(ds1305->ctrl[0] & DS1305_AEI0); alm->pending = !!(ds1305->ctrl[1] & DS1305_AEI0); /* get and check ALM0 registers */ addr = DS1305_ALM0(DS1305_SEC); status = spi_write_then_read(spi, &addr, sizeof addr, buf, sizeof buf); if (status < 0) return status; dev_vdbg(dev, "%s: %02x %02x %02x %02x\n", "alm0 read", buf[DS1305_SEC], buf[DS1305_MIN], buf[DS1305_HOUR], buf[DS1305_WDAY]); if ((DS1305_ALM_DISABLE & buf[DS1305_SEC]) || (DS1305_ALM_DISABLE & buf[DS1305_MIN]) || (DS1305_ALM_DISABLE & buf[DS1305_HOUR])) return -EIO; /* Stuff these values into alm->time and let RTC framework code * fill in the rest ... and also handle rollover to tomorrow when * that's needed. */ alm->time.tm_sec = bcd2bin(buf[DS1305_SEC]); alm->time.tm_min = bcd2bin(buf[DS1305_MIN]); alm->time.tm_hour = bcd2hour(buf[DS1305_HOUR]); alm->time.tm_mday = -1; alm->time.tm_mon = -1; alm->time.tm_year = -1; /* next three fields are unused by Linux */ alm->time.tm_wday = -1; alm->time.tm_mday = -1; alm->time.tm_isdst = -1; return 0; } /* * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl) */ static int ds1305_set_alarm(struct device *dev, struct rtc_wkalrm *alm) { struct ds1305 *ds1305 = dev_get_drvdata(dev); struct spi_device *spi = ds1305->spi; unsigned long now, later; struct rtc_time tm; int status; u8 buf[1 + DS1305_ALM_LEN]; /* convert desired alarm to time_t */ status = rtc_tm_to_time(&alm->time, &later); if (status < 0) return status; /* Read current time as time_t */ status = ds1305_get_time(dev, &tm); if (status < 0) return status; status = rtc_tm_to_time(&tm, &now); if (status < 0) return status; /* make sure alarm fires within the next 24 hours */ if (later <= now) return -EINVAL; if ((later - now) > 24 * 60 * 60) return -EDOM; /* disable alarm if needed */ if (ds1305->ctrl[0] & DS1305_AEI0) { ds1305->ctrl[0] &= 
~DS1305_AEI0; buf[0] = DS1305_WRITE | DS1305_CONTROL; buf[1] = ds1305->ctrl[0]; status = spi_write_then_read(ds1305->spi, buf, 2, NULL, 0); if (status < 0) return status; } /* write alarm */ buf[0] = DS1305_WRITE | DS1305_ALM0(DS1305_SEC); buf[1 + DS1305_SEC] = bin2bcd(alm->time.tm_sec); buf[1 + DS1305_MIN] = bin2bcd(alm->time.tm_min); buf[1 + DS1305_HOUR] = hour2bcd(ds1305->hr12, alm->time.tm_hour); buf[1 + DS1305_WDAY] = DS1305_ALM_DISABLE; dev_dbg(dev, "%s: %02x %02x %02x %02x\n", "alm0 write", buf[1 + DS1305_SEC], buf[1 + DS1305_MIN], buf[1 + DS1305_HOUR], buf[1 + DS1305_WDAY]); status = spi_write_then_read(spi, buf, sizeof buf, NULL, 0); if (status < 0) return status; /* enable alarm if requested */ if (alm->enabled) { ds1305->ctrl[0] |= DS1305_AEI0; buf[0] = DS1305_WRITE | DS1305_CONTROL; buf[1] = ds1305->ctrl[0]; status = spi_write_then_read(ds1305->spi, buf, 2, NULL, 0); } return status; } #ifdef CONFIG_PROC_FS static int ds1305_proc(struct device *dev, struct seq_file *seq) { struct ds1305 *ds1305 = dev_get_drvdata(dev); char *diodes = "no"; char *resistors = ""; /* ctrl[2] is treated as read-only; no locking needed */ if ((ds1305->ctrl[2] & 0xf0) == DS1305_TRICKLE_MAGIC) { switch (ds1305->ctrl[2] & 0x0c) { case DS1305_TRICKLE_DS2: diodes = "2 diodes, "; break; case DS1305_TRICKLE_DS1: diodes = "1 diode, "; break; default: goto done; } switch (ds1305->ctrl[2] & 0x03) { case DS1305_TRICKLE_2K: resistors = "2k Ohm"; break; case DS1305_TRICKLE_4K: resistors = "4k Ohm"; break; case DS1305_TRICKLE_8K: resistors = "8k Ohm"; break; default: diodes = "no"; break; } } done: return seq_printf(seq, "trickle_charge\t: %s%s\n", diodes, resistors); } #else #define ds1305_proc NULL #endif static const struct rtc_class_ops ds1305_ops = { .ioctl = ds1305_ioctl, .read_time = ds1305_get_time, .set_time = ds1305_set_time, .read_alarm = ds1305_get_alarm, .set_alarm = ds1305_set_alarm, .proc = ds1305_proc, }; static void ds1305_work(struct work_struct *work) { struct ds1305 
*ds1305 = container_of(work, struct ds1305, work); struct mutex *lock = &ds1305->rtc->ops_lock; struct spi_device *spi = ds1305->spi; u8 buf[3]; int status; /* lock to protect ds1305->ctrl */ mutex_lock(lock); /* Disable the IRQ, and clear its status ... for now, we "know" * that if more than one alarm is active, they're in sync. * Note that reading ALM data registers also clears IRQ status. */ ds1305->ctrl[0] &= ~(DS1305_AEI1 | DS1305_AEI0); ds1305->ctrl[1] = 0; buf[0] = DS1305_WRITE | DS1305_CONTROL; buf[1] = ds1305->ctrl[0]; buf[2] = 0; status = spi_write_then_read(spi, buf, sizeof buf, NULL, 0); if (status < 0) dev_dbg(&spi->dev, "clear irq --> %d\n", status); mutex_unlock(lock); if (!test_bit(FLAG_EXITING, &ds1305->flags)) enable_irq(spi->irq); rtc_update_irq(ds1305->rtc, 1, RTC_AF | RTC_IRQF); } /* * This "real" IRQ handler hands off to a workqueue mostly to allow * mutex locking for ds1305->ctrl ... unlike I2C, we could issue async * I/O requests in IRQ context (to clear the IRQ status). 
*/ static irqreturn_t ds1305_irq(int irq, void *p) { struct ds1305 *ds1305 = p; disable_irq(irq); schedule_work(&ds1305->work); return IRQ_HANDLED; } /*----------------------------------------------------------------------*/ /* * Interface for NVRAM */ static void msg_init(struct spi_message *m, struct spi_transfer *x, u8 *addr, size_t count, char *tx, char *rx) { spi_message_init(m); memset(x, 0, 2 * sizeof(*x)); x->tx_buf = addr; x->len = 1; spi_message_add_tail(x, m); x++; x->tx_buf = tx; x->rx_buf = rx; x->len = count; spi_message_add_tail(x, m); } static ssize_t ds1305_nvram_read(struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct spi_device *spi; u8 addr; struct spi_message m; struct spi_transfer x[2]; int status; spi = container_of(kobj, struct spi_device, dev.kobj); if (unlikely(off >= DS1305_NVRAM_LEN)) return 0; if (count >= DS1305_NVRAM_LEN) count = DS1305_NVRAM_LEN; if ((off + count) > DS1305_NVRAM_LEN) count = DS1305_NVRAM_LEN - off; if (unlikely(!count)) return count; addr = DS1305_NVRAM + off; msg_init(&m, x, &addr, count, NULL, buf); status = spi_sync(spi, &m); if (status < 0) dev_err(&spi->dev, "nvram %s error %d\n", "read", status); return (status < 0) ? status : count; } static ssize_t ds1305_nvram_write(struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct spi_device *spi; u8 addr; struct spi_message m; struct spi_transfer x[2]; int status; spi = container_of(kobj, struct spi_device, dev.kobj); if (unlikely(off >= DS1305_NVRAM_LEN)) return -EFBIG; if (count >= DS1305_NVRAM_LEN) count = DS1305_NVRAM_LEN; if ((off + count) > DS1305_NVRAM_LEN) count = DS1305_NVRAM_LEN - off; if (unlikely(!count)) return count; addr = (DS1305_WRITE | DS1305_NVRAM) + off; msg_init(&m, x, &addr, count, buf, NULL); status = spi_sync(spi, &m); if (status < 0) dev_err(&spi->dev, "nvram %s error %d\n", "write", status); return (status < 0) ? 
status : count; } static struct bin_attribute nvram = { .attr.name = "nvram", .attr.mode = S_IRUGO | S_IWUSR, .read = ds1305_nvram_read, .write = ds1305_nvram_write, .size = DS1305_NVRAM_LEN, }; /*----------------------------------------------------------------------*/ /* * Interface to SPI stack */ static int __devinit ds1305_probe(struct spi_device *spi) { struct ds1305 *ds1305; struct rtc_device *rtc; int status; u8 addr, value; struct ds1305_platform_data *pdata = spi->dev.platform_data; bool write_ctrl = false; /* Sanity check board setup data. This may be hooked up * in 3wire mode, but we don't care. Note that unless * there's an inverter in place, this needs SPI_CS_HIGH! */ if ((spi->bits_per_word && spi->bits_per_word != 8) || (spi->max_speed_hz > 2000000) || !(spi->mode & SPI_CPHA)) return -EINVAL; /* set up driver data */ ds1305 = kzalloc(sizeof *ds1305, GFP_KERNEL); if (!ds1305) return -ENOMEM; ds1305->spi = spi; spi_set_drvdata(spi, ds1305); /* read and cache control registers */ addr = DS1305_CONTROL; status = spi_write_then_read(spi, &addr, sizeof addr, ds1305->ctrl, sizeof ds1305->ctrl); if (status < 0) { dev_dbg(&spi->dev, "can't %s, %d\n", "read", status); goto fail0; } dev_dbg(&spi->dev, "ctrl %s: %02x %02x %02x\n", "read", ds1305->ctrl[0], ds1305->ctrl[1], ds1305->ctrl[2]); /* Sanity check register values ... partially compensating for the * fact that SPI has no device handshake. A pullup on MISO would * make these tests fail; but not all systems will have one. If * some register is neither 0x00 nor 0xff, a chip is likely there. */ if ((ds1305->ctrl[0] & 0x38) != 0 || (ds1305->ctrl[1] & 0xfc) != 0) { dev_dbg(&spi->dev, "RTC chip is not present\n"); status = -ENODEV; goto fail0; } if (ds1305->ctrl[2] == 0) dev_dbg(&spi->dev, "chip may not be present\n"); /* enable writes if needed ... if we were paranoid it would * make sense to enable them only when absolutely necessary. 
*/ if (ds1305->ctrl[0] & DS1305_WP) { u8 buf[2]; ds1305->ctrl[0] &= ~DS1305_WP; buf[0] = DS1305_WRITE | DS1305_CONTROL; buf[1] = ds1305->ctrl[0]; status = spi_write_then_read(spi, buf, sizeof buf, NULL, 0); dev_dbg(&spi->dev, "clear WP --> %d\n", status); if (status < 0) goto fail0; } /* on DS1305, maybe start oscillator; like most low power * oscillators, it may take a second to stabilize */ if (ds1305->ctrl[0] & DS1305_nEOSC) { ds1305->ctrl[0] &= ~DS1305_nEOSC; write_ctrl = true; dev_warn(&spi->dev, "SET TIME!\n"); } /* ack any pending IRQs */ if (ds1305->ctrl[1]) { ds1305->ctrl[1] = 0; write_ctrl = true; } /* this may need one-time (re)init */ if (pdata) { /* maybe enable trickle charge */ if (((ds1305->ctrl[2] & 0xf0) != DS1305_TRICKLE_MAGIC)) { ds1305->ctrl[2] = DS1305_TRICKLE_MAGIC | pdata->trickle; write_ctrl = true; } /* on DS1306, configure 1 Hz signal */ if (pdata->is_ds1306) { if (pdata->en_1hz) { if (!(ds1305->ctrl[0] & DS1306_1HZ)) { ds1305->ctrl[0] |= DS1306_1HZ; write_ctrl = true; } } else { if (ds1305->ctrl[0] & DS1306_1HZ) { ds1305->ctrl[0] &= ~DS1306_1HZ; write_ctrl = true; } } } } if (write_ctrl) { u8 buf[4]; buf[0] = DS1305_WRITE | DS1305_CONTROL; buf[1] = ds1305->ctrl[0]; buf[2] = ds1305->ctrl[1]; buf[3] = ds1305->ctrl[2]; status = spi_write_then_read(spi, buf, sizeof buf, NULL, 0); if (status < 0) { dev_dbg(&spi->dev, "can't %s, %d\n", "write", status); goto fail0; } dev_dbg(&spi->dev, "ctrl %s: %02x %02x %02x\n", "write", ds1305->ctrl[0], ds1305->ctrl[1], ds1305->ctrl[2]); } /* see if non-Linux software set up AM/PM mode */ addr = DS1305_HOUR; status = spi_write_then_read(spi, &addr, sizeof addr, &value, sizeof value); if (status < 0) { dev_dbg(&spi->dev, "read HOUR --> %d\n", status); goto fail0; } ds1305->hr12 = (DS1305_HR_12 & value) != 0; if (ds1305->hr12) dev_dbg(&spi->dev, "AM/PM\n"); /* register RTC ... 
from here on, ds1305->ctrl needs locking */ rtc = rtc_device_register("ds1305", &spi->dev, &ds1305_ops, THIS_MODULE); if (IS_ERR(rtc)) { status = PTR_ERR(rtc); dev_dbg(&spi->dev, "register rtc --> %d\n", status); goto fail0; } ds1305->rtc = rtc; /* Maybe set up alarm IRQ; be ready to handle it triggering right * away. NOTE that we don't share this. The signal is active low, * and we can't ack it before a SPI message delay. We temporarily * disable the IRQ until it's acked, which lets us work with more * IRQ trigger modes (not all IRQ controllers can do falling edge). */ if (spi->irq) { INIT_WORK(&ds1305->work, ds1305_work); status = request_irq(spi->irq, ds1305_irq, 0, dev_name(&rtc->dev), ds1305); if (status < 0) { dev_dbg(&spi->dev, "request_irq %d --> %d\n", spi->irq, status); goto fail1; } } /* export NVRAM */ status = sysfs_create_bin_file(&spi->dev.kobj, &nvram); if (status < 0) { dev_dbg(&spi->dev, "register nvram --> %d\n", status); goto fail2; } return 0; fail2: free_irq(spi->irq, ds1305); fail1: rtc_device_unregister(rtc); fail0: kfree(ds1305); return status; } static int __devexit ds1305_remove(struct spi_device *spi) { struct ds1305 *ds1305 = spi_get_drvdata(spi); sysfs_remove_bin_file(&spi->dev.kobj, &nvram); /* carefully shut down irq and workqueue, if present */ if (spi->irq) { set_bit(FLAG_EXITING, &ds1305->flags); free_irq(spi->irq, ds1305); flush_scheduled_work(); } rtc_device_unregister(ds1305->rtc); spi_set_drvdata(spi, NULL); kfree(ds1305); return 0; } static struct spi_driver ds1305_driver = { .driver.name = "rtc-ds1305", .driver.owner = THIS_MODULE, .probe = ds1305_probe, .remove = __devexit_p(ds1305_remove), /* REVISIT add suspend/resume */ }; static int __init ds1305_init(void) { return spi_register_driver(&ds1305_driver); } module_init(ds1305_init); static void __exit ds1305_exit(void) { spi_unregister_driver(&ds1305_driver); } module_exit(ds1305_exit); MODULE_DESCRIPTION("RTC driver for DS1305 and DS1306 chips"); MODULE_LICENSE("GPL"); 
MODULE_ALIAS("spi:rtc-ds1305");
gpl-2.0
aospl/kernel_samsung_smdk4412
mm/swap_state.c
471
11302
/* * linux/mm/swap_state.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * Swap reorganised 29.12.95, Stephen Tweedie * * Rewritten to use page cache, (C) 1998 Stephen Tweedie */ #include <linux/module.h> #include <linux/mm.h> #include <linux/gfp.h> #include <linux/kernel_stat.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/backing-dev.h> #include <linux/pagevec.h> #include <linux/migrate.h> #include <linux/page_cgroup.h> #include <asm/pgtable.h> /* * swapper_space is a fiction, retained to simplify the path through * vmscan's shrink_page_list. */ static const struct address_space_operations swap_aops = { .writepage = swap_writepage, .set_page_dirty = __set_page_dirty_no_writeback, .migratepage = migrate_page, }; static struct backing_dev_info swap_backing_dev_info = { .name = "swap", .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, }; struct address_space swapper_space = { .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN), .tree_lock = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock), .a_ops = &swap_aops, .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear), .backing_dev_info = &swap_backing_dev_info, }; #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0) static struct { unsigned long add_total; unsigned long del_total; unsigned long find_success; unsigned long find_total; } swap_cache_info; void show_swap_cache_info(void) { printk("%lu pages in swap cache\n", total_swapcache_pages); printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n", swap_cache_info.add_total, swap_cache_info.del_total, swap_cache_info.find_success, swap_cache_info.find_total); printk("Free swap = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10)); printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10)); } /* * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space, * but sets SwapCache flag and 
private instead of mapping and index. */ static int __add_to_swap_cache(struct page *page, swp_entry_t entry) { int error; VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(PageSwapCache(page)); VM_BUG_ON(!PageSwapBacked(page)); page_cache_get(page); SetPageSwapCache(page); set_page_private(page, entry.val); spin_lock_irq(&swapper_space.tree_lock); error = radix_tree_insert(&swapper_space.page_tree, entry.val, page); if (likely(!error)) { total_swapcache_pages++; __inc_zone_page_state(page, NR_FILE_PAGES); INC_CACHE_INFO(add_total); } spin_unlock_irq(&swapper_space.tree_lock); if (unlikely(error)) { /* * Only the context which have set SWAP_HAS_CACHE flag * would call add_to_swap_cache(). * So add_to_swap_cache() doesn't returns -EEXIST. */ VM_BUG_ON(error == -EEXIST); set_page_private(page, 0UL); ClearPageSwapCache(page); page_cache_release(page); } return error; } int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) { int error; error = radix_tree_preload(gfp_mask); if (!error) { error = __add_to_swap_cache(page, entry); radix_tree_preload_end(); } return error; } /* * This must be called only on pages that have * been verified to be in the swap cache. */ void __delete_from_swap_cache(struct page *page) { VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(!PageSwapCache(page)); VM_BUG_ON(PageWriteback(page)); radix_tree_delete(&swapper_space.page_tree, page_private(page)); set_page_private(page, 0); ClearPageSwapCache(page); total_swapcache_pages--; __dec_zone_page_state(page, NR_FILE_PAGES); INC_CACHE_INFO(del_total); } /** * add_to_swap - allocate swap space for a page * @page: page we want to move to swap * * Allocate swap space for the page and add the page to the * swap cache. Caller needs to hold the page lock. 
*/ int add_to_swap(struct page *page) { swp_entry_t entry; int err; VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(!PageUptodate(page)); entry = get_swap_page(); if (!entry.val) return 0; if (unlikely(PageTransHuge(page))) if (unlikely(split_huge_page(page))) { swapcache_free(entry, NULL); return 0; } /* * Radix-tree node allocations from PF_MEMALLOC contexts could * completely exhaust the page allocator. __GFP_NOMEMALLOC * stops emergency reserves from being allocated. * * TODO: this could cause a theoretical memory reclaim * deadlock in the swap out path. */ /* * Add it to the swap cache and mark it dirty */ err = add_to_swap_cache(page, entry, __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN); if (!err) { /* Success */ SetPageDirty(page); return 1; } else { /* -ENOMEM radix-tree allocation failure */ /* * add_to_swap_cache() doesn't return -EEXIST, so we can safely * clear SWAP_HAS_CACHE flag. */ swapcache_free(entry, NULL); return 0; } } /* * This must be called only on pages that have * been verified to be in the swap cache and locked. * It will never put the page into the free list, * the caller has a reference on the page. */ void delete_from_swap_cache(struct page *page) { swp_entry_t entry; entry.val = page_private(page); spin_lock_irq(&swapper_space.tree_lock); __delete_from_swap_cache(page); spin_unlock_irq(&swapper_space.tree_lock); swapcache_free(entry, page); page_cache_release(page); } /* * If we are the only user, then try to free up the swap cache. * * Its ok to check for PageSwapCache without the page lock * here because we are going to recheck again inside * try_to_free_swap() _with_ the lock. * - Marcelo */ static inline void free_swap_cache(struct page *page) { if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) { try_to_free_swap(page); unlock_page(page); } } /* * Perform a free_page(), also freeing any swap cache associated with * this page if it is the last user of the page. 
*/ void free_page_and_swap_cache(struct page *page) { free_swap_cache(page); page_cache_release(page); } /* * Passed an array of pages, drop them all from swapcache and then release * them. They are removed from the LRU and freed if this is their last use. */ void free_pages_and_swap_cache(struct page **pages, int nr) { struct page **pagep = pages; lru_add_drain(); while (nr) { int todo = min(nr, PAGEVEC_SIZE); int i; for (i = 0; i < todo; i++) free_swap_cache(pagep[i]); release_pages(pagep, todo, 0); pagep += todo; nr -= todo; } } /* * Lookup a swap entry in the swap cache. A found page will be returned * unlocked and with its refcount incremented - we rely on the kernel * lock getting page table operations atomic even if we drop the page * lock before returning. */ struct page * lookup_swap_cache(swp_entry_t entry) { struct page *page; page = find_get_page(&swapper_space, entry.val); if (page) INC_CACHE_INFO(find_success); INC_CACHE_INFO(find_total); return page; } /* * Locate a page of swap in physical memory, reserving swap cache space * and reading the disk if it is not already cached. * A failure return means that either the page allocation failed or that * the swap entry is no longer in use. */ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr) { struct page *found_page, *new_page = NULL; int err; do { /* * First check the swap cache. Since this is normally * called after lookup_swap_cache() failed, re-calling * that would confuse statistics. */ found_page = find_get_page(&swapper_space, entry.val); if (found_page) break; /* * Get a new page to read into from swap. */ if (!new_page) { new_page = alloc_page_vma(gfp_mask, vma, addr); if (!new_page) break; /* Out of memory */ } /* * call radix_tree_preload() while we can wait. */ err = radix_tree_preload(gfp_mask & GFP_KERNEL); if (err) break; /* * Swap entry may have been freed since our caller observed it. 
*/ err = swapcache_prepare(entry); if (err == -EEXIST) { radix_tree_preload_end(); /* * We might race against get_swap_page() and stumble * across a SWAP_HAS_CACHE swap_map entry whose page * has not been brought into the swapcache yet, while * the other end is scheduled away waiting on discard * I/O completion at scan_swap_map(). * * In order to avoid turning this transitory state * into a permanent loop around this -EEXIST case * if !CONFIG_PREEMPT and the I/O completion happens * to be waiting on the CPU waitqueue where we are now * busy looping, we just conditionally invoke the * scheduler here, if there are some more important * tasks to run. */ cond_resched(); continue; } if (err) { /* swp entry is obsolete ? */ radix_tree_preload_end(); break; } /* May fail (-ENOMEM) if radix-tree node allocation failed. */ __set_page_locked(new_page); SetPageSwapBacked(new_page); err = __add_to_swap_cache(new_page, entry); if (likely(!err)) { radix_tree_preload_end(); /* * Initiate read into locked page and return. */ lru_cache_add_anon(new_page); swap_readpage(new_page); return new_page; } radix_tree_preload_end(); ClearPageSwapBacked(new_page); __clear_page_locked(new_page); /* * add_to_swap_cache() doesn't return -EEXIST, so we can safely * clear SWAP_HAS_CACHE flag. */ swapcache_free(entry, NULL); } while (err != -ENOMEM); if (new_page) page_cache_release(new_page); return found_page; } /** * swapin_readahead - swap in pages in hope we need them soon * @entry: swap entry of this memory * @gfp_mask: memory allocation flags * @vma: user vma this address belongs to * @addr: target address for mempolicy * * Returns the struct page for entry and addr, after queueing swapin. * * Primitive swap readahead code. We simply read an aligned block of * (1 << page_cluster) entries in the swap area. This method is chosen * because it doesn't cost us any seek time. We also make sure to queue * the 'original' request together with the readahead ones... 
* * This has been extended to use the NUMA policies from the mm triggering * the readahead. * * Caller must hold down_read on the vma->vm_mm if vma is not NULL. */ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr) { int nr_pages; struct page *page; unsigned long offset; unsigned long end_offset; /* * Get starting offset for readaround, and number of pages to read. * Adjust starting address by readbehind (for NUMA interleave case)? * No, it's very unlikely that swap layout would follow vma layout, * more likely that neighbouring swap pages came from the same node: * so use the same "addr" to choose the same node for each swap read. */ nr_pages = valid_swaphandles(entry, &offset); for (end_offset = offset + nr_pages; offset < end_offset; offset++) { /* Ok, do the async read-ahead now */ page = read_swap_cache_async(swp_entry(swp_type(entry), offset), gfp_mask, vma, addr); if (!page) break; page_cache_release(page); } lru_add_drain(); /* Push any new pages onto the LRU now */ return read_swap_cache_async(entry, gfp_mask, vma, addr); }
gpl-2.0
wangxingchao/spi-omap
fs/ocfs2/alloc.c
1239
191459
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * alloc.c * * Extent allocs and frees * * Copyright (C) 2002, 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/swap.h> #include <linux/quotaops.h> #include <linux/blkdev.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "aops.h" #include "blockcheck.h" #include "dlmglue.h" #include "extent_map.h" #include "inode.h" #include "journal.h" #include "localalloc.h" #include "suballoc.h" #include "sysfile.h" #include "file.h" #include "super.h" #include "uptodate.h" #include "xattr.h" #include "refcounttree.h" #include "ocfs2_trace.h" #include "buffer_head_io.h" enum ocfs2_contig_type { CONTIG_NONE = 0, CONTIG_LEFT, CONTIG_RIGHT, CONTIG_LEFTRIGHT, }; static enum ocfs2_contig_type ocfs2_extent_rec_contig(struct super_block *sb, struct ocfs2_extent_rec *ext, struct ocfs2_extent_rec *insert_rec); /* * Operations for a specific extent tree type. * * To implement an on-disk btree (extent tree) type in ocfs2, add * an ocfs2_extent_tree_operations structure and the matching * ocfs2_init_<thingy>_extent_tree() function. 
That's pretty much it * for the allocation portion of the extent tree. */ struct ocfs2_extent_tree_operations { /* * last_eb_blk is the block number of the right most leaf extent * block. Most on-disk structures containing an extent tree store * this value for fast access. The ->eo_set_last_eb_blk() and * ->eo_get_last_eb_blk() operations access this value. They are * both required. */ void (*eo_set_last_eb_blk)(struct ocfs2_extent_tree *et, u64 blkno); u64 (*eo_get_last_eb_blk)(struct ocfs2_extent_tree *et); /* * The on-disk structure usually keeps track of how many total * clusters are stored in this extent tree. This function updates * that value. new_clusters is the delta, and must be * added to the total. Required. */ void (*eo_update_clusters)(struct ocfs2_extent_tree *et, u32 new_clusters); /* * If this extent tree is supported by an extent map, insert * a record into the map. */ void (*eo_extent_map_insert)(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec); /* * If this extent tree is supported by an extent map, truncate the * map to clusters, */ void (*eo_extent_map_truncate)(struct ocfs2_extent_tree *et, u32 clusters); /* * If ->eo_insert_check() exists, it is called before rec is * inserted into the extent tree. It is optional. */ int (*eo_insert_check)(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec); int (*eo_sanity_check)(struct ocfs2_extent_tree *et); /* * -------------------------------------------------------------- * The remaining are internal to ocfs2_extent_tree and don't have * accessor functions */ /* * ->eo_fill_root_el() takes et->et_object and sets et->et_root_el. * It is required. */ void (*eo_fill_root_el)(struct ocfs2_extent_tree *et); /* * ->eo_fill_max_leaf_clusters sets et->et_max_leaf_clusters if * it exists. If it does not, et->et_max_leaf_clusters is set * to 0 (unlimited). Optional. 
*/ void (*eo_fill_max_leaf_clusters)(struct ocfs2_extent_tree *et); /* * ->eo_extent_contig test whether the 2 ocfs2_extent_rec * are contiguous or not. Optional. Don't need to set it if use * ocfs2_extent_rec as the tree leaf. */ enum ocfs2_contig_type (*eo_extent_contig)(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *ext, struct ocfs2_extent_rec *insert_rec); }; /* * Pre-declare ocfs2_dinode_et_ops so we can use it as a sanity check * in the methods. */ static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et); static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno); static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et, u32 clusters); static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec); static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et, u32 clusters); static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec); static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et); static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et); static struct ocfs2_extent_tree_operations ocfs2_dinode_et_ops = { .eo_set_last_eb_blk = ocfs2_dinode_set_last_eb_blk, .eo_get_last_eb_blk = ocfs2_dinode_get_last_eb_blk, .eo_update_clusters = ocfs2_dinode_update_clusters, .eo_extent_map_insert = ocfs2_dinode_extent_map_insert, .eo_extent_map_truncate = ocfs2_dinode_extent_map_truncate, .eo_insert_check = ocfs2_dinode_insert_check, .eo_sanity_check = ocfs2_dinode_sanity_check, .eo_fill_root_el = ocfs2_dinode_fill_root_el, }; static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno) { struct ocfs2_dinode *di = et->et_object; BUG_ON(et->et_ops != &ocfs2_dinode_et_ops); di->i_last_eb_blk = cpu_to_le64(blkno); } static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et) { struct ocfs2_dinode *di = et->et_object; BUG_ON(et->et_ops != &ocfs2_dinode_et_ops); return 
le64_to_cpu(di->i_last_eb_blk); } static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci); struct ocfs2_dinode *di = et->et_object; le32_add_cpu(&di->i_clusters, clusters); spin_lock(&oi->ip_lock); oi->ip_clusters = le32_to_cpu(di->i_clusters); spin_unlock(&oi->ip_lock); } static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec) { struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode; ocfs2_extent_map_insert_rec(inode, rec); } static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et, u32 clusters) { struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode; ocfs2_extent_map_trunc(inode, clusters); } static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec) { struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci); struct ocfs2_super *osb = OCFS2_SB(oi->vfs_inode.i_sb); BUG_ON(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL); mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) && (oi->ip_clusters != le32_to_cpu(rec->e_cpos)), "Device %s, asking for sparse allocation: inode %llu, " "cpos %u, clusters %u\n", osb->dev_str, (unsigned long long)oi->ip_blkno, rec->e_cpos, oi->ip_clusters); return 0; } static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et) { struct ocfs2_dinode *di = et->et_object; BUG_ON(et->et_ops != &ocfs2_dinode_et_ops); BUG_ON(!OCFS2_IS_VALID_DINODE(di)); return 0; } static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et) { struct ocfs2_dinode *di = et->et_object; et->et_root_el = &di->id2.i_list; } static void ocfs2_xattr_value_fill_root_el(struct ocfs2_extent_tree *et) { struct ocfs2_xattr_value_buf *vb = et->et_object; et->et_root_el = &vb->vb_xv->xr_list; } static void ocfs2_xattr_value_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno) { struct ocfs2_xattr_value_buf *vb = et->et_object; vb->vb_xv->xr_last_eb_blk = 
cpu_to_le64(blkno); } static u64 ocfs2_xattr_value_get_last_eb_blk(struct ocfs2_extent_tree *et) { struct ocfs2_xattr_value_buf *vb = et->et_object; return le64_to_cpu(vb->vb_xv->xr_last_eb_blk); } static void ocfs2_xattr_value_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { struct ocfs2_xattr_value_buf *vb = et->et_object; le32_add_cpu(&vb->vb_xv->xr_clusters, clusters); } static struct ocfs2_extent_tree_operations ocfs2_xattr_value_et_ops = { .eo_set_last_eb_blk = ocfs2_xattr_value_set_last_eb_blk, .eo_get_last_eb_blk = ocfs2_xattr_value_get_last_eb_blk, .eo_update_clusters = ocfs2_xattr_value_update_clusters, .eo_fill_root_el = ocfs2_xattr_value_fill_root_el, }; static void ocfs2_xattr_tree_fill_root_el(struct ocfs2_extent_tree *et) { struct ocfs2_xattr_block *xb = et->et_object; et->et_root_el = &xb->xb_attrs.xb_root.xt_list; } static void ocfs2_xattr_tree_fill_max_leaf_clusters(struct ocfs2_extent_tree *et) { struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); et->et_max_leaf_clusters = ocfs2_clusters_for_bytes(sb, OCFS2_MAX_XATTR_TREE_LEAF_SIZE); } static void ocfs2_xattr_tree_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno) { struct ocfs2_xattr_block *xb = et->et_object; struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root; xt->xt_last_eb_blk = cpu_to_le64(blkno); } static u64 ocfs2_xattr_tree_get_last_eb_blk(struct ocfs2_extent_tree *et) { struct ocfs2_xattr_block *xb = et->et_object; struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root; return le64_to_cpu(xt->xt_last_eb_blk); } static void ocfs2_xattr_tree_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { struct ocfs2_xattr_block *xb = et->et_object; le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, clusters); } static struct ocfs2_extent_tree_operations ocfs2_xattr_tree_et_ops = { .eo_set_last_eb_blk = ocfs2_xattr_tree_set_last_eb_blk, .eo_get_last_eb_blk = ocfs2_xattr_tree_get_last_eb_blk, .eo_update_clusters = ocfs2_xattr_tree_update_clusters, 
.eo_fill_root_el = ocfs2_xattr_tree_fill_root_el, .eo_fill_max_leaf_clusters = ocfs2_xattr_tree_fill_max_leaf_clusters, }; static void ocfs2_dx_root_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno) { struct ocfs2_dx_root_block *dx_root = et->et_object; dx_root->dr_last_eb_blk = cpu_to_le64(blkno); } static u64 ocfs2_dx_root_get_last_eb_blk(struct ocfs2_extent_tree *et) { struct ocfs2_dx_root_block *dx_root = et->et_object; return le64_to_cpu(dx_root->dr_last_eb_blk); } static void ocfs2_dx_root_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { struct ocfs2_dx_root_block *dx_root = et->et_object; le32_add_cpu(&dx_root->dr_clusters, clusters); } static int ocfs2_dx_root_sanity_check(struct ocfs2_extent_tree *et) { struct ocfs2_dx_root_block *dx_root = et->et_object; BUG_ON(!OCFS2_IS_VALID_DX_ROOT(dx_root)); return 0; } static void ocfs2_dx_root_fill_root_el(struct ocfs2_extent_tree *et) { struct ocfs2_dx_root_block *dx_root = et->et_object; et->et_root_el = &dx_root->dr_list; } static struct ocfs2_extent_tree_operations ocfs2_dx_root_et_ops = { .eo_set_last_eb_blk = ocfs2_dx_root_set_last_eb_blk, .eo_get_last_eb_blk = ocfs2_dx_root_get_last_eb_blk, .eo_update_clusters = ocfs2_dx_root_update_clusters, .eo_sanity_check = ocfs2_dx_root_sanity_check, .eo_fill_root_el = ocfs2_dx_root_fill_root_el, }; static void ocfs2_refcount_tree_fill_root_el(struct ocfs2_extent_tree *et) { struct ocfs2_refcount_block *rb = et->et_object; et->et_root_el = &rb->rf_list; } static void ocfs2_refcount_tree_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno) { struct ocfs2_refcount_block *rb = et->et_object; rb->rf_last_eb_blk = cpu_to_le64(blkno); } static u64 ocfs2_refcount_tree_get_last_eb_blk(struct ocfs2_extent_tree *et) { struct ocfs2_refcount_block *rb = et->et_object; return le64_to_cpu(rb->rf_last_eb_blk); } static void ocfs2_refcount_tree_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { struct ocfs2_refcount_block *rb = et->et_object; 
le32_add_cpu(&rb->rf_clusters, clusters); } static enum ocfs2_contig_type ocfs2_refcount_tree_extent_contig(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *ext, struct ocfs2_extent_rec *insert_rec) { return CONTIG_NONE; } static struct ocfs2_extent_tree_operations ocfs2_refcount_tree_et_ops = { .eo_set_last_eb_blk = ocfs2_refcount_tree_set_last_eb_blk, .eo_get_last_eb_blk = ocfs2_refcount_tree_get_last_eb_blk, .eo_update_clusters = ocfs2_refcount_tree_update_clusters, .eo_fill_root_el = ocfs2_refcount_tree_fill_root_el, .eo_extent_contig = ocfs2_refcount_tree_extent_contig, }; static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et, struct ocfs2_caching_info *ci, struct buffer_head *bh, ocfs2_journal_access_func access, void *obj, struct ocfs2_extent_tree_operations *ops) { et->et_ops = ops; et->et_root_bh = bh; et->et_ci = ci; et->et_root_journal_access = access; if (!obj) obj = (void *)bh->b_data; et->et_object = obj; et->et_ops->eo_fill_root_el(et); if (!et->et_ops->eo_fill_max_leaf_clusters) et->et_max_leaf_clusters = 0; else et->et_ops->eo_fill_max_leaf_clusters(et); } void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et, struct ocfs2_caching_info *ci, struct buffer_head *bh) { __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_di, NULL, &ocfs2_dinode_et_ops); } void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et, struct ocfs2_caching_info *ci, struct buffer_head *bh) { __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_xb, NULL, &ocfs2_xattr_tree_et_ops); } void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et, struct ocfs2_caching_info *ci, struct ocfs2_xattr_value_buf *vb) { __ocfs2_init_extent_tree(et, ci, vb->vb_bh, vb->vb_access, vb, &ocfs2_xattr_value_et_ops); } void ocfs2_init_dx_root_extent_tree(struct ocfs2_extent_tree *et, struct ocfs2_caching_info *ci, struct buffer_head *bh) { __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_dr, NULL, &ocfs2_dx_root_et_ops); } void 
ocfs2_init_refcount_extent_tree(struct ocfs2_extent_tree *et, struct ocfs2_caching_info *ci, struct buffer_head *bh) { __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_rb, NULL, &ocfs2_refcount_tree_et_ops); } static inline void ocfs2_et_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 new_last_eb_blk) { et->et_ops->eo_set_last_eb_blk(et, new_last_eb_blk); } static inline u64 ocfs2_et_get_last_eb_blk(struct ocfs2_extent_tree *et) { return et->et_ops->eo_get_last_eb_blk(et); } static inline void ocfs2_et_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { et->et_ops->eo_update_clusters(et, clusters); } static inline void ocfs2_et_extent_map_insert(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec) { if (et->et_ops->eo_extent_map_insert) et->et_ops->eo_extent_map_insert(et, rec); } static inline void ocfs2_et_extent_map_truncate(struct ocfs2_extent_tree *et, u32 clusters) { if (et->et_ops->eo_extent_map_truncate) et->et_ops->eo_extent_map_truncate(et, clusters); } static inline int ocfs2_et_root_journal_access(handle_t *handle, struct ocfs2_extent_tree *et, int type) { return et->et_root_journal_access(handle, et->et_ci, et->et_root_bh, type); } static inline enum ocfs2_contig_type ocfs2_et_extent_contig(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec, struct ocfs2_extent_rec *insert_rec) { if (et->et_ops->eo_extent_contig) return et->et_ops->eo_extent_contig(et, rec, insert_rec); return ocfs2_extent_rec_contig( ocfs2_metadata_cache_get_super(et->et_ci), rec, insert_rec); } static inline int ocfs2_et_insert_check(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec) { int ret = 0; if (et->et_ops->eo_insert_check) ret = et->et_ops->eo_insert_check(et, rec); return ret; } static inline int ocfs2_et_sanity_check(struct ocfs2_extent_tree *et) { int ret = 0; if (et->et_ops->eo_sanity_check) ret = et->et_ops->eo_sanity_check(et); return ret; } static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, 
struct ocfs2_extent_block *eb);
static void ocfs2_adjust_rightmost_records(handle_t *handle,
					   struct ocfs2_extent_tree *et,
					   struct ocfs2_path *path,
					   struct ocfs2_extent_rec *insert_rec);
/*
 * Reset the actual path elements so that we can re-use the structure
 * to build another path. Generally, this involves freeing the buffer
 * heads.
 */
void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
{
	int i, start = 0, depth = 0;
	struct ocfs2_path_item *node;

	/* When keeping the root, item 0 (the root node) is preserved. */
	if (keep_root)
		start = 1;

	/* Drop the buffer head reference on every non-root item. */
	for(i = start; i < path_num_items(path); i++) {
		node = &path->p_node[i];

		brelse(node->bh);
		node->bh = NULL;
		node->el = NULL;
	}

	/*
	 * Tree depth may change during truncate, or insert. If we're
	 * keeping the root extent list, then make sure that our path
	 * structure reflects the proper depth.
	 */
	if (keep_root)
		depth = le16_to_cpu(path_root_el(path)->l_tree_depth);
	else
		path_root_access(path) = NULL;

	path->p_tree_depth = depth;
}

/* Release all path items (including the root's reference) and free @path. */
void ocfs2_free_path(struct ocfs2_path *path)
{
	if (path) {
		ocfs2_reinit_path(path, 0);
		kfree(path);
	}
}

/*
 * All the elements of src into dest. After this call, src could be freed
 * without affecting dest.
 *
 * Both paths should have the same root. Any non-root elements of dest
 * will be freed.
 */
static void ocfs2_cp_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
	int i;

	BUG_ON(path_root_bh(dest) != path_root_bh(src));
	BUG_ON(path_root_el(dest) != path_root_el(src));
	BUG_ON(path_root_access(dest) != path_root_access(src));

	/* Frees dest's old non-root items before copying. */
	ocfs2_reinit_path(dest, 1);

	for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
		dest->p_node[i].bh = src->p_node[i].bh;
		dest->p_node[i].el = src->p_node[i].el;

		/* Take an extra reference so src can be freed safely. */
		if (dest->p_node[i].bh)
			get_bh(dest->p_node[i].bh);
	}
}

/*
 * Make the *dest path the same as src and re-initialize src path to
 * have a root only.
 */
static void ocfs2_mv_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
	int i;

	/* Both paths must hang off the same root block and journal
	 * access function - moving between unrelated trees is a bug. */
	BUG_ON(path_root_bh(dest) != path_root_bh(src));
	BUG_ON(path_root_access(dest) != path_root_access(src));

	for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
		/* Ownership moves: drop dest's old reference, steal src's. */
		brelse(dest->p_node[i].bh);

		dest->p_node[i].bh = src->p_node[i].bh;
		dest->p_node[i].el = src->p_node[i].el;

		src->p_node[i].bh = NULL;
		src->p_node[i].el = NULL;
	}
}

/*
 * Insert an extent block at given index.
 *
 * This will not take an additional reference on eb_bh.
 */
static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index,
					struct buffer_head *eb_bh)
{
	struct ocfs2_extent_block *eb =
		(struct ocfs2_extent_block *)eb_bh->b_data;

	/*
	 * Right now, no root bh is an extent block, so this helps
	 * catch code errors with dinode trees. The assertion can be
	 * safely removed if we ever need to insert extent block
	 * structures at the root.
	 */
	BUG_ON(index == 0);

	path->p_node[index].bh = eb_bh;
	path->p_node[index].el = &eb->h_list;
}

/*
 * Allocate a new path rooted at @root_bh/@root_el. Takes its own
 * reference on @root_bh. Returns NULL on allocation failure.
 */
static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh,
					 struct ocfs2_extent_list *root_el,
					 ocfs2_journal_access_func access)
{
	struct ocfs2_path *path;

	BUG_ON(le16_to_cpu(root_el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH);

	path = kzalloc(sizeof(*path), GFP_NOFS);
	if (path) {
		path->p_tree_depth = le16_to_cpu(root_el->l_tree_depth);
		get_bh(root_bh);
		path_root_bh(path) = root_bh;
		path_root_el(path) = root_el;
		path_root_access(path) = access;
	}

	return path;
}

/* New empty path sharing @path's root. */
struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path)
{
	return ocfs2_new_path(path_root_bh(path), path_root_el(path),
			      path_root_access(path));
}

/* New empty path rooted at the extent tree's root block. */
struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et)
{
	return ocfs2_new_path(et->et_root_bh, et->et_root_el,
			      et->et_root_journal_access);
}

/*
 * Journal the buffer at depth idx. All idx>0 are extent_blocks,
 * otherwise it's the root_access function.
* * I don't like the way this function's name looks next to * ocfs2_journal_access_path(), but I don't have a better one. */ int ocfs2_path_bh_journal_access(handle_t *handle, struct ocfs2_caching_info *ci, struct ocfs2_path *path, int idx) { ocfs2_journal_access_func access = path_root_access(path); if (!access) access = ocfs2_journal_access; if (idx) access = ocfs2_journal_access_eb; return access(handle, ci, path->p_node[idx].bh, OCFS2_JOURNAL_ACCESS_WRITE); } /* * Convenience function to journal all components in a path. */ int ocfs2_journal_access_path(struct ocfs2_caching_info *ci, handle_t *handle, struct ocfs2_path *path) { int i, ret = 0; if (!path) goto out; for(i = 0; i < path_num_items(path); i++) { ret = ocfs2_path_bh_journal_access(handle, ci, path, i); if (ret < 0) { mlog_errno(ret); goto out; } } out: return ret; } /* * Return the index of the extent record which contains cluster #v_cluster. * -1 is returned if it was not found. * * Should work fine on interior and exterior nodes. */ int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster) { int ret = -1; int i; struct ocfs2_extent_rec *rec; u32 rec_end, rec_start, clusters; for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { rec = &el->l_recs[i]; rec_start = le32_to_cpu(rec->e_cpos); clusters = ocfs2_rec_clusters(el, rec); rec_end = rec_start + clusters; if (v_cluster >= rec_start && v_cluster < rec_end) { ret = i; break; } } return ret; } /* * NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and * ocfs2_extent_rec_contig only work properly against leaf nodes! 
 */
/* Does physical block @blkno start immediately after the blocks of @ext? */
static int ocfs2_block_extent_contig(struct super_block *sb,
				     struct ocfs2_extent_rec *ext,
				     u64 blkno)
{
	u64 blk_end = le64_to_cpu(ext->e_blkno);

	blk_end += ocfs2_clusters_to_blocks(sb,
					    le16_to_cpu(ext->e_leaf_clusters));

	return blkno == blk_end;
}

/* Does @right's logical start (e_cpos) immediately follow @left's end? */
static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left,
				  struct ocfs2_extent_rec *right)
{
	u32 left_range;

	left_range = le32_to_cpu(left->e_cpos) +
		le16_to_cpu(left->e_leaf_clusters);

	return (left_range == le32_to_cpu(right->e_cpos));
}

/*
 * Determine whether @insert_rec can be coalesced with @ext: both the
 * logical (cluster) and physical (block) ranges must be contiguous.
 * Returns CONTIG_RIGHT if @insert_rec follows @ext, CONTIG_LEFT if it
 * precedes it, CONTIG_NONE otherwise.
 */
static enum ocfs2_contig_type
	ocfs2_extent_rec_contig(struct super_block *sb,
				struct ocfs2_extent_rec *ext,
				struct ocfs2_extent_rec *insert_rec)
{
	u64 blkno = le64_to_cpu(insert_rec->e_blkno);

	/*
	 * Refuse to coalesce extent records with different flag
	 * fields - we don't want to mix unwritten extents with user
	 * data.
	 */
	if (ext->e_flags != insert_rec->e_flags)
		return CONTIG_NONE;

	if (ocfs2_extents_adjacent(ext, insert_rec) &&
	    ocfs2_block_extent_contig(sb, ext, blkno))
			return CONTIG_RIGHT;

	blkno = le64_to_cpu(ext->e_blkno);
	if (ocfs2_extents_adjacent(insert_rec, ext) &&
	    ocfs2_block_extent_contig(sb, insert_rec, blkno))
		return CONTIG_LEFT;

	return CONTIG_NONE;
}

/*
 * NOTE: We can have pretty much any combination of contiguousness and
 * appending.
 *
 * The usefulness of APPEND_TAIL is more in that it lets us know that
 * we'll have to update the path to that leaf.
*/ enum ocfs2_append_type { APPEND_NONE = 0, APPEND_TAIL, }; enum ocfs2_split_type { SPLIT_NONE = 0, SPLIT_LEFT, SPLIT_RIGHT, }; struct ocfs2_insert_type { enum ocfs2_split_type ins_split; enum ocfs2_append_type ins_appending; enum ocfs2_contig_type ins_contig; int ins_contig_index; int ins_tree_depth; }; struct ocfs2_merge_ctxt { enum ocfs2_contig_type c_contig_type; int c_has_empty_extent; int c_split_covers_rec; }; static int ocfs2_validate_extent_block(struct super_block *sb, struct buffer_head *bh) { int rc; struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)bh->b_data; trace_ocfs2_validate_extent_block((unsigned long long)bh->b_blocknr); BUG_ON(!buffer_uptodate(bh)); /* * If the ecc fails, we return the error but otherwise * leave the filesystem running. We know any error is * local to this block. */ rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &eb->h_check); if (rc) { mlog(ML_ERROR, "Checksum failed for extent block %llu\n", (unsigned long long)bh->b_blocknr); return rc; } /* * Errors after here are fatal. */ if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { ocfs2_error(sb, "Extent block #%llu has bad signature %.*s", (unsigned long long)bh->b_blocknr, 7, eb->h_signature); return -EINVAL; } if (le64_to_cpu(eb->h_blkno) != bh->b_blocknr) { ocfs2_error(sb, "Extent block #%llu has an invalid h_blkno " "of %llu", (unsigned long long)bh->b_blocknr, (unsigned long long)le64_to_cpu(eb->h_blkno)); return -EINVAL; } if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation) { ocfs2_error(sb, "Extent block #%llu has an invalid " "h_fs_generation of #%u", (unsigned long long)bh->b_blocknr, le32_to_cpu(eb->h_fs_generation)); return -EINVAL; } return 0; } int ocfs2_read_extent_block(struct ocfs2_caching_info *ci, u64 eb_blkno, struct buffer_head **bh) { int rc; struct buffer_head *tmp = *bh; rc = ocfs2_read_block(ci, eb_blkno, &tmp, ocfs2_validate_extent_block); /* If ocfs2_read_block() got us a new bh, pass it up. 
*/ if (!rc && !*bh) *bh = tmp; return rc; } /* * How many free extents have we got before we need more meta data? */ int ocfs2_num_free_extents(struct ocfs2_super *osb, struct ocfs2_extent_tree *et) { int retval; struct ocfs2_extent_list *el = NULL; struct ocfs2_extent_block *eb; struct buffer_head *eb_bh = NULL; u64 last_eb_blk = 0; el = et->et_root_el; last_eb_blk = ocfs2_et_get_last_eb_blk(et); if (last_eb_blk) { retval = ocfs2_read_extent_block(et->et_ci, last_eb_blk, &eb_bh); if (retval < 0) { mlog_errno(retval); goto bail; } eb = (struct ocfs2_extent_block *) eb_bh->b_data; el = &eb->h_list; } BUG_ON(el->l_tree_depth != 0); retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec); bail: brelse(eb_bh); trace_ocfs2_num_free_extents(retval); return retval; } /* expects array to already be allocated * * sets h_signature, h_blkno, h_suballoc_bit, h_suballoc_slot, and * l_count for you */ static int ocfs2_create_new_meta_bhs(handle_t *handle, struct ocfs2_extent_tree *et, int wanted, struct ocfs2_alloc_context *meta_ac, struct buffer_head *bhs[]) { int count, status, i; u16 suballoc_bit_start; u32 num_got; u64 suballoc_loc, first_blkno; struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci)); struct ocfs2_extent_block *eb; count = 0; while (count < wanted) { status = ocfs2_claim_metadata(handle, meta_ac, wanted - count, &suballoc_loc, &suballoc_bit_start, &num_got, &first_blkno); if (status < 0) { mlog_errno(status); goto bail; } for(i = count; i < (num_got + count); i++) { bhs[i] = sb_getblk(osb->sb, first_blkno); if (bhs[i] == NULL) { status = -EIO; mlog_errno(status); goto bail; } ocfs2_set_new_buffer_uptodate(et->et_ci, bhs[i]); status = ocfs2_journal_access_eb(handle, et->et_ci, bhs[i], OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto bail; } memset(bhs[i]->b_data, 0, osb->sb->s_blocksize); eb = (struct ocfs2_extent_block *) bhs[i]->b_data; /* Ok, setup the minimal stuff here. 
*/ strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE); eb->h_blkno = cpu_to_le64(first_blkno); eb->h_fs_generation = cpu_to_le32(osb->fs_generation); eb->h_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); eb->h_suballoc_loc = cpu_to_le64(suballoc_loc); eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start); eb->h_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb)); suballoc_bit_start++; first_blkno++; /* We'll also be dirtied by the caller, so * this isn't absolutely necessary. */ ocfs2_journal_dirty(handle, bhs[i]); } count += num_got; } status = 0; bail: if (status < 0) { for(i = 0; i < wanted; i++) { brelse(bhs[i]); bhs[i] = NULL; } mlog_errno(status); } return status; } /* * Helper function for ocfs2_add_branch() and ocfs2_shift_tree_depth(). * * Returns the sum of the rightmost extent rec logical offset and * cluster count. * * ocfs2_add_branch() uses this to determine what logical cluster * value should be populated into the leftmost new branch records. * * ocfs2_shift_tree_depth() uses this to determine the # clusters * value for the new topmost tree record. */ static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el) { int i; i = le16_to_cpu(el->l_next_free_rec) - 1; return le32_to_cpu(el->l_recs[i].e_cpos) + ocfs2_rec_clusters(el, &el->l_recs[i]); } /* * Change range of the branches in the right most path according to the leaf * extent block's rightmost record. 
*/ static int ocfs2_adjust_rightmost_branch(handle_t *handle, struct ocfs2_extent_tree *et) { int status; struct ocfs2_path *path = NULL; struct ocfs2_extent_list *el; struct ocfs2_extent_rec *rec; path = ocfs2_new_path_from_et(et); if (!path) { status = -ENOMEM; return status; } status = ocfs2_find_path(et->et_ci, path, UINT_MAX); if (status < 0) { mlog_errno(status); goto out; } status = ocfs2_extend_trans(handle, path_num_items(path)); if (status < 0) { mlog_errno(status); goto out; } status = ocfs2_journal_access_path(et->et_ci, handle, path); if (status < 0) { mlog_errno(status); goto out; } el = path_leaf_el(path); rec = &el->l_recs[le32_to_cpu(el->l_next_free_rec) - 1]; ocfs2_adjust_rightmost_records(handle, et, path, rec); out: ocfs2_free_path(path); return status; } /* * Add an entire tree branch to our inode. eb_bh is the extent block * to start at, if we don't want to start the branch at the root * structure. * * last_eb_bh is required as we have to update it's next_leaf pointer * for the new last extent block. * * the new branch will be 'empty' in the sense that every block will * contain a single record with cluster count == 0. */ static int ocfs2_add_branch(handle_t *handle, struct ocfs2_extent_tree *et, struct buffer_head *eb_bh, struct buffer_head **last_eb_bh, struct ocfs2_alloc_context *meta_ac) { int status, new_blocks, i; u64 next_blkno, new_last_eb_blk; struct buffer_head *bh; struct buffer_head **new_eb_bhs = NULL; struct ocfs2_extent_block *eb; struct ocfs2_extent_list *eb_el; struct ocfs2_extent_list *el; u32 new_cpos, root_end; BUG_ON(!last_eb_bh || !*last_eb_bh); if (eb_bh) { eb = (struct ocfs2_extent_block *) eb_bh->b_data; el = &eb->h_list; } else el = et->et_root_el; /* we never add a branch to a leaf. 
*/ BUG_ON(!el->l_tree_depth); new_blocks = le16_to_cpu(el->l_tree_depth); eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data; new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list); root_end = ocfs2_sum_rightmost_rec(et->et_root_el); /* * If there is a gap before the root end and the real end * of the righmost leaf block, we need to remove the gap * between new_cpos and root_end first so that the tree * is consistent after we add a new branch(it will start * from new_cpos). */ if (root_end > new_cpos) { trace_ocfs2_adjust_rightmost_branch( (unsigned long long) ocfs2_metadata_cache_owner(et->et_ci), root_end, new_cpos); status = ocfs2_adjust_rightmost_branch(handle, et); if (status) { mlog_errno(status); goto bail; } } /* allocate the number of new eb blocks we need */ new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *), GFP_KERNEL); if (!new_eb_bhs) { status = -ENOMEM; mlog_errno(status); goto bail; } status = ocfs2_create_new_meta_bhs(handle, et, new_blocks, meta_ac, new_eb_bhs); if (status < 0) { mlog_errno(status); goto bail; } /* Note: new_eb_bhs[new_blocks - 1] is the guy which will be * linked with the rest of the tree. * conversly, new_eb_bhs[0] is the new bottommost leaf. * * when we leave the loop, new_last_eb_blk will point to the * newest leaf, and next_blkno will point to the topmost extent * block. */ next_blkno = new_last_eb_blk = 0; for(i = 0; i < new_blocks; i++) { bh = new_eb_bhs[i]; eb = (struct ocfs2_extent_block *) bh->b_data; /* ocfs2_create_new_meta_bhs() should create it right! 
*/ BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); eb_el = &eb->h_list; status = ocfs2_journal_access_eb(handle, et->et_ci, bh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto bail; } eb->h_next_leaf_blk = 0; eb_el->l_tree_depth = cpu_to_le16(i); eb_el->l_next_free_rec = cpu_to_le16(1); /* * This actually counts as an empty extent as * c_clusters == 0 */ eb_el->l_recs[0].e_cpos = cpu_to_le32(new_cpos); eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno); /* * eb_el isn't always an interior node, but even leaf * nodes want a zero'd flags and reserved field so * this gets the whole 32 bits regardless of use. */ eb_el->l_recs[0].e_int_clusters = cpu_to_le32(0); if (!eb_el->l_tree_depth) new_last_eb_blk = le64_to_cpu(eb->h_blkno); ocfs2_journal_dirty(handle, bh); next_blkno = le64_to_cpu(eb->h_blkno); } /* This is a bit hairy. We want to update up to three blocks * here without leaving any of them in an inconsistent state * in case of error. We don't have to worry about * journal_dirty erroring as it won't unless we've aborted the * handle (in which case we would never be here) so reserving * the write with journal_access is all we need to do. */ status = ocfs2_journal_access_eb(handle, et->et_ci, *last_eb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; } status = ocfs2_et_root_journal_access(handle, et, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; } if (eb_bh) { status = ocfs2_journal_access_eb(handle, et->et_ci, eb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; } } /* Link the new branch into the rest of the tree (el will * either be on the root_bh, or the extent block passed in. 
*/ i = le16_to_cpu(el->l_next_free_rec); el->l_recs[i].e_blkno = cpu_to_le64(next_blkno); el->l_recs[i].e_cpos = cpu_to_le32(new_cpos); el->l_recs[i].e_int_clusters = 0; le16_add_cpu(&el->l_next_free_rec, 1); /* fe needs a new last extent block pointer, as does the * next_leaf on the previously last-extent-block. */ ocfs2_et_set_last_eb_blk(et, new_last_eb_blk); eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data; eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk); ocfs2_journal_dirty(handle, *last_eb_bh); ocfs2_journal_dirty(handle, et->et_root_bh); if (eb_bh) ocfs2_journal_dirty(handle, eb_bh); /* * Some callers want to track the rightmost leaf so pass it * back here. */ brelse(*last_eb_bh); get_bh(new_eb_bhs[0]); *last_eb_bh = new_eb_bhs[0]; status = 0; bail: if (new_eb_bhs) { for (i = 0; i < new_blocks; i++) brelse(new_eb_bhs[i]); kfree(new_eb_bhs); } return status; } /* * adds another level to the allocation tree. * returns back the new extent block so you can add a branch to it * after this call. */ static int ocfs2_shift_tree_depth(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_alloc_context *meta_ac, struct buffer_head **ret_new_eb_bh) { int status, i; u32 new_clusters; struct buffer_head *new_eb_bh = NULL; struct ocfs2_extent_block *eb; struct ocfs2_extent_list *root_el; struct ocfs2_extent_list *eb_el; status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac, &new_eb_bh); if (status < 0) { mlog_errno(status); goto bail; } eb = (struct ocfs2_extent_block *) new_eb_bh->b_data; /* ocfs2_create_new_meta_bhs() should create it right! 
*/ BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); eb_el = &eb->h_list; root_el = et->et_root_el; status = ocfs2_journal_access_eb(handle, et->et_ci, new_eb_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto bail; } /* copy the root extent list data into the new extent block */ eb_el->l_tree_depth = root_el->l_tree_depth; eb_el->l_next_free_rec = root_el->l_next_free_rec; for (i = 0; i < le16_to_cpu(root_el->l_next_free_rec); i++) eb_el->l_recs[i] = root_el->l_recs[i]; ocfs2_journal_dirty(handle, new_eb_bh); status = ocfs2_et_root_journal_access(handle, et, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; } new_clusters = ocfs2_sum_rightmost_rec(eb_el); /* update root_bh now */ le16_add_cpu(&root_el->l_tree_depth, 1); root_el->l_recs[0].e_cpos = 0; root_el->l_recs[0].e_blkno = eb->h_blkno; root_el->l_recs[0].e_int_clusters = cpu_to_le32(new_clusters); for (i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++) memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec)); root_el->l_next_free_rec = cpu_to_le16(1); /* If this is our 1st tree depth shift, then last_eb_blk * becomes the allocated extent block */ if (root_el->l_tree_depth == cpu_to_le16(1)) ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); ocfs2_journal_dirty(handle, et->et_root_bh); *ret_new_eb_bh = new_eb_bh; new_eb_bh = NULL; status = 0; bail: brelse(new_eb_bh); return status; } /* * Should only be called when there is no space left in any of the * leaf nodes. What we want to do is find the lowest tree depth * non-leaf extent block with room for new records. There are three * valid results of this search: * * 1) a lowest extent block is found, then we pass it back in * *lowest_eb_bh and return '0' * * 2) the search fails to find anything, but the root_el has room. 
We * pass NULL back in *lowest_eb_bh, but still return '0' * * 3) the search fails to find anything AND the root_el is full, in * which case we return > 0 * * return status < 0 indicates an error. */ static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et, struct buffer_head **target_bh) { int status = 0, i; u64 blkno; struct ocfs2_extent_block *eb; struct ocfs2_extent_list *el; struct buffer_head *bh = NULL; struct buffer_head *lowest_bh = NULL; *target_bh = NULL; el = et->et_root_el; while(le16_to_cpu(el->l_tree_depth) > 1) { if (le16_to_cpu(el->l_next_free_rec) == 0) { ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), "Owner %llu has empty " "extent list (next_free_rec == 0)", (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci)); status = -EIO; goto bail; } i = le16_to_cpu(el->l_next_free_rec) - 1; blkno = le64_to_cpu(el->l_recs[i].e_blkno); if (!blkno) { ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), "Owner %llu has extent " "list where extent # %d has no physical " "block start", (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), i); status = -EIO; goto bail; } brelse(bh); bh = NULL; status = ocfs2_read_extent_block(et->et_ci, blkno, &bh); if (status < 0) { mlog_errno(status); goto bail; } eb = (struct ocfs2_extent_block *) bh->b_data; el = &eb->h_list; if (le16_to_cpu(el->l_next_free_rec) < le16_to_cpu(el->l_count)) { brelse(lowest_bh); lowest_bh = bh; get_bh(lowest_bh); } } /* If we didn't find one and the fe doesn't have any room, * then return '1' */ el = et->et_root_el; if (!lowest_bh && (el->l_next_free_rec == el->l_count)) status = 1; *target_bh = lowest_bh; bail: brelse(bh); return status; } /* * Grow a b-tree so that it has more records. * * We might shift the tree depth in which case existing paths should * be considered invalid. * * Tree depth after the grow is returned via *final_depth. * * *last_eb_bh will be updated by ocfs2_add_branch(). 
*/ static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et, int *final_depth, struct buffer_head **last_eb_bh, struct ocfs2_alloc_context *meta_ac) { int ret, shift; struct ocfs2_extent_list *el = et->et_root_el; int depth = le16_to_cpu(el->l_tree_depth); struct buffer_head *bh = NULL; BUG_ON(meta_ac == NULL); shift = ocfs2_find_branch_target(et, &bh); if (shift < 0) { ret = shift; mlog_errno(ret); goto out; } /* We traveled all the way to the bottom of the allocation tree * and didn't find room for any more extents - we need to add * another tree level */ if (shift) { BUG_ON(bh); trace_ocfs2_grow_tree( (unsigned long long) ocfs2_metadata_cache_owner(et->et_ci), depth); /* ocfs2_shift_tree_depth will return us a buffer with * the new extent block (so we can pass that to * ocfs2_add_branch). */ ret = ocfs2_shift_tree_depth(handle, et, meta_ac, &bh); if (ret < 0) { mlog_errno(ret); goto out; } depth++; if (depth == 1) { /* * Special case: we have room now if we shifted from * tree_depth 0, so no more work needs to be done. * * We won't be calling add_branch, so pass * back *last_eb_bh as the new leaf. At depth * zero, it should always be null so there's * no reason to brelse. */ BUG_ON(*last_eb_bh); get_bh(bh); *last_eb_bh = bh; goto out; } } /* call ocfs2_add_branch to add the final part of the tree with * the new data. */ ret = ocfs2_add_branch(handle, et, bh, last_eb_bh, meta_ac); if (ret < 0) { mlog_errno(ret); goto out; } out: if (final_depth) *final_depth = depth; brelse(bh); return ret; } /* * This function will discard the rightmost extent record. */ static void ocfs2_shift_records_right(struct ocfs2_extent_list *el) { int next_free = le16_to_cpu(el->l_next_free_rec); int count = le16_to_cpu(el->l_count); unsigned int num_bytes; BUG_ON(!next_free); /* This will cause us to go off the end of our extent list. 
*/ BUG_ON(next_free >= count); num_bytes = sizeof(struct ocfs2_extent_rec) * next_free; memmove(&el->l_recs[1], &el->l_recs[0], num_bytes); } static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el, struct ocfs2_extent_rec *insert_rec) { int i, insert_index, next_free, has_empty, num_bytes; u32 insert_cpos = le32_to_cpu(insert_rec->e_cpos); struct ocfs2_extent_rec *rec; next_free = le16_to_cpu(el->l_next_free_rec); has_empty = ocfs2_is_empty_extent(&el->l_recs[0]); BUG_ON(!next_free); /* The tree code before us didn't allow enough room in the leaf. */ BUG_ON(el->l_next_free_rec == el->l_count && !has_empty); /* * The easiest way to approach this is to just remove the * empty extent and temporarily decrement next_free. */ if (has_empty) { /* * If next_free was 1 (only an empty extent), this * loop won't execute, which is fine. We still want * the decrement above to happen. */ for(i = 0; i < (next_free - 1); i++) el->l_recs[i] = el->l_recs[i+1]; next_free--; } /* * Figure out what the new record index should be. */ for(i = 0; i < next_free; i++) { rec = &el->l_recs[i]; if (insert_cpos < le32_to_cpu(rec->e_cpos)) break; } insert_index = i; trace_ocfs2_rotate_leaf(insert_cpos, insert_index, has_empty, next_free, le16_to_cpu(el->l_count)); BUG_ON(insert_index < 0); BUG_ON(insert_index >= le16_to_cpu(el->l_count)); BUG_ON(insert_index > next_free); /* * No need to memmove if we're just adding to the tail. */ if (insert_index != next_free) { BUG_ON(next_free >= le16_to_cpu(el->l_count)); num_bytes = next_free - insert_index; num_bytes *= sizeof(struct ocfs2_extent_rec); memmove(&el->l_recs[insert_index + 1], &el->l_recs[insert_index], num_bytes); } /* * Either we had an empty extent, and need to re-increment or * there was no empty extent on a non full rightmost leaf node, * in which case we still need to increment. */ next_free++; el->l_next_free_rec = cpu_to_le16(next_free); /* * Make sure none of the math above just messed up our tree. 
	 */
	BUG_ON(le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count));
	el->l_recs[insert_index] = *insert_rec;
}

/*
 * Remove a leading empty extent record, if present, shifting the
 * remaining records down and clearing the freed tail slot.
 */
static void ocfs2_remove_empty_extent(struct ocfs2_extent_list *el)
{
	int size, num_recs = le16_to_cpu(el->l_next_free_rec);

	BUG_ON(num_recs == 0);

	if (ocfs2_is_empty_extent(&el->l_recs[0])) {
		num_recs--;
		size = num_recs * sizeof(struct ocfs2_extent_rec);
		memmove(&el->l_recs[0], &el->l_recs[1], size);
		memset(&el->l_recs[num_recs], 0,
		       sizeof(struct ocfs2_extent_rec));
		el->l_next_free_rec = cpu_to_le16(num_recs);
	}
}

/*
 * Create an empty extent record in slot 0.
 *
 * l_next_free_rec may be updated.
 *
 * If an empty extent already exists do nothing.
 */
static void ocfs2_create_empty_extent(struct ocfs2_extent_list *el)
{
	int next_free = le16_to_cpu(el->l_next_free_rec);

	/* Only valid against leaf lists. */
	BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);

	if (next_free == 0)
		goto set_and_inc;

	if (ocfs2_is_empty_extent(&el->l_recs[0]))
		return;

	mlog_bug_on_msg(el->l_count == el->l_next_free_rec,
			"Asked to create an empty extent in a full list:\n"
			"count = %u, tree depth = %u",
			le16_to_cpu(el->l_count),
			le16_to_cpu(el->l_tree_depth));

	ocfs2_shift_records_right(el);

set_and_inc:
	le16_add_cpu(&el->l_next_free_rec, 1);
	memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
}

/*
 * For a rotation which involves two leaf nodes, the "root node" is
 * the lowest level tree node which contains a path to both leafs. This
 * resulting set of information can be used to form a complete "subtree"
 *
 * This function is passed two full paths from the dinode down to a
 * pair of adjacent leaves. Its task is to figure out which path
 * index contains the subtree root - this can be the root index itself
 * in a worst-case rotation.
 *
 * The array index of the subtree root is passed back.
 */
int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
			    struct ocfs2_path *left,
			    struct ocfs2_path *right)
{
	int i = 0;

	/*
	 * Check that the caller passed in two paths from the same tree.
*/ BUG_ON(path_root_bh(left) != path_root_bh(right)); do { i++; /* * The caller didn't pass two adjacent paths. */ mlog_bug_on_msg(i > left->p_tree_depth, "Owner %llu, left depth %u, right depth %u\n" "left leaf blk %llu, right leaf blk %llu\n", (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), left->p_tree_depth, right->p_tree_depth, (unsigned long long)path_leaf_bh(left)->b_blocknr, (unsigned long long)path_leaf_bh(right)->b_blocknr); } while (left->p_node[i].bh->b_blocknr == right->p_node[i].bh->b_blocknr); return i - 1; } typedef void (path_insert_t)(void *, struct buffer_head *); /* * Traverse a btree path in search of cpos, starting at root_el. * * This code can be called with a cpos larger than the tree, in which * case it will return the rightmost path. */ static int __ocfs2_find_path(struct ocfs2_caching_info *ci, struct ocfs2_extent_list *root_el, u32 cpos, path_insert_t *func, void *data) { int i, ret = 0; u32 range; u64 blkno; struct buffer_head *bh = NULL; struct ocfs2_extent_block *eb; struct ocfs2_extent_list *el; struct ocfs2_extent_rec *rec; el = root_el; while (el->l_tree_depth) { if (le16_to_cpu(el->l_next_free_rec) == 0) { ocfs2_error(ocfs2_metadata_cache_get_super(ci), "Owner %llu has empty extent list at " "depth %u\n", (unsigned long long)ocfs2_metadata_cache_owner(ci), le16_to_cpu(el->l_tree_depth)); ret = -EROFS; goto out; } for(i = 0; i < le16_to_cpu(el->l_next_free_rec) - 1; i++) { rec = &el->l_recs[i]; /* * In the case that cpos is off the allocation * tree, this should just wind up returning the * rightmost record. 
*/ range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range) break; } blkno = le64_to_cpu(el->l_recs[i].e_blkno); if (blkno == 0) { ocfs2_error(ocfs2_metadata_cache_get_super(ci), "Owner %llu has bad blkno in extent list " "at depth %u (index %d)\n", (unsigned long long)ocfs2_metadata_cache_owner(ci), le16_to_cpu(el->l_tree_depth), i); ret = -EROFS; goto out; } brelse(bh); bh = NULL; ret = ocfs2_read_extent_block(ci, blkno, &bh); if (ret) { mlog_errno(ret); goto out; } eb = (struct ocfs2_extent_block *) bh->b_data; el = &eb->h_list; if (le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count)) { ocfs2_error(ocfs2_metadata_cache_get_super(ci), "Owner %llu has bad count in extent list " "at block %llu (next free=%u, count=%u)\n", (unsigned long long)ocfs2_metadata_cache_owner(ci), (unsigned long long)bh->b_blocknr, le16_to_cpu(el->l_next_free_rec), le16_to_cpu(el->l_count)); ret = -EROFS; goto out; } if (func) func(data, bh); } out: /* * Catch any trailing bh that the loop didn't handle. */ brelse(bh); return ret; } /* * Given an initialized path (that is, it has a valid root extent * list), this function will traverse the btree in search of the path * which would contain cpos. * * The path traveled is recorded in the path structure. * * Note that this will not do any comparisons on leaf node extent * records, so it will work fine in the case that we just added a tree * branch. 
 */
struct find_path_data {
	int index;	/* next path slot to fill (root is slot 0) */
	struct ocfs2_path *path;
};

/* __ocfs2_find_path() callback: record every visited block in the path. */
static void find_path_ins(void *data, struct buffer_head *bh)
{
	struct find_path_data *fp = data;

	get_bh(bh);
	ocfs2_path_insert_eb(fp->path, fp->index, bh);
	fp->index++;
}

/* Fill 'path' with the branch of the tree which would contain cpos. */
int ocfs2_find_path(struct ocfs2_caching_info *ci,
		    struct ocfs2_path *path, u32 cpos)
{
	struct find_path_data data;

	data.index = 1;
	data.path = path;
	return __ocfs2_find_path(ci, path_root_el(path), cpos,
				 find_path_ins, &data);
}

/* __ocfs2_find_path() callback: keep a reference to the leaf block only. */
static void find_leaf_ins(void *data, struct buffer_head *bh)
{
	struct ocfs2_extent_block *eb =(struct ocfs2_extent_block *)bh->b_data;
	struct ocfs2_extent_list *el = &eb->h_list;
	struct buffer_head **ret = data;

	/* We want to retain only the leaf block. */
	if (le16_to_cpu(el->l_tree_depth) == 0) {
		get_bh(bh);
		*ret = bh;
	}
}
/*
 * Find the leaf block in the tree which would contain cpos. No
 * checking of the actual leaf is done.
 *
 * Some paths want to call this instead of allocating a path structure
 * and calling ocfs2_find_path().
 *
 * This function doesn't handle non btree extent lists.
 */
int ocfs2_find_leaf(struct ocfs2_caching_info *ci,
		    struct ocfs2_extent_list *root_el, u32 cpos,
		    struct buffer_head **leaf_bh)
{
	int ret;
	struct buffer_head *bh = NULL;

	ret = __ocfs2_find_path(ci, root_el, cpos, find_leaf_ins, &bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	*leaf_bh = bh;
out:
	return ret;
}

/*
 * Adjust the adjacent records (left_rec, right_rec) involved in a rotation.
 *
 * Basically, we've moved stuff around at the bottom of the tree and
 * we need to fix up the extent records above the changes to reflect
 * the new changes.
 *
 * left_rec: the record on the left.
 * left_child_el: is the child list pointed to by left_rec
 * right_rec: the record to the right of left_rec
 * right_child_el: is the child list pointed to by right_rec
 *
 * By definition, this only works on interior nodes.
 */
static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec,
				  struct ocfs2_extent_list *left_child_el,
				  struct ocfs2_extent_rec *right_rec,
				  struct ocfs2_extent_list *right_child_el)
{
	u32 left_clusters, right_end;

	/*
	 * Interior nodes never have holes. Their cpos is the cpos of
	 * the leftmost record in their child list. Their cluster
	 * count covers the full theoretical range of their child list
	 * - the range between their cpos and the cpos of the record
	 * immediately to their right.
	 */
	left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
	if (!ocfs2_rec_clusters(right_child_el, &right_child_el->l_recs[0])) {
		/* Slot 0 is an empty extent; use the first real record. */
		BUG_ON(right_child_el->l_tree_depth);
		BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1);
		left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos);
	}
	left_clusters -= le32_to_cpu(left_rec->e_cpos);
	left_rec->e_int_clusters = cpu_to_le32(left_clusters);

	/*
	 * Calculate the rightmost cluster count boundary before
	 * moving cpos - we will need to adjust clusters after
	 * updating e_cpos to keep the same highest cluster count.
	 */
	right_end = le32_to_cpu(right_rec->e_cpos);
	right_end += le32_to_cpu(right_rec->e_int_clusters);

	right_rec->e_cpos = left_rec->e_cpos;
	le32_add_cpu(&right_rec->e_cpos, left_clusters);

	right_end -= le32_to_cpu(right_rec->e_cpos);
	right_rec->e_int_clusters = cpu_to_le32(right_end);
}

/*
 * Adjust the adjacent root node records involved in a
 * rotation. left_el_blkno is passed in as a key so that we can easily
 * find it's index in the root list.
 */
static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el,
				      struct ocfs2_extent_list *left_el,
				      struct ocfs2_extent_list *right_el,
				      u64 left_el_blkno)
{
	int i;

	BUG_ON(le16_to_cpu(root_el->l_tree_depth) <=
	       le16_to_cpu(left_el->l_tree_depth));

	for(i = 0; i < le16_to_cpu(root_el->l_next_free_rec) - 1; i++) {
		if (le64_to_cpu(root_el->l_recs[i].e_blkno) == left_el_blkno)
			break;
	}

	/*
	 * The path walking code should have never returned a root and
	 * two paths which are not adjacent.
	 */
	BUG_ON(i >= (le16_to_cpu(root_el->l_next_free_rec) - 1));

	ocfs2_adjust_adjacent_records(&root_el->l_recs[i], left_el,
				      &root_el->l_recs[i + 1], right_el);
}

/*
 * We've changed a leaf block (in right_path) and need to reflect that
 * change back up the subtree.
 *
 * This happens in multiple places:
 *   - When we've moved an extent record from the left path leaf to the right
 *     path leaf to make room for an empty extent in the left path leaf.
 *   - When our insert into the right path leaf is at the leftmost edge
 *     and requires an update of the path immediately to it's left. This
 *     can occur at the end of some types of rotation and appending inserts.
 *   - When we've adjusted the last extent record in the left path leaf and the
 *     1st extent record in the right path leaf during cross extent block merge.
 */
static void ocfs2_complete_edge_insert(handle_t *handle,
				       struct ocfs2_path *left_path,
				       struct ocfs2_path *right_path,
				       int subtree_index)
{
	int i, idx;
	struct ocfs2_extent_list *el, *left_el, *right_el;
	struct ocfs2_extent_rec *left_rec, *right_rec;
	struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;

	/*
	 * Update the counts and position values within all the
	 * interior nodes to reflect the leaf rotation we just did.
	 *
	 * The root node is handled below the loop.
	 *
	 * We begin the loop with right_el and left_el pointing to the
	 * leaf lists and work our way up.
	 *
	 * NOTE: within this loop, left_el and right_el always refer
	 * to the *child* lists.
	 */
	left_el = path_leaf_el(left_path);
	right_el = path_leaf_el(right_path);
	for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) {
		trace_ocfs2_complete_edge_insert(i);

		/*
		 * One nice property of knowing that all of these
		 * nodes are below the root is that we only deal with
		 * the leftmost right node record and the rightmost
		 * left node record.
		 */
		el = left_path->p_node[i].el;
		idx = le16_to_cpu(left_el->l_next_free_rec) - 1;
		left_rec = &el->l_recs[idx];

		el = right_path->p_node[i].el;
		right_rec = &el->l_recs[0];

		ocfs2_adjust_adjacent_records(left_rec, left_el, right_rec,
					      right_el);

		ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
		ocfs2_journal_dirty(handle, right_path->p_node[i].bh);

		/*
		 * Setup our list pointers now so that the current
		 * parents become children in the next iteration.
		 */
		left_el = left_path->p_node[i].el;
		right_el = right_path->p_node[i].el;
	}

	/*
	 * At the root node, adjust the two adjacent records which
	 * begin our path to the leaves.
	 */

	el = left_path->p_node[subtree_index].el;
	left_el = left_path->p_node[subtree_index + 1].el;
	right_el = right_path->p_node[subtree_index + 1].el;

	ocfs2_adjust_root_records(el, left_el, right_el,
				  left_path->p_node[subtree_index + 1].bh->b_blocknr);

	root_bh = left_path->p_node[subtree_index].bh;

	ocfs2_journal_dirty(handle, root_bh);
}

/*
 * Move the rightmost record of the (full) left leaf into slot 0 of
 * the right leaf, leaving an empty extent at the head of the left
 * leaf, then propagate the new boundary up to the subtree root via
 * ocfs2_complete_edge_insert().  Returns -EROFS on detected
 * corruption.
 */
static int ocfs2_rotate_subtree_right(handle_t *handle,
				      struct ocfs2_extent_tree *et,
				      struct ocfs2_path *left_path,
				      struct ocfs2_path *right_path,
				      int subtree_index)
{
	int ret, i;
	struct buffer_head *right_leaf_bh;
	struct buffer_head *left_leaf_bh = NULL;
	struct buffer_head *root_bh;
	struct ocfs2_extent_list *right_el, *left_el;
	struct ocfs2_extent_rec move_rec;

	left_leaf_bh = path_leaf_bh(left_path);
	left_el = path_leaf_el(left_path);

	if (left_el->l_next_free_rec != left_el->l_count) {
		ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
			    "Inode %llu has non-full interior leaf node %llu"
			    "(next free = %u)",
			    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
			    (unsigned long long)left_leaf_bh->b_blocknr,
			    le16_to_cpu(left_el->l_next_free_rec));
		return -EROFS;
	}

	/*
	 * This extent block may already have an empty record, so we
	 * return early if so.
	 */
	if (ocfs2_is_empty_extent(&left_el->l_recs[0]))
		return 0;

	root_bh = left_path->p_node[subtree_index].bh;
	BUG_ON(root_bh != right_path->p_node[subtree_index].bh);

	/* Journal every block we may dirty, from the subtree root down. */
	ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
					   subtree_index);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
		ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
						   right_path, i);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
						   left_path, i);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	right_leaf_bh = path_leaf_bh(right_path);
	right_el = path_leaf_el(right_path);

	/* This is a code error, not a disk corruption. */
	mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails "
			"because rightmost leaf block %llu is empty\n",
			(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
			(unsigned long long)right_leaf_bh->b_blocknr);

	ocfs2_create_empty_extent(right_el);

	ocfs2_journal_dirty(handle, right_leaf_bh);

	/* Do the copy now. */
	i = le16_to_cpu(left_el->l_next_free_rec) - 1;
	move_rec = left_el->l_recs[i];
	right_el->l_recs[0] = move_rec;

	/*
	 * Clear out the record we just copied and shift everything
	 * over, leaving an empty extent in the left leaf.
	 *
	 * We temporarily subtract from next_free_rec so that the
	 * shift will lose the tail record (which is now defunct).
	 */
	le16_add_cpu(&left_el->l_next_free_rec, -1);
	ocfs2_shift_records_right(left_el);
	memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
	le16_add_cpu(&left_el->l_next_free_rec, 1);

	ocfs2_journal_dirty(handle, left_leaf_bh);

	ocfs2_complete_edge_insert(handle, left_path, right_path,
				   subtree_index);

out:
	return ret;
}

/*
 * Given a full path, determine what cpos value would return us a path
 * containing the leaf immediately to the left of the current one.
 *
 * Will return zero if the path passed in is already the leftmost path.
 */
int ocfs2_find_cpos_for_left_leaf(struct super_block *sb,
				  struct ocfs2_path *path, u32 *cpos)
{
	int i, j, ret = 0;
	u64 blkno;
	struct ocfs2_extent_list *el;

	BUG_ON(path->p_tree_depth == 0);

	*cpos = 0;

	blkno = path_leaf_bh(path)->b_blocknr;

	/* Start at the tree node just above the leaf and work our way up. */
	i = path->p_tree_depth - 1;
	while (i >= 0) {
		el = path->p_node[i].el;

		/*
		 * Find the extent record just before the one in our
		 * path.
		 */
		for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
			if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
				if (j == 0) {
					if (i == 0) {
						/*
						 * We've determined that the
						 * path specified is already
						 * the leftmost one - return a
						 * cpos of zero.
						 */
						goto out;
					}

					/*
					 * The leftmost record points to our
					 * leaf - we need to travel up the
					 * tree one level.
					 */
					goto next_node;
				}

				/* Last cluster covered by the previous record. */
				*cpos = le32_to_cpu(el->l_recs[j - 1].e_cpos);
				*cpos = *cpos + ocfs2_rec_clusters(el,
							   &el->l_recs[j - 1]);
				*cpos = *cpos - 1;
				goto out;
			}
		}

		/*
		 * If we got here, we never found a valid node where
		 * the tree indicated one should be.
		 */
		ocfs2_error(sb, "Invalid extent tree at extent block %llu\n",
			    (unsigned long long)blkno);
		ret = -EROFS;
		goto out;

next_node:
		blkno = path->p_node[i].bh->b_blocknr;
		i--;
	}

out:
	return ret;
}

/*
 * Extend the transaction by enough credits to complete the rotation,
 * and still leave at least the original number of credits allocated
 * to this transaction.
*/ static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth, int op_credits, struct ocfs2_path *path) { int ret = 0; int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits; if (handle->h_buffer_credits < credits) ret = ocfs2_extend_trans(handle, credits - handle->h_buffer_credits); return ret; } /* * Trap the case where we're inserting into the theoretical range past * the _actual_ left leaf range. Otherwise, we'll rotate a record * whose cpos is less than ours into the right leaf. * * It's only necessary to look at the rightmost record of the left * leaf because the logic that calls us should ensure that the * theoretical ranges in the path components above the leaves are * correct. */ static int ocfs2_rotate_requires_path_adjustment(struct ocfs2_path *left_path, u32 insert_cpos) { struct ocfs2_extent_list *left_el; struct ocfs2_extent_rec *rec; int next_free; left_el = path_leaf_el(left_path); next_free = le16_to_cpu(left_el->l_next_free_rec); rec = &left_el->l_recs[next_free - 1]; if (insert_cpos > le32_to_cpu(rec->e_cpos)) return 1; return 0; } static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos) { int next_free = le16_to_cpu(el->l_next_free_rec); unsigned int range; struct ocfs2_extent_rec *rec; if (next_free == 0) return 0; rec = &el->l_recs[0]; if (ocfs2_is_empty_extent(rec)) { /* Empty list. */ if (next_free == 1) return 0; rec = &el->l_recs[1]; } range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range) return 1; return 0; } /* * Rotate all the records in a btree right one record, starting at insert_cpos. * * The path to the rightmost leaf should be passed in. * * The array is assumed to be large enough to hold an entire path (tree depth). * * Upon successful return from this function: * * - The 'right_path' array will contain a path to the leaf block * whose range contains e_cpos. 
 * - That leaf block will have a single empty extent in list index 0.
 * - In the case that the rotation requires a post-insert update,
 *   *ret_left_path will contain a valid path which can be passed to
 *   ocfs2_insert_path().
 */
static int ocfs2_rotate_tree_right(handle_t *handle,
				   struct ocfs2_extent_tree *et,
				   enum ocfs2_split_type split,
				   u32 insert_cpos,
				   struct ocfs2_path *right_path,
				   struct ocfs2_path **ret_left_path)
{
	int ret, start, orig_credits = handle->h_buffer_credits;
	u32 cpos;
	struct ocfs2_path *left_path = NULL;
	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);

	*ret_left_path = NULL;

	left_path = ocfs2_new_path_from_path(right_path);
	if (!left_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_rotate_tree_right(
		(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
		insert_cpos, cpos);

	/*
	 * What we want to do here is:
	 *
	 * 1) Start with the rightmost path.
	 *
	 * 2) Determine a path to the leaf block directly to the left
	 *    of that leaf.
	 *
	 * 3) Determine the 'subtree root' - the lowest level tree node
	 *    which contains a path to both leaves.
	 *
	 * 4) Rotate the subtree.
	 *
	 * 5) Find the next subtree by considering the left path to be
	 *    the new right path.
	 *
	 * The check at the top of this while loop also accepts
	 * insert_cpos == cpos because cpos is only a _theoretical_
	 * value to get us the left path - insert_cpos might very well
	 * be filling that hole.
	 *
	 * Stop at a cpos of '0' because we either started at the
	 * leftmost branch (i.e., a tree with one branch and a
	 * rotation inside of it), or we've gone as far as we can in
	 * rotating subtrees.
	 */
	while (cpos && insert_cpos <= cpos) {
		trace_ocfs2_rotate_tree_right(
			(unsigned long long)
			ocfs2_metadata_cache_owner(et->et_ci),
			insert_cpos, cpos);

		ret = ocfs2_find_path(et->et_ci, left_path, cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		mlog_bug_on_msg(path_leaf_bh(left_path) ==
				path_leaf_bh(right_path),
				"Owner %llu: error during insert of %u "
				"(left path cpos %u) results in two identical "
				"paths ending at %llu\n",
				(unsigned long long)
				ocfs2_metadata_cache_owner(et->et_ci),
				insert_cpos, cpos,
				(unsigned long long)
				path_leaf_bh(left_path)->b_blocknr);

		if (split == SPLIT_NONE &&
		    ocfs2_rotate_requires_path_adjustment(left_path,
							  insert_cpos)) {

			/*
			 * We've rotated the tree as much as we
			 * should. The rest is up to
			 * ocfs2_insert_path() to complete, after the
			 * record insertion. We indicate this
			 * situation by returning the left path.
			 *
			 * The reason we don't adjust the records here
			 * before the record insert is that an error
			 * later might break the rule where a parent
			 * record e_cpos will reflect the actual
			 * e_cpos of the 1st nonempty record of the
			 * child list.
			 */
			*ret_left_path = left_path;
			goto out_ret_path;
		}

		start = ocfs2_find_subtree_root(et, left_path, right_path);

		trace_ocfs2_rotate_subtree(start,
			(unsigned long long)
			right_path->p_node[start].bh->b_blocknr,
			right_path->p_tree_depth);

		ret = ocfs2_extend_rotate_transaction(handle, start,
						      orig_credits, right_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_rotate_subtree_right(handle, et, left_path,
						 right_path, start);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (split != SPLIT_NONE &&
		    ocfs2_leftmost_rec_contains(path_leaf_el(right_path),
						insert_cpos)) {
			/*
			 * A rotate moves the rightmost left leaf
			 * record over to the leftmost right leaf
			 * slot. If we're doing an extent split
			 * instead of a real insert, then we have to
			 * check that the extent to be split wasn't
			 * just moved over. If it was, then we can
			 * exit here, passing left_path back -
			 * ocfs2_split_extent() is smart enough to
			 * search both leaves.
			 */
			*ret_left_path = left_path;
			goto out_ret_path;
		}

		/*
		 * There is no need to re-read the next right path
		 * as we know that it'll be our current left
		 * path. Optimize by copying values instead.
		 */
		ocfs2_mv_path(right_path, left_path);

		ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

out:
	ocfs2_free_path(left_path);

out_ret_path:
	return ret;
}

/*
 * After the rightmost branch has changed, recompute e_int_clusters of
 * the last record at every level of 'path' so that each interior
 * record again covers up to the end of the last leaf record.
 */
static int ocfs2_update_edge_lengths(handle_t *handle,
				     struct ocfs2_extent_tree *et,
				     int subtree_index, struct ocfs2_path *path)
{
	int i, idx, ret;
	struct ocfs2_extent_rec *rec;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_block *eb;
	u32 range;

	/*
	 * In normal tree rotation process, we will never touch the
	 * tree branch above subtree_index and ocfs2_extend_rotate_transaction
	 * doesn't reserve the credits for them either.
	 *
	 * But we do have a special case here which will update the rightmost
	 * records for all the bh in the path.
	 * So we have to allocate extra credits and access them.
	 */
	ret = ocfs2_extend_trans(handle, subtree_index);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_path(et->et_ci, handle, path);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Path should always be rightmost. */
	eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
	BUG_ON(eb->h_next_leaf_blk != 0ULL);

	el = &eb->h_list;
	BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);
	idx = le16_to_cpu(el->l_next_free_rec) - 1;
	rec = &el->l_recs[idx];
	/* End (exclusive) of the last leaf record. */
	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);

	for (i = 0; i < path->p_tree_depth; i++) {
		el = path->p_node[i].el;
		idx = le16_to_cpu(el->l_next_free_rec) - 1;
		rec = &el->l_recs[idx];

		rec->e_int_clusters = cpu_to_le32(range);
		le32_add_cpu(&rec->e_int_clusters, -le32_to_cpu(rec->e_cpos));

		ocfs2_journal_dirty(handle, path->p_node[i].bh);
	}
out:
	return ret;
}

/*
 * Detach the blocks of 'path' from level 'unlink_start' down: zero
 * their extent lists, mark them dirty, hand them to 'dealloc' for
 * freeing and drop them from the metadata cache.
 */
static void ocfs2_unlink_path(handle_t *handle,
			      struct ocfs2_extent_tree *et,
			      struct ocfs2_cached_dealloc_ctxt *dealloc,
			      struct ocfs2_path *path, int unlink_start)
{
	int ret, i;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct buffer_head *bh;

	for(i = unlink_start; i < path_num_items(path); i++) {
		bh = path->p_node[i].bh;

		eb = (struct ocfs2_extent_block *)bh->b_data;
		/*
		 * Not all nodes might have had their final count
		 * decremented by the caller - handle this here.
 */
		el = &eb->h_list;
		if (le16_to_cpu(el->l_next_free_rec) > 1) {
			mlog(ML_ERROR,
			     "Inode %llu, attempted to remove extent block "
			     "%llu with %u records\n",
			     (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
			     (unsigned long long)le64_to_cpu(eb->h_blkno),
			     le16_to_cpu(el->l_next_free_rec));

			ocfs2_journal_dirty(handle, bh);
			ocfs2_remove_from_cache(et->et_ci, bh);
			continue;
		}

		el->l_next_free_rec = 0;
		memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));

		ocfs2_journal_dirty(handle, bh);

		ret = ocfs2_cache_extent_block_free(dealloc, eb);
		if (ret)
			mlog_errno(ret);

		ocfs2_remove_from_cache(et->et_ci, bh);
	}
}

/*
 * Remove the branch below the subtree root which leads to right_path:
 * clear its record in the root list, terminate the leaf chain at the
 * left leaf (h_next_leaf_blk = 0) and unlink the right-side blocks.
 */
static void ocfs2_unlink_subtree(handle_t *handle,
				 struct ocfs2_extent_tree *et,
				 struct ocfs2_path *left_path,
				 struct ocfs2_path *right_path,
				 int subtree_index,
				 struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int i;
	struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
	struct ocfs2_extent_list *root_el = left_path->p_node[subtree_index].el;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_block *eb;

	el = path_leaf_el(left_path);

	eb = (struct ocfs2_extent_block *)right_path->p_node[subtree_index + 1].bh->b_data;

	/* Find the root record pointing at the right-hand child. */
	for(i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
		if (root_el->l_recs[i].e_blkno == eb->h_blkno)
			break;

	BUG_ON(i >= le16_to_cpu(root_el->l_next_free_rec));

	memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
	le16_add_cpu(&root_el->l_next_free_rec, -1);

	/* The left leaf becomes the last leaf in the chain. */
	eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
	eb->h_next_leaf_blk = 0;

	ocfs2_journal_dirty(handle, root_bh);
	ocfs2_journal_dirty(handle, path_leaf_bh(left_path));

	ocfs2_unlink_path(handle, et, dealloc, right_path,
			  subtree_index + 1);
}

/*
 * Rotate one record from the right leaf into the left leaf (which
 * must start with an empty extent).  May instead schedule deletion of
 * the whole right branch when it is the rightmost one and would end
 * up empty; *deleted is set to 1 in that case.  Returns -EAGAIN when
 * a non-rightmost right leaf has an empty extent the caller must fix
 * up first.
 */
static int ocfs2_rotate_subtree_left(handle_t *handle,
				     struct ocfs2_extent_tree *et,
				     struct ocfs2_path *left_path,
				     struct ocfs2_path *right_path,
				     int subtree_index,
				     struct ocfs2_cached_dealloc_ctxt *dealloc,
				     int *deleted)
{
	int ret, i, del_right_subtree = 0, right_has_empty = 0;
	struct buffer_head *root_bh, *et_root_bh = path_root_bh(right_path);
	struct ocfs2_extent_list *right_leaf_el, *left_leaf_el;
	struct ocfs2_extent_block *eb;

	*deleted = 0;

	right_leaf_el = path_leaf_el(right_path);
	left_leaf_el = path_leaf_el(left_path);
	root_bh = left_path->p_node[subtree_index].bh;
	BUG_ON(root_bh != right_path->p_node[subtree_index].bh);

	if (!ocfs2_is_empty_extent(&left_leaf_el->l_recs[0]))
		return 0;

	eb = (struct ocfs2_extent_block *)path_leaf_bh(right_path)->b_data;
	if (ocfs2_is_empty_extent(&right_leaf_el->l_recs[0])) {
		/*
		 * It's legal for us to proceed if the right leaf is
		 * the rightmost one and it has an empty extent. There
		 * are two cases to handle - whether the leaf will be
		 * empty after removal or not. If the leaf isn't empty
		 * then just remove the empty extent up front. The
		 * next block will handle empty leaves by flagging
		 * them for unlink.
		 *
		 * Non rightmost leaves will throw -EAGAIN and the
		 * caller can manually move the subtree and retry.
		 */

		if (eb->h_next_leaf_blk != 0ULL)
			return -EAGAIN;

		if (le16_to_cpu(right_leaf_el->l_next_free_rec) > 1) {
			ret = ocfs2_journal_access_eb(handle, et->et_ci,
						      path_leaf_bh(right_path),
						      OCFS2_JOURNAL_ACCESS_WRITE);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			ocfs2_remove_empty_extent(right_leaf_el);
		} else
			right_has_empty = 1;
	}

	if (eb->h_next_leaf_blk == 0ULL &&
	    le16_to_cpu(right_leaf_el->l_next_free_rec) == 1) {
		/*
		 * We have to update i_last_eb_blk during the meta
		 * data delete.
		 */
		ret = ocfs2_et_root_journal_access(handle, et,
						   OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		del_right_subtree = 1;
	}

	/*
	 * Getting here with an empty extent in the right path implies
	 * that it's the rightmost path and will be deleted.
	 */
	BUG_ON(right_has_empty && !del_right_subtree);

	ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
					   subtree_index);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
		ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
						   right_path, i);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
						   left_path, i);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (!right_has_empty) {
		/*
		 * Only do this if we're moving a real
		 * record. Otherwise, the action is delayed until
		 * after removal of the right path in which case we
		 * can do a simple shift to remove the empty extent.
		 */
		ocfs2_rotate_leaf(left_leaf_el, &right_leaf_el->l_recs[0]);
		memset(&right_leaf_el->l_recs[0], 0,
		       sizeof(struct ocfs2_extent_rec));
	}
	if (eb->h_next_leaf_blk == 0ULL) {
		/*
		 * Move recs over to get rid of empty extent, decrease
		 * next_free. This is allowed to remove the last
		 * extent in our leaf (setting l_next_free_rec to
		 * zero) - the delete code below won't care.
		 */
		ocfs2_remove_empty_extent(right_leaf_el);
	}

	ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
	ocfs2_journal_dirty(handle, path_leaf_bh(right_path));

	if (del_right_subtree) {
		ocfs2_unlink_subtree(handle, et, left_path, right_path,
				     subtree_index, dealloc);
		ret = ocfs2_update_edge_lengths(handle, et, subtree_index,
						left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
		ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));

		/*
		 * Removal of the extent in the left leaf was skipped
		 * above so we could delete the right path
		 * 1st.
		 */
		if (right_has_empty)
			ocfs2_remove_empty_extent(left_leaf_el);

		ocfs2_journal_dirty(handle, et_root_bh);

		*deleted = 1;
	} else
		ocfs2_complete_edge_insert(handle, left_path, right_path,
					   subtree_index);

out:
	return ret;
}

/*
 * Given a full path, determine what cpos value would return us a path
 * containing the leaf immediately to the right of the current one.
 *
 * Will return zero if the path passed in is already the rightmost path.
 *
 * This looks similar, but is subtly different to
 * ocfs2_find_cpos_for_left_leaf().
 */
int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
				   struct ocfs2_path *path, u32 *cpos)
{
	int i, j, ret = 0;
	u64 blkno;
	struct ocfs2_extent_list *el;

	*cpos = 0;

	if (path->p_tree_depth == 0)
		return 0;

	blkno = path_leaf_bh(path)->b_blocknr;

	/* Start at the tree node just above the leaf and work our way up. */
	i = path->p_tree_depth - 1;
	while (i >= 0) {
		int next_free;

		el = path->p_node[i].el;

		/*
		 * Find the extent record just after the one in our
		 * path.
		 */
		next_free = le16_to_cpu(el->l_next_free_rec);
		for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
			if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
				if (j == (next_free - 1)) {
					if (i == 0) {
						/*
						 * We've determined that the
						 * path specified is already
						 * the rightmost one - return a
						 * cpos of zero.
						 */
						goto out;
					}

					/*
					 * The rightmost record points to our
					 * leaf - we need to travel up the
					 * tree one level.
					 */
					goto next_node;
				}

				*cpos = le32_to_cpu(el->l_recs[j + 1].e_cpos);
				goto out;
			}
		}

		/*
		 * If we got here, we never found a valid node where
		 * the tree indicated one should be.
		 */
		ocfs2_error(sb, "Invalid extent tree at extent block %llu\n",
			    (unsigned long long)blkno);
		ret = -EROFS;
		goto out;

next_node:
		blkno = path->p_node[i].bh->b_blocknr;
		i--;
	}

out:
	return ret;
}

/*
 * Remove the empty extent (if any) from the rightmost leaf.  Used for
 * the trivial left-rotation case where no records move between leaves.
 */
static int ocfs2_rotate_rightmost_leaf_left(handle_t *handle,
					    struct ocfs2_extent_tree *et,
					    struct ocfs2_path *path)
{
	int ret;
	struct buffer_head *bh = path_leaf_bh(path);
	struct ocfs2_extent_list *el = path_leaf_el(path);

	if (!ocfs2_is_empty_extent(&el->l_recs[0]))
		return 0;

	ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path,
					   path_num_items(path) - 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_remove_empty_extent(el);
	ocfs2_journal_dirty(handle, bh);

out:
	return ret;
}

/*
 * Walk rightward from 'path' (whose leaf must start with an empty
 * extent), rotating each subtree left until the empty extent is
 * pushed off the rightmost edge, a branch deletion finishes the job,
 * or an -EAGAIN fixup path is handed back via *empty_extent_path.
 */
static int __ocfs2_rotate_tree_left(handle_t *handle,
				    struct ocfs2_extent_tree *et,
				    int orig_credits,
				    struct ocfs2_path *path,
				    struct ocfs2_cached_dealloc_ctxt *dealloc,
				    struct ocfs2_path **empty_extent_path)
{
	int ret, subtree_root, deleted;
	u32 right_cpos;
	struct ocfs2_path *left_path = NULL;
	struct ocfs2_path *right_path = NULL;
	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);

	BUG_ON(!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0])));

	*empty_extent_path = NULL;

	ret = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	left_path = ocfs2_new_path_from_path(path);
	if (!left_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ocfs2_cp_path(left_path, path);

	right_path = ocfs2_new_path_from_path(path);
	if (!right_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	while (right_cpos) {
		ret = ocfs2_find_path(et->et_ci, right_path, right_cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		subtree_root = ocfs2_find_subtree_root(et, left_path,
						       right_path);

		trace_ocfs2_rotate_subtree(subtree_root,
		     (unsigned long long)
		     right_path->p_node[subtree_root].bh->b_blocknr,
		     right_path->p_tree_depth);

		ret = ocfs2_extend_rotate_transaction(handle, subtree_root,
						      orig_credits, left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Caller might still want to make changes to the
		 * tree root, so re-add it to the journal here.
		 */
		ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
						   left_path, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_rotate_subtree_left(handle, et, left_path,
						right_path, subtree_root,
						dealloc, &deleted);
		if (ret == -EAGAIN) {
			/*
			 * The rotation has to temporarily stop due to
			 * the right subtree having an empty
			 * extent. Pass it back to the caller for a
			 * fixup.
			 */
			*empty_extent_path = right_path;
			right_path = NULL;
			goto out;
		}
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * The subtree rotate might have removed records on
		 * the rightmost edge. If so, then rotation is
		 * complete.
		 */
		if (deleted)
			break;

		ocfs2_mv_path(left_path, right_path);

		ret = ocfs2_find_cpos_for_right_leaf(sb, left_path,
						     &right_cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

out:
	ocfs2_free_path(right_path);
	ocfs2_free_path(left_path);

	return ret;
}

/*
 * Delete the rightmost branch of the tree.  Either the left neighbour
 * is updated to become the new rightmost branch, or - when 'path' is
 * the only branch - the root is reverted to in-line extents.
 */
static int ocfs2_remove_rightmost_path(handle_t *handle,
				struct ocfs2_extent_tree *et,
				struct ocfs2_path *path,
				struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret, subtree_index;
	u32 cpos;
	struct ocfs2_path *left_path = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;

	ret = ocfs2_et_sanity_check(et);
	if (ret)
		goto out;
	/*
	 * There's two ways we handle this depending on
	 * whether path is the only existing one.
	 */
	ret = ocfs2_extend_rotate_transaction(handle, 0,
					      handle->h_buffer_credits,
					      path);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_path(et->et_ci, handle, path);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
					    path, &cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (cpos) {
		/*
		 * We have a path to the left of this one - it needs
		 * an update too.
		 */
		left_path = ocfs2_new_path_from_path(path);
		if (!left_path) {
			ret = -ENOMEM;
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_find_path(et->et_ci, left_path, cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		subtree_index = ocfs2_find_subtree_root(et, left_path, path);

		ocfs2_unlink_subtree(handle, et, left_path, path,
				     subtree_index,
				     dealloc);
		ret = ocfs2_update_edge_lengths(handle, et, subtree_index,
						left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
		ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
	} else {
		/*
		 * 'path' is also the leftmost path which
		 * means it must be the only one. This gets
		 * handled differently because we want to
		 * revert the root back to having extents
		 * in-line.
		 */
		ocfs2_unlink_path(handle, et, dealloc, path, 1);

		el = et->et_root_el;
		el->l_tree_depth = 0;
		el->l_next_free_rec = 0;
		memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));

		ocfs2_et_set_last_eb_blk(et, 0);
	}

	ocfs2_journal_dirty(handle, path_root_bh(path));

out:
	ocfs2_free_path(left_path);
	return ret;
}

/*
 * Left rotation of btree records.
 *
 * In many ways, this is (unsurprisingly) the opposite of right
 * rotation. We start at some non-rightmost path containing an empty
 * extent in the leaf block. The code works its way to the rightmost
 * path by rotating records to the left in every subtree.
 *
 * This is used by any code which reduces the number of extent records
 * in a leaf. After removal, an empty record should be placed in the
 * leftmost list position.
 *
 * This won't handle a length update of the rightmost path records if
 * the rightmost tree leaf record is removed so the caller is
 * responsible for detecting and correcting that.
*/ static int ocfs2_rotate_tree_left(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, struct ocfs2_cached_dealloc_ctxt *dealloc) { int ret, orig_credits = handle->h_buffer_credits; struct ocfs2_path *tmp_path = NULL, *restart_path = NULL; struct ocfs2_extent_block *eb; struct ocfs2_extent_list *el; el = path_leaf_el(path); if (!ocfs2_is_empty_extent(&el->l_recs[0])) return 0; if (path->p_tree_depth == 0) { rightmost_no_delete: /* * Inline extents. This is trivially handled, so do * it up front. */ ret = ocfs2_rotate_rightmost_leaf_left(handle, et, path); if (ret) mlog_errno(ret); goto out; } /* * Handle rightmost branch now. There's several cases: * 1) simple rotation leaving records in there. That's trivial. * 2) rotation requiring a branch delete - there's no more * records left. Two cases of this: * a) There are branches to the left. * b) This is also the leftmost (the only) branch. * * 1) is handled via ocfs2_rotate_rightmost_leaf_left() * 2a) we need the left branch so that we can update it with the unlink * 2b) we need to bring the root back to inline extents. */ eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; el = &eb->h_list; if (eb->h_next_leaf_blk == 0) { /* * This gets a bit tricky if we're going to delete the * rightmost path. Get the other cases out of the way * 1st. */ if (le16_to_cpu(el->l_next_free_rec) > 1) goto rightmost_no_delete; if (le16_to_cpu(el->l_next_free_rec) == 0) { ret = -EIO; ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), "Owner %llu has empty extent block at %llu", (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), (unsigned long long)le64_to_cpu(eb->h_blkno)); goto out; } /* * XXX: The caller can not trust "path" any more after * this as it will have been deleted. What do we do? * * In theory the rotate-for-merge code will never get * here because it'll always ask for a rotate in a * nonempty list. 
		 */
		ret = ocfs2_remove_rightmost_path(handle, et, path,
						  dealloc);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	/*
	 * Now we can loop, remembering the path we get from -EAGAIN
	 * and restarting from there.
	 */
try_rotate:
	ret = __ocfs2_rotate_tree_left(handle, et, orig_credits, path,
				       dealloc, &restart_path);
	if (ret && ret != -EAGAIN) {
		mlog_errno(ret);
		goto out;
	}

	while (ret == -EAGAIN) {
		tmp_path = restart_path;
		restart_path = NULL;

		ret = __ocfs2_rotate_tree_left(handle, et, orig_credits,
					       tmp_path, dealloc,
					       &restart_path);
		if (ret && ret != -EAGAIN) {
			mlog_errno(ret);
			goto out;
		}

		ocfs2_free_path(tmp_path);
		tmp_path = NULL;

		if (ret == 0)
			goto try_rotate;
	}

out:
	ocfs2_free_path(tmp_path);
	ocfs2_free_path(restart_path);
	return ret;
}

/*
 * Fix up a leaf extent list after a merge fully consumed the
 * merged-from record at @index: an empty extent may only live in slot
 * 0, so shift the preceding records up and zero slot 0 so no stale
 * fields survive.
 */
static void ocfs2_cleanup_merge(struct ocfs2_extent_list *el,
				int index)
{
	struct ocfs2_extent_rec *rec = &el->l_recs[index];
	unsigned int size;

	if (rec->e_leaf_clusters == 0) {
		/*
		 * We consumed all of the merged-from record. An empty
		 * extent cannot exist anywhere but the 1st array
		 * position, so move things over if the merged-from
		 * record doesn't occupy that position.
		 *
		 * This creates a new empty extent so the caller
		 * should be smart enough to have removed any existing
		 * ones.
		 */
		if (index > 0) {
			BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0]));
			size = index * sizeof(struct ocfs2_extent_rec);
			memmove(&el->l_recs[1], &el->l_recs[0], size);
		}

		/*
		 * Always memset - the caller doesn't check whether it
		 * created an empty extent, so there could be junk in
		 * the other fields.
		 */
		memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
	}
}

/*
 * Look up the leaf immediately to the right of @left_path and return
 * it in *@ret_right_path.  On success the caller owns the returned
 * path and must release it with ocfs2_free_path().
 */
static int ocfs2_get_right_path(struct ocfs2_extent_tree *et,
				struct ocfs2_path *left_path,
				struct ocfs2_path **ret_right_path)
{
	int ret;
	u32 right_cpos;
	struct ocfs2_path *right_path = NULL;
	struct ocfs2_extent_list *left_el;

	*ret_right_path = NULL;

	/* This function shouldn't be called for non-trees.
	 */
	BUG_ON(left_path->p_tree_depth == 0);

	left_el = path_leaf_el(left_path);
	/* Cross-block merge only makes sense when the left leaf is full. */
	BUG_ON(left_el->l_next_free_rec != left_el->l_count);

	ret = ocfs2_find_cpos_for_right_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
					     left_path, &right_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* This function shouldn't be called for the rightmost leaf. */
	BUG_ON(right_cpos == 0);

	right_path = ocfs2_new_path_from_path(left_path);
	if (!right_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(et->et_ci, right_path, right_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	*ret_right_path = right_path;
out:
	if (ret)
		ocfs2_free_path(right_path);
	return ret;
}

/*
 * Remove split_rec clusters from the record at index and merge them
 * onto the beginning of the record "next" to it.
 * For index < l_count - 1, the next means the extent rec at index + 1.
 * For index == l_count - 1, the "next" means the 1st extent rec of the
 * next extent block.
 */
static int ocfs2_merge_rec_right(struct ocfs2_path *left_path,
				 handle_t *handle,
				 struct ocfs2_extent_tree *et,
				 struct ocfs2_extent_rec *split_rec,
				 int index)
{
	int ret, next_free, i;
	unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);
	struct ocfs2_extent_rec *left_rec;
	struct ocfs2_extent_rec *right_rec;
	struct ocfs2_extent_list *right_el;
	struct ocfs2_path *right_path = NULL;
	int subtree_index = 0;
	struct ocfs2_extent_list *el = path_leaf_el(left_path);
	struct buffer_head *bh = path_leaf_bh(left_path);
	struct buffer_head *root_bh = NULL;

	BUG_ON(index >= le16_to_cpu(el->l_next_free_rec));
	left_rec = &el->l_recs[index];

	if (index == le16_to_cpu(el->l_next_free_rec) - 1 &&
	    le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count)) {
		/* we meet with a cross extent block merge.
*/ ret = ocfs2_get_right_path(et, left_path, &right_path); if (ret) { mlog_errno(ret); goto out; } right_el = path_leaf_el(right_path); next_free = le16_to_cpu(right_el->l_next_free_rec); BUG_ON(next_free <= 0); right_rec = &right_el->l_recs[0]; if (ocfs2_is_empty_extent(right_rec)) { BUG_ON(next_free <= 1); right_rec = &right_el->l_recs[1]; } BUG_ON(le32_to_cpu(left_rec->e_cpos) + le16_to_cpu(left_rec->e_leaf_clusters) != le32_to_cpu(right_rec->e_cpos)); subtree_index = ocfs2_find_subtree_root(et, left_path, right_path); ret = ocfs2_extend_rotate_transaction(handle, subtree_index, handle->h_buffer_credits, right_path); if (ret) { mlog_errno(ret); goto out; } root_bh = left_path->p_node[subtree_index].bh; BUG_ON(root_bh != right_path->p_node[subtree_index].bh); ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, subtree_index); if (ret) { mlog_errno(ret); goto out; } for (i = subtree_index + 1; i < path_num_items(right_path); i++) { ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, i); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_path_bh_journal_access(handle, et->et_ci, left_path, i); if (ret) { mlog_errno(ret); goto out; } } } else { BUG_ON(index == le16_to_cpu(el->l_next_free_rec) - 1); right_rec = &el->l_recs[index + 1]; } ret = ocfs2_path_bh_journal_access(handle, et->et_ci, left_path, path_num_items(left_path) - 1); if (ret) { mlog_errno(ret); goto out; } le16_add_cpu(&left_rec->e_leaf_clusters, -split_clusters); le32_add_cpu(&right_rec->e_cpos, -split_clusters); le64_add_cpu(&right_rec->e_blkno, -ocfs2_clusters_to_blocks(ocfs2_metadata_cache_get_super(et->et_ci), split_clusters)); le16_add_cpu(&right_rec->e_leaf_clusters, split_clusters); ocfs2_cleanup_merge(el, index); ocfs2_journal_dirty(handle, bh); if (right_path) { ocfs2_journal_dirty(handle, path_leaf_bh(right_path)); ocfs2_complete_edge_insert(handle, left_path, right_path, subtree_index); } out: if (right_path) ocfs2_free_path(right_path); return ret; } static int 
ocfs2_get_left_path(struct ocfs2_extent_tree *et, struct ocfs2_path *right_path, struct ocfs2_path **ret_left_path) { int ret; u32 left_cpos; struct ocfs2_path *left_path = NULL; *ret_left_path = NULL; /* This function shouldn't be called for non-trees. */ BUG_ON(right_path->p_tree_depth == 0); ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci), right_path, &left_cpos); if (ret) { mlog_errno(ret); goto out; } /* This function shouldn't be called for the leftmost leaf. */ BUG_ON(left_cpos == 0); left_path = ocfs2_new_path_from_path(right_path); if (!left_path) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_find_path(et->et_ci, left_path, left_cpos); if (ret) { mlog_errno(ret); goto out; } *ret_left_path = left_path; out: if (ret) ocfs2_free_path(left_path); return ret; } /* * Remove split_rec clusters from the record at index and merge them * onto the tail of the record "before" it. * For index > 0, the "before" means the extent rec at index - 1. * * For index == 0, the "before" means the last record of the previous * extent block. And there is also a situation that we may need to * remove the rightmost leaf extent block in the right_path and change * the right path to indicate the new rightmost path. */ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path, handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *split_rec, struct ocfs2_cached_dealloc_ctxt *dealloc, int index) { int ret, i, subtree_index = 0, has_empty_extent = 0; unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters); struct ocfs2_extent_rec *left_rec; struct ocfs2_extent_rec *right_rec; struct ocfs2_extent_list *el = path_leaf_el(right_path); struct buffer_head *bh = path_leaf_bh(right_path); struct buffer_head *root_bh = NULL; struct ocfs2_path *left_path = NULL; struct ocfs2_extent_list *left_el; BUG_ON(index < 0); right_rec = &el->l_recs[index]; if (index == 0) { /* we meet with a cross extent block merge. 
*/ ret = ocfs2_get_left_path(et, right_path, &left_path); if (ret) { mlog_errno(ret); goto out; } left_el = path_leaf_el(left_path); BUG_ON(le16_to_cpu(left_el->l_next_free_rec) != le16_to_cpu(left_el->l_count)); left_rec = &left_el->l_recs[ le16_to_cpu(left_el->l_next_free_rec) - 1]; BUG_ON(le32_to_cpu(left_rec->e_cpos) + le16_to_cpu(left_rec->e_leaf_clusters) != le32_to_cpu(split_rec->e_cpos)); subtree_index = ocfs2_find_subtree_root(et, left_path, right_path); ret = ocfs2_extend_rotate_transaction(handle, subtree_index, handle->h_buffer_credits, left_path); if (ret) { mlog_errno(ret); goto out; } root_bh = left_path->p_node[subtree_index].bh; BUG_ON(root_bh != right_path->p_node[subtree_index].bh); ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, subtree_index); if (ret) { mlog_errno(ret); goto out; } for (i = subtree_index + 1; i < path_num_items(right_path); i++) { ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, i); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_path_bh_journal_access(handle, et->et_ci, left_path, i); if (ret) { mlog_errno(ret); goto out; } } } else { left_rec = &el->l_recs[index - 1]; if (ocfs2_is_empty_extent(&el->l_recs[0])) has_empty_extent = 1; } ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, path_num_items(right_path) - 1); if (ret) { mlog_errno(ret); goto out; } if (has_empty_extent && index == 1) { /* * The easy case - we can just plop the record right in. 
*/ *left_rec = *split_rec; has_empty_extent = 0; } else le16_add_cpu(&left_rec->e_leaf_clusters, split_clusters); le32_add_cpu(&right_rec->e_cpos, split_clusters); le64_add_cpu(&right_rec->e_blkno, ocfs2_clusters_to_blocks(ocfs2_metadata_cache_get_super(et->et_ci), split_clusters)); le16_add_cpu(&right_rec->e_leaf_clusters, -split_clusters); ocfs2_cleanup_merge(el, index); ocfs2_journal_dirty(handle, bh); if (left_path) { ocfs2_journal_dirty(handle, path_leaf_bh(left_path)); /* * In the situation that the right_rec is empty and the extent * block is empty also, ocfs2_complete_edge_insert can't handle * it and we need to delete the right extent block. */ if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 && le16_to_cpu(el->l_next_free_rec) == 1) { ret = ocfs2_remove_rightmost_path(handle, et, right_path, dealloc); if (ret) { mlog_errno(ret); goto out; } /* Now the rightmost extent block has been deleted. * So we use the new rightmost path. */ ocfs2_mv_path(right_path, left_path); left_path = NULL; } else ocfs2_complete_edge_insert(handle, left_path, right_path, subtree_index); } out: if (left_path) ocfs2_free_path(left_path); return ret; } static int ocfs2_try_to_merge_extent(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, int split_index, struct ocfs2_extent_rec *split_rec, struct ocfs2_cached_dealloc_ctxt *dealloc, struct ocfs2_merge_ctxt *ctxt) { int ret = 0; struct ocfs2_extent_list *el = path_leaf_el(path); struct ocfs2_extent_rec *rec = &el->l_recs[split_index]; BUG_ON(ctxt->c_contig_type == CONTIG_NONE); if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) { /* * The merge code will need to create an empty * extent to take the place of the newly * emptied slot. Remove any pre-existing empty * extents - having more than one in a leaf is * illegal. 
*/ ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); if (ret) { mlog_errno(ret); goto out; } split_index--; rec = &el->l_recs[split_index]; } if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) { /* * Left-right contig implies this. */ BUG_ON(!ctxt->c_split_covers_rec); /* * Since the leftright insert always covers the entire * extent, this call will delete the insert record * entirely, resulting in an empty extent record added to * the extent block. * * Since the adding of an empty extent shifts * everything back to the right, there's no need to * update split_index here. * * When the split_index is zero, we need to merge it to the * prevoius extent block. It is more efficient and easier * if we do merge_right first and merge_left later. */ ret = ocfs2_merge_rec_right(path, handle, et, split_rec, split_index); if (ret) { mlog_errno(ret); goto out; } /* * We can only get this from logic error above. */ BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0])); /* The merge left us with an empty extent, remove it. */ ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); if (ret) { mlog_errno(ret); goto out; } rec = &el->l_recs[split_index]; /* * Note that we don't pass split_rec here on purpose - * we've merged it into the rec already. */ ret = ocfs2_merge_rec_left(path, handle, et, rec, dealloc, split_index); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); /* * Error from this last rotate is not critical, so * print but don't bubble it up. */ if (ret) mlog_errno(ret); ret = 0; } else { /* * Merge a record to the left or right. * * 'contig_type' is relative to the existing record, * so for example, if we're "right contig", it's to * the record on the left (hence the left merge). 
*/ if (ctxt->c_contig_type == CONTIG_RIGHT) { ret = ocfs2_merge_rec_left(path, handle, et, split_rec, dealloc, split_index); if (ret) { mlog_errno(ret); goto out; } } else { ret = ocfs2_merge_rec_right(path, handle, et, split_rec, split_index); if (ret) { mlog_errno(ret); goto out; } } if (ctxt->c_split_covers_rec) { /* * The merge may have left an empty extent in * our leaf. Try to rotate it away. */ ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); if (ret) mlog_errno(ret); ret = 0; } } out: return ret; } static void ocfs2_subtract_from_rec(struct super_block *sb, enum ocfs2_split_type split, struct ocfs2_extent_rec *rec, struct ocfs2_extent_rec *split_rec) { u64 len_blocks; len_blocks = ocfs2_clusters_to_blocks(sb, le16_to_cpu(split_rec->e_leaf_clusters)); if (split == SPLIT_LEFT) { /* * Region is on the left edge of the existing * record. */ le32_add_cpu(&rec->e_cpos, le16_to_cpu(split_rec->e_leaf_clusters)); le64_add_cpu(&rec->e_blkno, len_blocks); le16_add_cpu(&rec->e_leaf_clusters, -le16_to_cpu(split_rec->e_leaf_clusters)); } else { /* * Region is on the right edge of the existing * record. */ le16_add_cpu(&rec->e_leaf_clusters, -le16_to_cpu(split_rec->e_leaf_clusters)); } } /* * Do the final bits of extent record insertion at the target leaf * list. If this leaf is part of an allocation tree, it is assumed * that the tree above has been prepared. */ static void ocfs2_insert_at_leaf(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *insert_rec, struct ocfs2_extent_list *el, struct ocfs2_insert_type *insert) { int i = insert->ins_contig_index; unsigned int range; struct ocfs2_extent_rec *rec; BUG_ON(le16_to_cpu(el->l_tree_depth) != 0); if (insert->ins_split != SPLIT_NONE) { i = ocfs2_search_extent_list(el, le32_to_cpu(insert_rec->e_cpos)); BUG_ON(i == -1); rec = &el->l_recs[i]; ocfs2_subtract_from_rec(ocfs2_metadata_cache_get_super(et->et_ci), insert->ins_split, rec, insert_rec); goto rotate; } /* * Contiguous insert - either left or right. 
*/ if (insert->ins_contig != CONTIG_NONE) { rec = &el->l_recs[i]; if (insert->ins_contig == CONTIG_LEFT) { rec->e_blkno = insert_rec->e_blkno; rec->e_cpos = insert_rec->e_cpos; } le16_add_cpu(&rec->e_leaf_clusters, le16_to_cpu(insert_rec->e_leaf_clusters)); return; } /* * Handle insert into an empty leaf. */ if (le16_to_cpu(el->l_next_free_rec) == 0 || ((le16_to_cpu(el->l_next_free_rec) == 1) && ocfs2_is_empty_extent(&el->l_recs[0]))) { el->l_recs[0] = *insert_rec; el->l_next_free_rec = cpu_to_le16(1); return; } /* * Appending insert. */ if (insert->ins_appending == APPEND_TAIL) { i = le16_to_cpu(el->l_next_free_rec) - 1; rec = &el->l_recs[i]; range = le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters); BUG_ON(le32_to_cpu(insert_rec->e_cpos) < range); mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >= le16_to_cpu(el->l_count), "owner %llu, depth %u, count %u, next free %u, " "rec.cpos %u, rec.clusters %u, " "insert.cpos %u, insert.clusters %u\n", ocfs2_metadata_cache_owner(et->et_ci), le16_to_cpu(el->l_tree_depth), le16_to_cpu(el->l_count), le16_to_cpu(el->l_next_free_rec), le32_to_cpu(el->l_recs[i].e_cpos), le16_to_cpu(el->l_recs[i].e_leaf_clusters), le32_to_cpu(insert_rec->e_cpos), le16_to_cpu(insert_rec->e_leaf_clusters)); i++; el->l_recs[i] = *insert_rec; le16_add_cpu(&el->l_next_free_rec, 1); return; } rotate: /* * Ok, we have to rotate. * * At this point, it is safe to assume that inserting into an * empty leaf and appending to a leaf have both been handled * above. * * This leaf needs to have space, either by the empty 1st * extent record, or by virtue of an l_next_rec < l_count. */ ocfs2_rotate_leaf(el, insert_rec); } static void ocfs2_adjust_rightmost_records(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, struct ocfs2_extent_rec *insert_rec) { int ret, i, next_free; struct buffer_head *bh; struct ocfs2_extent_list *el; struct ocfs2_extent_rec *rec; /* * Update everything except the leaf block. 
*/ for (i = 0; i < path->p_tree_depth; i++) { bh = path->p_node[i].bh; el = path->p_node[i].el; next_free = le16_to_cpu(el->l_next_free_rec); if (next_free == 0) { ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), "Owner %llu has a bad extent list", (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci)); ret = -EIO; return; } rec = &el->l_recs[next_free - 1]; rec->e_int_clusters = insert_rec->e_cpos; le32_add_cpu(&rec->e_int_clusters, le16_to_cpu(insert_rec->e_leaf_clusters)); le32_add_cpu(&rec->e_int_clusters, -le32_to_cpu(rec->e_cpos)); ocfs2_journal_dirty(handle, bh); } } static int ocfs2_append_rec_to_path(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *insert_rec, struct ocfs2_path *right_path, struct ocfs2_path **ret_left_path) { int ret, next_free; struct ocfs2_extent_list *el; struct ocfs2_path *left_path = NULL; *ret_left_path = NULL; /* * This shouldn't happen for non-trees. The extent rec cluster * count manipulation below only works for interior nodes. */ BUG_ON(right_path->p_tree_depth == 0); /* * If our appending insert is at the leftmost edge of a leaf, * then we might need to update the rightmost records of the * neighboring path. */ el = path_leaf_el(right_path); next_free = le16_to_cpu(el->l_next_free_rec); if (next_free == 0 || (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) { u32 left_cpos; ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci), right_path, &left_cpos); if (ret) { mlog_errno(ret); goto out; } trace_ocfs2_append_rec_to_path( (unsigned long long) ocfs2_metadata_cache_owner(et->et_ci), le32_to_cpu(insert_rec->e_cpos), left_cpos); /* * No need to worry if the append is already in the * leftmost leaf. 
*/ if (left_cpos) { left_path = ocfs2_new_path_from_path(right_path); if (!left_path) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_find_path(et->et_ci, left_path, left_cpos); if (ret) { mlog_errno(ret); goto out; } /* * ocfs2_insert_path() will pass the left_path to the * journal for us. */ } } ret = ocfs2_journal_access_path(et->et_ci, handle, right_path); if (ret) { mlog_errno(ret); goto out; } ocfs2_adjust_rightmost_records(handle, et, right_path, insert_rec); *ret_left_path = left_path; ret = 0; out: if (ret != 0) ocfs2_free_path(left_path); return ret; } static void ocfs2_split_record(struct ocfs2_extent_tree *et, struct ocfs2_path *left_path, struct ocfs2_path *right_path, struct ocfs2_extent_rec *split_rec, enum ocfs2_split_type split) { int index; u32 cpos = le32_to_cpu(split_rec->e_cpos); struct ocfs2_extent_list *left_el = NULL, *right_el, *insert_el, *el; struct ocfs2_extent_rec *rec, *tmprec; right_el = path_leaf_el(right_path); if (left_path) left_el = path_leaf_el(left_path); el = right_el; insert_el = right_el; index = ocfs2_search_extent_list(el, cpos); if (index != -1) { if (index == 0 && left_path) { BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0])); /* * This typically means that the record * started in the left path but moved to the * right as a result of rotation. We either * move the existing record to the left, or we * do the later insert there. * * In this case, the left path should always * exist as the rotate code will have passed * it back for a post-insert update. */ if (split == SPLIT_LEFT) { /* * It's a left split. Since we know * that the rotate code gave us an * empty extent in the left path, we * can just do the insert there. */ insert_el = left_el; } else { /* * Right split - we have to move the * existing record over to the left * leaf. The insert will be into the * newly created empty extent in the * right leaf. 
*/ tmprec = &right_el->l_recs[index]; ocfs2_rotate_leaf(left_el, tmprec); el = left_el; memset(tmprec, 0, sizeof(*tmprec)); index = ocfs2_search_extent_list(left_el, cpos); BUG_ON(index == -1); } } } else { BUG_ON(!left_path); BUG_ON(!ocfs2_is_empty_extent(&left_el->l_recs[0])); /* * Left path is easy - we can just allow the insert to * happen. */ el = left_el; insert_el = left_el; index = ocfs2_search_extent_list(el, cpos); BUG_ON(index == -1); } rec = &el->l_recs[index]; ocfs2_subtract_from_rec(ocfs2_metadata_cache_get_super(et->et_ci), split, rec, split_rec); ocfs2_rotate_leaf(insert_el, split_rec); } /* * This function only does inserts on an allocation b-tree. For tree * depth = 0, ocfs2_insert_at_leaf() is called directly. * * right_path is the path we want to do the actual insert * in. left_path should only be passed in if we need to update that * portion of the tree after an edge insert. */ static int ocfs2_insert_path(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *left_path, struct ocfs2_path *right_path, struct ocfs2_extent_rec *insert_rec, struct ocfs2_insert_type *insert) { int ret, subtree_index; struct buffer_head *leaf_bh = path_leaf_bh(right_path); if (left_path) { /* * There's a chance that left_path got passed back to * us without being accounted for in the * journal. Extend our transaction here to be sure we * can change those blocks. */ ret = ocfs2_extend_trans(handle, left_path->p_tree_depth); if (ret < 0) { mlog_errno(ret); goto out; } ret = ocfs2_journal_access_path(et->et_ci, handle, left_path); if (ret < 0) { mlog_errno(ret); goto out; } } /* * Pass both paths to the journal. The majority of inserts * will be touching all components anyway. 
*/ ret = ocfs2_journal_access_path(et->et_ci, handle, right_path); if (ret < 0) { mlog_errno(ret); goto out; } if (insert->ins_split != SPLIT_NONE) { /* * We could call ocfs2_insert_at_leaf() for some types * of splits, but it's easier to just let one separate * function sort it all out. */ ocfs2_split_record(et, left_path, right_path, insert_rec, insert->ins_split); /* * Split might have modified either leaf and we don't * have a guarantee that the later edge insert will * dirty this for us. */ if (left_path) ocfs2_journal_dirty(handle, path_leaf_bh(left_path)); } else ocfs2_insert_at_leaf(et, insert_rec, path_leaf_el(right_path), insert); ocfs2_journal_dirty(handle, leaf_bh); if (left_path) { /* * The rotate code has indicated that we need to fix * up portions of the tree after the insert. * * XXX: Should we extend the transaction here? */ subtree_index = ocfs2_find_subtree_root(et, left_path, right_path); ocfs2_complete_edge_insert(handle, left_path, right_path, subtree_index); } ret = 0; out: return ret; } static int ocfs2_do_insert_extent(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *insert_rec, struct ocfs2_insert_type *type) { int ret, rotate = 0; u32 cpos; struct ocfs2_path *right_path = NULL; struct ocfs2_path *left_path = NULL; struct ocfs2_extent_list *el; el = et->et_root_el; ret = ocfs2_et_root_journal_access(handle, et, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } if (le16_to_cpu(el->l_tree_depth) == 0) { ocfs2_insert_at_leaf(et, insert_rec, el, type); goto out_update_clusters; } right_path = ocfs2_new_path_from_et(et); if (!right_path) { ret = -ENOMEM; mlog_errno(ret); goto out; } /* * Determine the path to start with. Rotations need the * rightmost path, everything else can go directly to the * target leaf. 
*/ cpos = le32_to_cpu(insert_rec->e_cpos); if (type->ins_appending == APPEND_NONE && type->ins_contig == CONTIG_NONE) { rotate = 1; cpos = UINT_MAX; } ret = ocfs2_find_path(et->et_ci, right_path, cpos); if (ret) { mlog_errno(ret); goto out; } /* * Rotations and appends need special treatment - they modify * parts of the tree's above them. * * Both might pass back a path immediate to the left of the * one being inserted to. This will be cause * ocfs2_insert_path() to modify the rightmost records of * left_path to account for an edge insert. * * XXX: When modifying this code, keep in mind that an insert * can wind up skipping both of these two special cases... */ if (rotate) { ret = ocfs2_rotate_tree_right(handle, et, type->ins_split, le32_to_cpu(insert_rec->e_cpos), right_path, &left_path); if (ret) { mlog_errno(ret); goto out; } /* * ocfs2_rotate_tree_right() might have extended the * transaction without re-journaling our tree root. */ ret = ocfs2_et_root_journal_access(handle, et, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } } else if (type->ins_appending == APPEND_TAIL && type->ins_contig != CONTIG_LEFT) { ret = ocfs2_append_rec_to_path(handle, et, insert_rec, right_path, &left_path); if (ret) { mlog_errno(ret); goto out; } } ret = ocfs2_insert_path(handle, et, left_path, right_path, insert_rec, type); if (ret) { mlog_errno(ret); goto out; } out_update_clusters: if (type->ins_split == SPLIT_NONE) ocfs2_et_update_clusters(et, le16_to_cpu(insert_rec->e_leaf_clusters)); ocfs2_journal_dirty(handle, et->et_root_bh); out: ocfs2_free_path(left_path); ocfs2_free_path(right_path); return ret; } static enum ocfs2_contig_type ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et, struct ocfs2_path *path, struct ocfs2_extent_list *el, int index, struct ocfs2_extent_rec *split_rec) { int status; enum ocfs2_contig_type ret = CONTIG_NONE; u32 left_cpos, right_cpos; struct ocfs2_extent_rec *rec = NULL; struct ocfs2_extent_list *new_el; struct 
ocfs2_path *left_path = NULL, *right_path = NULL; struct buffer_head *bh; struct ocfs2_extent_block *eb; struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); if (index > 0) { rec = &el->l_recs[index - 1]; } else if (path->p_tree_depth > 0) { status = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos); if (status) goto out; if (left_cpos != 0) { left_path = ocfs2_new_path_from_path(path); if (!left_path) goto out; status = ocfs2_find_path(et->et_ci, left_path, left_cpos); if (status) goto out; new_el = path_leaf_el(left_path); if (le16_to_cpu(new_el->l_next_free_rec) != le16_to_cpu(new_el->l_count)) { bh = path_leaf_bh(left_path); eb = (struct ocfs2_extent_block *)bh->b_data; ocfs2_error(sb, "Extent block #%llu has an " "invalid l_next_free_rec of " "%d. It should have " "matched the l_count of %d", (unsigned long long)le64_to_cpu(eb->h_blkno), le16_to_cpu(new_el->l_next_free_rec), le16_to_cpu(new_el->l_count)); status = -EINVAL; goto out; } rec = &new_el->l_recs[ le16_to_cpu(new_el->l_next_free_rec) - 1]; } } /* * We're careful to check for an empty extent record here - * the merge code will know what to do if it sees one. 
 */
	if (rec) {
		if (index == 1 && ocfs2_is_empty_extent(rec)) {
			if (split_rec->e_cpos == el->l_recs[index].e_cpos)
				ret = CONTIG_RIGHT;
		} else {
			ret = ocfs2_et_extent_contig(et, rec, split_rec);
		}
	}

	rec = NULL;
	if (index < (le16_to_cpu(el->l_next_free_rec) - 1))
		rec = &el->l_recs[index + 1];
	else if (le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count) &&
		 path->p_tree_depth > 0) {
		/*
		 * The record to our right lives in the next leaf;
		 * walk over to it via the right-adjacent path.
		 */
		status = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos);
		if (status)
			goto out;

		if (right_cpos == 0)
			goto out;

		right_path = ocfs2_new_path_from_path(path);
		if (!right_path)
			goto out;

		status = ocfs2_find_path(et->et_ci, right_path, right_cpos);
		if (status)
			goto out;

		new_el = path_leaf_el(right_path);
		rec = &new_el->l_recs[0];
		if (ocfs2_is_empty_extent(rec)) {
			if (le16_to_cpu(new_el->l_next_free_rec) <= 1) {
				bh = path_leaf_bh(right_path);
				eb = (struct ocfs2_extent_block *)bh->b_data;
				ocfs2_error(sb,
					    "Extent block #%llu has an "
					    "invalid l_next_free_rec of %d",
					    (unsigned long long)le64_to_cpu(eb->h_blkno),
					    le16_to_cpu(new_el->l_next_free_rec));
				status = -EINVAL;
				goto out;
			}
			rec = &new_el->l_recs[1];
		}
	}

	if (rec) {
		enum ocfs2_contig_type contig_type;

		contig_type = ocfs2_et_extent_contig(et, rec, split_rec);

		if (contig_type == CONTIG_LEFT && ret == CONTIG_RIGHT)
			ret = CONTIG_LEFTRIGHT;
		else if (ret == CONTIG_NONE)
			ret = contig_type;
	}

out:
	if (left_path)
		ocfs2_free_path(left_path);
	if (right_path)
		ocfs2_free_path(right_path);

	return ret;
}

/*
 * Scan the depth-0 list 'el' for a record that insert_rec is
 * contiguous with, recording the contiguity type and index on
 * 'insert'.  Contiguity is suppressed when the merged length would
 * exceed the tree's per-leaf cluster limit.
 */
static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et,
				     struct ocfs2_insert_type *insert,
				     struct ocfs2_extent_list *el,
				     struct ocfs2_extent_rec *insert_rec)
{
	int i;
	enum ocfs2_contig_type contig_type = CONTIG_NONE;

	BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);

	for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
		contig_type = ocfs2_et_extent_contig(et, &el->l_recs[i],
						     insert_rec);
		if (contig_type != CONTIG_NONE) {
			insert->ins_contig_index = i;
			break;
		}
	}
	insert->ins_contig = contig_type;

	if (insert->ins_contig != CONTIG_NONE) {
		struct ocfs2_extent_rec *rec =
				&el->l_recs[insert->ins_contig_index];
		unsigned int len = le16_to_cpu(rec->e_leaf_clusters) +
				   le16_to_cpu(insert_rec->e_leaf_clusters);

		/*
		 * Caller might want us to limit the size of extents, don't
		 * calculate contiguousness if we might exceed that limit.
		 */
		if (et->et_max_leaf_clusters &&
		    (len > et->et_max_leaf_clusters))
			insert->ins_contig = CONTIG_NONE;
	}
}

/*
 * This should only be called against the rightmost leaf extent list.
 *
 * ocfs2_figure_appending_type() will figure out whether we'll have to
 * insert at the tail of the rightmost leaf.
 *
 * This should also work against the root extent list for tree's with 0
 * depth. If we consider the root extent list to be the rightmost leaf node
 * then the logic here makes sense.
 */
static void ocfs2_figure_appending_type(struct ocfs2_insert_type *insert,
					struct ocfs2_extent_list *el,
					struct ocfs2_extent_rec *insert_rec)
{
	int i;
	u32 cpos = le32_to_cpu(insert_rec->e_cpos);
	struct ocfs2_extent_rec *rec;

	insert->ins_appending = APPEND_NONE;

	BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);

	if (!el->l_next_free_rec)
		goto set_tail_append;

	if (ocfs2_is_empty_extent(&el->l_recs[0])) {
		/* Were all records empty? */
		if (le16_to_cpu(el->l_next_free_rec) == 1)
			goto set_tail_append;
	}

	i = le16_to_cpu(el->l_next_free_rec) - 1;
	rec = &el->l_recs[i];

	/* Appending iff the new extent starts at or past the last record. */
	if (cpos >=
	    (le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)))
		goto set_tail_append;

	return;

set_tail_append:
	insert->ins_appending = APPEND_TAIL;
}

/*
 * Helper function called at the beginning of an insert.
 *
 * This computes a few things that are commonly used in the process of
 * inserting into the btree:
 *    - Whether the new extent is contiguous with an existing one.
 *    - The current tree depth.
 *    - Whether the insert is an appending one.
 *    - The total # of free records in the tree.
 *
 * All of the information is stored on the ocfs2_insert_type
 * structure.
 */
static int ocfs2_figure_insert_type(struct ocfs2_extent_tree *et,
				    struct buffer_head **last_eb_bh,
				    struct ocfs2_extent_rec *insert_rec,
				    int *free_records,
				    struct ocfs2_insert_type *insert)
{
	int ret;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct ocfs2_path *path = NULL;
	struct buffer_head *bh = NULL;

	insert->ins_split = SPLIT_NONE;

	el = et->et_root_el;
	insert->ins_tree_depth = le16_to_cpu(el->l_tree_depth);

	if (el->l_tree_depth) {
		/*
		 * If we have tree depth, we read in the
		 * rightmost extent block ahead of time as
		 * ocfs2_figure_insert_type() and ocfs2_add_branch()
		 * may want it later.
		 */
		ret = ocfs2_read_extent_block(et->et_ci,
					      ocfs2_et_get_last_eb_blk(et),
					      &bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		eb = (struct ocfs2_extent_block *) bh->b_data;
		el = &eb->h_list;
	}

	/*
	 * Unless we have a contiguous insert, we'll need to know if
	 * there is room left in our allocation tree for another
	 * extent record.
	 *
	 * XXX: This test is simplistic, we can search for empty
	 * extent records too.
	 */
	*free_records = le16_to_cpu(el->l_count) -
		le16_to_cpu(el->l_next_free_rec);

	/* Depth 0: the root list IS the leaf; no path walk needed. */
	if (!insert->ins_tree_depth) {
		ocfs2_figure_contig_type(et, insert, el, insert_rec);
		ocfs2_figure_appending_type(insert, el, insert_rec);
		return 0;
	}

	path = ocfs2_new_path_from_et(et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	/*
	 * In the case that we're inserting past what the tree
	 * currently accounts for, ocfs2_find_path() will return for
	 * us the rightmost tree path. This is accounted for below in
	 * the appending code.
	 */
	ret = ocfs2_find_path(et->et_ci, path, le32_to_cpu(insert_rec->e_cpos));
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	el = path_leaf_el(path);

	/*
	 * Now that we have the path, there's two things we want to determine:
	 * 1) Contiguousness (also set contig_index if this is so)
	 *
	 * 2) Are we doing an append? We can trivially break this up
	 *     into two types of appends: simple record append, or a
	 *     rotate inside the tail leaf.
	 */
	ocfs2_figure_contig_type(et, insert, el, insert_rec);

	/*
	 * The insert code isn't quite ready to deal with all cases of
	 * left contiguousness. Specifically, if it's an insert into
	 * the 1st record in a leaf, it will require the adjustment of
	 * cluster count on the last record of the path directly to it's
	 * left. For now, just catch that case and fool the layers
	 * above us. This works just fine for tree_depth == 0, which
	 * is why we allow that above.
	 */
	if (insert->ins_contig == CONTIG_LEFT &&
	    insert->ins_contig_index == 0)
		insert->ins_contig = CONTIG_NONE;

	/*
	 * Ok, so we can simply compare against last_eb to figure out
	 * whether the path doesn't exist. This will only happen in
	 * the case that we're doing a tail append, so maybe we can
	 * take advantage of that information somehow.
	 */
	if (ocfs2_et_get_last_eb_blk(et) ==
	    path_leaf_bh(path)->b_blocknr) {
		/*
		 * Ok, ocfs2_find_path() returned us the rightmost
		 * tree path. This might be an appending insert. There are
		 * two cases:
		 *    1) We're doing a true append at the tail:
		 *	-This might even be off the end of the leaf
		 *    2) We're "appending" by rotating in the tail
		 */
		ocfs2_figure_appending_type(insert, el, insert_rec);
	}

out:
	ocfs2_free_path(path);

	/* On success, ownership of bh passes to the caller via *last_eb_bh. */
	if (ret == 0)
		*last_eb_bh = bh;
	else
		brelse(bh);
	return ret;
}

/*
 * Insert an extent into a btree.
 *
 * The caller needs to update the owning btree's cluster count.
*/ int ocfs2_insert_extent(handle_t *handle, struct ocfs2_extent_tree *et, u32 cpos, u64 start_blk, u32 new_clusters, u8 flags, struct ocfs2_alloc_context *meta_ac) { int status; int uninitialized_var(free_records); struct buffer_head *last_eb_bh = NULL; struct ocfs2_insert_type insert = {0, }; struct ocfs2_extent_rec rec; trace_ocfs2_insert_extent_start( (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), cpos, new_clusters); memset(&rec, 0, sizeof(rec)); rec.e_cpos = cpu_to_le32(cpos); rec.e_blkno = cpu_to_le64(start_blk); rec.e_leaf_clusters = cpu_to_le16(new_clusters); rec.e_flags = flags; status = ocfs2_et_insert_check(et, &rec); if (status) { mlog_errno(status); goto bail; } status = ocfs2_figure_insert_type(et, &last_eb_bh, &rec, &free_records, &insert); if (status < 0) { mlog_errno(status); goto bail; } trace_ocfs2_insert_extent(insert.ins_appending, insert.ins_contig, insert.ins_contig_index, free_records, insert.ins_tree_depth); if (insert.ins_contig == CONTIG_NONE && free_records == 0) { status = ocfs2_grow_tree(handle, et, &insert.ins_tree_depth, &last_eb_bh, meta_ac); if (status) { mlog_errno(status); goto bail; } } /* Finally, we can add clusters. This might rotate the tree for us. */ status = ocfs2_do_insert_extent(handle, et, &rec, &insert); if (status < 0) mlog_errno(status); else ocfs2_et_extent_map_insert(et, &rec); bail: brelse(last_eb_bh); return status; } /* * Allcate and add clusters into the extent b-tree. * The new clusters(clusters_to_add) will be inserted at logical_offset. * The extent b-tree's root is specified by et, and * it is not limited to the file storage. Any extent tree can use this * function if it implements the proper ocfs2_extent_tree. 
 */
int ocfs2_add_clusters_in_btree(handle_t *handle,
				struct ocfs2_extent_tree *et,
				u32 *logical_offset,
				u32 clusters_to_add,
				int mark_unwritten,
				struct ocfs2_alloc_context *data_ac,
				struct ocfs2_alloc_context *meta_ac,
				enum ocfs2_alloc_restarted *reason_ret)
{
	int status = 0, err = 0;
	int free_extents;
	enum ocfs2_alloc_restarted reason = RESTART_NONE;
	u32 bit_off, num_bits;
	u64 block;
	u8 flags = 0;
	struct ocfs2_super *osb =
		OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));

	BUG_ON(!clusters_to_add);

	if (mark_unwritten)
		flags = OCFS2_EXT_UNWRITTEN;

	free_extents = ocfs2_num_free_extents(osb, et);
	if (free_extents < 0) {
		status = free_extents;
		mlog_errno(status);
		goto leave;
	}

	/* there are two cases which could cause us to EAGAIN in the
	 * we-need-more-metadata case:
	 * 1) we haven't reserved *any*
	 * 2) we are so fragmented, we've needed to add metadata too
	 *    many times. */
	if (!free_extents && !meta_ac) {
		err = -1;
		status = -EAGAIN;
		reason = RESTART_META;
		goto leave;
	} else if ((!free_extents)
		   && (ocfs2_alloc_context_bits_left(meta_ac)
		       < ocfs2_extend_meta_needed(et->et_root_el))) {
		err = -2;
		status = -EAGAIN;
		reason = RESTART_META;
		goto leave;
	}

	status = __ocfs2_claim_clusters(handle, data_ac, 1,
					clusters_to_add, &bit_off, &num_bits);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto leave;
	}

	BUG_ON(num_bits > clusters_to_add);

	/* reserve our write early -- insert_extent may update the tree root */
	status = ocfs2_et_root_journal_access(handle, et,
					      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
	trace_ocfs2_add_clusters_in_btree(
	     (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
	     bit_off, num_bits);
	status = ocfs2_insert_extent(handle, et, *logical_offset, block,
				     num_bits, flags, meta_ac);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	ocfs2_journal_dirty(handle, et->et_root_bh);

	clusters_to_add -= num_bits;
	*logical_offset += num_bits;

	/* Partial allocation: ask the caller to restart the transaction. */
	if (clusters_to_add) {
		err = clusters_to_add;
		status = -EAGAIN;
		reason = RESTART_TRANS;
	}

leave:
	if (reason_ret)
		*reason_ret = reason;
	trace_ocfs2_add_clusters_in_btree_ret(status, reason, err);
	return status;
}

/*
 * Build, in *split_rec, the record covering [cpos, end-of-'rec'): the
 * right-hand remainder of 'rec' once it is split at cpos.
 */
static void ocfs2_make_right_split_rec(struct super_block *sb,
				       struct ocfs2_extent_rec *split_rec,
				       u32 cpos,
				       struct ocfs2_extent_rec *rec)
{
	u32 rec_cpos = le32_to_cpu(rec->e_cpos);
	u32 rec_range = rec_cpos + le16_to_cpu(rec->e_leaf_clusters);

	memset(split_rec, 0, sizeof(struct ocfs2_extent_rec));

	split_rec->e_cpos = cpu_to_le32(cpos);
	split_rec->e_leaf_clusters = cpu_to_le16(rec_range - cpos);

	split_rec->e_blkno = rec->e_blkno;
	le64_add_cpu(&split_rec->e_blkno,
		     ocfs2_clusters_to_blocks(sb, cpos - rec_cpos));

	split_rec->e_flags = rec->e_flags;
}

/*
 * Split the record at split_index and insert the split-out piece(s).
 * A middle split is handled as a right split followed by a second,
 * left-split pass (see the do_leftright state below).
 */
static int ocfs2_split_and_insert(handle_t *handle,
				  struct ocfs2_extent_tree *et,
				  struct ocfs2_path *path,
				  struct buffer_head **last_eb_bh,
				  int split_index,
				  struct ocfs2_extent_rec *orig_split_rec,
				  struct ocfs2_alloc_context *meta_ac)
{
	int ret = 0, depth;
	unsigned int insert_range, rec_range, do_leftright = 0;
	struct ocfs2_extent_rec tmprec;
	struct ocfs2_extent_list *rightmost_el;
	struct ocfs2_extent_rec rec;
	struct ocfs2_extent_rec split_rec = *orig_split_rec;
	struct ocfs2_insert_type insert;
	struct ocfs2_extent_block *eb;

leftright:
	/*
	 * Store a copy of the record on the stack - it might move
	 * around as the tree is manipulated below.
	 */
	rec = path_leaf_el(path)->l_recs[split_index];

	rightmost_el = et->et_root_el;

	depth = le16_to_cpu(rightmost_el->l_tree_depth);
	if (depth) {
		BUG_ON(!(*last_eb_bh));
		eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
		rightmost_el = &eb->h_list;
	}

	/* Grow the tree first if the rightmost leaf is already full. */
	if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
	    le16_to_cpu(rightmost_el->l_count)) {
		ret = ocfs2_grow_tree(handle, et,
				      &depth, last_eb_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	memset(&insert, 0, sizeof(struct ocfs2_insert_type));
	insert.ins_appending = APPEND_NONE;
	insert.ins_contig = CONTIG_NONE;
	insert.ins_tree_depth = depth;

	insert_range = le32_to_cpu(split_rec.e_cpos) +
		le16_to_cpu(split_rec.e_leaf_clusters);
	rec_range = le32_to_cpu(rec.e_cpos) +
		le16_to_cpu(rec.e_leaf_clusters);

	if (split_rec.e_cpos == rec.e_cpos) {
		insert.ins_split = SPLIT_LEFT;
	} else if (insert_range == rec_range) {
		insert.ins_split = SPLIT_RIGHT;
	} else {
		/*
		 * Left/right split. We fake this as a right split
		 * first and then make a second pass as a left split.
		 */
		insert.ins_split = SPLIT_RIGHT;

		ocfs2_make_right_split_rec(ocfs2_metadata_cache_get_super(et->et_ci),
					   &tmprec, insert_range, &rec);

		split_rec = tmprec;

		BUG_ON(do_leftright);
		do_leftright = 1;
	}

	ret = ocfs2_do_insert_extent(handle, et, &split_rec, &insert);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (do_leftright == 1) {
		u32 cpos;
		struct ocfs2_extent_list *el;

		/* Second pass: redo the lookup and split the left piece. */
		do_leftright++;
		split_rec = *orig_split_rec;

		ocfs2_reinit_path(path, 1);

		cpos = le32_to_cpu(split_rec.e_cpos);
		ret = ocfs2_find_path(et->et_ci, path, cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		el = path_leaf_el(path);
		split_index = ocfs2_search_extent_list(el, cpos);
		goto leftright;
	}
out:

	return ret;
}

/*
 * Overwrite the record at split_index with *split_rec under journal
 * protection; used when the split exactly covers the existing record.
 */
static int ocfs2_replace_extent_rec(handle_t *handle,
				    struct ocfs2_extent_tree *et,
				    struct ocfs2_path *path,
				    struct ocfs2_extent_list *el,
				    int split_index,
				    struct ocfs2_extent_rec *split_rec)
{
	int ret;

	ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path,
					   path_num_items(path) - 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	el->l_recs[split_index] = *split_rec;

	ocfs2_journal_dirty(handle, path_leaf_bh(path));
out:
	return ret;
}

/*
 * Split part or all of the extent record at split_index in the leaf
 * pointed to by path. Merge with the contiguous extent record if needed.
 *
 * Care is taken to handle contiguousness so as to not grow the tree.
 *
 * meta_ac is not strictly necessary - we only truly need it if growth
 * of the tree is required. All other cases will degrade into a less
 * optimal tree layout.
 *
 * last_eb_bh should be the rightmost leaf block for any extent
 * btree. Since a split may grow the tree or a merge might shrink it,
 * the caller cannot trust the contents of that buffer after this call.
 *
 * This code is optimized for readability - several passes might be
 * made over certain portions of the tree. All of those blocks will
 * have been brought into cache (and pinned via the journal), so the
 * extra overhead is not expressed in terms of disk reads.
 */
int ocfs2_split_extent(handle_t *handle,
		       struct ocfs2_extent_tree *et,
		       struct ocfs2_path *path,
		       int split_index,
		       struct ocfs2_extent_rec *split_rec,
		       struct ocfs2_alloc_context *meta_ac,
		       struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret = 0;
	struct ocfs2_extent_list *el = path_leaf_el(path);
	struct buffer_head *last_eb_bh = NULL;
	struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
	struct ocfs2_merge_ctxt ctxt;
	struct ocfs2_extent_list *rightmost_el;

	/* The split range must lie entirely within the existing record. */
	if (le32_to_cpu(rec->e_cpos) > le32_to_cpu(split_rec->e_cpos) ||
	    ((le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)) <
	     (le32_to_cpu(split_rec->e_cpos) + le16_to_cpu(split_rec->e_leaf_clusters)))) {
		ret = -EIO;
		mlog_errno(ret);
		goto out;
	}

	ctxt.c_contig_type = ocfs2_figure_merge_contig_type(et, path, el,
							    split_index,
							    split_rec);

	/*
	 * The core merge / split code wants to know how much room is
	 * left in this allocation tree, so we pass the
	 * rightmost extent list.
	 */
	if (path->p_tree_depth) {
		struct ocfs2_extent_block *eb;

		ret = ocfs2_read_extent_block(et->et_ci,
					      ocfs2_et_get_last_eb_blk(et),
					      &last_eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
		rightmost_el = &eb->h_list;
	} else
		rightmost_el = path_root_el(path);

	if (rec->e_cpos == split_rec->e_cpos &&
	    rec->e_leaf_clusters == split_rec->e_leaf_clusters)
		ctxt.c_split_covers_rec = 1;
	else
		ctxt.c_split_covers_rec = 0;

	ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]);

	trace_ocfs2_split_extent(split_index, ctxt.c_contig_type,
				 ctxt.c_has_empty_extent,
				 ctxt.c_split_covers_rec);

	if (ctxt.c_contig_type == CONTIG_NONE) {
		if (ctxt.c_split_covers_rec)
			ret = ocfs2_replace_extent_rec(handle, et, path, el,
						       split_index, split_rec);
		else
			ret = ocfs2_split_and_insert(handle, et, path,
						     &last_eb_bh, split_index,
						     split_rec, meta_ac);
		if (ret)
			mlog_errno(ret);
	} else {
		ret = ocfs2_try_to_merge_extent(handle, et, path,
						split_index, split_rec,
						dealloc, &ctxt);
		if (ret)
			mlog_errno(ret);
	}

out:
	brelse(last_eb_bh);
	return ret;
}

/*
 * Change the flags of the already-existing extent at cpos for len clusters.
 *
 * new_flags: the flags we want to set.
 * clear_flags: the flags we want to clear.
 * phys: the new physical offset we want this new extent starts from.
 *
 * If the existing extent is larger than the request, initiate a
 * split. An attempt will be made at merging with adjacent extents.
 *
 * The caller is responsible for passing down meta_ac if we'll need it.
 */
int ocfs2_change_extent_flag(handle_t *handle,
			     struct ocfs2_extent_tree *et,
			     u32 cpos, u32 len, u32 phys,
			     struct ocfs2_alloc_context *meta_ac,
			     struct ocfs2_cached_dealloc_ctxt *dealloc,
			     int new_flags, int clear_flags)
{
	int ret, index;
	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
	u64 start_blkno = ocfs2_clusters_to_blocks(sb, phys);
	struct ocfs2_extent_rec split_rec;
	struct ocfs2_path *left_path = NULL;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;

	left_path = ocfs2_new_path_from_et(et);
	if (!left_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(et->et_ci, left_path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	el = path_leaf_el(left_path);

	index = ocfs2_search_extent_list(el, cpos);
	if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
		ocfs2_error(sb,
			    "Owner %llu has an extent at cpos %u which can no "
			    "longer be found.\n",
			     (unsigned long long)
			     ocfs2_metadata_cache_owner(et->et_ci), cpos);
		ret = -EROFS;
		goto out;
	}

	ret = -EIO;
	rec = &el->l_recs[index];
	if (new_flags && (rec->e_flags & new_flags)) {
		mlog(ML_ERROR, "Owner %llu tried to set %d flags on an "
		     "extent that already had them",
		     (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
		     new_flags);
		goto out;
	}

	if (clear_flags && !(rec->e_flags & clear_flags)) {
		mlog(ML_ERROR, "Owner %llu tried to clear %d flags on an "
		     "extent that didn't have them",
		     (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
		     clear_flags);
		goto out;
	}

	memset(&split_rec, 0, sizeof(struct
		ocfs2_extent_rec));
	split_rec.e_cpos = cpu_to_le32(cpos);
	split_rec.e_leaf_clusters = cpu_to_le16(len);
	split_rec.e_blkno = cpu_to_le64(start_blkno);
	split_rec.e_flags = rec->e_flags;
	if (new_flags)
		split_rec.e_flags |= new_flags;
	if (clear_flags)
		split_rec.e_flags &= ~clear_flags;

	ret = ocfs2_split_extent(handle, et, left_path,
				 index, &split_rec, meta_ac,
				 dealloc);
	if (ret)
		mlog_errno(ret);

out:
	ocfs2_free_path(left_path);
	return ret;

}

/*
 * Mark the already-existing extent at cpos as written for len clusters.
 * This removes the unwritten extent flag.
 *
 * If the existing extent is larger than the request, initiate a
 * split. An attempt will be made at merging with adjacent extents.
 *
 * The caller is responsible for passing down meta_ac if we'll need it.
 */
int ocfs2_mark_extent_written(struct inode *inode,
			      struct ocfs2_extent_tree *et,
			      handle_t *handle, u32 cpos, u32 len, u32 phys,
			      struct ocfs2_alloc_context *meta_ac,
			      struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;

	trace_ocfs2_mark_extent_written(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		cpos, len, phys);

	if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) {
		ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents "
			    "that are being written to, but the feature bit "
			    "is not set in the super block.",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
		ret = -EROFS;
		goto out;
	}

	/*
	 * XXX: This should be fixed up so that we just re-insert the
	 * next extent records.
	 */
	ocfs2_et_extent_map_truncate(et, 0);

	ret = ocfs2_change_extent_flag(handle, et, cpos,
				       len, phys, meta_ac, dealloc,
				       0, OCFS2_EXT_UNWRITTEN);
	if (ret)
		mlog_errno(ret);

out:
	return ret;
}

/*
 * Right-split the record at 'index' so that a new record begins at
 * new_range, growing the tree first if the rightmost list is full.
 */
static int ocfs2_split_tree(handle_t *handle, struct ocfs2_extent_tree *et,
			    struct ocfs2_path *path,
			    int index, u32 new_range,
			    struct ocfs2_alloc_context *meta_ac)
{
	int ret, depth, credits;
	struct buffer_head *last_eb_bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *rightmost_el, *el;
	struct ocfs2_extent_rec split_rec;
	struct ocfs2_extent_rec *rec;
	struct ocfs2_insert_type insert;

	/*
	 * Setup the record to split before we grow the tree.
	 */
	el = path_leaf_el(path);
	rec = &el->l_recs[index];
	ocfs2_make_right_split_rec(ocfs2_metadata_cache_get_super(et->et_ci),
				   &split_rec, new_range, rec);

	depth = path->p_tree_depth;
	if (depth > 0) {
		ret = ocfs2_read_extent_block(et->et_ci,
					      ocfs2_et_get_last_eb_blk(et),
					      &last_eb_bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
		rightmost_el = &eb->h_list;
	} else
		rightmost_el = path_leaf_el(path);

	credits = path->p_tree_depth +
		  ocfs2_extend_meta_needed(et->et_root_el);
	ret = ocfs2_extend_trans(handle, credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
	    le16_to_cpu(rightmost_el->l_count)) {
		ret = ocfs2_grow_tree(handle, et, &depth, &last_eb_bh,
				      meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	memset(&insert, 0, sizeof(struct ocfs2_insert_type));
	insert.ins_appending = APPEND_NONE;
	insert.ins_contig = CONTIG_NONE;
	insert.ins_split = SPLIT_RIGHT;
	insert.ins_tree_depth = depth;

	ret = ocfs2_do_insert_extent(handle, et, &split_rec, &insert);
	if (ret)
		mlog_errno(ret);

out:
	brelse(last_eb_bh);
	return ret;
}

/*
 * Remove cpos..cpos+len from the record at 'index', updating edge
 * records and rotating the tree as needed.
 */
static int ocfs2_truncate_rec(handle_t *handle,
			      struct ocfs2_extent_tree *et,
			      struct ocfs2_path *path, int index,
			      struct ocfs2_cached_dealloc_ctxt *dealloc,
			      u32 cpos, u32 len)
{
	int ret;
	u32 left_cpos, rec_range, trunc_range;
	int
	    wants_rotate = 0, is_rightmost_tree_rec = 0;
	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
	struct ocfs2_path *left_path = NULL;
	struct ocfs2_extent_list *el = path_leaf_el(path);
	struct ocfs2_extent_rec *rec;
	struct ocfs2_extent_block *eb;

	/* Rotate out a leading empty record first; index shifts down. */
	if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) {
		ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		index--;
	}

	if (index == (le16_to_cpu(el->l_next_free_rec) - 1) &&
	    path->p_tree_depth) {
		/*
		 * Check whether this is the rightmost tree record. If
		 * we remove all of this record or part of its right
		 * edge then an update of the record lengths above it
		 * will be required.
		 */
		eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
		if (eb->h_next_leaf_blk == 0)
			is_rightmost_tree_rec = 1;
	}

	rec = &el->l_recs[index];
	if (index == 0 && path->p_tree_depth &&
	    le32_to_cpu(rec->e_cpos) == cpos) {
		/*
		 * Changing the leftmost offset (via partial or whole
		 * record truncate) of an interior (or rightmost) path
		 * means we have to update the subtree that is formed
		 * by this leaf and the one to it's left.
		 *
		 * There are two cases we can skip:
		 *   1) Path is the leftmost one in our btree.
		 *   2) The leaf is rightmost and will be empty after
		 *      we remove the extent record - the rotate code
		 *      knows how to update the newly formed edge.
		 */
		ret = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (left_cpos && le16_to_cpu(el->l_next_free_rec) > 1) {
			left_path = ocfs2_new_path_from_path(path);
			if (!left_path) {
				ret = -ENOMEM;
				mlog_errno(ret);
				goto out;
			}

			ret = ocfs2_find_path(et->et_ci, left_path,
					      left_cpos);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}
	}

	ret = ocfs2_extend_rotate_transaction(handle, 0,
					      handle->h_buffer_credits,
					      path);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_path(et->et_ci, handle, path);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
	trunc_range = cpos + len;

	if (le32_to_cpu(rec->e_cpos) == cpos && rec_range == trunc_range) {
		int next_free;

		/* Whole-record removal: empty the record, then rotate. */
		memset(rec, 0, sizeof(*rec));
		ocfs2_cleanup_merge(el, index);
		wants_rotate = 1;

		next_free = le16_to_cpu(el->l_next_free_rec);
		if (is_rightmost_tree_rec && next_free > 1) {
			/*
			 * We skip the edge update if this path will
			 * be deleted by the rotate code.
			 */
			rec = &el->l_recs[next_free - 1];
			ocfs2_adjust_rightmost_records(handle, et, path,
						       rec);
		}
	} else if (le32_to_cpu(rec->e_cpos) == cpos) {
		/* Remove leftmost portion of the record. */
		le32_add_cpu(&rec->e_cpos, len);
		le64_add_cpu(&rec->e_blkno, ocfs2_clusters_to_blocks(sb, len));
		le16_add_cpu(&rec->e_leaf_clusters, -len);
	} else if (rec_range == trunc_range) {
		/* Remove rightmost portion of the record */
		le16_add_cpu(&rec->e_leaf_clusters, -len);
		if (is_rightmost_tree_rec)
			ocfs2_adjust_rightmost_records(handle, et, path, rec);
	} else {
		/* Caller should have trapped this.
		 */
		mlog(ML_ERROR, "Owner %llu: Invalid record truncate: (%u, %u) "
		     "(%u, %u)\n",
		     (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
		     le32_to_cpu(rec->e_cpos),
		     le16_to_cpu(rec->e_leaf_clusters), cpos, len);
		BUG();
	}

	if (left_path) {
		int subtree_index;

		subtree_index = ocfs2_find_subtree_root(et, left_path, path);
		ocfs2_complete_edge_insert(handle, left_path, path,
					   subtree_index);
	}

	ocfs2_journal_dirty(handle, path_leaf_bh(path));

	ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

out:
	ocfs2_free_path(left_path);
	return ret;
}

/*
 * Remove cpos..cpos+len from the tree, splitting the containing record
 * first when the range sits in its middle.
 */
int ocfs2_remove_extent(handle_t *handle,
			struct ocfs2_extent_tree *et,
			u32 cpos, u32 len,
			struct ocfs2_alloc_context *meta_ac,
			struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret, index;
	u32 rec_range, trunc_range;
	struct ocfs2_extent_rec *rec;
	struct ocfs2_extent_list *el;
	struct ocfs2_path *path = NULL;

	/*
	 * XXX: Why are we truncating to 0 instead of wherever this
	 * affects us?
	 */
	ocfs2_et_extent_map_truncate(et, 0);

	path = ocfs2_new_path_from_et(et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(et->et_ci, path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	el = path_leaf_el(path);
	index = ocfs2_search_extent_list(el, cpos);
	if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
		ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
			    "Owner %llu has an extent at cpos %u which can no "
			    "longer be found.\n",
			    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
			    cpos);
		ret = -EROFS;
		goto out;
	}

	/*
	 * We have 3 cases of extent removal:
	 *   1) Range covers the entire extent rec
	 *   2) Range begins or ends on one edge of the extent rec
	 *   3) Range is in the middle of the extent rec (no shared edges)
	 *
	 * For case 1 we remove the extent rec and left rotate to
	 * fill the hole.
	 *
	 * For case 2 we just shrink the existing extent rec, with a
	 * tree update if the shrinking edge is also the edge of an
	 * extent block.
	 *
	 * For case 3 we do a right split to turn the extent rec into
	 * something case 2 can handle.
	 */
	rec = &el->l_recs[index];
	rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
	trunc_range = cpos + len;

	BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range);

	trace_ocfs2_remove_extent(
		(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
		cpos, len, index, le32_to_cpu(rec->e_cpos),
		ocfs2_rec_clusters(el, rec));

	if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) {
		ret = ocfs2_truncate_rec(handle, et, path, index, dealloc,
					 cpos, len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	} else {
		ret = ocfs2_split_tree(handle, et, path, index,
				       trunc_range, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * The split could have manipulated the tree enough to
		 * move the record location, so we have to look for it again.
		 */
		ocfs2_reinit_path(path, 1);

		ret = ocfs2_find_path(et->et_ci, path, cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		el = path_leaf_el(path);
		index = ocfs2_search_extent_list(el, cpos);
		if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
				    "Owner %llu: split at cpos %u lost record.",
				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
				    cpos);
			ret = -EROFS;
			goto out;
		}

		/*
		 * Double check our values here. If anything is fishy,
		 * it's easier to catch it at the top level.
*/ rec = &el->l_recs[index]; rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); if (rec_range != trunc_range) { ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), "Owner %llu: error after split at cpos %u" "trunc len %u, existing record is (%u,%u)", (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), cpos, len, le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); ret = -EROFS; goto out; } ret = ocfs2_truncate_rec(handle, et, path, index, dealloc, cpos, len); if (ret) { mlog_errno(ret); goto out; } } out: ocfs2_free_path(path); return ret; } /* * ocfs2_reserve_blocks_for_rec_trunc() would look basically the * same as ocfs2_lock_alloctors(), except for it accepts a blocks * number to reserve some extra blocks, and it only handles meta * data allocations. * * Currently, only ocfs2_remove_btree_range() uses it for truncating * and punching holes. */ static int ocfs2_reserve_blocks_for_rec_trunc(struct inode *inode, struct ocfs2_extent_tree *et, u32 extents_to_split, struct ocfs2_alloc_context **ac, int extra_blocks) { int ret = 0, num_free_extents; unsigned int max_recs_needed = 2 * extents_to_split; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); *ac = NULL; num_free_extents = ocfs2_num_free_extents(osb, et); if (num_free_extents < 0) { ret = num_free_extents; mlog_errno(ret); goto out; } if (!num_free_extents || (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) extra_blocks += ocfs2_extend_meta_needed(et->et_root_el); if (extra_blocks) { ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, ac); if (ret < 0) { if (ret != -ENOSPC) mlog_errno(ret); goto out; } } out: if (ret) { if (*ac) { ocfs2_free_alloc_context(*ac); *ac = NULL; } } return ret; } int ocfs2_remove_btree_range(struct inode *inode, struct ocfs2_extent_tree *et, u32 cpos, u32 phys_cpos, u32 len, int flags, struct ocfs2_cached_dealloc_ctxt *dealloc, u64 refcount_loc) { int ret, credits = 0, extra_blocks = 0; u64 phys_blkno = 
ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct inode *tl_inode = osb->osb_tl_inode; handle_t *handle; struct ocfs2_alloc_context *meta_ac = NULL; struct ocfs2_refcount_tree *ref_tree = NULL; if ((flags & OCFS2_EXT_REFCOUNTED) && len) { BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1, &ref_tree, NULL); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_prepare_refcount_change_for_del(inode, refcount_loc, phys_blkno, len, &credits, &extra_blocks); if (ret < 0) { mlog_errno(ret); goto out; } } ret = ocfs2_reserve_blocks_for_rec_trunc(inode, et, 1, &meta_ac, extra_blocks); if (ret) { mlog_errno(ret); return ret; } mutex_lock(&tl_inode->i_mutex); if (ocfs2_truncate_log_needs_flush(osb)) { ret = __ocfs2_flush_truncate_log(osb); if (ret < 0) { mlog_errno(ret); goto out; } } handle = ocfs2_start_trans(osb, ocfs2_remove_extent_credits(osb->sb) + credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = ocfs2_et_root_journal_access(handle, et, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } dquot_free_space_nodirty(inode, ocfs2_clusters_to_bytes(inode->i_sb, len)); ret = ocfs2_remove_extent(handle, et, cpos, len, meta_ac, dealloc); if (ret) { mlog_errno(ret); goto out_commit; } ocfs2_et_update_clusters(et, -len); ocfs2_journal_dirty(handle, et->et_root_bh); if (phys_blkno) { if (flags & OCFS2_EXT_REFCOUNTED) ret = ocfs2_decrease_refcount(inode, handle, ocfs2_blocks_to_clusters(osb->sb, phys_blkno), len, meta_ac, dealloc, 1); else ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len); if (ret) mlog_errno(ret); } out_commit: ocfs2_commit_trans(osb, handle); out: mutex_unlock(&tl_inode->i_mutex); if (meta_ac) ocfs2_free_alloc_context(meta_ac); if (ref_tree) ocfs2_unlock_refcount_tree(osb, ref_tree, 1); return ret; } int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb) { struct 
	       buffer_head *tl_bh = osb->osb_tl_bh;
	struct ocfs2_dinode *di;
	struct ocfs2_truncate_log *tl;

	di = (struct ocfs2_dinode *) tl_bh->b_data;
	tl = &di->id2.i_dealloc;

	mlog_bug_on_msg(le16_to_cpu(tl->tl_used) > le16_to_cpu(tl->tl_count),
			"slot %d, invalid truncate log parameters: used = "
			"%u, count = %u\n", osb->slot_num,
			le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count));
	/* Flush is needed once the log is completely full. */
	return le16_to_cpu(tl->tl_used) == le16_to_cpu(tl->tl_count);
}

/*
 * Return nonzero when a record starting at new_start can be merged
 * into the log's current tail record.
 */
static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl,
					   unsigned int new_start)
{
	unsigned int tail_index;
	unsigned int current_tail;

	/* No records, nothing to coalesce */
	if (!le16_to_cpu(tl->tl_used))
		return 0;

	tail_index = le16_to_cpu(tl->tl_used) - 1;
	current_tail = le32_to_cpu(tl->tl_recs[tail_index].t_start);
	current_tail += le32_to_cpu(tl->tl_recs[tail_index].t_clusters);

	return current_tail == new_start;
}

/*
 * Log a cluster range for deferred freeing, coalescing with the tail
 * record when contiguous.  Caller must hold tl_inode->i_mutex and have
 * flushed a full log beforehand.
 */
int ocfs2_truncate_log_append(struct ocfs2_super *osb,
			      handle_t *handle,
			      u64 start_blk,
			      unsigned int num_clusters)
{
	int status, index;
	unsigned int start_cluster, tl_count;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct buffer_head *tl_bh = osb->osb_tl_bh;
	struct ocfs2_dinode *di;
	struct ocfs2_truncate_log *tl;

	BUG_ON(mutex_trylock(&tl_inode->i_mutex));

	start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);

	di = (struct ocfs2_dinode *) tl_bh->b_data;

	/* tl_bh is loaded from ocfs2_truncate_log_init().  It's validated
	 * by the underlying call to ocfs2_read_inode_block(), so any
	 * corruption is a code bug */
	BUG_ON(!OCFS2_IS_VALID_DINODE(di));

	tl = &di->id2.i_dealloc;
	tl_count = le16_to_cpu(tl->tl_count);
	mlog_bug_on_msg(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) ||
			tl_count == 0,
			"Truncate record count on #%llu invalid "
			"wanted %u, actual %u\n",
			(unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
			ocfs2_truncate_recs_per_inode(osb->sb),
			le16_to_cpu(tl->tl_count));

	/* Caller should have known to flush before calling us.
	 */
	index = le16_to_cpu(tl->tl_used);
	if (index >= tl_count) {
		status = -ENOSPC;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	trace_ocfs2_truncate_log_append(
		(unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index,
		start_cluster, num_clusters);
	if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) {
		/*
		 * Move index back to the record we are coalescing with.
		 * ocfs2_truncate_log_can_coalesce() guarantees nonzero
		 */
		index--;

		num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters);
		trace_ocfs2_truncate_log_append(
			(unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
			index, le32_to_cpu(tl->tl_recs[index].t_start),
			num_clusters);
	} else {
		tl->tl_recs[index].t_start = cpu_to_le32(start_cluster);
		tl->tl_used = cpu_to_le16(index + 1);
	}
	tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters);

	ocfs2_journal_dirty(handle, tl_bh);

	osb->truncated_clusters += num_clusters;
bail:
	return status;
}

/*
 * Free every cluster range recorded in the truncate log, newest first,
 * shrinking tl_used as each record is replayed.
 */
static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
					 handle_t *handle,
					 struct inode *data_alloc_inode,
					 struct buffer_head *data_alloc_bh)
{
	int status = 0;
	int i;
	unsigned int num_clusters;
	u64 start_blk;
	struct ocfs2_truncate_rec rec;
	struct ocfs2_dinode *di;
	struct ocfs2_truncate_log *tl;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct buffer_head *tl_bh = osb->osb_tl_bh;

	di = (struct ocfs2_dinode *) tl_bh->b_data;
	tl = &di->id2.i_dealloc;
	i = le16_to_cpu(tl->tl_used) - 1;
	while (i >= 0) {
		/* Caller has given us at least enough credits to
		 * update the truncate log dinode */
		status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode),
						 tl_bh,
						 OCFS2_JOURNAL_ACCESS_WRITE);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		tl->tl_used = cpu_to_le16(i);

		ocfs2_journal_dirty(handle, tl_bh);

		/* TODO: Perhaps we can calculate the bulk of the
		 * credits up front rather than extending like
		 * this.
*/ status = ocfs2_extend_trans(handle, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC); if (status < 0) { mlog_errno(status); goto bail; } rec = tl->tl_recs[i]; start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb, le32_to_cpu(rec.t_start)); num_clusters = le32_to_cpu(rec.t_clusters); /* if start_blk is not set, we ignore the record as * invalid. */ if (start_blk) { trace_ocfs2_replay_truncate_records( (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, i, le32_to_cpu(rec.t_start), num_clusters); status = ocfs2_free_clusters(handle, data_alloc_inode, data_alloc_bh, start_blk, num_clusters); if (status < 0) { mlog_errno(status); goto bail; } } i--; } osb->truncated_clusters = 0; bail: return status; } /* Expects you to already be holding tl_inode->i_mutex */ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) { int status; unsigned int num_to_flush; handle_t *handle; struct inode *tl_inode = osb->osb_tl_inode; struct inode *data_alloc_inode = NULL; struct buffer_head *tl_bh = osb->osb_tl_bh; struct buffer_head *data_alloc_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_truncate_log *tl; BUG_ON(mutex_trylock(&tl_inode->i_mutex)); di = (struct ocfs2_dinode *) tl_bh->b_data; /* tl_bh is loaded from ocfs2_truncate_log_init(). 
It's validated * by the underlying call to ocfs2_read_inode_block(), so any * corruption is a code bug */ BUG_ON(!OCFS2_IS_VALID_DINODE(di)); tl = &di->id2.i_dealloc; num_to_flush = le16_to_cpu(tl->tl_used); trace_ocfs2_flush_truncate_log( (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, num_to_flush); if (!num_to_flush) { status = 0; goto out; } data_alloc_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!data_alloc_inode) { status = -EINVAL; mlog(ML_ERROR, "Could not get bitmap inode!\n"); goto out; } mutex_lock(&data_alloc_inode->i_mutex); status = ocfs2_inode_lock(data_alloc_inode, &data_alloc_bh, 1); if (status < 0) { mlog_errno(status); goto out_mutex; } handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto out_unlock; } status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode, data_alloc_bh); if (status < 0) mlog_errno(status); ocfs2_commit_trans(osb, handle); out_unlock: brelse(data_alloc_bh); ocfs2_inode_unlock(data_alloc_inode, 1); out_mutex: mutex_unlock(&data_alloc_inode->i_mutex); iput(data_alloc_inode); out: return status; } int ocfs2_flush_truncate_log(struct ocfs2_super *osb) { int status; struct inode *tl_inode = osb->osb_tl_inode; mutex_lock(&tl_inode->i_mutex); status = __ocfs2_flush_truncate_log(osb); mutex_unlock(&tl_inode->i_mutex); return status; } static void ocfs2_truncate_log_worker(struct work_struct *work) { int status; struct ocfs2_super *osb = container_of(work, struct ocfs2_super, osb_truncate_log_wq.work); status = ocfs2_flush_truncate_log(osb); if (status < 0) mlog_errno(status); else ocfs2_init_steal_slots(osb); } #define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ) void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb, int cancel) { if (osb->osb_tl_inode) { /* We want to push off log flushes while truncates are * still running. 
*/ if (cancel) cancel_delayed_work(&osb->osb_truncate_log_wq); queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq, OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL); } } static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, int slot_num, struct inode **tl_inode, struct buffer_head **tl_bh) { int status; struct inode *inode = NULL; struct buffer_head *bh = NULL; inode = ocfs2_get_system_file_inode(osb, TRUNCATE_LOG_SYSTEM_INODE, slot_num); if (!inode) { status = -EINVAL; mlog(ML_ERROR, "Could not get load truncate log inode!\n"); goto bail; } status = ocfs2_read_inode_block(inode, &bh); if (status < 0) { iput(inode); mlog_errno(status); goto bail; } *tl_inode = inode; *tl_bh = bh; bail: return status; } /* called during the 1st stage of node recovery. we stamp a clean * truncate log and pass back a copy for processing later. if the * truncate log does not require processing, a *tl_copy is set to * NULL. */ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb, int slot_num, struct ocfs2_dinode **tl_copy) { int status; struct inode *tl_inode = NULL; struct buffer_head *tl_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_truncate_log *tl; *tl_copy = NULL; trace_ocfs2_begin_truncate_log_recovery(slot_num); status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh); if (status < 0) { mlog_errno(status); goto bail; } di = (struct ocfs2_dinode *) tl_bh->b_data; /* tl_bh is loaded from ocfs2_get_truncate_log_info(). It's * validated by the underlying call to ocfs2_read_inode_block(), * so any corruption is a code bug */ BUG_ON(!OCFS2_IS_VALID_DINODE(di)); tl = &di->id2.i_dealloc; if (le16_to_cpu(tl->tl_used)) { trace_ocfs2_truncate_log_recovery_num(le16_to_cpu(tl->tl_used)); *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL); if (!(*tl_copy)) { status = -ENOMEM; mlog_errno(status); goto bail; } /* Assuming the write-out below goes well, this copy * will be passed back to recovery for processing. 
*/ memcpy(*tl_copy, tl_bh->b_data, tl_bh->b_size); /* All we need to do to clear the truncate log is set * tl_used. */ tl->tl_used = 0; ocfs2_compute_meta_ecc(osb->sb, tl_bh->b_data, &di->i_check); status = ocfs2_write_block(osb, tl_bh, INODE_CACHE(tl_inode)); if (status < 0) { mlog_errno(status); goto bail; } } bail: if (tl_inode) iput(tl_inode); brelse(tl_bh); if (status < 0 && (*tl_copy)) { kfree(*tl_copy); *tl_copy = NULL; mlog_errno(status); } return status; } int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb, struct ocfs2_dinode *tl_copy) { int status = 0; int i; unsigned int clusters, num_recs, start_cluster; u64 start_blk; handle_t *handle; struct inode *tl_inode = osb->osb_tl_inode; struct ocfs2_truncate_log *tl; if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) { mlog(ML_ERROR, "Asked to recover my own truncate log!\n"); return -EINVAL; } tl = &tl_copy->id2.i_dealloc; num_recs = le16_to_cpu(tl->tl_used); trace_ocfs2_complete_truncate_log_recovery( (unsigned long long)le64_to_cpu(tl_copy->i_blkno), num_recs); mutex_lock(&tl_inode->i_mutex); for(i = 0; i < num_recs; i++) { if (ocfs2_truncate_log_needs_flush(osb)) { status = __ocfs2_flush_truncate_log(osb); if (status < 0) { mlog_errno(status); goto bail_up; } } handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto bail_up; } clusters = le32_to_cpu(tl->tl_recs[i].t_clusters); start_cluster = le32_to_cpu(tl->tl_recs[i].t_start); start_blk = ocfs2_clusters_to_blocks(osb->sb, start_cluster); status = ocfs2_truncate_log_append(osb, handle, start_blk, clusters); ocfs2_commit_trans(osb, handle); if (status < 0) { mlog_errno(status); goto bail_up; } } bail_up: mutex_unlock(&tl_inode->i_mutex); return status; } void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb) { int status; struct inode *tl_inode = osb->osb_tl_inode; if (tl_inode) { cancel_delayed_work(&osb->osb_truncate_log_wq); 
flush_workqueue(ocfs2_wq); status = ocfs2_flush_truncate_log(osb); if (status < 0) mlog_errno(status); brelse(osb->osb_tl_bh); iput(osb->osb_tl_inode); } } int ocfs2_truncate_log_init(struct ocfs2_super *osb) { int status; struct inode *tl_inode = NULL; struct buffer_head *tl_bh = NULL; status = ocfs2_get_truncate_log_info(osb, osb->slot_num, &tl_inode, &tl_bh); if (status < 0) mlog_errno(status); /* ocfs2_truncate_log_shutdown keys on the existence of * osb->osb_tl_inode so we don't set any of the osb variables * until we're sure all is well. */ INIT_DELAYED_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker); osb->osb_tl_bh = tl_bh; osb->osb_tl_inode = tl_inode; return status; } /* * Delayed de-allocation of suballocator blocks. * * Some sets of block de-allocations might involve multiple suballocator inodes. * * The locking for this can get extremely complicated, especially when * the suballocator inodes to delete from aren't known until deep * within an unrelated codepath. * * ocfs2_extent_block structures are a good example of this - an inode * btree could have been grown by any number of nodes each allocating * out of their own suballoc inode. * * These structures allow the delay of block de-allocation until a * later time, when locking of multiple cluster inodes won't cause * deadlock. */ /* * Describe a single bit freed from a suballocator. For the block * suballocators, it represents one block. For the global cluster * allocator, it represents some clusters and free_bit indicates * clusters number. 
*/ struct ocfs2_cached_block_free { struct ocfs2_cached_block_free *free_next; u64 free_bg; u64 free_blk; unsigned int free_bit; }; struct ocfs2_per_slot_free_list { struct ocfs2_per_slot_free_list *f_next_suballocator; int f_inode_type; int f_slot; struct ocfs2_cached_block_free *f_first; }; static int ocfs2_free_cached_blocks(struct ocfs2_super *osb, int sysfile_type, int slot, struct ocfs2_cached_block_free *head) { int ret; u64 bg_blkno; handle_t *handle; struct inode *inode; struct buffer_head *di_bh = NULL; struct ocfs2_cached_block_free *tmp; inode = ocfs2_get_system_file_inode(osb, sysfile_type, slot); if (!inode) { ret = -EINVAL; mlog_errno(ret); goto out; } mutex_lock(&inode->i_mutex); ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret) { mlog_errno(ret); goto out_mutex; } handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out_unlock; } while (head) { if (head->free_bg) bg_blkno = head->free_bg; else bg_blkno = ocfs2_which_suballoc_group(head->free_blk, head->free_bit); trace_ocfs2_free_cached_blocks( (unsigned long long)head->free_blk, head->free_bit); ret = ocfs2_free_suballoc_bits(handle, inode, di_bh, head->free_bit, bg_blkno, 1); if (ret) { mlog_errno(ret); goto out_journal; } ret = ocfs2_extend_trans(handle, OCFS2_SUBALLOC_FREE); if (ret) { mlog_errno(ret); goto out_journal; } tmp = head; head = head->free_next; kfree(tmp); } out_journal: ocfs2_commit_trans(osb, handle); out_unlock: ocfs2_inode_unlock(inode, 1); brelse(di_bh); out_mutex: mutex_unlock(&inode->i_mutex); iput(inode); out: while(head) { /* Premature exit may have left some dangling items. 
*/ tmp = head; head = head->free_next; kfree(tmp); } return ret; } int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, u64 blkno, unsigned int bit) { int ret = 0; struct ocfs2_cached_block_free *item; item = kzalloc(sizeof(*item), GFP_NOFS); if (item == NULL) { ret = -ENOMEM; mlog_errno(ret); return ret; } trace_ocfs2_cache_cluster_dealloc((unsigned long long)blkno, bit); item->free_blk = blkno; item->free_bit = bit; item->free_next = ctxt->c_global_allocator; ctxt->c_global_allocator = item; return ret; } static int ocfs2_free_cached_clusters(struct ocfs2_super *osb, struct ocfs2_cached_block_free *head) { struct ocfs2_cached_block_free *tmp; struct inode *tl_inode = osb->osb_tl_inode; handle_t *handle; int ret = 0; mutex_lock(&tl_inode->i_mutex); while (head) { if (ocfs2_truncate_log_needs_flush(osb)) { ret = __ocfs2_flush_truncate_log(osb); if (ret < 0) { mlog_errno(ret); break; } } handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); break; } ret = ocfs2_truncate_log_append(osb, handle, head->free_blk, head->free_bit); ocfs2_commit_trans(osb, handle); tmp = head; head = head->free_next; kfree(tmp); if (ret < 0) { mlog_errno(ret); break; } } mutex_unlock(&tl_inode->i_mutex); while (head) { /* Premature exit may have left some dangling items. 
*/ tmp = head; head = head->free_next; kfree(tmp); } return ret; } int ocfs2_run_deallocs(struct ocfs2_super *osb, struct ocfs2_cached_dealloc_ctxt *ctxt) { int ret = 0, ret2; struct ocfs2_per_slot_free_list *fl; if (!ctxt) return 0; while (ctxt->c_first_suballocator) { fl = ctxt->c_first_suballocator; if (fl->f_first) { trace_ocfs2_run_deallocs(fl->f_inode_type, fl->f_slot); ret2 = ocfs2_free_cached_blocks(osb, fl->f_inode_type, fl->f_slot, fl->f_first); if (ret2) mlog_errno(ret2); if (!ret) ret = ret2; } ctxt->c_first_suballocator = fl->f_next_suballocator; kfree(fl); } if (ctxt->c_global_allocator) { ret2 = ocfs2_free_cached_clusters(osb, ctxt->c_global_allocator); if (ret2) mlog_errno(ret2); if (!ret) ret = ret2; ctxt->c_global_allocator = NULL; } return ret; } static struct ocfs2_per_slot_free_list * ocfs2_find_per_slot_free_list(int type, int slot, struct ocfs2_cached_dealloc_ctxt *ctxt) { struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator; while (fl) { if (fl->f_inode_type == type && fl->f_slot == slot) return fl; fl = fl->f_next_suballocator; } fl = kmalloc(sizeof(*fl), GFP_NOFS); if (fl) { fl->f_inode_type = type; fl->f_slot = slot; fl->f_first = NULL; fl->f_next_suballocator = ctxt->c_first_suballocator; ctxt->c_first_suballocator = fl; } return fl; } int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, int type, int slot, u64 suballoc, u64 blkno, unsigned int bit) { int ret; struct ocfs2_per_slot_free_list *fl; struct ocfs2_cached_block_free *item; fl = ocfs2_find_per_slot_free_list(type, slot, ctxt); if (fl == NULL) { ret = -ENOMEM; mlog_errno(ret); goto out; } item = kzalloc(sizeof(*item), GFP_NOFS); if (item == NULL) { ret = -ENOMEM; mlog_errno(ret); goto out; } trace_ocfs2_cache_block_dealloc(type, slot, (unsigned long long)suballoc, (unsigned long long)blkno, bit); item->free_bg = suballoc; item->free_blk = blkno; item->free_bit = bit; item->free_next = fl->f_first; fl->f_first = item; ret = 0; out: return ret; } 
static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, struct ocfs2_extent_block *eb) { return ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE, le16_to_cpu(eb->h_suballoc_slot), le64_to_cpu(eb->h_suballoc_loc), le64_to_cpu(eb->h_blkno), le16_to_cpu(eb->h_suballoc_bit)); } static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh) { set_buffer_uptodate(bh); mark_buffer_dirty(bh); return 0; } void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, unsigned int from, unsigned int to, struct page *page, int zero, u64 *phys) { int ret, partial = 0; ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0); if (ret) mlog_errno(ret); if (zero) zero_user_segment(page, from, to); /* * Need to set the buffers we zero'd into uptodate * here if they aren't - ocfs2_map_page_blocks() * might've skipped some */ ret = walk_page_buffers(handle, page_buffers(page), from, to, &partial, ocfs2_zero_func); if (ret < 0) mlog_errno(ret); else if (ocfs2_should_order_data(inode)) { ret = ocfs2_jbd2_file_inode(handle, inode); if (ret < 0) mlog_errno(ret); } if (!partial) SetPageUptodate(page); flush_dcache_page(page); } static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, loff_t end, struct page **pages, int numpages, u64 phys, handle_t *handle) { int i; struct page *page; unsigned int from, to = PAGE_CACHE_SIZE; struct super_block *sb = inode->i_sb; BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb))); if (numpages == 0) goto out; to = PAGE_CACHE_SIZE; for(i = 0; i < numpages; i++) { page = pages[i]; from = start & (PAGE_CACHE_SIZE - 1); if ((end >> PAGE_CACHE_SHIFT) == page->index) to = end & (PAGE_CACHE_SIZE - 1); BUG_ON(from > PAGE_CACHE_SIZE); BUG_ON(to > PAGE_CACHE_SIZE); ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1, &phys); start = (page->index + 1) << PAGE_CACHE_SHIFT; } out: if (pages) ocfs2_unlock_and_free_pages(pages, numpages); } int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, 
/*
 * Grab the pages backing [start, end); the range must lie inside a
 * single cluster.  Thin checked wrapper around ocfs2_grab_pages().
 */
static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
				struct page **pages, int *num)
{
	unsigned int cbits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;

	/* Caller must stay within one cluster. */
	BUG_ON((start >> cbits) != ((end - 1) >> cbits));

	return ocfs2_grab_pages(inode, start, end, pages, num);
}
In either case, we * can count on read and write to return/push zero's. */ if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN) goto out; ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages, &numpages); if (ret) { mlog_errno(ret); goto out; } ocfs2_zero_cluster_pages(inode, range_start, range_end, pages, numpages, phys, handle); /* * Initiate writeout of the pages we zero'd here. We don't * wait on them - the truncate_inode_pages() call later will * do that for us. */ ret = filemap_fdatawrite_range(inode->i_mapping, range_start, range_end - 1); if (ret) mlog_errno(ret); out: if (pages) kfree(pages); return ret; } static void ocfs2_zero_dinode_id2_with_xattr(struct inode *inode, struct ocfs2_dinode *di) { unsigned int blocksize = 1 << inode->i_sb->s_blocksize_bits; unsigned int xattrsize = le16_to_cpu(di->i_xattr_inline_size); if (le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_XATTR_FL) memset(&di->id2, 0, blocksize - offsetof(struct ocfs2_dinode, id2) - xattrsize); else memset(&di->id2, 0, blocksize - offsetof(struct ocfs2_dinode, id2)); } void ocfs2_dinode_new_extent_list(struct inode *inode, struct ocfs2_dinode *di) { ocfs2_zero_dinode_id2_with_xattr(inode, di); di->id2.i_list.l_tree_depth = 0; di->id2.i_list.l_next_free_rec = 0; di->id2.i_list.l_count = cpu_to_le16( ocfs2_extent_recs_per_inode_with_xattr(inode->i_sb, di)); } void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_inline_data *idata = &di->id2.i_data; spin_lock(&oi->ip_lock); oi->ip_dyn_features |= OCFS2_INLINE_DATA_FL; di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); spin_unlock(&oi->ip_lock); /* * We clear the entire i_data structure here so that all * fields can be properly initialized. 
*/ ocfs2_zero_dinode_id2_with_xattr(inode, di); idata->id_count = cpu_to_le16( ocfs2_max_inline_data_with_xattr(inode->i_sb, di)); } int ocfs2_convert_inline_data_to_extents(struct inode *inode, struct buffer_head *di_bh) { int ret, i, has_data, num_pages = 0; handle_t *handle; u64 uninitialized_var(block); struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_alloc_context *data_ac = NULL; struct page **pages = NULL; loff_t end = osb->s_clustersize; struct ocfs2_extent_tree et; int did_quota = 0; has_data = i_size_read(inode) ? 1 : 0; if (has_data) { pages = kcalloc(ocfs2_pages_per_cluster(osb->sb), sizeof(struct page *), GFP_NOFS); if (pages == NULL) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_reserve_clusters(osb, 1, &data_ac); if (ret) { mlog_errno(ret); goto out; } } handle = ocfs2_start_trans(osb, ocfs2_inline_to_extents_credits(osb->sb)); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out_unlock; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } if (has_data) { u32 bit_off, num; unsigned int page_end; u64 phys; ret = dquot_alloc_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb, 1)); if (ret) goto out_commit; did_quota = 1; data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv; ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off, &num); if (ret) { mlog_errno(ret); goto out_commit; } /* * Save two copies, one for insert, and one that can * be changed by ocfs2_map_and_dirty_page() below. */ block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); /* * Non sparse file systems zero on extend, so no need * to do that now. 
*/ if (!ocfs2_sparse_alloc(osb) && PAGE_CACHE_SIZE < osb->s_clustersize) end = PAGE_CACHE_SIZE; ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages); if (ret) { mlog_errno(ret); goto out_commit; } /* * This should populate the 1st page for us and mark * it up to date. */ ret = ocfs2_read_inline_data(inode, pages[0], di_bh); if (ret) { mlog_errno(ret); goto out_commit; } page_end = PAGE_CACHE_SIZE; if (PAGE_CACHE_SIZE > osb->s_clustersize) page_end = osb->s_clustersize; for (i = 0; i < num_pages; i++) ocfs2_map_and_dirty_page(inode, handle, 0, page_end, pages[i], i > 0, &phys); } spin_lock(&oi->ip_lock); oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL; di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); spin_unlock(&oi->ip_lock); ocfs2_dinode_new_extent_list(inode, di); ocfs2_journal_dirty(handle, di_bh); if (has_data) { /* * An error at this point should be extremely rare. If * this proves to be false, we could always re-build * the in-inode data from our pages. */ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL); if (ret) { mlog_errno(ret); goto out_commit; } inode->i_blocks = ocfs2_inode_sector_count(inode); } out_commit: if (ret < 0 && did_quota) dquot_free_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb, 1)); ocfs2_commit_trans(osb, handle); out_unlock: if (data_ac) ocfs2_free_alloc_context(data_ac); out: if (pages) { ocfs2_unlock_and_free_pages(pages, num_pages); kfree(pages); } return ret; } /* * It is expected, that by the time you call this function, * inode->i_size and fe->i_size have been adjusted. 
* * WARNING: This will kfree the truncate context */ int ocfs2_commit_truncate(struct ocfs2_super *osb, struct inode *inode, struct buffer_head *di_bh) { int status = 0, i, flags = 0; u32 new_highest_cpos, range, trunc_cpos, trunc_len, phys_cpos, coff; u64 blkno = 0; struct ocfs2_extent_list *el; struct ocfs2_extent_rec *rec; struct ocfs2_path *path = NULL; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_extent_list *root_el = &(di->id2.i_list); u64 refcount_loc = le64_to_cpu(di->i_refcount_loc); struct ocfs2_extent_tree et; struct ocfs2_cached_dealloc_ctxt dealloc; ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); ocfs2_init_dealloc_ctxt(&dealloc); new_highest_cpos = ocfs2_clusters_for_bytes(osb->sb, i_size_read(inode)); path = ocfs2_new_path(di_bh, &di->id2.i_list, ocfs2_journal_access_di); if (!path) { status = -ENOMEM; mlog_errno(status); goto bail; } ocfs2_extent_map_trunc(inode, new_highest_cpos); start: /* * Check that we still have allocation to delete. */ if (OCFS2_I(inode)->ip_clusters == 0) { status = 0; goto bail; } /* * Truncate always works against the rightmost tree branch. */ status = ocfs2_find_path(INODE_CACHE(inode), path, UINT_MAX); if (status) { mlog_errno(status); goto bail; } trace_ocfs2_commit_truncate( (unsigned long long)OCFS2_I(inode)->ip_blkno, new_highest_cpos, OCFS2_I(inode)->ip_clusters, path->p_tree_depth); /* * By now, el will point to the extent list on the bottom most * portion of this tree. Only the tail record is considered in * each pass. 
* * We handle the following cases, in order: * - empty extent: delete the remaining branch * - remove the entire record * - remove a partial record * - no record needs to be removed (truncate has completed) */ el = path_leaf_el(path); if (le16_to_cpu(el->l_next_free_rec) == 0) { ocfs2_error(inode->i_sb, "Inode %llu has empty extent block at %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)path_leaf_bh(path)->b_blocknr); status = -EROFS; goto bail; } i = le16_to_cpu(el->l_next_free_rec) - 1; rec = &el->l_recs[i]; flags = rec->e_flags; range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); if (i == 0 && ocfs2_is_empty_extent(rec)) { /* * Lower levels depend on this never happening, but it's best * to check it up here before changing the tree. */ if (root_el->l_tree_depth && rec->e_int_clusters == 0) { ocfs2_error(inode->i_sb, "Inode %lu has an empty " "extent record, depth %u\n", inode->i_ino, le16_to_cpu(root_el->l_tree_depth)); status = -EROFS; goto bail; } trunc_cpos = le32_to_cpu(rec->e_cpos); trunc_len = 0; blkno = 0; } else if (le32_to_cpu(rec->e_cpos) >= new_highest_cpos) { /* * Truncate entire record. */ trunc_cpos = le32_to_cpu(rec->e_cpos); trunc_len = ocfs2_rec_clusters(el, rec); blkno = le64_to_cpu(rec->e_blkno); } else if (range > new_highest_cpos) { /* * Partial truncate. it also should be * the last truncate we're doing. */ trunc_cpos = new_highest_cpos; trunc_len = range - new_highest_cpos; coff = new_highest_cpos - le32_to_cpu(rec->e_cpos); blkno = le64_to_cpu(rec->e_blkno) + ocfs2_clusters_to_blocks(inode->i_sb, coff); } else { /* * Truncate completed, leave happily. 
*/ status = 0; goto bail; } phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno); status = ocfs2_remove_btree_range(inode, &et, trunc_cpos, phys_cpos, trunc_len, flags, &dealloc, refcount_loc); if (status < 0) { mlog_errno(status); goto bail; } ocfs2_reinit_path(path, 1); /* * The check above will catch the case where we've truncated * away all allocation. */ goto start; bail: ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &dealloc); ocfs2_free_path(path); return status; } /* * 'start' is inclusive, 'end' is not. */ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, unsigned int start, unsigned int end, int trunc) { int ret; unsigned int numbytes; handle_t *handle; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_inline_data *idata = &di->id2.i_data; if (end > i_size_read(inode)) end = i_size_read(inode); BUG_ON(start >= end); if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) || !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) || !ocfs2_supports_inline_data(osb)) { ocfs2_error(inode->i_sb, "Inline data flags for inode %llu don't agree! " "Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, le16_to_cpu(di->i_dyn_features), OCFS2_I(inode)->ip_dyn_features, osb->s_feature_incompat); ret = -EROFS; goto out; } handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } numbytes = end - start; memset(idata->id_data + start, 0, numbytes); /* * No need to worry about the data page here - it's been * truncated already and inline data doesn't need it for * pushing zero's to disk, so we'll let readpage pick it up * later. 
*/ if (trunc) { i_size_write(inode, start); di->i_size = cpu_to_le64(start); } inode->i_blocks = ocfs2_inode_sector_count(inode); inode->i_ctime = inode->i_mtime = CURRENT_TIME; di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec); di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); ocfs2_journal_dirty(handle, di_bh); out_commit: ocfs2_commit_trans(osb, handle); out: return ret; } static int ocfs2_trim_extent(struct super_block *sb, struct ocfs2_group_desc *gd, u32 start, u32 count) { u64 discard, bcount; bcount = ocfs2_clusters_to_blocks(sb, count); discard = le64_to_cpu(gd->bg_blkno) + ocfs2_clusters_to_blocks(sb, start); trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount); return sb_issue_discard(sb, discard, bcount, GFP_NOFS, 0); } static int ocfs2_trim_group(struct super_block *sb, struct ocfs2_group_desc *gd, u32 start, u32 max, u32 minbits) { int ret = 0, count = 0, next; void *bitmap = gd->bg_bitmap; if (le16_to_cpu(gd->bg_free_bits_count) < minbits) return 0; trace_ocfs2_trim_group((unsigned long long)le64_to_cpu(gd->bg_blkno), start, max, minbits); while (start < max) { start = ocfs2_find_next_zero_bit(bitmap, max, start); if (start >= max) break; next = ocfs2_find_next_bit(bitmap, max, start); if ((next - start) >= minbits) { ret = ocfs2_trim_extent(sb, gd, start, next - start); if (ret < 0) { mlog_errno(ret); break; } count += next - start; } start = next + 1; if (fatal_signal_pending(current)) { count = -ERESTARTSYS; break; } if ((le16_to_cpu(gd->bg_free_bits_count) - count) < minbits) break; } if (ret < 0) count = ret; return count; } int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range) { struct ocfs2_super *osb = OCFS2_SB(sb); u64 start, len, trimmed, first_group, last_group, group; int ret, cnt; u32 first_bit, last_bit, minlen; struct buffer_head *main_bm_bh = NULL; struct inode *main_bm_inode = NULL; struct buffer_head *gd_bh = NULL; struct ocfs2_dinode *main_bm; struct ocfs2_group_desc 
*gd = NULL; start = range->start >> osb->s_clustersize_bits; len = range->len >> osb->s_clustersize_bits; minlen = range->minlen >> osb->s_clustersize_bits; trimmed = 0; if (!len) { range->len = 0; return 0; } if (minlen >= osb->bitmap_cpg) return -EINVAL; main_bm_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!main_bm_inode) { ret = -EIO; mlog_errno(ret); goto out; } mutex_lock(&main_bm_inode->i_mutex); ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 0); if (ret < 0) { mlog_errno(ret); goto out_mutex; } main_bm = (struct ocfs2_dinode *)main_bm_bh->b_data; if (start >= le32_to_cpu(main_bm->i_clusters)) { ret = -EINVAL; goto out_unlock; } if (start + len > le32_to_cpu(main_bm->i_clusters)) len = le32_to_cpu(main_bm->i_clusters) - start; trace_ocfs2_trim_fs(start, len, minlen); /* Determine first and last group to examine based on start and len */ first_group = ocfs2_which_cluster_group(main_bm_inode, start); if (first_group == osb->first_cluster_group_blkno) first_bit = start; else first_bit = start - ocfs2_blocks_to_clusters(sb, first_group); last_group = ocfs2_which_cluster_group(main_bm_inode, start + len - 1); last_bit = osb->bitmap_cpg; for (group = first_group; group <= last_group;) { if (first_bit + len >= osb->bitmap_cpg) last_bit = osb->bitmap_cpg; else last_bit = first_bit + len; ret = ocfs2_read_group_descriptor(main_bm_inode, main_bm, group, &gd_bh); if (ret < 0) { mlog_errno(ret); break; } gd = (struct ocfs2_group_desc *)gd_bh->b_data; cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen); brelse(gd_bh); gd_bh = NULL; if (cnt < 0) { ret = cnt; mlog_errno(ret); break; } trimmed += cnt; len -= osb->bitmap_cpg - first_bit; first_bit = 0; if (group == osb->first_cluster_group_blkno) group = ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg); else group += ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg); } range->len = trimmed * sb->s_blocksize; out_unlock: ocfs2_inode_unlock(main_bm_inode, 0); 
brelse(main_bm_bh); out_mutex: mutex_unlock(&main_bm_inode->i_mutex); iput(main_bm_inode); out: return ret; }
gpl-2.0
sebirdman/m7_kernel_dev
drivers/watchdog/ibmasr.c
7383
9666
/* * IBM Automatic Server Restart driver. * * Copyright (c) 2005 Andrey Panin <pazke@donpac.ru> * * Based on driver written by Pete Reynolds. * Copyright (c) IBM Corporation, 1998-2004. * * This software may be used and distributed according to the terms * of the GNU Public License, incorporated herein by reference. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/fs.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/timer.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/dmi.h> #include <linux/io.h> #include <linux/uaccess.h> enum { ASMTYPE_UNKNOWN, ASMTYPE_TOPAZ, ASMTYPE_JASPER, ASMTYPE_PEARL, ASMTYPE_JUNIPER, ASMTYPE_SPRUCE, }; #define TOPAZ_ASR_REG_OFFSET 4 #define TOPAZ_ASR_TOGGLE 0x40 #define TOPAZ_ASR_DISABLE 0x80 /* PEARL ASR S/W REGISTER SUPERIO PORT ADDRESSES */ #define PEARL_BASE 0xe04 #define PEARL_WRITE 0xe06 #define PEARL_READ 0xe07 #define PEARL_ASR_DISABLE_MASK 0x80 /* bit 7: disable = 1, enable = 0 */ #define PEARL_ASR_TOGGLE_MASK 0x40 /* bit 6: 0, then 1, then 0 */ /* JASPER OFFSET FROM SIO BASE ADDR TO ASR S/W REGISTERS. 
*/ #define JASPER_ASR_REG_OFFSET 0x38 #define JASPER_ASR_DISABLE_MASK 0x01 /* bit 0: disable = 1, enable = 0 */ #define JASPER_ASR_TOGGLE_MASK 0x02 /* bit 1: 0, then 1, then 0 */ #define JUNIPER_BASE_ADDRESS 0x54b /* Base address of Juniper ASR */ #define JUNIPER_ASR_DISABLE_MASK 0x01 /* bit 0: disable = 1 enable = 0 */ #define JUNIPER_ASR_TOGGLE_MASK 0x02 /* bit 1: 0, then 1, then 0 */ #define SPRUCE_BASE_ADDRESS 0x118e /* Base address of Spruce ASR */ #define SPRUCE_ASR_DISABLE_MASK 0x01 /* bit 1: disable = 1 enable = 0 */ #define SPRUCE_ASR_TOGGLE_MASK 0x02 /* bit 0: 0, then 1, then 0 */ static bool nowayout = WATCHDOG_NOWAYOUT; static unsigned long asr_is_open; static char asr_expect_close; static unsigned int asr_type, asr_base, asr_length; static unsigned int asr_read_addr, asr_write_addr; static unsigned char asr_toggle_mask, asr_disable_mask; static DEFINE_SPINLOCK(asr_lock); static void __asr_toggle(void) { unsigned char reg; reg = inb(asr_read_addr); outb(reg & ~asr_toggle_mask, asr_write_addr); reg = inb(asr_read_addr); outb(reg | asr_toggle_mask, asr_write_addr); reg = inb(asr_read_addr); outb(reg & ~asr_toggle_mask, asr_write_addr); reg = inb(asr_read_addr); } static void asr_toggle(void) { spin_lock(&asr_lock); __asr_toggle(); spin_unlock(&asr_lock); } static void asr_enable(void) { unsigned char reg; spin_lock(&asr_lock); if (asr_type == ASMTYPE_TOPAZ) { /* asr_write_addr == asr_read_addr */ reg = inb(asr_read_addr); outb(reg & ~(TOPAZ_ASR_TOGGLE | TOPAZ_ASR_DISABLE), asr_read_addr); } else { /* * First make sure the hardware timer is reset by toggling * ASR hardware timer line. 
*/ __asr_toggle(); reg = inb(asr_read_addr); outb(reg & ~asr_disable_mask, asr_write_addr); } reg = inb(asr_read_addr); spin_unlock(&asr_lock); } static void asr_disable(void) { unsigned char reg; spin_lock(&asr_lock); reg = inb(asr_read_addr); if (asr_type == ASMTYPE_TOPAZ) /* asr_write_addr == asr_read_addr */ outb(reg | TOPAZ_ASR_TOGGLE | TOPAZ_ASR_DISABLE, asr_read_addr); else { outb(reg | asr_toggle_mask, asr_write_addr); reg = inb(asr_read_addr); outb(reg | asr_disable_mask, asr_write_addr); } reg = inb(asr_read_addr); spin_unlock(&asr_lock); } static int __init asr_get_base_address(void) { unsigned char low, high; const char *type = ""; asr_length = 1; switch (asr_type) { case ASMTYPE_TOPAZ: /* SELECT SuperIO CHIP FOR QUERYING (WRITE 0x07 TO BOTH 0x2E and 0x2F) */ outb(0x07, 0x2e); outb(0x07, 0x2f); /* SELECT AND READ THE HIGH-NIBBLE OF THE GPIO BASE ADDRESS */ outb(0x60, 0x2e); high = inb(0x2f); /* SELECT AND READ THE LOW-NIBBLE OF THE GPIO BASE ADDRESS */ outb(0x61, 0x2e); low = inb(0x2f); asr_base = (high << 16) | low; asr_read_addr = asr_write_addr = asr_base + TOPAZ_ASR_REG_OFFSET; asr_length = 5; break; case ASMTYPE_JASPER: type = "Jaspers "; #if 0 u32 r; /* Suggested fix */ pdev = pci_get_bus_and_slot(0, DEVFN(0x1f, 0)); if (pdev == NULL) return -ENODEV; pci_read_config_dword(pdev, 0x58, &r); asr_base = r & 0xFFFE; pci_dev_put(pdev); #else /* FIXME: need to use pci_config_lock here, but it's not exported */ /* spin_lock_irqsave(&pci_config_lock, flags);*/ /* Select the SuperIO chip in the PCI I/O port register */ outl(0x8000f858, 0xcf8); /* BUS 0, Slot 1F, fnc 0, offset 58 */ /* * Read the base address for the SuperIO chip. * Only the lower 16 bits are valid, but the address is word * aligned so the last bit must be masked off. 
*/ asr_base = inl(0xcfc) & 0xfffe; /* spin_unlock_irqrestore(&pci_config_lock, flags);*/ #endif asr_read_addr = asr_write_addr = asr_base + JASPER_ASR_REG_OFFSET; asr_toggle_mask = JASPER_ASR_TOGGLE_MASK; asr_disable_mask = JASPER_ASR_DISABLE_MASK; asr_length = JASPER_ASR_REG_OFFSET + 1; break; case ASMTYPE_PEARL: type = "Pearls "; asr_base = PEARL_BASE; asr_read_addr = PEARL_READ; asr_write_addr = PEARL_WRITE; asr_toggle_mask = PEARL_ASR_TOGGLE_MASK; asr_disable_mask = PEARL_ASR_DISABLE_MASK; asr_length = 4; break; case ASMTYPE_JUNIPER: type = "Junipers "; asr_base = JUNIPER_BASE_ADDRESS; asr_read_addr = asr_write_addr = asr_base; asr_toggle_mask = JUNIPER_ASR_TOGGLE_MASK; asr_disable_mask = JUNIPER_ASR_DISABLE_MASK; break; case ASMTYPE_SPRUCE: type = "Spruce's "; asr_base = SPRUCE_BASE_ADDRESS; asr_read_addr = asr_write_addr = asr_base; asr_toggle_mask = SPRUCE_ASR_TOGGLE_MASK; asr_disable_mask = SPRUCE_ASR_DISABLE_MASK; break; } if (!request_region(asr_base, asr_length, "ibmasr")) { pr_err("address %#x already in use\n", asr_base); return -EBUSY; } pr_info("found %sASR @ addr %#x\n", type, asr_base); return 0; } static ssize_t asr_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { if (count) { if (!nowayout) { size_t i; /* In case it was set long ago */ asr_expect_close = 0; for (i = 0; i != count; i++) { char c; if (get_user(c, buf + i)) return -EFAULT; if (c == 'V') asr_expect_close = 42; } } asr_toggle(); } return count; } static long asr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .identity = "IBM ASR", }; void __user *argp = (void __user *)arg; int __user *p = argp; int heartbeat; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_SETOPTIONS: { int new_options, retval = -EINVAL; if (get_user(new_options, p)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { asr_disable(); retval = 0; } if (new_options & WDIOS_ENABLECARD) { asr_enable(); asr_toggle(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: asr_toggle(); return 0; /* * The hardware has a fixed timeout value, so no WDIOC_SETTIMEOUT * and WDIOC_GETTIMEOUT always returns 256. */ case WDIOC_GETTIMEOUT: heartbeat = 256; return put_user(heartbeat, p); default: return -ENOTTY; } } static int asr_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &asr_is_open)) return -EBUSY; asr_toggle(); asr_enable(); return nonseekable_open(inode, file); } static int asr_release(struct inode *inode, struct file *file) { if (asr_expect_close == 42) asr_disable(); else { pr_crit("unexpected close, not stopping watchdog!\n"); asr_toggle(); } clear_bit(0, &asr_is_open); asr_expect_close = 0; return 0; } static const struct file_operations asr_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = asr_write, .unlocked_ioctl = asr_ioctl, .open = asr_open, .release = asr_release, }; static struct miscdevice asr_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &asr_fops, }; struct ibmasr_id { const char *desc; int type; }; static struct ibmasr_id __initdata ibmasr_id_table[] = { { "IBM Automatic Server Restart - eserver xSeries 220", ASMTYPE_TOPAZ }, { "IBM Automatic Server Restart - Machine Type 8673", ASMTYPE_PEARL }, { "IBM Automatic Server Restart - Machine Type 8480", ASMTYPE_JASPER }, { "IBM Automatic Server Restart - Machine Type 8482", ASMTYPE_JUNIPER }, { "IBM Automatic Server Restart - Machine Type 8648", ASMTYPE_SPRUCE }, { NULL } }; static int __init ibmasr_init(void) { struct ibmasr_id *id; int rc; for (id = ibmasr_id_table; id->desc; id++) { if (dmi_find_device(DMI_DEV_TYPE_OTHER, id->desc, NULL)) { 
asr_type = id->type; break; } } if (!asr_type) return -ENODEV; rc = asr_get_base_address(); if (rc) return rc; rc = misc_register(&asr_miscdev); if (rc < 0) { release_region(asr_base, asr_length); pr_err("failed to register misc device\n"); return rc; } return 0; } static void __exit ibmasr_exit(void) { if (!nowayout) asr_disable(); misc_deregister(&asr_miscdev); release_region(asr_base, asr_length); } module_init(ibmasr_init); module_exit(ibmasr_exit); module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); MODULE_DESCRIPTION("IBM Automatic Server Restart driver"); MODULE_AUTHOR("Andrey Panin"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
ayeric/android_kernel_motorola_ghost
drivers/watchdog/alim1535_wdt.c
7383
10169
/* * Watchdog for the 7101 PMU version found in the ALi M1535 chipsets * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/ioport.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/pci.h> #include <linux/uaccess.h> #include <linux/io.h> #define WATCHDOG_NAME "ALi_M1535" #define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */ /* internal variables */ static unsigned long ali_is_open; static char ali_expect_release; static struct pci_dev *ali_pci; static u32 ali_timeout_bits; /* stores the computed timeout */ static DEFINE_SPINLOCK(ali_lock); /* Guards the hardware */ /* module parameters */ static int timeout = WATCHDOG_TIMEOUT; module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (0 < timeout < 18000, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* * ali_start - start watchdog countdown * * Starts the timer running providing the timer has a counter * configuration set. 
*/ static void ali_start(void) { u32 val; spin_lock(&ali_lock); pci_read_config_dword(ali_pci, 0xCC, &val); val &= ~0x3F; /* Mask count */ val |= (1 << 25) | ali_timeout_bits; pci_write_config_dword(ali_pci, 0xCC, val); spin_unlock(&ali_lock); } /* * ali_stop - stop the timer countdown * * Stop the ALi watchdog countdown */ static void ali_stop(void) { u32 val; spin_lock(&ali_lock); pci_read_config_dword(ali_pci, 0xCC, &val); val &= ~0x3F; /* Mask count to zero (disabled) */ val &= ~(1 << 25); /* and for safety mask the reset enable */ pci_write_config_dword(ali_pci, 0xCC, val); spin_unlock(&ali_lock); } /* * ali_keepalive - send a keepalive to the watchdog * * Send a keepalive to the timer (actually we restart the timer). */ static void ali_keepalive(void) { ali_start(); } /* * ali_settimer - compute the timer reload value * @t: time in seconds * * Computes the timeout values needed */ static int ali_settimer(int t) { if (t < 0) return -EINVAL; else if (t < 60) ali_timeout_bits = t|(1 << 6); else if (t < 3600) ali_timeout_bits = (t / 60)|(1 << 7); else if (t < 18000) ali_timeout_bits = (t / 300)|(1 << 6)|(1 << 7); else return -EINVAL; timeout = t; return 0; } /* * /dev/watchdog handling */ /* * ali_write - writes to ALi watchdog * @file: file from VFS * @data: user address of data * @len: length of data * @ppos: pointer to the file offset * * Handle a write to the ALi watchdog. Writing to the file pings * the watchdog and resets it. Writing the magic 'V' sequence allows * the next close to turn off the watchdog. */ static ssize_t ali_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (len) { if (!nowayout) { size_t i; /* note: just in case someone wrote the magic character five months ago... 
*/ ali_expect_release = 0; /* scan to see whether or not we got the magic character */ for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') ali_expect_release = 42; } } /* someone wrote to us, we should reload the timer */ ali_start(); } return len; } /* * ali_ioctl - handle watchdog ioctls * @file: VFS file pointer * @cmd: ioctl number * @arg: arguments to the ioctl * * Handle the watchdog ioctls supported by the ALi driver. Really * we want an extension to enable irq ack monitoring and the like */ static long ali_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 0, .identity = "ALi M1535 WatchDog Timer", }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_SETOPTIONS: { int new_options, retval = -EINVAL; if (get_user(new_options, p)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { ali_stop(); retval = 0; } if (new_options & WDIOS_ENABLECARD) { ali_start(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: ali_keepalive(); return 0; case WDIOC_SETTIMEOUT: { int new_timeout; if (get_user(new_timeout, p)) return -EFAULT; if (ali_settimer(new_timeout)) return -EINVAL; ali_keepalive(); /* Fall */ } case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: return -ENOTTY; } } /* * ali_open - handle open of ali watchdog * @inode: inode from VFS * @file: file from VFS * * Open the ALi watchdog device. Ensure only one person opens it * at a time. Also start the watchdog running. 
*/ static int ali_open(struct inode *inode, struct file *file) { /* /dev/watchdog can only be opened once */ if (test_and_set_bit(0, &ali_is_open)) return -EBUSY; /* Activate */ ali_start(); return nonseekable_open(inode, file); } /* * ali_release - close an ALi watchdog * @inode: inode from VFS * @file: file from VFS * * Close the ALi watchdog device. Actual shutdown of the timer * only occurs if the magic sequence has been set. */ static int ali_release(struct inode *inode, struct file *file) { /* * Shut off the timer. */ if (ali_expect_release == 42) ali_stop(); else { pr_crit("Unexpected close, not stopping watchdog!\n"); ali_keepalive(); } clear_bit(0, &ali_is_open); ali_expect_release = 0; return 0; } /* * ali_notify_sys - System down notifier * * Notifier for system down */ static int ali_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) ali_stop(); /* Turn the WDT off */ return NOTIFY_DONE; } /* * Data for PCI driver interface * * This data only exists for exporting the supported * PCI ids via MODULE_DEVICE_TABLE. We do not actually * register a pci_driver, because someone else might one day * want to register another driver on the same PCI id. */ static DEFINE_PCI_DEVICE_TABLE(ali_pci_tbl) __used = { { PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,}, { PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,}, { 0, }, }; MODULE_DEVICE_TABLE(pci, ali_pci_tbl); /* * ali_find_watchdog - find a 1535 and 7101 * * Scans the PCI hardware for a 1535 series bridge and matching 7101 * watchdog device. 
This may be overtight but it is better to be safe */ static int __init ali_find_watchdog(void) { struct pci_dev *pdev; u32 wdog; /* Check for a 1533/1535 series bridge */ pdev = pci_get_device(PCI_VENDOR_ID_AL, 0x1535, NULL); if (pdev == NULL) pdev = pci_get_device(PCI_VENDOR_ID_AL, 0x1533, NULL); if (pdev == NULL) return -ENODEV; pci_dev_put(pdev); /* Check for the a 7101 PMU */ pdev = pci_get_device(PCI_VENDOR_ID_AL, 0x7101, NULL); if (pdev == NULL) return -ENODEV; if (pci_enable_device(pdev)) { pci_dev_put(pdev); return -EIO; } ali_pci = pdev; /* * Initialize the timer bits */ pci_read_config_dword(pdev, 0xCC, &wdog); /* Timer bits */ wdog &= ~0x3F; /* Issued events */ wdog &= ~((1 << 27)|(1 << 26)|(1 << 25)|(1 << 24)); /* No monitor bits */ wdog &= ~((1 << 16)|(1 << 13)|(1 << 12)|(1 << 11)|(1 << 10)|(1 << 9)); pci_write_config_dword(pdev, 0xCC, wdog); return 0; } /* * Kernel Interfaces */ static const struct file_operations ali_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = ali_write, .unlocked_ioctl = ali_ioctl, .open = ali_open, .release = ali_release, }; static struct miscdevice ali_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &ali_fops, }; static struct notifier_block ali_notifier = { .notifier_call = ali_notify_sys, }; /* * watchdog_init - module initialiser * * Scan for a suitable watchdog and if so initialize it. 
Return an error * if we cannot, the error causes the module to unload */ static int __init watchdog_init(void) { int ret; /* Check whether or not the hardware watchdog is there */ if (ali_find_watchdog() != 0) return -ENODEV; /* Check that the timeout value is within it's range; if not reset to the default */ if (timeout < 1 || timeout >= 18000) { timeout = WATCHDOG_TIMEOUT; pr_info("timeout value must be 0 < timeout < 18000, using %d\n", timeout); } /* Calculate the watchdog's timeout */ ali_settimer(timeout); ret = register_reboot_notifier(&ali_notifier); if (ret != 0) { pr_err("cannot register reboot notifier (err=%d)\n", ret); goto out; } ret = misc_register(&ali_miscdev); if (ret != 0) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); goto unreg_reboot; } pr_info("initialized. timeout=%d sec (nowayout=%d)\n", timeout, nowayout); out: return ret; unreg_reboot: unregister_reboot_notifier(&ali_notifier); goto out; } /* * watchdog_exit - module de-initialiser * * Called while unloading a successfully installed watchdog module. */ static void __exit watchdog_exit(void) { /* Stop the timer before we leave */ ali_stop(); /* Deregister */ misc_deregister(&ali_miscdev); unregister_reboot_notifier(&ali_notifier); pci_dev_put(ali_pci); } module_init(watchdog_init); module_exit(watchdog_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("ALi M1535 PMU Watchdog Timer driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
DirtyUnicorns/android_kernel_htc_m4
drivers/message/i2o/i2o_block.c
8151
31612
/* * Block OSM * * Copyright (C) 1999-2002 Red Hat Software * * Written by Alan Cox, Building Number Three Ltd * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * For the purpose of avoiding doubt the preferred form of the work * for making modifications shall be a standards compliant form such * gzipped tar and not one requiring a proprietary or patent encumbered * tool to unpack. * * Fixes/additions: * Steve Ralston: * Multiple device handling error fixes, * Added a queue depth. * Alan Cox: * FC920 has an rmw bug. Dont or in the end marker. * Removed queue walk, fixed for 64bitness. * Rewrote much of the code over time * Added indirect block lists * Handle 64K limits on many controllers * Don't use indirects on the Promise (breaks) * Heavily chop down the queue depths * Deepak Saxena: * Independent queues per IOP * Support for dynamic device creation/deletion * Code cleanup * Support for larger I/Os through merge* functions * (taken from DAC960 driver) * Boji T Kannanthanam: * Set the I2O Block devices to be detected in increasing * order of TIDs during boot. * Search and set the I2O block device that we boot off * from as the first device to be claimed (as /dev/i2o/hda) * Properly attach/detach I2O gendisk structure from the * system gendisk list. The I2O block devices now appear in * /proc/partitions. * Markus Lidel <Markus.Lidel@shadowconnect.com>: * Minor bugfixes for 2.6. 
*/ #include <linux/module.h> #include <linux/slab.h> #include <linux/i2o.h> #include <linux/mutex.h> #include <linux/mempool.h> #include <linux/genhd.h> #include <linux/blkdev.h> #include <linux/hdreg.h> #include <scsi/scsi.h> #include "i2o_block.h" #define OSM_NAME "block-osm" #define OSM_VERSION "1.325" #define OSM_DESCRIPTION "I2O Block Device OSM" static DEFINE_MUTEX(i2o_block_mutex); static struct i2o_driver i2o_block_driver; /* global Block OSM request mempool */ static struct i2o_block_mempool i2o_blk_req_pool; /* Block OSM class handling definition */ static struct i2o_class_id i2o_block_class_id[] = { {I2O_CLASS_RANDOM_BLOCK_STORAGE}, {I2O_CLASS_END} }; /** * i2o_block_device_free - free the memory of the I2O Block device * @dev: I2O Block device, which should be cleaned up * * Frees the request queue, gendisk and the i2o_block_device structure. */ static void i2o_block_device_free(struct i2o_block_device *dev) { blk_cleanup_queue(dev->gd->queue); put_disk(dev->gd); kfree(dev); }; /** * i2o_block_remove - remove the I2O Block device from the system again * @dev: I2O Block device which should be removed * * Remove gendisk from system and free all allocated memory. * * Always returns 0. */ static int i2o_block_remove(struct device *dev) { struct i2o_device *i2o_dev = to_i2o_device(dev); struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev); osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid, i2o_blk_dev->gd->disk_name); i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0); del_gendisk(i2o_blk_dev->gd); dev_set_drvdata(dev, NULL); i2o_device_claim_release(i2o_dev); i2o_block_device_free(i2o_blk_dev); return 0; }; /** * i2o_block_device flush - Flush all dirty data of I2O device dev * @dev: I2O device which should be flushed * * Flushes all dirty data on device dev. * * Returns 0 on success or negative error code on failure. 
*/ static int i2o_block_device_flush(struct i2o_device *dev) { struct i2o_message *msg; msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev-> lct_data.tid); msg->body[0] = cpu_to_le32(60 << 16); osm_debug("Flushing...\n"); return i2o_msg_post_wait(dev->iop, msg, 60); }; /** * i2o_block_device_mount - Mount (load) the media of device dev * @dev: I2O device which should receive the mount request * @media_id: Media Identifier * * Load a media into drive. Identifier should be set to -1, because the * spec does not support any other value. * * Returns 0 on success or negative error code on failure. */ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) { struct i2o_message *msg; msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev-> lct_data.tid); msg->body[0] = cpu_to_le32(-1); msg->body[1] = cpu_to_le32(0x00000000); osm_debug("Mounting...\n"); return i2o_msg_post_wait(dev->iop, msg, 2); }; /** * i2o_block_device_lock - Locks the media of device dev * @dev: I2O device which should receive the lock request * @media_id: Media Identifier * * Lock media of device dev to prevent removal. The media identifier * should be set to -1, because the spec does not support any other value. * * Returns 0 on success or negative error code on failure. 
*/ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) { struct i2o_message *msg; msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev-> lct_data.tid); msg->body[0] = cpu_to_le32(-1); osm_debug("Locking...\n"); return i2o_msg_post_wait(dev->iop, msg, 2); }; /** * i2o_block_device_unlock - Unlocks the media of device dev * @dev: I2O device which should receive the unlocked request * @media_id: Media Identifier * * Unlocks the media in device dev. The media identifier should be set to * -1, because the spec does not support any other value. * * Returns 0 on success or negative error code on failure. */ static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id) { struct i2o_message *msg; msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev-> lct_data.tid); msg->body[0] = cpu_to_le32(media_id); osm_debug("Unlocking...\n"); return i2o_msg_post_wait(dev->iop, msg, 2); }; /** * i2o_block_device_power - Power management for device dev * @dev: I2O device which should receive the power management request * @op: Operation to send * * Send a power management request to the device dev. * * Returns 0 on success or negative error code on failure. 
*/ static int i2o_block_device_power(struct i2o_block_device *dev, u8 op) { struct i2o_device *i2o_dev = dev->i2o_dev; struct i2o_controller *c = i2o_dev->iop; struct i2o_message *msg; int rc; msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev-> lct_data.tid); msg->body[0] = cpu_to_le32(op << 24); osm_debug("Power...\n"); rc = i2o_msg_post_wait(c, msg, 60); if (!rc) dev->power = op; return rc; }; /** * i2o_block_request_alloc - Allocate an I2O block request struct * * Allocates an I2O block request struct and initialize the list. * * Returns a i2o_block_request pointer on success or negative error code * on failure. */ static inline struct i2o_block_request *i2o_block_request_alloc(void) { struct i2o_block_request *ireq; ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC); if (!ireq) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&ireq->queue); sg_init_table(ireq->sg_table, I2O_MAX_PHYS_SEGMENTS); return ireq; }; /** * i2o_block_request_free - Frees a I2O block request * @ireq: I2O block request which should be freed * * Frees the allocated memory (give it back to the request mempool). */ static inline void i2o_block_request_free(struct i2o_block_request *ireq) { mempool_free(ireq, i2o_blk_req_pool.pool); }; /** * i2o_block_sglist_alloc - Allocate the SG list and map it * @c: I2O controller to which the request belongs * @ireq: I2O block request * @mptr: message body pointer * * Builds the SG list and map it to be accessible by the controller. * * Returns 0 on failure or 1 on success. 
*/ static inline int i2o_block_sglist_alloc(struct i2o_controller *c, struct i2o_block_request *ireq, u32 ** mptr) { int nents; enum dma_data_direction direction; ireq->dev = &c->pdev->dev; nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table); if (rq_data_dir(ireq->req) == READ) direction = PCI_DMA_FROMDEVICE; else direction = PCI_DMA_TODEVICE; ireq->sg_nents = nents; return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr); }; /** * i2o_block_sglist_free - Frees the SG list * @ireq: I2O block request from which the SG should be freed * * Frees the SG list from the I2O block request. */ static inline void i2o_block_sglist_free(struct i2o_block_request *ireq) { enum dma_data_direction direction; if (rq_data_dir(ireq->req) == READ) direction = PCI_DMA_FROMDEVICE; else direction = PCI_DMA_TODEVICE; dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction); }; /** * i2o_block_prep_req_fn - Allocates I2O block device specific struct * @q: request queue for the request * @req: the request to prepare * * Allocate the necessary i2o_block_request struct and connect it to * the request. This is needed that we not lose the SG list later on. * * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure. 
*/ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req) { struct i2o_block_device *i2o_blk_dev = q->queuedata; struct i2o_block_request *ireq; if (unlikely(!i2o_blk_dev)) { osm_err("block device already removed\n"); return BLKPREP_KILL; } /* connect the i2o_block_request to the request */ if (!req->special) { ireq = i2o_block_request_alloc(); if (IS_ERR(ireq)) { osm_debug("unable to allocate i2o_block_request!\n"); return BLKPREP_DEFER; } ireq->i2o_blk_dev = i2o_blk_dev; req->special = ireq; ireq->req = req; } /* do not come back here */ req->cmd_flags |= REQ_DONTPREP; return BLKPREP_OK; }; /** * i2o_block_delayed_request_fn - delayed request queue function * @work: the delayed request with the queue to start * * If the request queue is stopped for a disk, and there is no open * request, a new event is created, which calls this function to start * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never * be started again. */ static void i2o_block_delayed_request_fn(struct work_struct *work) { struct i2o_block_delayed_request *dreq = container_of(work, struct i2o_block_delayed_request, work.work); struct request_queue *q = dreq->queue; unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); blk_start_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); kfree(dreq); }; /** * i2o_block_end_request - Post-processing of completed commands * @req: request which should be completed * @error: 0 for success, < 0 for error * @nr_bytes: number of bytes to complete * * Mark the request as complete. The lock must not be held when entering. 
* */ static void i2o_block_end_request(struct request *req, int error, int nr_bytes) { struct i2o_block_request *ireq = req->special; struct i2o_block_device *dev = ireq->i2o_blk_dev; struct request_queue *q = req->q; unsigned long flags; if (blk_end_request(req, error, nr_bytes)) if (error) blk_end_request_all(req, -EIO); spin_lock_irqsave(q->queue_lock, flags); if (likely(dev)) { dev->open_queue_depth--; list_del(&ireq->queue); } blk_start_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); i2o_block_sglist_free(ireq); i2o_block_request_free(ireq); }; /** * i2o_block_reply - Block OSM reply handler. * @c: I2O controller from which the message arrives * @m: message id of reply * @msg: the actual I2O message reply * * This function gets all the message replies. * */ static int i2o_block_reply(struct i2o_controller *c, u32 m, struct i2o_message *msg) { struct request *req; int error = 0; req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt)); if (unlikely(!req)) { osm_err("NULL reply received!\n"); return -1; } /* * Lets see what is cooking. We stuffed the * request in the context. */ if ((le32_to_cpu(msg->body[0]) >> 24) != 0) { u32 status = le32_to_cpu(msg->body[0]); /* * Device not ready means two things. One is that the * the thing went offline (but not a removal media) * * The second is that you have a SuperTrak 100 and the * firmware got constipated. Unlike standard i2o card * setups the supertrak returns an error rather than * blocking for the timeout in these cases. 
* * Don't stick a supertrak100 into cache aggressive modes */ osm_err("TID %03x error status: 0x%02x, detailed status: " "0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff), status >> 24, status & 0xffff); req->errors++; error = -EIO; } i2o_block_end_request(req, error, le32_to_cpu(msg->body[1])); return 1; }; static void i2o_block_event(struct work_struct *work) { struct i2o_event *evt = container_of(work, struct i2o_event, work); osm_debug("event received\n"); kfree(evt); }; /* * SCSI-CAM for ioctl geometry mapping * Duplicated with SCSI - this should be moved into somewhere common * perhaps genhd ? * * LBA -> CHS mapping table taken from: * * "Incorporating the I2O Architecture into BIOS for Intel Architecture * Platforms" * * This is an I2O document that is only available to I2O members, * not developers. * * From my understanding, this is how all the I2O cards do this * * Disk Size | Sectors | Heads | Cylinders * ---------------+---------+-------+------------------- * 1 < X <= 528M | 63 | 16 | X/(63 * 16 * 512) * 528M < X <= 1G | 63 | 32 | X/(63 * 32 * 512) * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512) * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512) * */ #define BLOCK_SIZE_528M 1081344 #define BLOCK_SIZE_1G 2097152 #define BLOCK_SIZE_21G 4403200 #define BLOCK_SIZE_42G 8806400 #define BLOCK_SIZE_84G 17612800 static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls, unsigned char *hds, unsigned char *secs) { unsigned long heads, sectors, cylinders; sectors = 63L; /* Maximize sectors per track */ if (capacity <= BLOCK_SIZE_528M) heads = 16; else if (capacity <= BLOCK_SIZE_1G) heads = 32; else if (capacity <= BLOCK_SIZE_21G) heads = 64; else if (capacity <= BLOCK_SIZE_42G) heads = 128; else heads = 255; cylinders = (unsigned long)capacity / (heads * sectors); *cyls = (unsigned short)cylinders; /* Stuff return values */ *secs = (unsigned char)sectors; *hds = (unsigned char)heads; } /** * i2o_block_open - Open the block device * @bdev: block device 
being opened * @mode: file open mode * * Power up the device, mount and lock the media. This function is called, * if the block device is opened for access. * * Returns 0 on success or negative error code on failure. */ static int i2o_block_open(struct block_device *bdev, fmode_t mode) { struct i2o_block_device *dev = bdev->bd_disk->private_data; if (!dev->i2o_dev) return -ENODEV; mutex_lock(&i2o_block_mutex); if (dev->power > 0x1f) i2o_block_device_power(dev, 0x02); i2o_block_device_mount(dev->i2o_dev, -1); i2o_block_device_lock(dev->i2o_dev, -1); osm_debug("Ready.\n"); mutex_unlock(&i2o_block_mutex); return 0; }; /** * i2o_block_release - Release the I2O block device * @disk: gendisk device being released * @mode: file open mode * * Unlock and unmount the media, and power down the device. Gets called if * the block device is closed. * * Returns 0 on success or negative error code on failure. */ static int i2o_block_release(struct gendisk *disk, fmode_t mode) { struct i2o_block_device *dev = disk->private_data; u8 operation; /* * This is to deail with the case of an application * opening a device and then the device disappears while * it's in use, and then the application tries to release * it. ex: Unmounting a deleted RAID volume at reboot. * If we send messages, it will just cause FAILs since * the TID no longer exists. */ if (!dev->i2o_dev) return 0; mutex_lock(&i2o_block_mutex); i2o_block_device_flush(dev->i2o_dev); i2o_block_device_unlock(dev->i2o_dev, -1); if (dev->flags & (1 << 3 | 1 << 4)) /* Removable */ operation = 0x21; else operation = 0x24; i2o_block_device_power(dev, operation); mutex_unlock(&i2o_block_mutex); return 0; } static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo) { i2o_block_biosparam(get_capacity(bdev->bd_disk), &geo->cylinders, &geo->heads, &geo->sectors); return 0; } /** * i2o_block_ioctl - Issue device specific ioctl calls. 
* @bdev: block device being opened * @mode: file open mode * @cmd: ioctl command * @arg: arg * * Handles ioctl request for the block device. * * Return 0 on success or negative error on failure. */ static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct gendisk *disk = bdev->bd_disk; struct i2o_block_device *dev = disk->private_data; int ret = -ENOTTY; /* Anyone capable of this syscall can do *real bad* things */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; mutex_lock(&i2o_block_mutex); switch (cmd) { case BLKI2OGRSTRAT: ret = put_user(dev->rcache, (int __user *)arg); break; case BLKI2OGWSTRAT: ret = put_user(dev->wcache, (int __user *)arg); break; case BLKI2OSRSTRAT: ret = -EINVAL; if (arg < 0 || arg > CACHE_SMARTFETCH) break; dev->rcache = arg; ret = 0; break; case BLKI2OSWSTRAT: ret = -EINVAL; if (arg != 0 && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK)) break; dev->wcache = arg; ret = 0; break; } mutex_unlock(&i2o_block_mutex); return ret; }; /** * i2o_block_check_events - Have we seen a media change? * @disk: gendisk which should be verified * @clearing: events being cleared * * Verifies if the media has changed. * * Returns 1 if the media was changed or 0 otherwise. */ static unsigned int i2o_block_check_events(struct gendisk *disk, unsigned int clearing) { struct i2o_block_device *p = disk->private_data; if (p->media_change_flag) { p->media_change_flag = 0; return DISK_EVENT_MEDIA_CHANGE; } return 0; } /** * i2o_block_transfer - Transfer a request to/from the I2O controller * @req: the request which should be transferred * * This function converts the request into a I2O message. The necessary * DMA buffers are allocated and after everything is setup post the message * to the I2O controller. No cleanup is done by this function. It is done * on the interrupt side when the reply arrives. * * Return 0 on success or negative error code on failure. 
*/ static int i2o_block_transfer(struct request *req) { struct i2o_block_device *dev = req->rq_disk->private_data; struct i2o_controller *c; u32 tid; struct i2o_message *msg; u32 *mptr; struct i2o_block_request *ireq = req->special; u32 tcntxt; u32 sgl_offset = SGL_OFFSET_8; u32 ctl_flags = 0x00000000; int rc; u32 cmd; if (unlikely(!dev->i2o_dev)) { osm_err("transfer to removed drive\n"); rc = -ENODEV; goto exit; } tid = dev->i2o_dev->lct_data.tid; c = dev->i2o_dev->iop; msg = i2o_msg_get(c); if (IS_ERR(msg)) { rc = PTR_ERR(msg); goto exit; } tcntxt = i2o_cntxt_list_add(c, req); if (!tcntxt) { rc = -ENOMEM; goto nop_msg; } msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context); msg->u.s.tcntxt = cpu_to_le32(tcntxt); mptr = &msg->body[0]; if (rq_data_dir(req) == READ) { cmd = I2O_CMD_BLOCK_READ << 24; switch (dev->rcache) { case CACHE_PREFETCH: ctl_flags = 0x201F0008; break; case CACHE_SMARTFETCH: if (blk_rq_sectors(req) > 16) ctl_flags = 0x201F0008; else ctl_flags = 0x001F0000; break; default: break; } } else { cmd = I2O_CMD_BLOCK_WRITE << 24; switch (dev->wcache) { case CACHE_WRITETHROUGH: ctl_flags = 0x001F0008; break; case CACHE_WRITEBACK: ctl_flags = 0x001F0010; break; case CACHE_SMARTBACK: if (blk_rq_sectors(req) > 16) ctl_flags = 0x001F0004; else ctl_flags = 0x001F0010; break; case CACHE_SMARTTHROUGH: if (blk_rq_sectors(req) > 16) ctl_flags = 0x001F0004; else ctl_flags = 0x001F0010; default: break; } } #ifdef CONFIG_I2O_EXT_ADAPTEC if (c->adaptec) { u8 cmd[10]; u32 scsi_flags; u16 hwsec; hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT; memset(cmd, 0, 10); sgl_offset = SGL_OFFSET_12; msg->u.head[1] = cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid); *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC); *mptr++ = cpu_to_le32(tid); /* * ENABLE_DISCONNECT * SIMPLE_TAG * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME */ if (rq_data_dir(req) == READ) { cmd[0] = READ_10; scsi_flags = 0x60a0000a; } else { cmd[0] = WRITE_10; scsi_flags = 
0xa0a0000a; } *mptr++ = cpu_to_le32(scsi_flags); *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec); *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec); memcpy(mptr, cmd, 10); mptr += 4; *mptr++ = cpu_to_le32(blk_rq_bytes(req)); } else #endif { msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); *mptr++ = cpu_to_le32(ctl_flags); *mptr++ = cpu_to_le32(blk_rq_bytes(req)); *mptr++ = cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT)); *mptr++ = cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT)); } if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { rc = -ENOMEM; goto context_remove; } msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset); list_add_tail(&ireq->queue, &dev->open_queue); dev->open_queue_depth++; i2o_msg_post(c, msg); return 0; context_remove: i2o_cntxt_list_remove(c, req); nop_msg: i2o_msg_nop(c, msg); exit: return rc; }; /** * i2o_block_request_fn - request queue handling function * @q: request queue from which the request could be fetched * * Takes the next request from the queue, transfers it and if no error * occurs dequeue it from the queue. On arrival of the reply the message * will be processed further. If an error occurs requeue the request. 
*/ static void i2o_block_request_fn(struct request_queue *q) { struct request *req; while ((req = blk_peek_request(q)) != NULL) { if (req->cmd_type == REQ_TYPE_FS) { struct i2o_block_delayed_request *dreq; struct i2o_block_request *ireq = req->special; unsigned int queue_depth; queue_depth = ireq->i2o_blk_dev->open_queue_depth; if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) { if (!i2o_block_transfer(req)) { blk_start_request(req); continue; } else osm_info("transfer error\n"); } if (queue_depth) break; /* stop the queue and retry later */ dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC); if (!dreq) continue; dreq->queue = q; INIT_DELAYED_WORK(&dreq->work, i2o_block_delayed_request_fn); if (!queue_delayed_work(i2o_block_driver.event_queue, &dreq->work, I2O_BLOCK_RETRY_TIME)) kfree(dreq); else { blk_stop_queue(q); break; } } else { blk_start_request(req); __blk_end_request_all(req, -EIO); } } }; /* I2O Block device operations definition */ static const struct block_device_operations i2o_block_fops = { .owner = THIS_MODULE, .open = i2o_block_open, .release = i2o_block_release, .ioctl = i2o_block_ioctl, .compat_ioctl = i2o_block_ioctl, .getgeo = i2o_block_getgeo, .check_events = i2o_block_check_events, }; /** * i2o_block_device_alloc - Allocate memory for a I2O Block device * * Allocate memory for the i2o_block_device struct, gendisk and request * queue and initialize them as far as no additional information is needed. * * Returns a pointer to the allocated I2O Block device on success or a * negative error code on failure. 
*/ static struct i2o_block_device *i2o_block_device_alloc(void) { struct i2o_block_device *dev; struct gendisk *gd; struct request_queue *queue; int rc; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { osm_err("Insufficient memory to allocate I2O Block disk.\n"); rc = -ENOMEM; goto exit; } INIT_LIST_HEAD(&dev->open_queue); spin_lock_init(&dev->lock); dev->rcache = CACHE_PREFETCH; dev->wcache = CACHE_WRITEBACK; /* allocate a gendisk with 16 partitions */ gd = alloc_disk(16); if (!gd) { osm_err("Insufficient memory to allocate gendisk.\n"); rc = -ENOMEM; goto cleanup_dev; } /* initialize the request queue */ queue = blk_init_queue(i2o_block_request_fn, &dev->lock); if (!queue) { osm_err("Insufficient memory to allocate request queue.\n"); rc = -ENOMEM; goto cleanup_queue; } blk_queue_prep_rq(queue, i2o_block_prep_req_fn); gd->major = I2O_MAJOR; gd->queue = queue; gd->fops = &i2o_block_fops; gd->private_data = dev; dev->gd = gd; return dev; cleanup_queue: put_disk(gd); cleanup_dev: kfree(dev); exit: return ERR_PTR(rc); }; /** * i2o_block_probe - verify if dev is a I2O Block device and install it * @dev: device to verify if it is a I2O Block device * * We only verify if the user_tid of the device is 0xfff and then install * the device. Otherwise it is used by some other device (e. g. RAID). * * Returns 0 on success or negative error code on failure. 
*/ static int i2o_block_probe(struct device *dev) { struct i2o_device *i2o_dev = to_i2o_device(dev); struct i2o_controller *c = i2o_dev->iop; struct i2o_block_device *i2o_blk_dev; struct gendisk *gd; struct request_queue *queue; static int unit = 0; int rc; u64 size; u32 blocksize; u16 body_size = 4; u16 power; unsigned short max_sectors; #ifdef CONFIG_I2O_EXT_ADAPTEC if (c->adaptec) body_size = 8; #endif if (c->limit_sectors) max_sectors = I2O_MAX_SECTORS_LIMITED; else max_sectors = I2O_MAX_SECTORS; /* skip devices which are used by IOP */ if (i2o_dev->lct_data.user_tid != 0xfff) { osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid); return -ENODEV; } if (i2o_device_claim(i2o_dev)) { osm_warn("Unable to claim device. Installation aborted\n"); rc = -EFAULT; goto exit; } i2o_blk_dev = i2o_block_device_alloc(); if (IS_ERR(i2o_blk_dev)) { osm_err("could not alloc a new I2O block device"); rc = PTR_ERR(i2o_blk_dev); goto claim_release; } i2o_blk_dev->i2o_dev = i2o_dev; dev_set_drvdata(dev, i2o_blk_dev); /* setup gendisk */ gd = i2o_blk_dev->gd; gd->first_minor = unit << 4; sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit); gd->driverfs_dev = &i2o_dev->device; /* setup request queue */ queue = gd->queue; queue->queuedata = i2o_blk_dev; blk_queue_max_hw_sectors(queue, max_sectors); blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size)); osm_debug("max sectors = %d\n", queue->max_sectors); osm_debug("phys segments = %d\n", queue->max_phys_segments); osm_debug("max hw segments = %d\n", queue->max_hw_segments); /* * Ask for the current media data. 
If that isn't supported * then we ask for the device capacity data */ if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { blk_queue_logical_block_size(queue, le32_to_cpu(blocksize)); } else osm_warn("unable to get blocksize of %s\n", gd->disk_name); if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) || !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT); } else osm_warn("could not get size of %s\n", gd->disk_name); if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2)) i2o_blk_dev->power = power; i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); add_disk(gd); unit++; osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid, i2o_blk_dev->gd->disk_name); return 0; claim_release: i2o_device_claim_release(i2o_dev); exit: return rc; }; /* Block OSM driver struct */ static struct i2o_driver i2o_block_driver = { .name = OSM_NAME, .event = i2o_block_event, .reply = i2o_block_reply, .classes = i2o_block_class_id, .driver = { .probe = i2o_block_probe, .remove = i2o_block_remove, }, }; /** * i2o_block_init - Block OSM initialization function * * Allocate the slab and mempool for request structs, registers i2o_block * block device and finally register the Block OSM in the I2O core. * * Returns 0 on success or negative error code on failure. 
*/ static int __init i2o_block_init(void) { int rc; int size; printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); /* Allocate request mempool and slab */ size = sizeof(struct i2o_block_request); i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0, SLAB_HWCACHE_ALIGN, NULL); if (!i2o_blk_req_pool.slab) { osm_err("can't init request slab\n"); rc = -ENOMEM; goto exit; } i2o_blk_req_pool.pool = mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE, i2o_blk_req_pool.slab); if (!i2o_blk_req_pool.pool) { osm_err("can't init request mempool\n"); rc = -ENOMEM; goto free_slab; } /* Register the block device interfaces */ rc = register_blkdev(I2O_MAJOR, "i2o_block"); if (rc) { osm_err("unable to register block device\n"); goto free_mempool; } #ifdef MODULE osm_info("registered device at major %d\n", I2O_MAJOR); #endif /* Register Block OSM into I2O core */ rc = i2o_driver_register(&i2o_block_driver); if (rc) { osm_err("Could not register Block driver\n"); goto unregister_blkdev; } return 0; unregister_blkdev: unregister_blkdev(I2O_MAJOR, "i2o_block"); free_mempool: mempool_destroy(i2o_blk_req_pool.pool); free_slab: kmem_cache_destroy(i2o_blk_req_pool.slab); exit: return rc; }; /** * i2o_block_exit - Block OSM exit function * * Unregisters Block OSM from I2O core, unregisters i2o_block block device * and frees the mempool and slab. */ static void __exit i2o_block_exit(void) { /* Unregister I2O Block OSM from I2O core */ i2o_driver_unregister(&i2o_block_driver); /* Unregister block device */ unregister_blkdev(I2O_MAJOR, "i2o_block"); /* Free request mempool and slab */ mempool_destroy(i2o_blk_req_pool.pool); kmem_cache_destroy(i2o_blk_req_pool.slab); }; MODULE_AUTHOR("Red Hat"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(OSM_DESCRIPTION); MODULE_VERSION(OSM_VERSION); module_init(i2o_block_init); module_exit(i2o_block_exit);
gpl-2.0
ShieldKteam/shield_osprey
net/rds/ib_stats.c
12503
2903
/* * Copyright (c) 2006 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/percpu.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include "rds.h" #include "ib.h" DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats); static const char *const rds_ib_stat_names[] = { "ib_connect_raced", "ib_listen_closed_stale", "ib_tx_cq_call", "ib_tx_cq_event", "ib_tx_ring_full", "ib_tx_throttle", "ib_tx_sg_mapping_failure", "ib_tx_stalled", "ib_tx_credit_updates", "ib_rx_cq_call", "ib_rx_cq_event", "ib_rx_ring_empty", "ib_rx_refill_from_cq", "ib_rx_refill_from_thread", "ib_rx_alloc_limit", "ib_rx_credit_updates", "ib_ack_sent", "ib_ack_send_failure", "ib_ack_send_delayed", "ib_ack_send_piggybacked", "ib_ack_received", "ib_rdma_mr_alloc", "ib_rdma_mr_free", "ib_rdma_mr_used", "ib_rdma_mr_pool_flush", "ib_rdma_mr_pool_wait", "ib_rdma_mr_pool_depleted", "ib_atomic_cswp", "ib_atomic_fadd", }; unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter, unsigned int avail) { struct rds_ib_statistics stats = {0, }; uint64_t *src; uint64_t *sum; size_t i; int cpu; if (avail < ARRAY_SIZE(rds_ib_stat_names)) goto out; for_each_online_cpu(cpu) { src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu)); sum = (uint64_t *)&stats; for (i = 0; i < sizeof(stats) / sizeof(uint64_t); i++) *(sum++) += *(src++); } rds_stats_info_copy(iter, (uint64_t *)&stats, rds_ib_stat_names, ARRAY_SIZE(rds_ib_stat_names)); out: return ARRAY_SIZE(rds_ib_stat_names); }
gpl-2.0
SlimSaber/android_kernel_oppo_msm8974
drivers/infiniband/core/uverbs_main.c
216
25164
/* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/file.h> #include <linux/cdev.h> #include <linux/anon_inodes.h> #include <linux/slab.h> #include <asm/uaccess.h> #include "uverbs.h" MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand userspace verbs access"); MODULE_LICENSE("Dual BSD/GPL"); enum { IB_UVERBS_MAJOR = 231, IB_UVERBS_BASE_MINOR = 192, IB_UVERBS_MAX_DEVICES = 32 }; #define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR) static struct class *uverbs_class; DEFINE_SPINLOCK(ib_uverbs_idr_lock); DEFINE_IDR(ib_uverbs_pd_idr); DEFINE_IDR(ib_uverbs_mr_idr); DEFINE_IDR(ib_uverbs_mw_idr); DEFINE_IDR(ib_uverbs_ah_idr); DEFINE_IDR(ib_uverbs_cq_idr); DEFINE_IDR(ib_uverbs_qp_idr); DEFINE_IDR(ib_uverbs_srq_idr); DEFINE_IDR(ib_uverbs_xrcd_idr); static DEFINE_SPINLOCK(map_lock); static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, const char __user *buf, int in_len, int out_len) = { [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context, [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device, [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port, [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd, [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd, [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr, [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr, [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel, [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq, [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq, [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq, [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq, [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq, [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp, [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp, [IB_USER_VERBS_CMD_MODIFY_QP] = 
ib_uverbs_modify_qp, [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send, [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv, [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv, [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah, [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah, [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq, [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, [IB_USER_VERBS_CMD_OPEN_XRCD] = ib_uverbs_open_xrcd, [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd, [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq, [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp }; static void ib_uverbs_add_one(struct ib_device *device); static void ib_uverbs_remove_one(struct ib_device *device); static void ib_uverbs_release_dev(struct kobject *kobj) { struct ib_uverbs_device *dev = container_of(kobj, struct ib_uverbs_device, kobj); kfree(dev); } static struct kobj_type ib_uverbs_dev_ktype = { .release = ib_uverbs_release_dev, }; static void ib_uverbs_release_event_file(struct kref *ref) { struct ib_uverbs_event_file *file = container_of(ref, struct ib_uverbs_event_file, ref); kfree(file); } void ib_uverbs_release_ucq(struct ib_uverbs_file *file, struct ib_uverbs_event_file *ev_file, struct ib_ucq_object *uobj) { struct ib_uverbs_event *evt, *tmp; if (ev_file) { spin_lock_irq(&ev_file->lock); list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) { list_del(&evt->list); kfree(evt); } spin_unlock_irq(&ev_file->lock); kref_put(&ev_file->ref, ib_uverbs_release_event_file); } spin_lock_irq(&file->async_file->lock); list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) { list_del(&evt->list); kfree(evt); } 
spin_unlock_irq(&file->async_file->lock); } void ib_uverbs_release_uevent(struct ib_uverbs_file *file, struct ib_uevent_object *uobj) { struct ib_uverbs_event *evt, *tmp; spin_lock_irq(&file->async_file->lock); list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) { list_del(&evt->list); kfree(evt); } spin_unlock_irq(&file->async_file->lock); } static void ib_uverbs_detach_umcast(struct ib_qp *qp, struct ib_uqp_object *uobj) { struct ib_uverbs_mcast_entry *mcast, *tmp; list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) { ib_detach_mcast(qp, &mcast->gid, mcast->lid); list_del(&mcast->list); kfree(mcast); } } static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, struct ib_ucontext *context) { struct ib_uobject *uobj, *tmp; if (!context) return 0; context->closing = 1; list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) { struct ib_ah *ah = uobj->object; idr_remove_uobj(&ib_uverbs_ah_idr, uobj); ib_destroy_ah(ah); kfree(uobj); } list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) { struct ib_qp *qp = uobj->object; struct ib_uqp_object *uqp = container_of(uobj, struct ib_uqp_object, uevent.uobject); idr_remove_uobj(&ib_uverbs_qp_idr, uobj); if (qp != qp->real_qp) { ib_close_qp(qp); } else { ib_uverbs_detach_umcast(qp, uqp); ib_destroy_qp(qp); } ib_uverbs_release_uevent(file, &uqp->uevent); kfree(uqp); } list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) { struct ib_cq *cq = uobj->object; struct ib_uverbs_event_file *ev_file = cq->cq_context; struct ib_ucq_object *ucq = container_of(uobj, struct ib_ucq_object, uobject); idr_remove_uobj(&ib_uverbs_cq_idr, uobj); ib_destroy_cq(cq); ib_uverbs_release_ucq(file, ev_file, ucq); kfree(ucq); } list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) { struct ib_srq *srq = uobj->object; struct ib_uevent_object *uevent = container_of(uobj, struct ib_uevent_object, uobject); idr_remove_uobj(&ib_uverbs_srq_idr, uobj); ib_destroy_srq(srq); 
ib_uverbs_release_uevent(file, uevent); kfree(uevent); } /* XXX Free MWs */ list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) { struct ib_mr *mr = uobj->object; idr_remove_uobj(&ib_uverbs_mr_idr, uobj); ib_dereg_mr(mr); kfree(uobj); } mutex_lock(&file->device->xrcd_tree_mutex); list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) { struct ib_xrcd *xrcd = uobj->object; struct ib_uxrcd_object *uxrcd = container_of(uobj, struct ib_uxrcd_object, uobject); idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj); ib_uverbs_dealloc_xrcd(file->device, xrcd); kfree(uxrcd); } mutex_unlock(&file->device->xrcd_tree_mutex); list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) { struct ib_pd *pd = uobj->object; idr_remove_uobj(&ib_uverbs_pd_idr, uobj); ib_dealloc_pd(pd); kfree(uobj); } return context->device->dealloc_ucontext(context); } static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev) { complete(&dev->comp); } static void ib_uverbs_release_file(struct kref *ref) { struct ib_uverbs_file *file = container_of(ref, struct ib_uverbs_file, ref); module_put(file->device->ib_dev->owner); if (atomic_dec_and_test(&file->device->refcount)) ib_uverbs_comp_dev(file->device); kfree(file); } static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct ib_uverbs_event_file *file = filp->private_data; struct ib_uverbs_event *event; int eventsz; int ret = 0; spin_lock_irq(&file->lock); while (list_empty(&file->event_list)) { spin_unlock_irq(&file->lock); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(file->poll_wait, !list_empty(&file->event_list))) return -ERESTARTSYS; spin_lock_irq(&file->lock); } event = list_entry(file->event_list.next, struct ib_uverbs_event, list); if (file->is_async) eventsz = sizeof (struct ib_uverbs_async_event_desc); else eventsz = sizeof (struct ib_uverbs_comp_event_desc); if (eventsz > count) { ret = -EINVAL; event = NULL; } else { 
list_del(file->event_list.next); if (event->counter) { ++(*event->counter); list_del(&event->obj_list); } } spin_unlock_irq(&file->lock); if (event) { if (copy_to_user(buf, event, eventsz)) ret = -EFAULT; else ret = eventsz; } kfree(event); return ret; } static unsigned int ib_uverbs_event_poll(struct file *filp, struct poll_table_struct *wait) { unsigned int pollflags = 0; struct ib_uverbs_event_file *file = filp->private_data; poll_wait(filp, &file->poll_wait, wait); spin_lock_irq(&file->lock); if (!list_empty(&file->event_list)) pollflags = POLLIN | POLLRDNORM; spin_unlock_irq(&file->lock); return pollflags; } static int ib_uverbs_event_fasync(int fd, struct file *filp, int on) { struct ib_uverbs_event_file *file = filp->private_data; return fasync_helper(fd, filp, on, &file->async_queue); } static int ib_uverbs_event_close(struct inode *inode, struct file *filp) { struct ib_uverbs_event_file *file = filp->private_data; struct ib_uverbs_event *entry, *tmp; spin_lock_irq(&file->lock); file->is_closed = 1; list_for_each_entry_safe(entry, tmp, &file->event_list, list) { if (entry->counter) list_del(&entry->obj_list); kfree(entry); } spin_unlock_irq(&file->lock); if (file->is_async) { ib_unregister_event_handler(&file->uverbs_file->event_handler); kref_put(&file->uverbs_file->ref, ib_uverbs_release_file); } kref_put(&file->ref, ib_uverbs_release_event_file); return 0; } static const struct file_operations uverbs_event_fops = { .owner = THIS_MODULE, .read = ib_uverbs_event_read, .poll = ib_uverbs_event_poll, .release = ib_uverbs_event_close, .fasync = ib_uverbs_event_fasync, .llseek = no_llseek, }; void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context) { struct ib_uverbs_event_file *file = cq_context; struct ib_ucq_object *uobj; struct ib_uverbs_event *entry; unsigned long flags; if (!file) return; spin_lock_irqsave(&file->lock, flags); if (file->is_closed) { spin_unlock_irqrestore(&file->lock, flags); return; } entry = kmalloc(sizeof *entry, GFP_ATOMIC); if 
(!entry) { spin_unlock_irqrestore(&file->lock, flags); return; } uobj = container_of(cq->uobject, struct ib_ucq_object, uobject); entry->desc.comp.cq_handle = cq->uobject->user_handle; entry->counter = &uobj->comp_events_reported; list_add_tail(&entry->list, &file->event_list); list_add_tail(&entry->obj_list, &uobj->comp_list); spin_unlock_irqrestore(&file->lock, flags); wake_up_interruptible(&file->poll_wait); kill_fasync(&file->async_queue, SIGIO, POLL_IN); } static void ib_uverbs_async_handler(struct ib_uverbs_file *file, __u64 element, __u64 event, struct list_head *obj_list, u32 *counter) { struct ib_uverbs_event *entry; unsigned long flags; spin_lock_irqsave(&file->async_file->lock, flags); if (file->async_file->is_closed) { spin_unlock_irqrestore(&file->async_file->lock, flags); return; } entry = kmalloc(sizeof *entry, GFP_ATOMIC); if (!entry) { spin_unlock_irqrestore(&file->async_file->lock, flags); return; } entry->desc.async.element = element; entry->desc.async.event_type = event; entry->desc.async.reserved = 0; entry->counter = counter; list_add_tail(&entry->list, &file->async_file->event_list); if (obj_list) list_add_tail(&entry->obj_list, obj_list); spin_unlock_irqrestore(&file->async_file->lock, flags); wake_up_interruptible(&file->async_file->poll_wait); kill_fasync(&file->async_file->async_queue, SIGIO, POLL_IN); } void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr) { struct ib_ucq_object *uobj = container_of(event->element.cq->uobject, struct ib_ucq_object, uobject); ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle, event->event, &uobj->async_list, &uobj->async_events_reported); } void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr) { struct ib_uevent_object *uobj; uobj = container_of(event->element.qp->uobject, struct ib_uevent_object, uobject); ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, event->event, &uobj->event_list, &uobj->events_reported); } void 
ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr) { struct ib_uevent_object *uobj; uobj = container_of(event->element.srq->uobject, struct ib_uevent_object, uobject); ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, event->event, &uobj->event_list, &uobj->events_reported); } void ib_uverbs_event_handler(struct ib_event_handler *handler, struct ib_event *event) { struct ib_uverbs_file *file = container_of(handler, struct ib_uverbs_file, event_handler); ib_uverbs_async_handler(file, event->element.port_num, event->event, NULL, NULL); } struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file, int is_async) { struct ib_uverbs_event_file *ev_file; struct file *filp; ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL); if (!ev_file) return ERR_PTR(-ENOMEM); kref_init(&ev_file->ref); spin_lock_init(&ev_file->lock); INIT_LIST_HEAD(&ev_file->event_list); init_waitqueue_head(&ev_file->poll_wait); ev_file->uverbs_file = uverbs_file; ev_file->async_queue = NULL; ev_file->is_async = is_async; ev_file->is_closed = 0; filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops, ev_file, O_RDONLY); if (IS_ERR(filp)) kfree(ev_file); return filp; } /* * Look up a completion event file by FD. If lookup is successful, * takes a ref to the event file struct that it returns; if * unsuccessful, returns NULL. 
*/ struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd) { struct ib_uverbs_event_file *ev_file = NULL; struct file *filp; filp = fget(fd); if (!filp) return NULL; if (filp->f_op != &uverbs_event_fops) goto out; ev_file = filp->private_data; if (ev_file->is_async) { ev_file = NULL; goto out; } kref_get(&ev_file->ref); out: fput(filp); return ev_file; } static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct ib_uverbs_file *file = filp->private_data; struct ib_uverbs_cmd_hdr hdr; if (count < sizeof hdr) return -EINVAL; if (copy_from_user(&hdr, buf, sizeof hdr)) return -EFAULT; if (hdr.in_words * 4 != count) return -EINVAL; if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || !uverbs_cmd_table[hdr.command]) return -EINVAL; if (!file->ucontext && hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT) return -EINVAL; if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) return -ENOSYS; return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr, hdr.in_words * 4, hdr.out_words * 4); } static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) { struct ib_uverbs_file *file = filp->private_data; if (!file->ucontext) return -ENODEV; else return file->device->ib_dev->mmap(file->ucontext, vma); } /* * ib_uverbs_open() does not need the BKL: * * - the ib_uverbs_device structures are properly reference counted and * everything else is purely local to the file being created, so * races against other open calls are not a problem; * - there is no ioctl method to race against; * - the open method will either immediately run -ENXIO, or all * required initialization will be done. 
*/ static int ib_uverbs_open(struct inode *inode, struct file *filp) { struct ib_uverbs_device *dev; struct ib_uverbs_file *file; int ret; dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev); if (!atomic_inc_not_zero(&dev->refcount)) return -ENXIO; if (!try_module_get(dev->ib_dev->owner)) { ret = -ENODEV; goto err; } file = kmalloc(sizeof *file, GFP_KERNEL); if (!file) { ret = -ENOMEM; goto err_module; } file->device = dev; file->ucontext = NULL; file->async_file = NULL; kref_init(&file->ref); mutex_init(&file->mutex); filp->private_data = file; kobject_get(&dev->kobj); return nonseekable_open(inode, filp); err_module: module_put(dev->ib_dev->owner); err: if (atomic_dec_and_test(&dev->refcount)) ib_uverbs_comp_dev(dev); return ret; } static int ib_uverbs_close(struct inode *inode, struct file *filp) { struct ib_uverbs_file *file = filp->private_data; struct ib_uverbs_device *dev = file->device; ib_uverbs_cleanup_ucontext(file, file->ucontext); if (file->async_file) kref_put(&file->async_file->ref, ib_uverbs_release_event_file); kref_put(&file->ref, ib_uverbs_release_file); kobject_put(&dev->kobj); return 0; } static const struct file_operations uverbs_fops = { .owner = THIS_MODULE, .write = ib_uverbs_write, .open = ib_uverbs_open, .release = ib_uverbs_close, .llseek = no_llseek, }; static const struct file_operations uverbs_mmap_fops = { .owner = THIS_MODULE, .write = ib_uverbs_write, .mmap = ib_uverbs_mmap, .open = ib_uverbs_open, .release = ib_uverbs_close, .llseek = no_llseek, }; static struct ib_client uverbs_client = { .name = "uverbs", .add = ib_uverbs_add_one, .remove = ib_uverbs_remove_one }; static ssize_t show_ibdev(struct device *device, struct device_attribute *attr, char *buf) { struct ib_uverbs_device *dev = dev_get_drvdata(device); if (!dev) return -ENODEV; return sprintf(buf, "%s\n", dev->ib_dev->name); } static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); static ssize_t show_dev_abi_version(struct device *device, struct 
device_attribute *attr, char *buf) { struct ib_uverbs_device *dev = dev_get_drvdata(device); if (!dev) return -ENODEV; return sprintf(buf, "%d\n", dev->ib_dev->uverbs_abi_ver); } static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL); static CLASS_ATTR_STRING(abi_version, S_IRUGO, __stringify(IB_USER_VERBS_ABI_VERSION)); static dev_t overflow_maj; static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES); /* * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by * requesting a new major number and doubling the number of max devices we * support. It's stupid, but simple. */ static int find_overflow_devnum(void) { int ret; if (!overflow_maj) { ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES, "infiniband_verbs"); if (ret) { printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n"); return ret; } } ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES); if (ret >= IB_UVERBS_MAX_DEVICES) return -1; return ret; } static void ib_uverbs_add_one(struct ib_device *device) { int devnum; dev_t base; struct ib_uverbs_device *uverbs_dev; if (!device->alloc_ucontext) return; uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL); if (!uverbs_dev) return; atomic_set(&uverbs_dev->refcount, 1); init_completion(&uverbs_dev->comp); uverbs_dev->xrcd_tree = RB_ROOT; mutex_init(&uverbs_dev->xrcd_tree_mutex); kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype); spin_lock(&map_lock); devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); if (devnum >= IB_UVERBS_MAX_DEVICES) { spin_unlock(&map_lock); devnum = find_overflow_devnum(); if (devnum < 0) goto err; spin_lock(&map_lock); uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES; base = devnum + overflow_maj; set_bit(devnum, overflow_map); } else { uverbs_dev->devnum = devnum; base = devnum + IB_UVERBS_BASE_DEV; set_bit(devnum, dev_map); } spin_unlock(&map_lock); uverbs_dev->ib_dev = device; uverbs_dev->num_comp_vectors = device->num_comp_vectors; 
cdev_init(&uverbs_dev->cdev, NULL); uverbs_dev->cdev.owner = THIS_MODULE; uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops; uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj; kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum); if (cdev_add(&uverbs_dev->cdev, base, 1)) goto err_cdev; uverbs_dev->dev = device_create(uverbs_class, device->dma_device, uverbs_dev->cdev.dev, uverbs_dev, "uverbs%d", uverbs_dev->devnum); if (IS_ERR(uverbs_dev->dev)) goto err_cdev; if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev)) goto err_class; if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version)) goto err_class; ib_set_client_data(device, &uverbs_client, uverbs_dev); return; err_class: device_destroy(uverbs_class, uverbs_dev->cdev.dev); err_cdev: cdev_del(&uverbs_dev->cdev); if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES) clear_bit(devnum, dev_map); else clear_bit(devnum, overflow_map); err: if (atomic_dec_and_test(&uverbs_dev->refcount)) ib_uverbs_comp_dev(uverbs_dev); wait_for_completion(&uverbs_dev->comp); kobject_put(&uverbs_dev->kobj); return; } static void ib_uverbs_remove_one(struct ib_device *device) { struct ib_uverbs_device *uverbs_dev = ib_get_client_data(device, &uverbs_client); if (!uverbs_dev) return; dev_set_drvdata(uverbs_dev->dev, NULL); device_destroy(uverbs_class, uverbs_dev->cdev.dev); cdev_del(&uverbs_dev->cdev); if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES) clear_bit(uverbs_dev->devnum, dev_map); else clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map); if (atomic_dec_and_test(&uverbs_dev->refcount)) ib_uverbs_comp_dev(uverbs_dev); wait_for_completion(&uverbs_dev->comp); kobject_put(&uverbs_dev->kobj); } static char *uverbs_devnode(struct device *dev, umode_t *mode) { if (mode) *mode = 0666; return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); } static int __init ib_uverbs_init(void) { int ret; ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES, 
"infiniband_verbs"); if (ret) { printk(KERN_ERR "user_verbs: couldn't register device number\n"); goto out; } uverbs_class = class_create(THIS_MODULE, "infiniband_verbs"); if (IS_ERR(uverbs_class)) { ret = PTR_ERR(uverbs_class); printk(KERN_ERR "user_verbs: couldn't create class infiniband_verbs\n"); goto out_chrdev; } uverbs_class->devnode = uverbs_devnode; ret = class_create_file(uverbs_class, &class_attr_abi_version.attr); if (ret) { printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n"); goto out_class; } ret = ib_register_client(&uverbs_client); if (ret) { printk(KERN_ERR "user_verbs: couldn't register client\n"); goto out_class; } return 0; out_class: class_destroy(uverbs_class); out_chrdev: unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES); out: return ret; } static void __exit ib_uverbs_cleanup(void) { ib_unregister_client(&uverbs_client); class_destroy(uverbs_class); unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES); if (overflow_maj) unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES); idr_destroy(&ib_uverbs_pd_idr); idr_destroy(&ib_uverbs_mr_idr); idr_destroy(&ib_uverbs_mw_idr); idr_destroy(&ib_uverbs_ah_idr); idr_destroy(&ib_uverbs_cq_idr); idr_destroy(&ib_uverbs_qp_idr); idr_destroy(&ib_uverbs_srq_idr); } module_init(ib_uverbs_init); module_exit(ib_uverbs_cleanup);
gpl-2.0
thebohemian/android-buglabs-kernel
kernel/kexec.c
472
38419
/* * kexec.c - kexec system call * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <linux/capability.h> #include <linux/mm.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/kexec.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/highmem.h> #include <linux/syscalls.h> #include <linux/reboot.h> #include <linux/ioport.h> #include <linux/hardirq.h> #include <linux/elf.h> #include <linux/elfcore.h> #include <linux/utsrelease.h> #include <linux/utsname.h> #include <linux/numa.h> #include <linux/suspend.h> #include <linux/device.h> #include <linux/freezer.h> #include <linux/pm.h> #include <linux/cpu.h> #include <linux/console.h> #include <linux/vmalloc.h> #include <asm/page.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/system.h> #include <asm/sections.h> /* Per cpu memory for storing cpu states in case of system crash. */ note_buf_t* crash_notes; /* vmcoreinfo stuff */ static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES]; u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; size_t vmcoreinfo_size; size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); /* Location of the reserved area for the crash kernel */ struct resource crashk_res = { .name = "Crash kernel", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; int kexec_should_crash(struct task_struct *p) { if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops) return 1; return 0; } /* * When kexec transitions to the new kernel there is a one-to-one * mapping between physical and virtual addresses. On processors * where you can disable the MMU this is trivial, and easy. For * others it is still a simple predictable page table to setup. * * In that environment kexec copies the new kernel to its final * resting place. 
This means I can only support memory whose * physical address can fit in an unsigned long. In particular * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled. * If the assembly stub has more restrictive requirements * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be * defined more restrictively in <asm/kexec.h>. * * The code for the transition from the current kernel to the * the new kernel is placed in the control_code_buffer, whose size * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single * page of memory is necessary, but some architectures require more. * Because this memory must be identity mapped in the transition from * virtual to physical addresses it must live in the range * 0 - TASK_SIZE, as only the user space mappings are arbitrarily * modifiable. * * The assembly stub in the control code buffer is passed a linked list * of descriptor pages detailing the source pages of the new kernel, * and the destination addresses of those source pages. As this data * structure is not used in the context of the current OS, it must * be self-contained. * * The code has been made to work with highmem pages and will use a * destination page in its final resting place (if it happens * to allocate it). The end product of this is that most of the * physical address space, and most of RAM can be used. * * Future directions include: * - allocating a page table with the control code buffer identity * mapped, to simplify machine_kexec and make kexec_on_panic more * reliable. */ /* * KIMAGE_NO_DEST is an impossible destination address..., for * allocating pages whose destination address we do not care about. 
*/ #define KIMAGE_NO_DEST (-1UL) static int kimage_is_destination_range(struct kimage *image, unsigned long start, unsigned long end); static struct page *kimage_alloc_page(struct kimage *image, gfp_t gfp_mask, unsigned long dest); static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, unsigned long nr_segments, struct kexec_segment __user *segments) { size_t segment_bytes; struct kimage *image; unsigned long i; int result; /* Allocate a controlling structure */ result = -ENOMEM; image = kzalloc(sizeof(*image), GFP_KERNEL); if (!image) goto out; image->head = 0; image->entry = &image->head; image->last_entry = &image->head; image->control_page = ~0; /* By default this does not apply */ image->start = entry; image->type = KEXEC_TYPE_DEFAULT; /* Initialize the list of control pages */ INIT_LIST_HEAD(&image->control_pages); /* Initialize the list of destination pages */ INIT_LIST_HEAD(&image->dest_pages); /* Initialize the list of unuseable pages */ INIT_LIST_HEAD(&image->unuseable_pages); /* Read in the segments */ image->nr_segments = nr_segments; segment_bytes = nr_segments * sizeof(*segments); result = copy_from_user(image->segment, segments, segment_bytes); if (result) goto out; /* * Verify we have good destination addresses. The caller is * responsible for making certain we don't attempt to load * the new image into invalid or reserved areas of RAM. This * just verifies it is an address we can use. * * Since the kernel does everything in page size chunks ensure * the destination addreses are page aligned. Too many * special cases crop of when we don't do this. The most * insidious is getting overlapping destination addresses * simply because addresses are changed to page size * granularity. 
*/ result = -EADDRNOTAVAIL; for (i = 0; i < nr_segments; i++) { unsigned long mstart, mend; mstart = image->segment[i].mem; mend = mstart + image->segment[i].memsz; if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK)) goto out; if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT) goto out; } /* Verify our destination addresses do not overlap. * If we alloed overlapping destination addresses * through very weird things can happen with no * easy explanation as one segment stops on another. */ result = -EINVAL; for (i = 0; i < nr_segments; i++) { unsigned long mstart, mend; unsigned long j; mstart = image->segment[i].mem; mend = mstart + image->segment[i].memsz; for (j = 0; j < i; j++) { unsigned long pstart, pend; pstart = image->segment[j].mem; pend = pstart + image->segment[j].memsz; /* Do the segments overlap ? */ if ((mend > pstart) && (mstart < pend)) goto out; } } /* Ensure our buffer sizes are strictly less than * our memory sizes. This should always be the case, * and it is easier to check up front than to be surprised * later on. */ result = -EINVAL; for (i = 0; i < nr_segments; i++) { if (image->segment[i].bufsz > image->segment[i].memsz) goto out; } result = 0; out: if (result == 0) *rimage = image; else kfree(image); return result; } static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, unsigned long nr_segments, struct kexec_segment __user *segments) { int result; struct kimage *image; /* Allocate and initialize a controlling structure */ image = NULL; result = do_kimage_alloc(&image, entry, nr_segments, segments); if (result) goto out; *rimage = image; /* * Find a location for the control code buffer, and add it * the vector of segments so that it's pages will also be * counted as destination pages. 
*/ result = -ENOMEM; image->control_code_page = kimage_alloc_control_pages(image, get_order(KEXEC_CONTROL_PAGE_SIZE)); if (!image->control_code_page) { printk(KERN_ERR "Could not allocate control_code_buffer\n"); goto out; } image->swap_page = kimage_alloc_control_pages(image, 0); if (!image->swap_page) { printk(KERN_ERR "Could not allocate swap buffer\n"); goto out; } result = 0; out: if (result == 0) *rimage = image; else kfree(image); return result; } static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, unsigned long nr_segments, struct kexec_segment __user *segments) { int result; struct kimage *image; unsigned long i; image = NULL; /* Verify we have a valid entry point */ if ((entry < crashk_res.start) || (entry > crashk_res.end)) { result = -EADDRNOTAVAIL; goto out; } /* Allocate and initialize a controlling structure */ result = do_kimage_alloc(&image, entry, nr_segments, segments); if (result) goto out; /* Enable the special crash kernel control page * allocation policy. */ image->control_page = crashk_res.start; image->type = KEXEC_TYPE_CRASH; /* * Verify we have good destination addresses. Normally * the caller is responsible for making certain we don't * attempt to load the new image into invalid or reserved * areas of RAM. But crash kernels are preloaded into a * reserved area of ram. We must ensure the addresses * are in the reserved area otherwise preloading the * kernel could corrupt things. */ result = -EADDRNOTAVAIL; for (i = 0; i < nr_segments; i++) { unsigned long mstart, mend; mstart = image->segment[i].mem; mend = mstart + image->segment[i].memsz - 1; /* Ensure we are within the crash kernel limits */ if ((mstart < crashk_res.start) || (mend > crashk_res.end)) goto out; } /* * Find a location for the control code buffer, and add * the vector of segments so that it's pages will also be * counted as destination pages. 
*/ result = -ENOMEM; image->control_code_page = kimage_alloc_control_pages(image, get_order(KEXEC_CONTROL_PAGE_SIZE)); if (!image->control_code_page) { printk(KERN_ERR "Could not allocate control_code_buffer\n"); goto out; } result = 0; out: if (result == 0) *rimage = image; else kfree(image); return result; } static int kimage_is_destination_range(struct kimage *image, unsigned long start, unsigned long end) { unsigned long i; for (i = 0; i < image->nr_segments; i++) { unsigned long mstart, mend; mstart = image->segment[i].mem; mend = mstart + image->segment[i].memsz; if ((end > mstart) && (start < mend)) return 1; } return 0; } static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) { struct page *pages; pages = alloc_pages(gfp_mask, order); if (pages) { unsigned int count, i; pages->mapping = NULL; set_page_private(pages, order); count = 1 << order; for (i = 0; i < count; i++) SetPageReserved(pages + i); } return pages; } static void kimage_free_pages(struct page *page) { unsigned int order, count, i; order = page_private(page); count = 1 << order; for (i = 0; i < count; i++) ClearPageReserved(page + i); __free_pages(page, order); } static void kimage_free_page_list(struct list_head *list) { struct list_head *pos, *next; list_for_each_safe(pos, next, list) { struct page *page; page = list_entry(pos, struct page, lru); list_del(&page->lru); kimage_free_pages(page); } } static struct page *kimage_alloc_normal_control_pages(struct kimage *image, unsigned int order) { /* Control pages are special, they are the intermediaries * that are needed while we copy the rest of the pages * to their final resting place. As such they must * not conflict with either the destination addresses * or memory the kernel is already using. * * The only case where we really need more than one of * these are for architectures where we cannot disable * the MMU and must instead generate an identity mapped * page table for all of the memory. 
* * At worst this runs in O(N) of the image size. */ struct list_head extra_pages; struct page *pages; unsigned int count; count = 1 << order; INIT_LIST_HEAD(&extra_pages); /* Loop while I can allocate a page and the page allocated * is a destination page. */ do { unsigned long pfn, epfn, addr, eaddr; pages = kimage_alloc_pages(GFP_KERNEL, order); if (!pages) break; pfn = page_to_pfn(pages); epfn = pfn + count; addr = pfn << PAGE_SHIFT; eaddr = epfn << PAGE_SHIFT; if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) || kimage_is_destination_range(image, addr, eaddr)) { list_add(&pages->lru, &extra_pages); pages = NULL; } } while (!pages); if (pages) { /* Remember the allocated page... */ list_add(&pages->lru, &image->control_pages); /* Because the page is already in it's destination * location we will never allocate another page at * that address. Therefore kimage_alloc_pages * will not return it (again) and we don't need * to give it an entry in image->segment[]. */ } /* Deal with the destination pages I have inadvertently allocated. * * Ideally I would convert multi-page allocations into single * page allocations, and add everyting to image->dest_pages. * * For now it is simpler to just free the pages. */ kimage_free_page_list(&extra_pages); return pages; } static struct page *kimage_alloc_crash_control_pages(struct kimage *image, unsigned int order) { /* Control pages are special, they are the intermediaries * that are needed while we copy the rest of the pages * to their final resting place. As such they must * not conflict with either the destination addresses * or memory the kernel is already using. * * Control pages are also the only pags we must allocate * when loading a crash kernel. All of the other pages * are specified by the segments and we just memcpy * into them directly. 
* * The only case where we really need more than one of * these are for architectures where we cannot disable * the MMU and must instead generate an identity mapped * page table for all of the memory. * * Given the low demand this implements a very simple * allocator that finds the first hole of the appropriate * size in the reserved memory region, and allocates all * of the memory up to and including the hole. */ unsigned long hole_start, hole_end, size; struct page *pages; pages = NULL; size = (1 << order) << PAGE_SHIFT; hole_start = (image->control_page + (size - 1)) & ~(size - 1); hole_end = hole_start + size - 1; while (hole_end <= crashk_res.end) { unsigned long i; if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) break; if (hole_end > crashk_res.end) break; /* See if I overlap any of the segments */ for (i = 0; i < image->nr_segments; i++) { unsigned long mstart, mend; mstart = image->segment[i].mem; mend = mstart + image->segment[i].memsz - 1; if ((hole_end >= mstart) && (hole_start <= mend)) { /* Advance the hole to the end of the segment */ hole_start = (mend + (size - 1)) & ~(size - 1); hole_end = hole_start + size - 1; break; } } /* If I don't overlap any segments I have found my hole! 
*/ if (i == image->nr_segments) { pages = pfn_to_page(hole_start >> PAGE_SHIFT); break; } } if (pages) image->control_page = hole_end; return pages; } struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order) { struct page *pages = NULL; switch (image->type) { case KEXEC_TYPE_DEFAULT: pages = kimage_alloc_normal_control_pages(image, order); break; case KEXEC_TYPE_CRASH: pages = kimage_alloc_crash_control_pages(image, order); break; } return pages; } static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) { if (*image->entry != 0) image->entry++; if (image->entry == image->last_entry) { kimage_entry_t *ind_page; struct page *page; page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); if (!page) return -ENOMEM; ind_page = page_address(page); *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; image->entry = ind_page; image->last_entry = ind_page + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); } *image->entry = entry; image->entry++; *image->entry = 0; return 0; } static int kimage_set_destination(struct kimage *image, unsigned long destination) { int result; destination &= PAGE_MASK; result = kimage_add_entry(image, destination | IND_DESTINATION); if (result == 0) image->destination = destination; return result; } static int kimage_add_page(struct kimage *image, unsigned long page) { int result; page &= PAGE_MASK; result = kimage_add_entry(image, page | IND_SOURCE); if (result == 0) image->destination += PAGE_SIZE; return result; } static void kimage_free_extra_pages(struct kimage *image) { /* Walk through and free any extra destination pages I may have */ kimage_free_page_list(&image->dest_pages); /* Walk through and free any unuseable pages I have cached */ kimage_free_page_list(&image->unuseable_pages); } static void kimage_terminate(struct kimage *image) { if (*image->entry != 0) image->entry++; *image->entry = IND_DONE; } #define for_each_kimage_entry(image, ptr, entry) \ for (ptr = &image->head; (entry = *ptr) && 
!(entry & IND_DONE); \ ptr = (entry & IND_INDIRECTION)? \ phys_to_virt((entry & PAGE_MASK)): ptr +1) static void kimage_free_entry(kimage_entry_t entry) { struct page *page; page = pfn_to_page(entry >> PAGE_SHIFT); kimage_free_pages(page); } static void kimage_free(struct kimage *image) { kimage_entry_t *ptr, entry; kimage_entry_t ind = 0; if (!image) return; kimage_free_extra_pages(image); for_each_kimage_entry(image, ptr, entry) { if (entry & IND_INDIRECTION) { /* Free the previous indirection page */ if (ind & IND_INDIRECTION) kimage_free_entry(ind); /* Save this indirection page until we are * done with it. */ ind = entry; } else if (entry & IND_SOURCE) kimage_free_entry(entry); } /* Free the final indirection page */ if (ind & IND_INDIRECTION) kimage_free_entry(ind); /* Handle any machine specific cleanup */ machine_kexec_cleanup(image); /* Free the kexec control pages... */ kimage_free_page_list(&image->control_pages); kfree(image); } static kimage_entry_t *kimage_dst_used(struct kimage *image, unsigned long page) { kimage_entry_t *ptr, entry; unsigned long destination = 0; for_each_kimage_entry(image, ptr, entry) { if (entry & IND_DESTINATION) destination = entry & PAGE_MASK; else if (entry & IND_SOURCE) { if (page == destination) return ptr; destination += PAGE_SIZE; } } return NULL; } static struct page *kimage_alloc_page(struct kimage *image, gfp_t gfp_mask, unsigned long destination) { /* * Here we implement safeguards to ensure that a source page * is not copied to its destination page before the data on * the destination page is no longer useful. * * To do this we maintain the invariant that a source page is * either its own destination page, or it is not a * destination page at all. * * That is slightly stronger than required, but the proof * that no problems will not occur is trivial, and the * implementation is simply to verify. 
* * When allocating all pages normally this algorithm will run * in O(N) time, but in the worst case it will run in O(N^2) * time. If the runtime is a problem the data structures can * be fixed. */ struct page *page; unsigned long addr; /* * Walk through the list of destination pages, and see if I * have a match. */ list_for_each_entry(page, &image->dest_pages, lru) { addr = page_to_pfn(page) << PAGE_SHIFT; if (addr == destination) { list_del(&page->lru); return page; } } page = NULL; while (1) { kimage_entry_t *old; /* Allocate a page, if we run out of memory give up */ page = kimage_alloc_pages(gfp_mask, 0); if (!page) return NULL; /* If the page cannot be used file it away */ if (page_to_pfn(page) > (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { list_add(&page->lru, &image->unuseable_pages); continue; } addr = page_to_pfn(page) << PAGE_SHIFT; /* If it is the destination page we want use it */ if (addr == destination) break; /* If the page is not a destination page use it */ if (!kimage_is_destination_range(image, addr, addr + PAGE_SIZE)) break; /* * I know that the page is someones destination page. * See if there is already a source page for this * destination page. And if so swap the source pages. */ old = kimage_dst_used(image, addr); if (old) { /* If so move it */ unsigned long old_addr; struct page *old_page; old_addr = *old & PAGE_MASK; old_page = pfn_to_page(old_addr >> PAGE_SHIFT); copy_highpage(page, old_page); *old = addr | (*old & ~PAGE_MASK); /* The old page I have found cannot be a * destination page, so return it if it's * gfp_flags honor the ones passed in. */ if (!(gfp_mask & __GFP_HIGHMEM) && PageHighMem(old_page)) { kimage_free_pages(old_page); continue; } addr = old_addr; page = old_page; break; } else { /* Place the page on the destination list I * will use it later. 
*/ list_add(&page->lru, &image->dest_pages); } } return page; } static int kimage_load_normal_segment(struct kimage *image, struct kexec_segment *segment) { unsigned long maddr; unsigned long ubytes, mbytes; int result; unsigned char __user *buf; result = 0; buf = segment->buf; ubytes = segment->bufsz; mbytes = segment->memsz; maddr = segment->mem; result = kimage_set_destination(image, maddr); if (result < 0) goto out; while (mbytes) { struct page *page; char *ptr; size_t uchunk, mchunk; page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); if (!page) { result = -ENOMEM; goto out; } result = kimage_add_page(image, page_to_pfn(page) << PAGE_SHIFT); if (result < 0) goto out; ptr = kmap(page); /* Start with a clear page */ memset(ptr, 0, PAGE_SIZE); ptr += maddr & ~PAGE_MASK; mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); if (mchunk > mbytes) mchunk = mbytes; uchunk = mchunk; if (uchunk > ubytes) uchunk = ubytes; result = copy_from_user(ptr, buf, uchunk); kunmap(page); if (result) { result = (result < 0) ? result : -EIO; goto out; } ubytes -= uchunk; maddr += mchunk; buf += mchunk; mbytes -= mchunk; } out: return result; } static int kimage_load_crash_segment(struct kimage *image, struct kexec_segment *segment) { /* For crash dumps kernels we simply copy the data from * user space to it's destination. * We do things a page at a time for the sake of kmap. 
*/ unsigned long maddr; unsigned long ubytes, mbytes; int result; unsigned char __user *buf; result = 0; buf = segment->buf; ubytes = segment->bufsz; mbytes = segment->memsz; maddr = segment->mem; while (mbytes) { struct page *page; char *ptr; size_t uchunk, mchunk; page = pfn_to_page(maddr >> PAGE_SHIFT); if (!page) { result = -ENOMEM; goto out; } ptr = kmap(page); ptr += maddr & ~PAGE_MASK; mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); if (mchunk > mbytes) mchunk = mbytes; uchunk = mchunk; if (uchunk > ubytes) { uchunk = ubytes; /* Zero the trailing part of the page */ memset(ptr + uchunk, 0, mchunk - uchunk); } result = copy_from_user(ptr, buf, uchunk); kexec_flush_icache_page(page); kunmap(page); if (result) { result = (result < 0) ? result : -EIO; goto out; } ubytes -= uchunk; maddr += mchunk; buf += mchunk; mbytes -= mchunk; } out: return result; } static int kimage_load_segment(struct kimage *image, struct kexec_segment *segment) { int result = -ENOMEM; switch (image->type) { case KEXEC_TYPE_DEFAULT: result = kimage_load_normal_segment(image, segment); break; case KEXEC_TYPE_CRASH: result = kimage_load_crash_segment(image, segment); break; } return result; } /* * Exec Kernel system call: for obvious reasons only root may call it. * * This call breaks up into three pieces. * - A generic part which loads the new kernel from the current * address space, and very carefully places the data in the * allocated pages. * * - A generic part that interacts with the kernel and tells all of * the devices to shut down. Preventing on-going dmas, and placing * the devices in a consistent state so a later kernel can * reinitialize them. * * - A machine specific part that includes the syscall number * and the copies the image to it's final destination. And * jumps into the image at entry. * * kexec does not sync, or unmount filesystems so if you need * that to happen you need to do that yourself. 
*/ struct kimage *kexec_image; struct kimage *kexec_crash_image; static DEFINE_MUTEX(kexec_mutex); SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, struct kexec_segment __user *, segments, unsigned long, flags) { struct kimage **dest_image, *image; int result; /* We only trust the superuser with rebooting the system. */ if (!capable(CAP_SYS_BOOT)) return -EPERM; /* * Verify we have a legal set of flags * This leaves us room for future extensions. */ if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK)) return -EINVAL; /* Verify we are on the appropriate architecture */ if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) && ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)) return -EINVAL; /* Put an artificial cap on the number * of segments passed to kexec_load. */ if (nr_segments > KEXEC_SEGMENT_MAX) return -EINVAL; image = NULL; result = 0; /* Because we write directly to the reserved memory * region when loading crash kernels we need a mutex here to * prevent multiple crash kernels from attempting to load * simultaneously, and to prevent a crash kernel from loading * over the top of a in use crash kernel. * * KISS: always take the mutex. */ if (!mutex_trylock(&kexec_mutex)) return -EBUSY; dest_image = &kexec_image; if (flags & KEXEC_ON_CRASH) dest_image = &kexec_crash_image; if (nr_segments > 0) { unsigned long i; /* Loading another kernel to reboot into */ if ((flags & KEXEC_ON_CRASH) == 0) result = kimage_normal_alloc(&image, entry, nr_segments, segments); /* Loading another kernel to switch to if this one crashes */ else if (flags & KEXEC_ON_CRASH) { /* Free any current crash dump kernel before * we corrupt it. 
*/ kimage_free(xchg(&kexec_crash_image, NULL)); result = kimage_crash_alloc(&image, entry, nr_segments, segments); } if (result) goto out; if (flags & KEXEC_PRESERVE_CONTEXT) image->preserve_context = 1; result = machine_kexec_prepare(image); if (result) goto out; for (i = 0; i < nr_segments; i++) { result = kimage_load_segment(image, &image->segment[i]); if (result) goto out; } kimage_terminate(image); } /* Install the new kernel, and Uninstall the old */ image = xchg(dest_image, image); out: mutex_unlock(&kexec_mutex); kimage_free(image); return result; } #ifdef CONFIG_COMPAT asmlinkage long compat_sys_kexec_load(unsigned long entry, unsigned long nr_segments, struct compat_kexec_segment __user *segments, unsigned long flags) { struct compat_kexec_segment in; struct kexec_segment out, __user *ksegments; unsigned long i, result; /* Don't allow clients that don't understand the native * architecture to do anything. */ if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) return -EINVAL; if (nr_segments > KEXEC_SEGMENT_MAX) return -EINVAL; ksegments = compat_alloc_user_space(nr_segments * sizeof(out)); for (i=0; i < nr_segments; i++) { result = copy_from_user(&in, &segments[i], sizeof(in)); if (result) return -EFAULT; out.buf = compat_ptr(in.buf); out.bufsz = in.bufsz; out.mem = in.mem; out.memsz = in.memsz; result = copy_to_user(&ksegments[i], &out, sizeof(out)); if (result) return -EFAULT; } return sys_kexec_load(entry, nr_segments, ksegments, flags); } #endif void crash_kexec(struct pt_regs *regs) { /* Take the kexec_mutex here to prevent sys_kexec_load * running on one cpu from replacing the crash kernel * we are using after a panic on a different cpu. * * If the crash kernel was not located in a fixed area * of memory the xchg(&kexec_crash_image) would be * sufficient. But since I reuse the memory... 
*/ if (mutex_trylock(&kexec_mutex)) { if (kexec_crash_image) { struct pt_regs fixed_regs; crash_setup_regs(&fixed_regs, regs); crash_save_vmcoreinfo(); machine_crash_shutdown(&fixed_regs); machine_kexec(kexec_crash_image); } mutex_unlock(&kexec_mutex); } } static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, size_t data_len) { struct elf_note note; note.n_namesz = strlen(name) + 1; note.n_descsz = data_len; note.n_type = type; memcpy(buf, &note, sizeof(note)); buf += (sizeof(note) + 3)/4; memcpy(buf, name, note.n_namesz); buf += (note.n_namesz + 3)/4; memcpy(buf, data, note.n_descsz); buf += (note.n_descsz + 3)/4; return buf; } static void final_note(u32 *buf) { struct elf_note note; note.n_namesz = 0; note.n_descsz = 0; note.n_type = 0; memcpy(buf, &note, sizeof(note)); } void crash_save_cpu(struct pt_regs *regs, int cpu) { struct elf_prstatus prstatus; u32 *buf; if ((cpu < 0) || (cpu >= nr_cpu_ids)) return; /* Using ELF notes here is opportunistic. * I need a well defined structure format * for the data I pass, and I need tags * on the data to indicate what information I have * squirrelled away. ELF notes happen to provide * all of that, so there is no need to invent something new. */ buf = (u32*)per_cpu_ptr(crash_notes, cpu); if (!buf) return; memset(&prstatus, 0, sizeof(prstatus)); prstatus.pr_pid = current->pid; elf_core_copy_kernel_regs(&prstatus.pr_reg, regs); buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, &prstatus, sizeof(prstatus)); final_note(buf); } static int __init crash_notes_memory_init(void) { /* Allocate memory for saving cpu registers. 
*/ crash_notes = alloc_percpu(note_buf_t); if (!crash_notes) { printk("Kexec: Memory allocation for saving cpu register" " states failed\n"); return -ENOMEM; } return 0; } module_init(crash_notes_memory_init) /* * parsing the "crashkernel" commandline * * this code is intended to be called from architecture specific code */ /* * This function parses command lines in the format * * crashkernel=ramsize-range:size[,...][@offset] * * The function returns 0 on success and -EINVAL on failure. */ static int __init parse_crashkernel_mem(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base) { char *cur = cmdline, *tmp; /* for each entry of the comma-separated list */ do { unsigned long long start, end = ULLONG_MAX, size; /* get the start of the range */ start = memparse(cur, &tmp); if (cur == tmp) { pr_warning("crashkernel: Memory value expected\n"); return -EINVAL; } cur = tmp; if (*cur != '-') { pr_warning("crashkernel: '-' expected\n"); return -EINVAL; } cur++; /* if no ':' is here, than we read the end */ if (*cur != ':') { end = memparse(cur, &tmp); if (cur == tmp) { pr_warning("crashkernel: Memory " "value expected\n"); return -EINVAL; } cur = tmp; if (end <= start) { pr_warning("crashkernel: end <= start\n"); return -EINVAL; } } if (*cur != ':') { pr_warning("crashkernel: ':' expected\n"); return -EINVAL; } cur++; size = memparse(cur, &tmp); if (cur == tmp) { pr_warning("Memory value expected\n"); return -EINVAL; } cur = tmp; if (size >= system_ram) { pr_warning("crashkernel: invalid size\n"); return -EINVAL; } /* match ? 
*/ if (system_ram >= start && system_ram < end) { *crash_size = size; break; } } while (*cur++ == ','); if (*crash_size > 0) { while (*cur && *cur != ' ' && *cur != '@') cur++; if (*cur == '@') { cur++; *crash_base = memparse(cur, &tmp); if (cur == tmp) { pr_warning("Memory value expected " "after '@'\n"); return -EINVAL; } } } return 0; } /* * That function parses "simple" (old) crashkernel command lines like * * crashkernel=size[@offset] * * It returns 0 on success and -EINVAL on failure. */ static int __init parse_crashkernel_simple(char *cmdline, unsigned long long *crash_size, unsigned long long *crash_base) { char *cur = cmdline; *crash_size = memparse(cmdline, &cur); if (cmdline == cur) { pr_warning("crashkernel: memory value expected\n"); return -EINVAL; } if (*cur == '@') *crash_base = memparse(cur+1, &cur); return 0; } /* * That function is the entry point for command line parsing and should be * called from the arch-specific code. */ int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base) { char *p = cmdline, *ck_cmdline = NULL; char *first_colon, *first_space; BUG_ON(!crash_size || !crash_base); *crash_size = 0; *crash_base = 0; /* find crashkernel and use the last one if there are more */ p = strstr(p, "crashkernel="); while (p) { ck_cmdline = p; p = strstr(p+1, "crashkernel="); } if (!ck_cmdline) return -EINVAL; ck_cmdline += 12; /* strlen("crashkernel=") */ /* * if the commandline contains a ':', then that's the extended * syntax -- if not, it must be the classic syntax */ first_colon = strchr(ck_cmdline, ':'); first_space = strchr(ck_cmdline, ' '); if (first_colon && (!first_space || first_colon < first_space)) return parse_crashkernel_mem(ck_cmdline, system_ram, crash_size, crash_base); else return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base); return 0; } void crash_save_vmcoreinfo(void) { u32 *buf; if (!vmcoreinfo_size) return; 
vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds()); buf = (u32 *)vmcoreinfo_note; buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data, vmcoreinfo_size); final_note(buf); } void vmcoreinfo_append_str(const char *fmt, ...) { va_list args; char buf[0x50]; int r; va_start(args, fmt); r = vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); if (r + vmcoreinfo_size > vmcoreinfo_max_size) r = vmcoreinfo_max_size - vmcoreinfo_size; memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r); vmcoreinfo_size += r; } /* * provide an empty default implementation here -- architecture * code may override this */ void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void) {} unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void) { return __pa((unsigned long)(char *)&vmcoreinfo_note); } static int __init crash_save_vmcoreinfo_init(void) { VMCOREINFO_OSRELEASE(init_uts_ns.name.release); VMCOREINFO_PAGESIZE(PAGE_SIZE); VMCOREINFO_SYMBOL(init_uts_ns); VMCOREINFO_SYMBOL(node_online_map); VMCOREINFO_SYMBOL(swapper_pg_dir); VMCOREINFO_SYMBOL(_stext); VMCOREINFO_SYMBOL(vmlist); #ifndef CONFIG_NEED_MULTIPLE_NODES VMCOREINFO_SYMBOL(mem_map); VMCOREINFO_SYMBOL(contig_page_data); #endif #ifdef CONFIG_SPARSEMEM VMCOREINFO_SYMBOL(mem_section); VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); VMCOREINFO_STRUCT_SIZE(mem_section); VMCOREINFO_OFFSET(mem_section, section_mem_map); #endif VMCOREINFO_STRUCT_SIZE(page); VMCOREINFO_STRUCT_SIZE(pglist_data); VMCOREINFO_STRUCT_SIZE(zone); VMCOREINFO_STRUCT_SIZE(free_area); VMCOREINFO_STRUCT_SIZE(list_head); VMCOREINFO_SIZE(nodemask_t); VMCOREINFO_OFFSET(page, flags); VMCOREINFO_OFFSET(page, _count); VMCOREINFO_OFFSET(page, mapping); VMCOREINFO_OFFSET(page, lru); VMCOREINFO_OFFSET(pglist_data, node_zones); VMCOREINFO_OFFSET(pglist_data, nr_zones); #ifdef CONFIG_FLAT_NODE_MEM_MAP VMCOREINFO_OFFSET(pglist_data, node_mem_map); #endif VMCOREINFO_OFFSET(pglist_data, node_start_pfn); VMCOREINFO_OFFSET(pglist_data, node_spanned_pages); 
VMCOREINFO_OFFSET(pglist_data, node_id); VMCOREINFO_OFFSET(zone, free_area); VMCOREINFO_OFFSET(zone, vm_stat); VMCOREINFO_OFFSET(zone, spanned_pages); VMCOREINFO_OFFSET(free_area, free_list); VMCOREINFO_OFFSET(list_head, next); VMCOREINFO_OFFSET(list_head, prev); VMCOREINFO_OFFSET(vm_struct, addr); VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER); log_buf_kexec_setup(); VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES); VMCOREINFO_NUMBER(NR_FREE_PAGES); VMCOREINFO_NUMBER(PG_lru); VMCOREINFO_NUMBER(PG_private); VMCOREINFO_NUMBER(PG_swapcache); arch_crash_save_vmcoreinfo(); return 0; } module_init(crash_save_vmcoreinfo_init) /* * Move into place and start executing a preloaded standalone * executable. If nothing was preloaded return an error. */ int kernel_kexec(void) { int error = 0; if (!mutex_trylock(&kexec_mutex)) return -EBUSY; if (!kexec_image) { error = -EINVAL; goto Unlock; } #ifdef CONFIG_KEXEC_JUMP if (kexec_image->preserve_context) { mutex_lock(&pm_mutex); pm_prepare_console(); error = freeze_processes(); if (error) { error = -EBUSY; goto Restore_console; } suspend_console(); error = dpm_suspend_start(PMSG_FREEZE); if (error) goto Resume_console; /* At this point, dpm_suspend_start() has been called, * but *not* dpm_suspend_noirq(). We *must* call * dpm_suspend_noirq() now. Otherwise, drivers for * some devices (e.g. interrupt controllers) become * desynchronized with the actual state of the * hardware at resume time, and evil weirdness ensues. 
*/ error = dpm_suspend_noirq(PMSG_FREEZE); if (error) goto Resume_devices; error = disable_nonboot_cpus(); if (error) goto Enable_cpus; local_irq_disable(); /* Suspend system devices */ error = sysdev_suspend(PMSG_FREEZE); if (error) goto Enable_irqs; } else #endif { kernel_restart_prepare(NULL); printk(KERN_EMERG "Starting new kernel\n"); machine_shutdown(); } machine_kexec(kexec_image); #ifdef CONFIG_KEXEC_JUMP if (kexec_image->preserve_context) { sysdev_resume(); Enable_irqs: local_irq_enable(); Enable_cpus: enable_nonboot_cpus(); dpm_resume_noirq(PMSG_RESTORE); Resume_devices: dpm_resume_end(PMSG_RESTORE); Resume_console: resume_console(); thaw_processes(); Restore_console: pm_restore_console(); mutex_unlock(&pm_mutex); } #endif Unlock: mutex_unlock(&kexec_mutex); return error; }
gpl-2.0
Kaisrlik/linux
sound/soc/codecs/sta350.c
472
38259
/* * Codec driver for ST STA350 2.1-channel high-efficiency digital audio system * * Copyright: 2014 Raumfeld GmbH * Author: Sven Brandau <info@brandau.biz> * * based on code from: * Raumfeld GmbH * Johannes Stezenbach <js@sig21.net> * Wolfson Microelectronics PLC. * Mark Brown <broonie@opensource.wolfsonmicro.com> * Freescale Semiconductor, Inc. * Timur Tabi <timur@freescale.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s:%d: " fmt, __func__, __LINE__ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/gpio/consumer.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/initval.h> #include <sound/tlv.h> #include <sound/sta350.h> #include "sta350.h" #define STA350_RATES (SNDRV_PCM_RATE_32000 | \ SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000 | \ SNDRV_PCM_RATE_88200 | \ SNDRV_PCM_RATE_96000 | \ SNDRV_PCM_RATE_176400 | \ SNDRV_PCM_RATE_192000) #define STA350_FORMATS \ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE | \ SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S18_3BE | \ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE | \ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE | \ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE | \ SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE) /* Power-up register defaults */ static const struct reg_default sta350_regs[] = { { 0x0, 0x63 }, { 0x1, 0x80 }, { 0x2, 0xdf }, { 0x3, 0x40 }, { 0x4, 0xc2 }, { 0x5, 0x5c }, { 0x6, 0x00 }, { 
0x7, 0xff }, { 0x8, 0x60 }, { 0x9, 0x60 }, { 0xa, 0x60 }, { 0xb, 0x00 }, { 0xc, 0x00 }, { 0xd, 0x00 }, { 0xe, 0x00 }, { 0xf, 0x40 }, { 0x10, 0x80 }, { 0x11, 0x77 }, { 0x12, 0x6a }, { 0x13, 0x69 }, { 0x14, 0x6a }, { 0x15, 0x69 }, { 0x16, 0x00 }, { 0x17, 0x00 }, { 0x18, 0x00 }, { 0x19, 0x00 }, { 0x1a, 0x00 }, { 0x1b, 0x00 }, { 0x1c, 0x00 }, { 0x1d, 0x00 }, { 0x1e, 0x00 }, { 0x1f, 0x00 }, { 0x20, 0x00 }, { 0x21, 0x00 }, { 0x22, 0x00 }, { 0x23, 0x00 }, { 0x24, 0x00 }, { 0x25, 0x00 }, { 0x26, 0x00 }, { 0x27, 0x2a }, { 0x28, 0xc0 }, { 0x29, 0xf3 }, { 0x2a, 0x33 }, { 0x2b, 0x00 }, { 0x2c, 0x0c }, { 0x31, 0x00 }, { 0x36, 0x00 }, { 0x37, 0x00 }, { 0x38, 0x00 }, { 0x39, 0x01 }, { 0x3a, 0xee }, { 0x3b, 0xff }, { 0x3c, 0x7e }, { 0x3d, 0xc0 }, { 0x3e, 0x26 }, { 0x3f, 0x00 }, { 0x48, 0x00 }, { 0x49, 0x00 }, { 0x4a, 0x00 }, { 0x4b, 0x04 }, { 0x4c, 0x00 }, }; static const struct regmap_range sta350_write_regs_range[] = { regmap_reg_range(STA350_CONFA, STA350_AUTO2), regmap_reg_range(STA350_C1CFG, STA350_FDRC2), regmap_reg_range(STA350_EQCFG, STA350_EVOLRES), regmap_reg_range(STA350_NSHAPE, STA350_MISC2), }; static const struct regmap_range sta350_read_regs_range[] = { regmap_reg_range(STA350_CONFA, STA350_AUTO2), regmap_reg_range(STA350_C1CFG, STA350_STATUS), regmap_reg_range(STA350_EQCFG, STA350_EVOLRES), regmap_reg_range(STA350_NSHAPE, STA350_MISC2), }; static const struct regmap_range sta350_volatile_regs_range[] = { regmap_reg_range(STA350_CFADDR2, STA350_CFUD), regmap_reg_range(STA350_STATUS, STA350_STATUS), }; static const struct regmap_access_table sta350_write_regs = { .yes_ranges = sta350_write_regs_range, .n_yes_ranges = ARRAY_SIZE(sta350_write_regs_range), }; static const struct regmap_access_table sta350_read_regs = { .yes_ranges = sta350_read_regs_range, .n_yes_ranges = ARRAY_SIZE(sta350_read_regs_range), }; static const struct regmap_access_table sta350_volatile_regs = { .yes_ranges = sta350_volatile_regs_range, .n_yes_ranges = ARRAY_SIZE(sta350_volatile_regs_range), 
}; /* regulator power supply names */ static const char * const sta350_supply_names[] = { "vdd-dig", /* digital supply, 3.3V */ "vdd-pll", /* pll supply, 3.3V */ "vcc" /* power amp supply, 5V - 26V */ }; /* codec private data */ struct sta350_priv { struct regmap *regmap; struct regulator_bulk_data supplies[ARRAY_SIZE(sta350_supply_names)]; struct sta350_platform_data *pdata; unsigned int mclk; unsigned int format; u32 coef_shadow[STA350_COEF_COUNT]; int shutdown; struct gpio_desc *gpiod_nreset; struct gpio_desc *gpiod_power_down; struct mutex coeff_lock; }; static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12750, 50, 1); static const DECLARE_TLV_DB_SCALE(chvol_tlv, -7950, 50, 1); static const DECLARE_TLV_DB_SCALE(tone_tlv, -1200, 200, 0); static const char * const sta350_drc_ac[] = { "Anti-Clipping", "Dynamic Range Compression" }; static const char * const sta350_auto_gc_mode[] = { "User", "AC no clipping", "AC limited clipping (10%)", "DRC nighttime listening mode" }; static const char * const sta350_auto_xo_mode[] = { "User", "80Hz", "100Hz", "120Hz", "140Hz", "160Hz", "180Hz", "200Hz", "220Hz", "240Hz", "260Hz", "280Hz", "300Hz", "320Hz", "340Hz", "360Hz" }; static const char * const sta350_binary_output[] = { "FFX 3-state output - normal operation", "Binary output" }; static const char * const sta350_limiter_select[] = { "Limiter Disabled", "Limiter #1", "Limiter #2" }; static const char * const sta350_limiter_attack_rate[] = { "3.1584", "2.7072", "2.2560", "1.8048", "1.3536", "0.9024", "0.4512", "0.2256", "0.1504", "0.1123", "0.0902", "0.0752", "0.0645", "0.0564", "0.0501", "0.0451" }; static const char * const sta350_limiter_release_rate[] = { "0.5116", "0.1370", "0.0744", "0.0499", "0.0360", "0.0299", "0.0264", "0.0208", "0.0198", "0.0172", "0.0147", "0.0137", "0.0134", "0.0117", "0.0110", "0.0104" }; static const char * const sta350_noise_shaper_type[] = { "Third order", "Fourth order" }; static DECLARE_TLV_DB_RANGE(sta350_limiter_ac_attack_tlv, 0, 7, 
TLV_DB_SCALE_ITEM(-1200, 200, 0), 8, 16, TLV_DB_SCALE_ITEM(300, 100, 0), ); static DECLARE_TLV_DB_RANGE(sta350_limiter_ac_release_tlv, 0, 0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 0), 1, 1, TLV_DB_SCALE_ITEM(-2900, 0, 0), 2, 2, TLV_DB_SCALE_ITEM(-2000, 0, 0), 3, 8, TLV_DB_SCALE_ITEM(-1400, 200, 0), 8, 16, TLV_DB_SCALE_ITEM(-700, 100, 0), ); static DECLARE_TLV_DB_RANGE(sta350_limiter_drc_attack_tlv, 0, 7, TLV_DB_SCALE_ITEM(-3100, 200, 0), 8, 13, TLV_DB_SCALE_ITEM(-1600, 100, 0), 14, 16, TLV_DB_SCALE_ITEM(-1000, 300, 0), ); static DECLARE_TLV_DB_RANGE(sta350_limiter_drc_release_tlv, 0, 0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 0), 1, 2, TLV_DB_SCALE_ITEM(-3800, 200, 0), 3, 4, TLV_DB_SCALE_ITEM(-3300, 200, 0), 5, 12, TLV_DB_SCALE_ITEM(-3000, 200, 0), 13, 16, TLV_DB_SCALE_ITEM(-1500, 300, 0), ); static SOC_ENUM_SINGLE_DECL(sta350_drc_ac_enum, STA350_CONFD, STA350_CONFD_DRC_SHIFT, sta350_drc_ac); static SOC_ENUM_SINGLE_DECL(sta350_noise_shaper_enum, STA350_CONFE, STA350_CONFE_NSBW_SHIFT, sta350_noise_shaper_type); static SOC_ENUM_SINGLE_DECL(sta350_auto_gc_enum, STA350_AUTO1, STA350_AUTO1_AMGC_SHIFT, sta350_auto_gc_mode); static SOC_ENUM_SINGLE_DECL(sta350_auto_xo_enum, STA350_AUTO2, STA350_AUTO2_XO_SHIFT, sta350_auto_xo_mode); static SOC_ENUM_SINGLE_DECL(sta350_binary_output_ch1_enum, STA350_C1CFG, STA350_CxCFG_BO_SHIFT, sta350_binary_output); static SOC_ENUM_SINGLE_DECL(sta350_binary_output_ch2_enum, STA350_C2CFG, STA350_CxCFG_BO_SHIFT, sta350_binary_output); static SOC_ENUM_SINGLE_DECL(sta350_binary_output_ch3_enum, STA350_C3CFG, STA350_CxCFG_BO_SHIFT, sta350_binary_output); static SOC_ENUM_SINGLE_DECL(sta350_limiter_ch1_enum, STA350_C1CFG, STA350_CxCFG_LS_SHIFT, sta350_limiter_select); static SOC_ENUM_SINGLE_DECL(sta350_limiter_ch2_enum, STA350_C2CFG, STA350_CxCFG_LS_SHIFT, sta350_limiter_select); static SOC_ENUM_SINGLE_DECL(sta350_limiter_ch3_enum, STA350_C3CFG, STA350_CxCFG_LS_SHIFT, sta350_limiter_select); static 
SOC_ENUM_SINGLE_DECL(sta350_limiter1_attack_rate_enum, STA350_L1AR, STA350_LxA_SHIFT, sta350_limiter_attack_rate); static SOC_ENUM_SINGLE_DECL(sta350_limiter2_attack_rate_enum, STA350_L2AR, STA350_LxA_SHIFT, sta350_limiter_attack_rate); static SOC_ENUM_SINGLE_DECL(sta350_limiter1_release_rate_enum, STA350_L1AR, STA350_LxR_SHIFT, sta350_limiter_release_rate); static SOC_ENUM_SINGLE_DECL(sta350_limiter2_release_rate_enum, STA350_L2AR, STA350_LxR_SHIFT, sta350_limiter_release_rate); /* * byte array controls for setting biquad, mixer, scaling coefficients; * for biquads all five coefficients need to be set in one go, * mixer and pre/postscale coefs can be set individually; * each coef is 24bit, the bytes are ordered in the same way * as given in the STA350 data sheet (big endian; b1, b2, a1, a2, b0) */ static int sta350_coefficient_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int numcoef = kcontrol->private_value >> 16; uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES; uinfo->count = 3 * numcoef; return 0; } static int sta350_coefficient_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct sta350_priv *sta350 = snd_soc_codec_get_drvdata(codec); int numcoef = kcontrol->private_value >> 16; int index = kcontrol->private_value & 0xffff; unsigned int cfud, val; int i, ret = 0; mutex_lock(&sta350->coeff_lock); /* preserve reserved bits in STA350_CFUD */ regmap_read(sta350->regmap, STA350_CFUD, &cfud); cfud &= 0xf0; /* * chip documentation does not say if the bits are self clearing, * so do it explicitly */ regmap_write(sta350->regmap, STA350_CFUD, cfud); regmap_write(sta350->regmap, STA350_CFADDR2, index); if (numcoef == 1) { regmap_write(sta350->regmap, STA350_CFUD, cfud | 0x04); } else if (numcoef == 5) { regmap_write(sta350->regmap, STA350_CFUD, cfud | 0x08); } else { ret = -EINVAL; goto exit_unlock; } for (i = 0; i < 3 * numcoef; i++) { 
regmap_read(sta350->regmap, STA350_B1CF1 + i, &val); ucontrol->value.bytes.data[i] = val; } exit_unlock: mutex_unlock(&sta350->coeff_lock); return ret; } static int sta350_coefficient_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct sta350_priv *sta350 = snd_soc_codec_get_drvdata(codec); int numcoef = kcontrol->private_value >> 16; int index = kcontrol->private_value & 0xffff; unsigned int cfud; int i; /* preserve reserved bits in STA350_CFUD */ regmap_read(sta350->regmap, STA350_CFUD, &cfud); cfud &= 0xf0; /* * chip documentation does not say if the bits are self clearing, * so do it explicitly */ regmap_write(sta350->regmap, STA350_CFUD, cfud); regmap_write(sta350->regmap, STA350_CFADDR2, index); for (i = 0; i < numcoef && (index + i < STA350_COEF_COUNT); i++) sta350->coef_shadow[index + i] = (ucontrol->value.bytes.data[3 * i] << 16) | (ucontrol->value.bytes.data[3 * i + 1] << 8) | (ucontrol->value.bytes.data[3 * i + 2]); for (i = 0; i < 3 * numcoef; i++) regmap_write(sta350->regmap, STA350_B1CF1 + i, ucontrol->value.bytes.data[i]); if (numcoef == 1) regmap_write(sta350->regmap, STA350_CFUD, cfud | 0x01); else if (numcoef == 5) regmap_write(sta350->regmap, STA350_CFUD, cfud | 0x02); else return -EINVAL; return 0; } static int sta350_sync_coef_shadow(struct snd_soc_codec *codec) { struct sta350_priv *sta350 = snd_soc_codec_get_drvdata(codec); unsigned int cfud; int i; /* preserve reserved bits in STA350_CFUD */ regmap_read(sta350->regmap, STA350_CFUD, &cfud); cfud &= 0xf0; for (i = 0; i < STA350_COEF_COUNT; i++) { regmap_write(sta350->regmap, STA350_CFADDR2, i); regmap_write(sta350->regmap, STA350_B1CF1, (sta350->coef_shadow[i] >> 16) & 0xff); regmap_write(sta350->regmap, STA350_B1CF2, (sta350->coef_shadow[i] >> 8) & 0xff); regmap_write(sta350->regmap, STA350_B1CF3, (sta350->coef_shadow[i]) & 0xff); /* * chip documentation does not say if the bits are * self-clearing, 
so do it explicitly */ regmap_write(sta350->regmap, STA350_CFUD, cfud); regmap_write(sta350->regmap, STA350_CFUD, cfud | 0x01); } return 0; } static int sta350_cache_sync(struct snd_soc_codec *codec) { struct sta350_priv *sta350 = snd_soc_codec_get_drvdata(codec); unsigned int mute; int rc; /* mute during register sync */ regmap_read(sta350->regmap, STA350_CFUD, &mute); regmap_write(sta350->regmap, STA350_MMUTE, mute | STA350_MMUTE_MMUTE); sta350_sync_coef_shadow(codec); rc = regcache_sync(sta350->regmap); regmap_write(sta350->regmap, STA350_MMUTE, mute); return rc; } #define SINGLE_COEF(xname, index) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ .info = sta350_coefficient_info, \ .get = sta350_coefficient_get,\ .put = sta350_coefficient_put, \ .private_value = index | (1 << 16) } #define BIQUAD_COEFS(xname, index) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ .info = sta350_coefficient_info, \ .get = sta350_coefficient_get,\ .put = sta350_coefficient_put, \ .private_value = index | (5 << 16) } static const struct snd_kcontrol_new sta350_snd_controls[] = { SOC_SINGLE_TLV("Master Volume", STA350_MVOL, 0, 0xff, 1, mvol_tlv), /* VOL */ SOC_SINGLE_TLV("Ch1 Volume", STA350_C1VOL, 0, 0xff, 1, chvol_tlv), SOC_SINGLE_TLV("Ch2 Volume", STA350_C2VOL, 0, 0xff, 1, chvol_tlv), SOC_SINGLE_TLV("Ch3 Volume", STA350_C3VOL, 0, 0xff, 1, chvol_tlv), /* CONFD */ SOC_SINGLE("High Pass Filter Bypass Switch", STA350_CONFD, STA350_CONFD_HPB_SHIFT, 1, 1), SOC_SINGLE("De-emphasis Filter Switch", STA350_CONFD, STA350_CONFD_DEMP_SHIFT, 1, 0), SOC_SINGLE("DSP Bypass Switch", STA350_CONFD, STA350_CONFD_DSPB_SHIFT, 1, 0), SOC_SINGLE("Post-scale Link Switch", STA350_CONFD, STA350_CONFD_PSL_SHIFT, 1, 0), SOC_SINGLE("Biquad Coefficient Link Switch", STA350_CONFD, STA350_CONFD_BQL_SHIFT, 1, 0), SOC_ENUM("Compressor/Limiter Switch", sta350_drc_ac_enum), SOC_ENUM("Noise Shaper Bandwidth", sta350_noise_shaper_enum), SOC_SINGLE("Zero-detect Mute Enable Switch", STA350_CONFD, 
STA350_CONFD_ZDE_SHIFT, 1, 0), SOC_SINGLE("Submix Mode Switch", STA350_CONFD, STA350_CONFD_SME_SHIFT, 1, 0), /* CONFE */ SOC_SINGLE("Zero Cross Switch", STA350_CONFE, STA350_CONFE_ZCE_SHIFT, 1, 0), SOC_SINGLE("Soft Ramp Switch", STA350_CONFE, STA350_CONFE_SVE_SHIFT, 1, 0), /* MUTE */ SOC_SINGLE("Master Switch", STA350_MMUTE, STA350_MMUTE_MMUTE_SHIFT, 1, 1), SOC_SINGLE("Ch1 Switch", STA350_MMUTE, STA350_MMUTE_C1M_SHIFT, 1, 1), SOC_SINGLE("Ch2 Switch", STA350_MMUTE, STA350_MMUTE_C2M_SHIFT, 1, 1), SOC_SINGLE("Ch3 Switch", STA350_MMUTE, STA350_MMUTE_C3M_SHIFT, 1, 1), /* AUTOx */ SOC_ENUM("Automode GC", sta350_auto_gc_enum), SOC_ENUM("Automode XO", sta350_auto_xo_enum), /* CxCFG */ SOC_SINGLE("Ch1 Tone Control Bypass Switch", STA350_C1CFG, STA350_CxCFG_TCB_SHIFT, 1, 0), SOC_SINGLE("Ch2 Tone Control Bypass Switch", STA350_C2CFG, STA350_CxCFG_TCB_SHIFT, 1, 0), SOC_SINGLE("Ch1 EQ Bypass Switch", STA350_C1CFG, STA350_CxCFG_EQBP_SHIFT, 1, 0), SOC_SINGLE("Ch2 EQ Bypass Switch", STA350_C2CFG, STA350_CxCFG_EQBP_SHIFT, 1, 0), SOC_SINGLE("Ch1 Master Volume Bypass Switch", STA350_C1CFG, STA350_CxCFG_VBP_SHIFT, 1, 0), SOC_SINGLE("Ch2 Master Volume Bypass Switch", STA350_C1CFG, STA350_CxCFG_VBP_SHIFT, 1, 0), SOC_SINGLE("Ch3 Master Volume Bypass Switch", STA350_C1CFG, STA350_CxCFG_VBP_SHIFT, 1, 0), SOC_ENUM("Ch1 Binary Output Select", sta350_binary_output_ch1_enum), SOC_ENUM("Ch2 Binary Output Select", sta350_binary_output_ch2_enum), SOC_ENUM("Ch3 Binary Output Select", sta350_binary_output_ch3_enum), SOC_ENUM("Ch1 Limiter Select", sta350_limiter_ch1_enum), SOC_ENUM("Ch2 Limiter Select", sta350_limiter_ch2_enum), SOC_ENUM("Ch3 Limiter Select", sta350_limiter_ch3_enum), /* TONE */ SOC_SINGLE_RANGE_TLV("Bass Tone Control Volume", STA350_TONE, STA350_TONE_BTC_SHIFT, 1, 13, 0, tone_tlv), SOC_SINGLE_RANGE_TLV("Treble Tone Control Volume", STA350_TONE, STA350_TONE_TTC_SHIFT, 1, 13, 0, tone_tlv), SOC_ENUM("Limiter1 Attack Rate (dB/ms)", sta350_limiter1_attack_rate_enum), SOC_ENUM("Limiter2 
Attack Rate (dB/ms)", sta350_limiter2_attack_rate_enum), SOC_ENUM("Limiter1 Release Rate (dB/ms)", sta350_limiter1_release_rate_enum), SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta350_limiter2_release_rate_enum), /* * depending on mode, the attack/release thresholds have * two different enum definitions; provide both */ SOC_SINGLE_TLV("Limiter1 Attack Threshold (AC Mode)", STA350_L1ATRT, STA350_LxA_SHIFT, 16, 0, sta350_limiter_ac_attack_tlv), SOC_SINGLE_TLV("Limiter2 Attack Threshold (AC Mode)", STA350_L2ATRT, STA350_LxA_SHIFT, 16, 0, sta350_limiter_ac_attack_tlv), SOC_SINGLE_TLV("Limiter1 Release Threshold (AC Mode)", STA350_L1ATRT, STA350_LxR_SHIFT, 16, 0, sta350_limiter_ac_release_tlv), SOC_SINGLE_TLV("Limiter2 Release Threshold (AC Mode)", STA350_L2ATRT, STA350_LxR_SHIFT, 16, 0, sta350_limiter_ac_release_tlv), SOC_SINGLE_TLV("Limiter1 Attack Threshold (DRC Mode)", STA350_L1ATRT, STA350_LxA_SHIFT, 16, 0, sta350_limiter_drc_attack_tlv), SOC_SINGLE_TLV("Limiter2 Attack Threshold (DRC Mode)", STA350_L2ATRT, STA350_LxA_SHIFT, 16, 0, sta350_limiter_drc_attack_tlv), SOC_SINGLE_TLV("Limiter1 Release Threshold (DRC Mode)", STA350_L1ATRT, STA350_LxR_SHIFT, 16, 0, sta350_limiter_drc_release_tlv), SOC_SINGLE_TLV("Limiter2 Release Threshold (DRC Mode)", STA350_L2ATRT, STA350_LxR_SHIFT, 16, 0, sta350_limiter_drc_release_tlv), BIQUAD_COEFS("Ch1 - Biquad 1", 0), BIQUAD_COEFS("Ch1 - Biquad 2", 5), BIQUAD_COEFS("Ch1 - Biquad 3", 10), BIQUAD_COEFS("Ch1 - Biquad 4", 15), BIQUAD_COEFS("Ch2 - Biquad 1", 20), BIQUAD_COEFS("Ch2 - Biquad 2", 25), BIQUAD_COEFS("Ch2 - Biquad 3", 30), BIQUAD_COEFS("Ch2 - Biquad 4", 35), BIQUAD_COEFS("High-pass", 40), BIQUAD_COEFS("Low-pass", 45), SINGLE_COEF("Ch1 - Prescale", 50), SINGLE_COEF("Ch2 - Prescale", 51), SINGLE_COEF("Ch1 - Postscale", 52), SINGLE_COEF("Ch2 - Postscale", 53), SINGLE_COEF("Ch3 - Postscale", 54), SINGLE_COEF("Thermal warning - Postscale", 55), SINGLE_COEF("Ch1 - Mix 1", 56), SINGLE_COEF("Ch1 - Mix 2", 57), SINGLE_COEF("Ch2 - Mix 
1", 58), SINGLE_COEF("Ch2 - Mix 2", 59), SINGLE_COEF("Ch3 - Mix 1", 60), SINGLE_COEF("Ch3 - Mix 2", 61), }; static const struct snd_soc_dapm_widget sta350_dapm_widgets[] = { SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_OUTPUT("LEFT"), SND_SOC_DAPM_OUTPUT("RIGHT"), SND_SOC_DAPM_OUTPUT("SUB"), }; static const struct snd_soc_dapm_route sta350_dapm_routes[] = { { "LEFT", NULL, "DAC" }, { "RIGHT", NULL, "DAC" }, { "SUB", NULL, "DAC" }, { "DAC", NULL, "Playback" }, }; /* MCLK interpolation ratio per fs */ static struct { int fs; int ir; } interpolation_ratios[] = { { 32000, 0 }, { 44100, 0 }, { 48000, 0 }, { 88200, 1 }, { 96000, 1 }, { 176400, 2 }, { 192000, 2 }, }; /* MCLK to fs clock ratios */ static int mcs_ratio_table[3][6] = { { 768, 512, 384, 256, 128, 576 }, { 384, 256, 192, 128, 64, 0 }, { 192, 128, 96, 64, 32, 0 }, }; /** * sta350_set_dai_sysclk - configure MCLK * @codec_dai: the codec DAI * @clk_id: the clock ID (ignored) * @freq: the MCLK input frequency * @dir: the clock direction (ignored) * * The value of MCLK is used to determine which sample rates are supported * by the STA350, based on the mcs_ratio_table. * * This function must be called by the machine driver's 'startup' function, * otherwise the list of supported sample rates will not be available in * time for ALSA. */ static int sta350_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct sta350_priv *sta350 = snd_soc_codec_get_drvdata(codec); dev_dbg(codec->dev, "mclk=%u\n", freq); sta350->mclk = freq; return 0; } /** * sta350_set_dai_fmt - configure the codec for the selected audio format * @codec_dai: the codec DAI * @fmt: a SND_SOC_DAIFMT_x value indicating the data format * * This function takes a bitmask of SND_SOC_DAIFMT_x bits and programs the * codec accordingly. 
*/ static int sta350_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; struct sta350_priv *sta350 = snd_soc_codec_get_drvdata(codec); unsigned int confb = 0; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: case SND_SOC_DAIFMT_RIGHT_J: case SND_SOC_DAIFMT_LEFT_J: sta350->format = fmt & SND_SOC_DAIFMT_FORMAT_MASK; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: confb |= STA350_CONFB_C2IM; break; case SND_SOC_DAIFMT_NB_IF: confb |= STA350_CONFB_C1IM; break; default: return -EINVAL; } return regmap_update_bits(sta350->regmap, STA350_CONFB, STA350_CONFB_C1IM | STA350_CONFB_C2IM, confb); } /** * sta350_hw_params - program the STA350 with the given hardware parameters. * @substream: the audio stream * @params: the hardware parameters to set * @dai: the SOC DAI (ignored) * * This function programs the hardware with the values provided. * Specifically, the sample rate and the data format. */ static int sta350_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct sta350_priv *sta350 = snd_soc_codec_get_drvdata(codec); int i, mcs = -EINVAL, ir = -EINVAL; unsigned int confa, confb; unsigned int rate, ratio; int ret; if (!sta350->mclk) { dev_err(codec->dev, "sta350->mclk is unset. 
Unable to determine ratio\n"); return -EIO; } rate = params_rate(params); ratio = sta350->mclk / rate; dev_dbg(codec->dev, "rate: %u, ratio: %u\n", rate, ratio); for (i = 0; i < ARRAY_SIZE(interpolation_ratios); i++) { if (interpolation_ratios[i].fs == rate) { ir = interpolation_ratios[i].ir; break; } } if (ir < 0) { dev_err(codec->dev, "Unsupported samplerate: %u\n", rate); return -EINVAL; } for (i = 0; i < 6; i++) { if (mcs_ratio_table[ir][i] == ratio) { mcs = i; break; } } if (mcs < 0) { dev_err(codec->dev, "Unresolvable ratio: %u\n", ratio); return -EINVAL; } confa = (ir << STA350_CONFA_IR_SHIFT) | (mcs << STA350_CONFA_MCS_SHIFT); confb = 0; switch (params_width(params)) { case 24: dev_dbg(codec->dev, "24bit\n"); /* fall through */ case 32: dev_dbg(codec->dev, "24bit or 32bit\n"); switch (sta350->format) { case SND_SOC_DAIFMT_I2S: confb |= 0x0; break; case SND_SOC_DAIFMT_LEFT_J: confb |= 0x1; break; case SND_SOC_DAIFMT_RIGHT_J: confb |= 0x2; break; } break; case 20: dev_dbg(codec->dev, "20bit\n"); switch (sta350->format) { case SND_SOC_DAIFMT_I2S: confb |= 0x4; break; case SND_SOC_DAIFMT_LEFT_J: confb |= 0x5; break; case SND_SOC_DAIFMT_RIGHT_J: confb |= 0x6; break; } break; case 18: dev_dbg(codec->dev, "18bit\n"); switch (sta350->format) { case SND_SOC_DAIFMT_I2S: confb |= 0x8; break; case SND_SOC_DAIFMT_LEFT_J: confb |= 0x9; break; case SND_SOC_DAIFMT_RIGHT_J: confb |= 0xa; break; } break; case 16: dev_dbg(codec->dev, "16bit\n"); switch (sta350->format) { case SND_SOC_DAIFMT_I2S: confb |= 0x0; break; case SND_SOC_DAIFMT_LEFT_J: confb |= 0xd; break; case SND_SOC_DAIFMT_RIGHT_J: confb |= 0xe; break; } break; default: return -EINVAL; } ret = regmap_update_bits(sta350->regmap, STA350_CONFA, STA350_CONFA_MCS_MASK | STA350_CONFA_IR_MASK, confa); if (ret < 0) return ret; ret = regmap_update_bits(sta350->regmap, STA350_CONFB, STA350_CONFB_SAI_MASK | STA350_CONFB_SAIFB, confb); if (ret < 0) return ret; return 0; } static int sta350_startup_sequence(struct sta350_priv 
*sta350) { if (sta350->gpiod_power_down) gpiod_set_value(sta350->gpiod_power_down, 1); if (sta350->gpiod_nreset) { gpiod_set_value(sta350->gpiod_nreset, 0); mdelay(1); gpiod_set_value(sta350->gpiod_nreset, 1); mdelay(1); } return 0; } /** * sta350_set_bias_level - DAPM callback * @codec: the codec device * @level: DAPM power level * * This is called by ALSA to put the codec into low power mode * or to wake it up. If the codec is powered off completely * all registers must be restored after power on. */ static int sta350_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct sta350_priv *sta350 = snd_soc_codec_get_drvdata(codec); int ret; dev_dbg(codec->dev, "level = %d\n", level); switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: /* Full power on */ regmap_update_bits(sta350->regmap, STA350_CONFF, STA350_CONFF_PWDN | STA350_CONFF_EAPD, STA350_CONFF_PWDN | STA350_CONFF_EAPD); break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable( ARRAY_SIZE(sta350->supplies), sta350->supplies); if (ret < 0) { dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); return ret; } sta350_startup_sequence(sta350); sta350_cache_sync(codec); } /* Power down */ regmap_update_bits(sta350->regmap, STA350_CONFF, STA350_CONFF_PWDN | STA350_CONFF_EAPD, 0); break; case SND_SOC_BIAS_OFF: /* The chip runs through the power down sequence for us */ regmap_update_bits(sta350->regmap, STA350_CONFF, STA350_CONFF_PWDN | STA350_CONFF_EAPD, 0); /* power down: low */ if (sta350->gpiod_power_down) gpiod_set_value(sta350->gpiod_power_down, 0); if (sta350->gpiod_nreset) gpiod_set_value(sta350->gpiod_nreset, 0); regulator_bulk_disable(ARRAY_SIZE(sta350->supplies), sta350->supplies); break; } codec->dapm.bias_level = level; return 0; } static const struct snd_soc_dai_ops sta350_dai_ops = { .hw_params = sta350_hw_params, .set_sysclk = sta350_set_dai_sysclk, .set_fmt = sta350_set_dai_fmt, }; static 
struct snd_soc_dai_driver sta350_dai = { .name = "sta350-hifi", .playback = { .stream_name = "Playback", .channels_min = 2, .channels_max = 2, .rates = STA350_RATES, .formats = STA350_FORMATS, }, .ops = &sta350_dai_ops, }; #ifdef CONFIG_PM static int sta350_suspend(struct snd_soc_codec *codec) { sta350_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int sta350_resume(struct snd_soc_codec *codec) { sta350_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } #else #define sta350_suspend NULL #define sta350_resume NULL #endif static int sta350_probe(struct snd_soc_codec *codec) { struct sta350_priv *sta350 = snd_soc_codec_get_drvdata(codec); struct sta350_platform_data *pdata = sta350->pdata; int i, ret = 0, thermal = 0; ret = regulator_bulk_enable(ARRAY_SIZE(sta350->supplies), sta350->supplies); if (ret < 0) { dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); return ret; } ret = sta350_startup_sequence(sta350); if (ret < 0) { dev_err(codec->dev, "Failed to startup device\n"); return ret; } /* CONFA */ if (!pdata->thermal_warning_recovery) thermal |= STA350_CONFA_TWAB; if (!pdata->thermal_warning_adjustment) thermal |= STA350_CONFA_TWRB; if (!pdata->fault_detect_recovery) thermal |= STA350_CONFA_FDRB; regmap_update_bits(sta350->regmap, STA350_CONFA, STA350_CONFA_TWAB | STA350_CONFA_TWRB | STA350_CONFA_FDRB, thermal); /* CONFC */ regmap_update_bits(sta350->regmap, STA350_CONFC, STA350_CONFC_OM_MASK, pdata->ffx_power_output_mode << STA350_CONFC_OM_SHIFT); regmap_update_bits(sta350->regmap, STA350_CONFC, STA350_CONFC_CSZ_MASK, pdata->drop_compensation_ns << STA350_CONFC_CSZ_SHIFT); regmap_update_bits(sta350->regmap, STA350_CONFC, STA350_CONFC_OCRB, pdata->oc_warning_adjustment ? STA350_CONFC_OCRB : 0); /* CONFE */ regmap_update_bits(sta350->regmap, STA350_CONFE, STA350_CONFE_MPCV, pdata->max_power_use_mpcc ? STA350_CONFE_MPCV : 0); regmap_update_bits(sta350->regmap, STA350_CONFE, STA350_CONFE_MPC, pdata->max_power_correction ? 
STA350_CONFE_MPC : 0); regmap_update_bits(sta350->regmap, STA350_CONFE, STA350_CONFE_AME, pdata->am_reduction_mode ? STA350_CONFE_AME : 0); regmap_update_bits(sta350->regmap, STA350_CONFE, STA350_CONFE_PWMS, pdata->odd_pwm_speed_mode ? STA350_CONFE_PWMS : 0); regmap_update_bits(sta350->regmap, STA350_CONFE, STA350_CONFE_DCCV, pdata->distortion_compensation ? STA350_CONFE_DCCV : 0); /* CONFF */ regmap_update_bits(sta350->regmap, STA350_CONFF, STA350_CONFF_IDE, pdata->invalid_input_detect_mute ? STA350_CONFF_IDE : 0); regmap_update_bits(sta350->regmap, STA350_CONFF, STA350_CONFF_OCFG_MASK, pdata->output_conf << STA350_CONFF_OCFG_SHIFT); /* channel to output mapping */ regmap_update_bits(sta350->regmap, STA350_C1CFG, STA350_CxCFG_OM_MASK, pdata->ch1_output_mapping << STA350_CxCFG_OM_SHIFT); regmap_update_bits(sta350->regmap, STA350_C2CFG, STA350_CxCFG_OM_MASK, pdata->ch2_output_mapping << STA350_CxCFG_OM_SHIFT); regmap_update_bits(sta350->regmap, STA350_C3CFG, STA350_CxCFG_OM_MASK, pdata->ch3_output_mapping << STA350_CxCFG_OM_SHIFT); /* miscellaneous registers */ regmap_update_bits(sta350->regmap, STA350_MISC1, STA350_MISC1_CPWMEN, pdata->activate_mute_output ? STA350_MISC1_CPWMEN : 0); regmap_update_bits(sta350->regmap, STA350_MISC1, STA350_MISC1_BRIDGOFF, pdata->bridge_immediate_off ? STA350_MISC1_BRIDGOFF : 0); regmap_update_bits(sta350->regmap, STA350_MISC1, STA350_MISC1_NSHHPEN, pdata->noise_shape_dc_cut ? STA350_MISC1_NSHHPEN : 0); regmap_update_bits(sta350->regmap, STA350_MISC1, STA350_MISC1_RPDNEN, pdata->powerdown_master_vol ? 
STA350_MISC1_RPDNEN: 0); regmap_update_bits(sta350->regmap, STA350_MISC2, STA350_MISC2_PNDLSL_MASK, pdata->powerdown_delay_divider << STA350_MISC2_PNDLSL_SHIFT); /* initialize coefficient shadow RAM with reset values */ for (i = 4; i <= 49; i += 5) sta350->coef_shadow[i] = 0x400000; for (i = 50; i <= 54; i++) sta350->coef_shadow[i] = 0x7fffff; sta350->coef_shadow[55] = 0x5a9df7; sta350->coef_shadow[56] = 0x7fffff; sta350->coef_shadow[59] = 0x7fffff; sta350->coef_shadow[60] = 0x400000; sta350->coef_shadow[61] = 0x400000; sta350_set_bias_level(codec, SND_SOC_BIAS_STANDBY); /* Bias level configuration will have done an extra enable */ regulator_bulk_disable(ARRAY_SIZE(sta350->supplies), sta350->supplies); return 0; } static int sta350_remove(struct snd_soc_codec *codec) { struct sta350_priv *sta350 = snd_soc_codec_get_drvdata(codec); sta350_set_bias_level(codec, SND_SOC_BIAS_OFF); regulator_bulk_disable(ARRAY_SIZE(sta350->supplies), sta350->supplies); return 0; } static const struct snd_soc_codec_driver sta350_codec = { .probe = sta350_probe, .remove = sta350_remove, .suspend = sta350_suspend, .resume = sta350_resume, .set_bias_level = sta350_set_bias_level, .controls = sta350_snd_controls, .num_controls = ARRAY_SIZE(sta350_snd_controls), .dapm_widgets = sta350_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(sta350_dapm_widgets), .dapm_routes = sta350_dapm_routes, .num_dapm_routes = ARRAY_SIZE(sta350_dapm_routes), }; static const struct regmap_config sta350_regmap = { .reg_bits = 8, .val_bits = 8, .max_register = STA350_MISC2, .reg_defaults = sta350_regs, .num_reg_defaults = ARRAY_SIZE(sta350_regs), .cache_type = REGCACHE_RBTREE, .wr_table = &sta350_write_regs, .rd_table = &sta350_read_regs, .volatile_table = &sta350_volatile_regs, }; #ifdef CONFIG_OF static const struct of_device_id st350_dt_ids[] = { { .compatible = "st,sta350", }, { } }; MODULE_DEVICE_TABLE(of, st350_dt_ids); static const char * const sta350_ffx_modes[] = { [STA350_FFX_PM_DROP_COMP] = 
"drop-compensation", [STA350_FFX_PM_TAPERED_COMP] = "tapered-compensation", [STA350_FFX_PM_FULL_POWER] = "full-power-mode", [STA350_FFX_PM_VARIABLE_DROP_COMP] = "variable-drop-compensation", }; static int sta350_probe_dt(struct device *dev, struct sta350_priv *sta350) { struct device_node *np = dev->of_node; struct sta350_platform_data *pdata; const char *ffx_power_mode; u16 tmp; u8 tmp8; pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; of_property_read_u8(np, "st,output-conf", &pdata->output_conf); of_property_read_u8(np, "st,ch1-output-mapping", &pdata->ch1_output_mapping); of_property_read_u8(np, "st,ch2-output-mapping", &pdata->ch2_output_mapping); of_property_read_u8(np, "st,ch3-output-mapping", &pdata->ch3_output_mapping); if (of_get_property(np, "st,thermal-warning-recovery", NULL)) pdata->thermal_warning_recovery = 1; if (of_get_property(np, "st,thermal-warning-adjustment", NULL)) pdata->thermal_warning_adjustment = 1; if (of_get_property(np, "st,fault-detect-recovery", NULL)) pdata->fault_detect_recovery = 1; pdata->ffx_power_output_mode = STA350_FFX_PM_VARIABLE_DROP_COMP; if (!of_property_read_string(np, "st,ffx-power-output-mode", &ffx_power_mode)) { int i, mode = -EINVAL; for (i = 0; i < ARRAY_SIZE(sta350_ffx_modes); i++) if (!strcasecmp(ffx_power_mode, sta350_ffx_modes[i])) mode = i; if (mode < 0) dev_warn(dev, "Unsupported ffx output mode: %s\n", ffx_power_mode); else pdata->ffx_power_output_mode = mode; } tmp = 140; of_property_read_u16(np, "st,drop-compensation-ns", &tmp); pdata->drop_compensation_ns = clamp_t(u16, tmp, 0, 300) / 20; if (of_get_property(np, "st,overcurrent-warning-adjustment", NULL)) pdata->oc_warning_adjustment = 1; /* CONFE */ if (of_get_property(np, "st,max-power-use-mpcc", NULL)) pdata->max_power_use_mpcc = 1; if (of_get_property(np, "st,max-power-correction", NULL)) pdata->max_power_correction = 1; if (of_get_property(np, "st,am-reduction-mode", NULL)) pdata->am_reduction_mode = 1; if 
(of_get_property(np, "st,odd-pwm-speed-mode", NULL)) pdata->odd_pwm_speed_mode = 1; if (of_get_property(np, "st,distortion-compensation", NULL)) pdata->distortion_compensation = 1; /* CONFF */ if (of_get_property(np, "st,invalid-input-detect-mute", NULL)) pdata->invalid_input_detect_mute = 1; /* MISC */ if (of_get_property(np, "st,activate-mute-output", NULL)) pdata->activate_mute_output = 1; if (of_get_property(np, "st,bridge-immediate-off", NULL)) pdata->bridge_immediate_off = 1; if (of_get_property(np, "st,noise-shape-dc-cut", NULL)) pdata->noise_shape_dc_cut = 1; if (of_get_property(np, "st,powerdown-master-volume", NULL)) pdata->powerdown_master_vol = 1; if (!of_property_read_u8(np, "st,powerdown-delay-divider", &tmp8)) { if (is_power_of_2(tmp8) && tmp8 >= 1 && tmp8 <= 128) pdata->powerdown_delay_divider = ilog2(tmp8); else dev_warn(dev, "Unsupported powerdown delay divider %d\n", tmp8); } sta350->pdata = pdata; return 0; } #endif static int sta350_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct device *dev = &i2c->dev; struct sta350_priv *sta350; int ret, i; sta350 = devm_kzalloc(dev, sizeof(struct sta350_priv), GFP_KERNEL); if (!sta350) return -ENOMEM; mutex_init(&sta350->coeff_lock); sta350->pdata = dev_get_platdata(dev); #ifdef CONFIG_OF if (dev->of_node) { ret = sta350_probe_dt(dev, sta350); if (ret < 0) return ret; } #endif /* GPIOs */ sta350->gpiod_nreset = devm_gpiod_get(dev, "reset"); if (IS_ERR(sta350->gpiod_nreset)) { ret = PTR_ERR(sta350->gpiod_nreset); if (ret != -ENOENT && ret != -ENOSYS) return ret; sta350->gpiod_nreset = NULL; } else { gpiod_direction_output(sta350->gpiod_nreset, 0); } sta350->gpiod_power_down = devm_gpiod_get(dev, "power-down"); if (IS_ERR(sta350->gpiod_power_down)) { ret = PTR_ERR(sta350->gpiod_power_down); if (ret != -ENOENT && ret != -ENOSYS) return ret; sta350->gpiod_power_down = NULL; } else { gpiod_direction_output(sta350->gpiod_power_down, 0); } /* regulators */ for (i = 0; i < 
ARRAY_SIZE(sta350->supplies); i++) sta350->supplies[i].supply = sta350_supply_names[i]; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(sta350->supplies), sta350->supplies); if (ret < 0) { dev_err(dev, "Failed to request supplies: %d\n", ret); return ret; } sta350->regmap = devm_regmap_init_i2c(i2c, &sta350_regmap); if (IS_ERR(sta350->regmap)) { ret = PTR_ERR(sta350->regmap); dev_err(dev, "Failed to init regmap: %d\n", ret); return ret; } i2c_set_clientdata(i2c, sta350); ret = snd_soc_register_codec(dev, &sta350_codec, &sta350_dai, 1); if (ret < 0) dev_err(dev, "Failed to register codec (%d)\n", ret); return ret; } static int sta350_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct i2c_device_id sta350_i2c_id[] = { { "sta350", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, sta350_i2c_id); static struct i2c_driver sta350_i2c_driver = { .driver = { .name = "sta350", .owner = THIS_MODULE, .of_match_table = of_match_ptr(st350_dt_ids), }, .probe = sta350_i2c_probe, .remove = sta350_i2c_remove, .id_table = sta350_i2c_id, }; module_i2c_driver(sta350_i2c_driver); MODULE_DESCRIPTION("ASoC STA350 driver"); MODULE_AUTHOR("Sven Brandau <info@brandau.biz>"); MODULE_LICENSE("GPL");
gpl-2.0
hujiafu/lpc1788_uclinux
cortexm_uclinux-master/kernel/linux-2.6.33/arch/sparc/prom/tree_32.c
728
8238
/* * tree.c: Basic device tree traversal/scanning for the Linux * prom library. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/string.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/ctype.h> #include <linux/module.h> #include <asm/openprom.h> #include <asm/oplib.h> extern void restore_current(void); static char promlib_buf[128]; /* Internal version of prom_getchild that does not alter return values. */ int __prom_getchild(int node) { unsigned long flags; int cnode; spin_lock_irqsave(&prom_lock, flags); cnode = prom_nodeops->no_child(node); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return cnode; } /* Return the child of node 'node' or zero if no this node has no * direct descendent. */ int prom_getchild(int node) { int cnode; if (node == -1) return 0; cnode = __prom_getchild(node); if (cnode == 0 || cnode == -1) return 0; return cnode; } EXPORT_SYMBOL(prom_getchild); /* Internal version of prom_getsibling that does not alter return values. */ int __prom_getsibling(int node) { unsigned long flags; int cnode; spin_lock_irqsave(&prom_lock, flags); cnode = prom_nodeops->no_nextnode(node); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return cnode; } /* Return the next sibling of node 'node' or zero if no more siblings * at this level of depth in the tree. */ int prom_getsibling(int node) { int sibnode; if (node == -1) return 0; sibnode = __prom_getsibling(node); if (sibnode == 0 || sibnode == -1) return 0; return sibnode; } EXPORT_SYMBOL(prom_getsibling); /* Return the length in bytes of property 'prop' at node 'node'. * Return -1 on error. 
*/ int prom_getproplen(int node, const char *prop) { int ret; unsigned long flags; if((!node) || (!prop)) return -1; spin_lock_irqsave(&prom_lock, flags); ret = prom_nodeops->no_proplen(node, prop); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return ret; } EXPORT_SYMBOL(prom_getproplen); /* Acquire a property 'prop' at node 'node' and place it in * 'buffer' which has a size of 'bufsize'. If the acquisition * was successful the length will be returned, else -1 is returned. */ int prom_getproperty(int node, const char *prop, char *buffer, int bufsize) { int plen, ret; unsigned long flags; plen = prom_getproplen(node, prop); if((plen > bufsize) || (plen == 0) || (plen == -1)) return -1; /* Ok, things seem all right. */ spin_lock_irqsave(&prom_lock, flags); ret = prom_nodeops->no_getprop(node, prop, buffer); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return ret; } EXPORT_SYMBOL(prom_getproperty); /* Acquire an integer property and return its value. Returns -1 * on failure. */ int prom_getint(int node, char *prop) { static int intprop; if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1) return intprop; return -1; } EXPORT_SYMBOL(prom_getint); /* Acquire an integer property, upon error return the passed default * integer. */ int prom_getintdefault(int node, char *property, int deflt) { int retval; retval = prom_getint(node, property); if(retval == -1) return deflt; return retval; } EXPORT_SYMBOL(prom_getintdefault); /* Acquire a boolean property, 1=TRUE 0=FALSE. */ int prom_getbool(int node, char *prop) { int retval; retval = prom_getproplen(node, prop); if(retval == -1) return 0; return 1; } EXPORT_SYMBOL(prom_getbool); /* Acquire a property whose value is a string, returns a null * string on error. The char pointer is the user supplied string * buffer. 
*/ void prom_getstring(int node, char *prop, char *user_buf, int ubuf_size) { int len; len = prom_getproperty(node, prop, user_buf, ubuf_size); if(len != -1) return; user_buf[0] = 0; return; } EXPORT_SYMBOL(prom_getstring); /* Does the device at node 'node' have name 'name'? * YES = 1 NO = 0 */ int prom_nodematch(int node, char *name) { int error; static char namebuf[128]; error = prom_getproperty(node, "name", namebuf, sizeof(namebuf)); if (error == -1) return 0; if(strcmp(namebuf, name) == 0) return 1; return 0; } /* Search siblings at 'node_start' for a node with name * 'nodename'. Return node if successful, zero if not. */ int prom_searchsiblings(int node_start, char *nodename) { int thisnode, error; for(thisnode = node_start; thisnode; thisnode=prom_getsibling(thisnode)) { error = prom_getproperty(thisnode, "name", promlib_buf, sizeof(promlib_buf)); /* Should this ever happen? */ if(error == -1) continue; if(strcmp(nodename, promlib_buf)==0) return thisnode; } return 0; } EXPORT_SYMBOL(prom_searchsiblings); /* Interal version of nextprop that does not alter return values. */ char * __prom_nextprop(int node, char * oprop) { unsigned long flags; char *prop; spin_lock_irqsave(&prom_lock, flags); prop = prom_nodeops->no_nextprop(node, oprop); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return prop; } /* Return the first property name for node 'node'. */ /* buffer is unused argument, but as v9 uses it, we need to have the same interface */ char * prom_firstprop(int node, char *bufer) { if (node == 0 || node == -1) return ""; return __prom_nextprop(node, ""); } EXPORT_SYMBOL(prom_firstprop); /* Return the property type string after property type 'oprop' * at node 'node' . Returns empty string if no more * property types for this node. 
*/ char * prom_nextprop(int node, char *oprop, char *buffer) { if (node == 0 || node == -1) return ""; return __prom_nextprop(node, oprop); } EXPORT_SYMBOL(prom_nextprop); int prom_finddevice(char *name) { char nbuf[128]; char *s = name, *d; int node = prom_root_node, node2; unsigned int which_io, phys_addr; struct linux_prom_registers reg[PROMREG_MAX]; while (*s++) { if (!*s) return node; /* path '.../' is legal */ node = prom_getchild(node); for (d = nbuf; *s != 0 && *s != '@' && *s != '/';) *d++ = *s++; *d = 0; node = prom_searchsiblings(node, nbuf); if (!node) return 0; if (*s == '@') { if (isxdigit(s[1]) && s[2] == ',') { which_io = simple_strtoul(s+1, NULL, 16); phys_addr = simple_strtoul(s+3, &d, 16); if (d != s + 3 && (!*d || *d == '/') && d <= s + 3 + 8) { node2 = node; while (node2 && node2 != -1) { if (prom_getproperty (node2, "reg", (char *)reg, sizeof (reg)) > 0) { if (which_io == reg[0].which_io && phys_addr == reg[0].phys_addr) { node = node2; break; } } node2 = prom_getsibling(node2); if (!node2 || node2 == -1) break; node2 = prom_searchsiblings(prom_getsibling(node2), nbuf); } } } while (*s != 0 && *s != '/') s++; } } return node; } EXPORT_SYMBOL(prom_finddevice); int prom_node_has_property(int node, char *prop) { char *current_property = ""; do { current_property = prom_nextprop(node, current_property, NULL); if(!strcmp(current_property, prop)) return 1; } while (*current_property); return 0; } EXPORT_SYMBOL(prom_node_has_property); /* Set property 'pname' at node 'node' to value 'value' which has a length * of 'size' bytes. Return the number of bytes the prom accepted. 
*/ int prom_setprop(int node, const char *pname, char *value, int size) { unsigned long flags; int ret; if(size == 0) return 0; if((pname == 0) || (value == 0)) return 0; spin_lock_irqsave(&prom_lock, flags); ret = prom_nodeops->no_setprop(node, pname, value, size); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return ret; } EXPORT_SYMBOL(prom_setprop); int prom_inst2pkg(int inst) { int node; unsigned long flags; spin_lock_irqsave(&prom_lock, flags); node = (*romvec->pv_v2devops.v2_inst2pkg)(inst); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); if (node == -1) return 0; return node; } /* Return 'node' assigned to a particular prom 'path' * FIXME: Should work for v0 as well */ int prom_pathtoinode(char *path) { int node, inst; inst = prom_devopen (path); if (inst == -1) return 0; node = prom_inst2pkg (inst); prom_devclose (inst); if (node == -1) return 0; return node; }
gpl-2.0
craneboard/craneboard-kernel
arch/arm/mach-clps711x/fortunet.c
1496
2197
/* * linux/arch/arm/mach-clps711x/fortunet.c * * Derived from linux/arch/arm/mach-integrator/arch.c * * Copyright (C) 2000 Deep Blue Solutions Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/init.h> #include <linux/initrd.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/memory.h> #include "common.h" struct meminfo memmap = { .nr_banks = 1, .bank = { { .start = 0xC0000000, .size = 0x01000000, .node = 0 }, }, }; typedef struct tag_IMAGE_PARAMS { int ramdisk_ok; int ramdisk_address; int ramdisk_size; int ram_size; int extra_param_type; int extra_param_ptr; int command_line; } IMAGE_PARAMS; #define IMAGE_PARAMS_PHYS 0xC01F0000 static void __init fortunet_fixup(struct machine_desc *desc, struct tag *tags, char **cmdline, struct meminfo *mi) { IMAGE_PARAMS *ip = phys_to_virt(IMAGE_PARAMS_PHYS); *cmdline = phys_to_virt(ip->command_line); #ifdef CONFIG_BLK_DEV_INITRD if(ip->ramdisk_ok) { initrd_start = __phys_to_virt(ip->ramdisk_address); initrd_end = initrd_start + ip->ramdisk_size; } #endif memmap.bank[0].size = ip->ram_size; *mi = memmap; } MACHINE_START(FORTUNET, "ARM-FortuNet") /* Maintainer: FortuNet Inc. 
*/ .phys_io = 0x80000000, .io_pg_offst = ((0xf0000000) >> 18) & 0xfffc, .boot_params = 0x00000000, .fixup = fortunet_fixup, .map_io = clps711x_map_io, .init_irq = clps711x_init_irq, .timer = &clps711x_timer, MACHINE_END
gpl-2.0
Split-Screen/android_kernel_motorola_msm8916
fs/lockd/clntxdr.c
2776
13690
/* * linux/fs/lockd/clntxdr.c * * XDR functions to encode/decode NLM version 3 RPC arguments and results. * NLM version 3 is backwards compatible with NLM versions 1 and 2. * * NLM client-side only. * * Copyright (C) 2010, Oracle. All rights reserved. */ #include <linux/types.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/stats.h> #include <linux/lockd/lockd.h> #define NLMDBG_FACILITY NLMDBG_XDR #if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ) # error "NLM host name cannot be larger than XDR_MAX_NETOBJ!" #endif /* * Declare the space requirements for NLM arguments and replies as * number of 32bit-words */ #define NLM_cookie_sz (1+(NLM_MAXCOOKIELEN>>2)) #define NLM_caller_sz (1+(NLMCLNT_OHSIZE>>2)) #define NLM_owner_sz (1+(NLMCLNT_OHSIZE>>2)) #define NLM_fhandle_sz (1+(NFS2_FHSIZE>>2)) #define NLM_lock_sz (3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz) #define NLM_holder_sz (4+NLM_owner_sz) #define NLM_testargs_sz (NLM_cookie_sz+1+NLM_lock_sz) #define NLM_lockargs_sz (NLM_cookie_sz+4+NLM_lock_sz) #define NLM_cancargs_sz (NLM_cookie_sz+2+NLM_lock_sz) #define NLM_unlockargs_sz (NLM_cookie_sz+NLM_lock_sz) #define NLM_testres_sz (NLM_cookie_sz+1+NLM_holder_sz) #define NLM_res_sz (NLM_cookie_sz+1) #define NLM_norep_sz (0) static s32 loff_t_to_s32(loff_t offset) { s32 res; if (offset >= NLM_OFFSET_MAX) res = NLM_OFFSET_MAX; else if (offset <= -NLM_OFFSET_MAX) res = -NLM_OFFSET_MAX; else res = offset; return res; } static void nlm_compute_offsets(const struct nlm_lock *lock, u32 *l_offset, u32 *l_len) { const struct file_lock *fl = &lock->fl; *l_offset = loff_t_to_s32(fl->fl_start); if (fl->fl_end == OFFSET_MAX) *l_len = 0; else *l_len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1); } /* * Handle decode buffer overflows out-of-line. */ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) { dprintk("lockd: %s prematurely hit the end of our receive buffer. 
" "Remaining buffer length is %tu words.\n", func, xdr->end - xdr->p); } /* * Encode/decode NLMv3 basic data types * * Basic NLMv3 data types are not defined in an IETF standards * document. X/Open has a description of these data types that * is useful. See Chapter 10 of "Protocols for Interworking: * XNFS, Version 3W". * * Not all basic data types have their own encoding and decoding * functions. For run-time efficiency, some data types are encoded * or decoded inline. */ static void encode_bool(struct xdr_stream *xdr, const int value) { __be32 *p; p = xdr_reserve_space(xdr, 4); *p = value ? xdr_one : xdr_zero; } static void encode_int32(struct xdr_stream *xdr, const s32 value) { __be32 *p; p = xdr_reserve_space(xdr, 4); *p = cpu_to_be32(value); } /* * typedef opaque netobj<MAXNETOBJ_SZ> */ static void encode_netobj(struct xdr_stream *xdr, const u8 *data, const unsigned int length) { __be32 *p; p = xdr_reserve_space(xdr, 4 + length); xdr_encode_opaque(p, data, length); } static int decode_netobj(struct xdr_stream *xdr, struct xdr_netobj *obj) { u32 length; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; length = be32_to_cpup(p++); if (unlikely(length > XDR_MAX_NETOBJ)) goto out_size; obj->len = length; obj->data = (u8 *)p; return 0; out_size: dprintk("NFS: returned netobj was too long: %u\n", length); return -EIO; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * netobj cookie; */ static void encode_cookie(struct xdr_stream *xdr, const struct nlm_cookie *cookie) { encode_netobj(xdr, (u8 *)&cookie->data, cookie->len); } static int decode_cookie(struct xdr_stream *xdr, struct nlm_cookie *cookie) { u32 length; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; length = be32_to_cpup(p++); /* apparently HPUX can return empty cookies */ if (length == 0) goto out_hpux; if (length > NLM_MAXCOOKIELEN) goto out_size; p = xdr_inline_decode(xdr, length); if (unlikely(p == NULL)) goto 
out_overflow; cookie->len = length; memcpy(cookie->data, p, length); return 0; out_hpux: cookie->len = 4; memset(cookie->data, 0, 4); return 0; out_size: dprintk("NFS: returned cookie was too long: %u\n", length); return -EIO; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * netobj fh; */ static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh) { encode_netobj(xdr, (u8 *)&fh->data, NFS2_FHSIZE); } /* * enum nlm_stats { * LCK_GRANTED = 0, * LCK_DENIED = 1, * LCK_DENIED_NOLOCKS = 2, * LCK_BLOCKED = 3, * LCK_DENIED_GRACE_PERIOD = 4 * }; * * * struct nlm_stat { * nlm_stats stat; * }; * * NB: we don't swap bytes for the NLM status values. The upper * layers deal directly with the status value in network byte * order. */ static void encode_nlm_stat(struct xdr_stream *xdr, const __be32 stat) { __be32 *p; WARN_ON_ONCE(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD); p = xdr_reserve_space(xdr, 4); *p = stat; } static int decode_nlm_stat(struct xdr_stream *xdr, __be32 *stat) { __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; if (unlikely(ntohl(*p) > ntohl(nlm_lck_denied_grace_period))) goto out_enum; *stat = *p; return 0; out_enum: dprintk("%s: server returned invalid nlm_stats value: %u\n", __func__, be32_to_cpup(p)); return -EIO; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * struct nlm_holder { * bool exclusive; * int uppid; * netobj oh; * unsigned l_offset; * unsigned l_len; * }; */ static void encode_nlm_holder(struct xdr_stream *xdr, const struct nlm_res *result) { const struct nlm_lock *lock = &result->lock; u32 l_offset, l_len; __be32 *p; encode_bool(xdr, lock->fl.fl_type == F_RDLCK); encode_int32(xdr, lock->svid); encode_netobj(xdr, lock->oh.data, lock->oh.len); p = xdr_reserve_space(xdr, 4 + 4); nlm_compute_offsets(lock, &l_offset, &l_len); *p++ = cpu_to_be32(l_offset); *p = cpu_to_be32(l_len); } static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result) 
{ struct nlm_lock *lock = &result->lock; struct file_lock *fl = &lock->fl; u32 exclusive, l_offset, l_len; int error; __be32 *p; s32 end; memset(lock, 0, sizeof(*lock)); locks_init_lock(fl); p = xdr_inline_decode(xdr, 4 + 4); if (unlikely(p == NULL)) goto out_overflow; exclusive = be32_to_cpup(p++); lock->svid = be32_to_cpup(p); fl->fl_pid = (pid_t)lock->svid; error = decode_netobj(xdr, &lock->oh); if (unlikely(error)) goto out; p = xdr_inline_decode(xdr, 4 + 4); if (unlikely(p == NULL)) goto out_overflow; fl->fl_flags = FL_POSIX; fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK; l_offset = be32_to_cpup(p++); l_len = be32_to_cpup(p); end = l_offset + l_len - 1; fl->fl_start = (loff_t)l_offset; if (l_len == 0 || end < 0) fl->fl_end = OFFSET_MAX; else fl->fl_end = (loff_t)end; error = 0; out: return error; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * string caller_name<LM_MAXSTRLEN>; */ static void encode_caller_name(struct xdr_stream *xdr, const char *name) { /* NB: client-side does not set lock->len */ u32 length = strlen(name); __be32 *p; p = xdr_reserve_space(xdr, 4 + length); xdr_encode_opaque(p, name, length); } /* * struct nlm_lock { * string caller_name<LM_MAXSTRLEN>; * netobj fh; * netobj oh; * int uppid; * unsigned l_offset; * unsigned l_len; * }; */ static void encode_nlm_lock(struct xdr_stream *xdr, const struct nlm_lock *lock) { u32 l_offset, l_len; __be32 *p; encode_caller_name(xdr, lock->caller); encode_fh(xdr, &lock->fh); encode_netobj(xdr, lock->oh.data, lock->oh.len); p = xdr_reserve_space(xdr, 4 + 4 + 4); *p++ = cpu_to_be32(lock->svid); nlm_compute_offsets(lock, &l_offset, &l_len); *p++ = cpu_to_be32(l_offset); *p = cpu_to_be32(l_len); } /* * NLMv3 XDR encode functions * * NLMv3 argument types are defined in Chapter 10 of The Open Group's * "Protocols for Interworking: XNFS, Version 3W". 
*/ /* * struct nlm_testargs { * netobj cookie; * bool exclusive; * struct nlm_lock alock; * }; */ static void nlm_xdr_enc_testargs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm_lock(xdr, lock); } /* * struct nlm_lockargs { * netobj cookie; * bool block; * bool exclusive; * struct nlm_lock alock; * bool reclaim; * int state; * }; */ static void nlm_xdr_enc_lockargs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm_lock(xdr, lock); encode_bool(xdr, args->reclaim); encode_int32(xdr, args->state); } /* * struct nlm_cancargs { * netobj cookie; * bool block; * bool exclusive; * struct nlm_lock alock; * }; */ static void nlm_xdr_enc_cancargs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm_lock(xdr, lock); } /* * struct nlm_unlockargs { * netobj cookie; * struct nlm_lock alock; * }; */ static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_nlm_lock(xdr, lock); } /* * struct nlm_res { * netobj cookie; * nlm_stat stat; * }; */ static void nlm_xdr_enc_res(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_res *result) { encode_cookie(xdr, &result->cookie); encode_nlm_stat(xdr, result->status); } /* * union nlm_testrply switch (nlm_stats stat) { * case LCK_DENIED: * struct nlm_holder holder; * default: * void; * }; * * struct nlm_testres { * netobj cookie; * nlm_testrply 
test_stat; * }; */ static void encode_nlm_testrply(struct xdr_stream *xdr, const struct nlm_res *result) { if (result->status == nlm_lck_denied) encode_nlm_holder(xdr, result); } static void nlm_xdr_enc_testres(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_res *result) { encode_cookie(xdr, &result->cookie); encode_nlm_stat(xdr, result->status); encode_nlm_testrply(xdr, result); } /* * NLMv3 XDR decode functions * * NLMv3 result types are defined in Chapter 10 of The Open Group's * "Protocols for Interworking: XNFS, Version 3W". */ /* * union nlm_testrply switch (nlm_stats stat) { * case LCK_DENIED: * struct nlm_holder holder; * default: * void; * }; * * struct nlm_testres { * netobj cookie; * nlm_testrply test_stat; * }; */ static int decode_nlm_testrply(struct xdr_stream *xdr, struct nlm_res *result) { int error; error = decode_nlm_stat(xdr, &result->status); if (unlikely(error)) goto out; if (result->status == nlm_lck_denied) error = decode_nlm_holder(xdr, result); out: return error; } static int nlm_xdr_dec_testres(struct rpc_rqst *req, struct xdr_stream *xdr, struct nlm_res *result) { int error; error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; error = decode_nlm_testrply(xdr, result); out: return error; } /* * struct nlm_res { * netobj cookie; * nlm_stat stat; * }; */ static int nlm_xdr_dec_res(struct rpc_rqst *req, struct xdr_stream *xdr, struct nlm_res *result) { int error; error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; error = decode_nlm_stat(xdr, &result->status); out: return error; } /* * For NLM, a void procedure really returns nothing */ #define nlm_xdr_dec_norep NULL #define PROC(proc, argtype, restype) \ [NLMPROC_##proc] = { \ .p_proc = NLMPROC_##proc, \ .p_encode = (kxdreproc_t)nlm_xdr_enc_##argtype, \ .p_decode = (kxdrdproc_t)nlm_xdr_dec_##restype, \ .p_arglen = NLM_##argtype##_sz, \ .p_replen = NLM_##restype##_sz, \ .p_statidx = NLMPROC_##proc, \ .p_name = #proc, \ } static 
struct rpc_procinfo nlm_procedures[] = { PROC(TEST, testargs, testres), PROC(LOCK, lockargs, res), PROC(CANCEL, cancargs, res), PROC(UNLOCK, unlockargs, res), PROC(GRANTED, testargs, res), PROC(TEST_MSG, testargs, norep), PROC(LOCK_MSG, lockargs, norep), PROC(CANCEL_MSG, cancargs, norep), PROC(UNLOCK_MSG, unlockargs, norep), PROC(GRANTED_MSG, testargs, norep), PROC(TEST_RES, testres, norep), PROC(LOCK_RES, res, norep), PROC(CANCEL_RES, res, norep), PROC(UNLOCK_RES, res, norep), PROC(GRANTED_RES, res, norep), }; static const struct rpc_version nlm_version1 = { .number = 1, .nrprocs = ARRAY_SIZE(nlm_procedures), .procs = nlm_procedures, }; static const struct rpc_version nlm_version3 = { .number = 3, .nrprocs = ARRAY_SIZE(nlm_procedures), .procs = nlm_procedures, }; static const struct rpc_version *nlm_versions[] = { [1] = &nlm_version1, [3] = &nlm_version3, #ifdef CONFIG_LOCKD_V4 [4] = &nlm_version4, #endif }; static struct rpc_stat nlm_rpc_stats; const struct rpc_program nlm_program = { .name = "lockd", .number = NLM_PROGRAM, .nrvers = ARRAY_SIZE(nlm_versions), .version = nlm_versions, .stats = &nlm_rpc_stats, };
gpl-2.0
MatusKysel/Medusa
arch/arm/mach-pxa/mp900.c
3544
2367
/* * linux/arch/arm/mach-pxa/mp900.c * * Support for the NEC MobilePro900/C platform * * Based on mach-pxa/gumstix.c * * 2007, 2008 Kristoffer Ericson <kristoffer.ericson@gmail.com> * 2007, 2008 Michael Petchkovsky <mkpetch@internode.on.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/usb/isp116x.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/pxa25x.h> #include "generic.h" static void isp116x_pfm_delay(struct device *dev, int delay) { /* 400Mhz PXA2 = 2.5ns / instruction */ int cyc = delay / 10; /* 4 Instructions = 4 x 2.5ns = 10ns */ __asm__ volatile ("0:\n" "subs %0, %1, #1\n" "bge 0b\n" :"=r" (cyc) :"0"(cyc) ); } static struct isp116x_platform_data isp116x_pfm_data = { .remote_wakeup_enable = 1, .delay = isp116x_pfm_delay, }; static struct resource isp116x_pfm_resources[] = { [0] = { .start = 0x0d000000, .end = 0x0d000000 + 1, .flags = IORESOURCE_MEM, }, [1] = { .start = 0x0d000000 + 4, .end = 0x0d000000 + 5, .flags = IORESOURCE_MEM, }, [2] = { .start = 61, .end = 61, .flags = IORESOURCE_IRQ, }, }; static struct platform_device mp900c_dummy_device = { .name = "mp900c_dummy", .id = -1, }; static struct platform_device mp900c_usb = { .name = "isp116x-hcd", .num_resources = ARRAY_SIZE(isp116x_pfm_resources), .resource = isp116x_pfm_resources, .dev.platform_data = &isp116x_pfm_data, }; static struct platform_device *devices[] __initdata = { &mp900c_dummy_device, &mp900c_usb, }; static void __init mp900c_init(void) { printk(KERN_INFO "MobilePro 900/C machine init\n"); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); platform_add_devices(devices, ARRAY_SIZE(devices)); } /* Maintainer - Michael Petchkovsky <mkpetch@internode.on.net> */ 
MACHINE_START(NEC_MP900, "MobilePro900/C") .atag_offset = 0x220100, .init_time = pxa_timer_init, .map_io = pxa25x_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = pxa25x_init_irq, .handle_irq = pxa25x_handle_irq, .init_machine = mp900c_init, .restart = pxa_restart, MACHINE_END
gpl-2.0
elevendroids/kernel-thalamus-custom
arch/mips/pci/pci-ip27.c
4312
5705
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2003 Christoph Hellwig (hch@lst.de) * Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/smp.h> #include <asm/sn/arch.h> #include <asm/pci/bridge.h> #include <asm/paccess.h> #include <asm/sn/intr.h> #include <asm/sn/sn0/hub.h> /* * Max #PCI busses we can handle; ie, max #PCI bridges. */ #define MAX_PCI_BUSSES 40 /* * Max #PCI devices (like scsi controllers) we handle on a bus. */ #define MAX_DEVICES_PER_PCIBUS 8 /* * XXX: No kmalloc available when we do our crosstalk scan, * we should try to move it later in the boot process. */ static struct bridge_controller bridges[MAX_PCI_BUSSES]; /* * Translate from irq to software PCI bus number and PCI slot. */ struct bridge_controller *irq_to_bridge[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS]; int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS]; extern struct pci_ops bridge_pci_ops; int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid) { unsigned long offset = NODE_OFFSET(nasid); struct bridge_controller *bc; static int num_bridges = 0; bridge_t *bridge; int slot; pci_probe_only = 1; printk("a bridge\n"); /* XXX: kludge alert.. 
*/ if (!num_bridges) ioport_resource.end = ~0UL; bc = &bridges[num_bridges]; bc->pc.pci_ops = &bridge_pci_ops; bc->pc.mem_resource = &bc->mem; bc->pc.io_resource = &bc->io; bc->pc.index = num_bridges; bc->mem.name = "Bridge PCI MEM"; bc->pc.mem_offset = offset; bc->mem.start = 0; bc->mem.end = ~0UL; bc->mem.flags = IORESOURCE_MEM; bc->io.name = "Bridge IO MEM"; bc->pc.io_offset = offset; bc->io.start = 0UL; bc->io.end = ~0UL; bc->io.flags = IORESOURCE_IO; bc->irq_cpu = smp_processor_id(); bc->widget_id = widget_id; bc->nasid = nasid; bc->baddr = (u64)masterwid << 60 | PCI64_ATTR_BAR; /* * point to this bridge */ bridge = (bridge_t *) RAW_NODE_SWIN_BASE(nasid, widget_id); /* * Clear all pending interrupts. */ bridge->b_int_rst_stat = BRIDGE_IRR_ALL_CLR; /* * Until otherwise set up, assume all interrupts are from slot 0 */ bridge->b_int_device = 0x0; /* * swap pio's to pci mem and io space (big windows) */ bridge->b_wid_control |= BRIDGE_CTRL_IO_SWAP | BRIDGE_CTRL_MEM_SWAP; #ifdef CONFIG_PAGE_SIZE_4KB bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE; #else /* 16kB or larger */ bridge->b_wid_control |= BRIDGE_CTRL_PAGE_SIZE; #endif /* * Hmm... IRIX sets additional bits in the address which * are documented as reserved in the bridge docs. */ bridge->b_wid_int_upper = 0x8000 | (masterwid << 16); bridge->b_wid_int_lower = 0x01800090; /* PI_INT_PEND_MOD off*/ bridge->b_dir_map = (masterwid << 20); /* DMA */ bridge->b_int_enable = 0; for (slot = 0; slot < 8; slot ++) { bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR; bc->pci_int[slot] = -1; } bridge->b_wid_tflush; /* wait until Bridge PIO complete */ bc->base = bridge; register_pci_controller(&bc->pc); num_bridges++; return 0; } /* * All observed requests have pin == 1. We could have a global here, that * gets incremented and returned every time - unfortunately, pci_map_irq * may be called on the same device over and over, and need to return the * same value. On O2000, pin can be 0 or 1, and PCI slots can be [0..7]. 
* * A given PCI device, in general, should be able to intr any of the cpus * on any one of the hubs connected to its xbow. */ int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return 0; } static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev) { while (dev->bus->parent) { /* Move up the chain of bridges. */ dev = dev->bus->self; } return dev; } /* Do platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); struct pci_dev *rdev = bridge_root_dev(dev); int slot = PCI_SLOT(rdev->devfn); int irq; irq = bc->pci_int[slot]; if (irq == -1) { irq = request_bridge_irq(bc); if (irq < 0) return irq; bc->pci_int[slot] = irq; } irq_to_bridge[irq] = bc; irq_to_slot[irq] = slot; dev->irq = irq; return 0; } /* * Device might live on a subordinate PCI bus. XXX Walk up the chain of buses * to find the slot number in sense of the bridge device register. * XXX This also means multiple devices might rely on conflicting bridge * settings. 
*/ static inline void pci_disable_swapping(struct pci_dev *dev) { struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); bridge_t *bridge = bc->base; int slot = PCI_SLOT(dev->devfn); /* Turn off byte swapping */ bridge->b_device[slot].reg &= ~BRIDGE_DEV_SWAP_DIR; bridge->b_widget.w_tflush; /* Flush */ } static inline void pci_enable_swapping(struct pci_dev *dev) { struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); bridge_t *bridge = bc->base; int slot = PCI_SLOT(dev->devfn); /* Turn on byte swapping */ bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR; bridge->b_widget.w_tflush; /* Flush */ } static void __init pci_fixup_ioc3(struct pci_dev *d) { pci_disable_swapping(d); } int pcibus_to_node(struct pci_bus *bus) { struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); return bc->nasid; } EXPORT_SYMBOL(pcibus_to_node); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, pci_fixup_ioc3);
gpl-2.0
AuzOne/auzone_kenel
sound/oss/ad1848.c
5080
75999
/* * sound/oss/ad1848.c * * The low level driver for the AD1848/CS4248 codec chip which * is used for example in the MS Sound System. * * The CS4231 which is used in the GUS MAX and some other cards is * upwards compatible with AD1848 and this driver is able to drive it. * * CS4231A and AD1845 are upward compatible with CS4231. However * the new features of these chips are different. * * CS4232 is a PnP audio chip which contains a CS4231A (and SB, MPU). * CS4232A is an improved version of CS4232. * * * * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. * * * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed) * general sleep/wakeup clean up. * Alan Cox : reformatted. Fixed SMP bugs. Moved to kernel alloc/free * of irqs. Use dev_id. * Christoph Hellwig : adapted to module_init/module_exit * Aki Laukkanen : added power management support * Arnaldo C. de Melo : added missing restore_flags in ad1848_resume * Miguel Freitas : added ISA PnP support * Alan Cox : Added CS4236->4239 identification * Daniel T. Cobra : Alernate config/mixer for later chips * Alan Cox : Merged chip idents and config code * * TODO * APM save restore assist code on IBM thinkpad * * Status: * Tested. Believed fully functional. 
*/ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/isapnp.h> #include <linux/pnp.h> #include <linux/spinlock.h> #define DEB(x) #define DEB1(x) #include "sound_config.h" #include "ad1848.h" #include "ad1848_mixer.h" typedef struct { spinlock_t lock; int base; int irq; int dma1, dma2; int dual_dma; /* 1, when two DMA channels allocated */ int subtype; unsigned char MCE_bit; unsigned char saved_regs[64]; /* Includes extended register space */ int debug_flag; int audio_flags; int record_dev, playback_dev; int xfer_count; int audio_mode; int open_mode; int intr_active; char *chip_name, *name; int model; #define MD_1848 1 #define MD_4231 2 #define MD_4231A 3 #define MD_1845 4 #define MD_4232 5 #define MD_C930 6 #define MD_IWAVE 7 #define MD_4235 8 /* Crystal Audio CS4235 */ #define MD_1845_SSCAPE 9 /* Ensoniq Soundscape PNP*/ #define MD_4236 10 /* 4236 and higher */ #define MD_42xB 11 /* CS 42xB */ #define MD_4239 12 /* CS4239 */ /* Mixer parameters */ int recmask; int supported_devices, orig_devices; int supported_rec_devices, orig_rec_devices; int *levels; short mixer_reroute[32]; int dev_no; volatile unsigned long timer_ticks; int timer_running; int irq_ok; mixer_ents *mix_devices; int mixer_output_port; } ad1848_info; typedef struct ad1848_port_info { int open_mode; int speed; unsigned char speed_bits; int channels; int audio_format; unsigned char format_bits; } ad1848_port_info; static struct address_info cfg; static int nr_ad1848_devs; static bool deskpro_xl; static bool deskpro_m; static bool soundpro; static volatile signed char irq2dev[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; #ifndef EXCLUDE_TIMERS static int timer_installed = -1; #endif static int loaded; static int ad_format_mask[13 /*devc->model */ ] = { 0, AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW, AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | 
AFMT_IMA_ADPCM, AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM, AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW, /* AD1845 */ AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM, AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM, AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM, AFMT_U8 | AFMT_S16_LE /* CS4235 */, AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW /* Ensoniq Soundscape*/, AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM, AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM, AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM }; static ad1848_info adev_info[MAX_AUDIO_DEV]; #define io_Index_Addr(d) ((d)->base) #define io_Indexed_Data(d) ((d)->base+1) #define io_Status(d) ((d)->base+2) #define io_Polled_IO(d) ((d)->base+3) static struct { unsigned char flags; #define CAP_F_TIMER 0x01 } capabilities [10 /*devc->model */ ] = { {0} ,{0} /* MD_1848 */ ,{CAP_F_TIMER} /* MD_4231 */ ,{CAP_F_TIMER} /* MD_4231A */ ,{CAP_F_TIMER} /* MD_1845 */ ,{CAP_F_TIMER} /* MD_4232 */ ,{0} /* MD_C930 */ ,{CAP_F_TIMER} /* MD_IWAVE */ ,{0} /* MD_4235 */ ,{CAP_F_TIMER} /* MD_1845_SSCAPE */ }; #ifdef CONFIG_PNP static int isapnp = 1; static int isapnpjump; static bool reverse; static int audio_activated; #else static int isapnp; #endif static int ad1848_open(int dev, int mode); static void ad1848_close(int dev); static void ad1848_output_block(int dev, unsigned long buf, int count, int intrflag); static void ad1848_start_input(int dev, unsigned long buf, int count, int intrflag); static int ad1848_prepare_for_output(int dev, int bsize, int bcount); static int ad1848_prepare_for_input(int dev, int bsize, int bcount); static void ad1848_halt(int dev); static void ad1848_halt_input(int dev); static void ad1848_halt_output(int dev); static void ad1848_trigger(int dev, int bits); 
static irqreturn_t adintr(int irq, void *dev_id); #ifndef EXCLUDE_TIMERS static int ad1848_tmr_install(int dev); static void ad1848_tmr_reprogram(int dev); #endif static int ad_read(ad1848_info * devc, int reg) { int x; int timeout = 900000; while (timeout > 0 && inb(devc->base) == 0x80) /*Are we initializing */ timeout--; if(reg < 32) { outb(((unsigned char) (reg & 0xff) | devc->MCE_bit), io_Index_Addr(devc)); x = inb(io_Indexed_Data(devc)); } else { int xreg, xra; xreg = (reg & 0xff) - 32; xra = (((xreg & 0x0f) << 4) & 0xf0) | 0x08 | ((xreg & 0x10) >> 2); outb(((unsigned char) (23 & 0xff) | devc->MCE_bit), io_Index_Addr(devc)); outb(((unsigned char) (xra & 0xff)), io_Indexed_Data(devc)); x = inb(io_Indexed_Data(devc)); } return x; } static void ad_write(ad1848_info * devc, int reg, int data) { int timeout = 900000; while (timeout > 0 && inb(devc->base) == 0x80) /* Are we initializing */ timeout--; if(reg < 32) { outb(((unsigned char) (reg & 0xff) | devc->MCE_bit), io_Index_Addr(devc)); outb(((unsigned char) (data & 0xff)), io_Indexed_Data(devc)); } else { int xreg, xra; xreg = (reg & 0xff) - 32; xra = (((xreg & 0x0f) << 4) & 0xf0) | 0x08 | ((xreg & 0x10) >> 2); outb(((unsigned char) (23 & 0xff) | devc->MCE_bit), io_Index_Addr(devc)); outb(((unsigned char) (xra & 0xff)), io_Indexed_Data(devc)); outb((unsigned char) (data & 0xff), io_Indexed_Data(devc)); } } static void wait_for_calibration(ad1848_info * devc) { int timeout = 0; /* * Wait until the auto calibration process has finished. * * 1) Wait until the chip becomes ready (reads don't return 0x80). * 2) Wait until the ACI bit of I11 gets on and then off. 
*/ timeout = 100000; while (timeout > 0 && inb(devc->base) == 0x80) timeout--; if (inb(devc->base) & 0x80) printk(KERN_WARNING "ad1848: Auto calibration timed out(1).\n"); timeout = 100; while (timeout > 0 && !(ad_read(devc, 11) & 0x20)) timeout--; if (!(ad_read(devc, 11) & 0x20)) return; timeout = 80000; while (timeout > 0 && (ad_read(devc, 11) & 0x20)) timeout--; if (ad_read(devc, 11) & 0x20) if ((devc->model != MD_1845) && (devc->model != MD_1845_SSCAPE)) printk(KERN_WARNING "ad1848: Auto calibration timed out(3).\n"); } static void ad_mute(ad1848_info * devc) { int i; unsigned char prev; /* * Save old register settings and mute output channels */ for (i = 6; i < 8; i++) { prev = devc->saved_regs[i] = ad_read(devc, i); } } static void ad_unmute(ad1848_info * devc) { } static void ad_enter_MCE(ad1848_info * devc) { int timeout = 1000; unsigned short prev; while (timeout > 0 && inb(devc->base) == 0x80) /*Are we initializing */ timeout--; devc->MCE_bit = 0x40; prev = inb(io_Index_Addr(devc)); if (prev & 0x40) { return; } outb((devc->MCE_bit), io_Index_Addr(devc)); } static void ad_leave_MCE(ad1848_info * devc) { unsigned char prev, acal; int timeout = 1000; while (timeout > 0 && inb(devc->base) == 0x80) /*Are we initializing */ timeout--; acal = ad_read(devc, 9); devc->MCE_bit = 0x00; prev = inb(io_Index_Addr(devc)); outb((0x00), io_Index_Addr(devc)); /* Clear the MCE bit */ if ((prev & 0x40) == 0) /* Not in MCE mode */ { return; } outb((0x00), io_Index_Addr(devc)); /* Clear the MCE bit */ if (acal & 0x08) /* Auto calibration is enabled */ wait_for_calibration(devc); } static int ad1848_set_recmask(ad1848_info * devc, int mask) { unsigned char recdev; int i, n; unsigned long flags; mask &= devc->supported_rec_devices; /* Rename the mixer bits if necessary */ for (i = 0; i < 32; i++) { if (devc->mixer_reroute[i] != i) { if (mask & (1 << i)) { mask &= ~(1 << i); mask |= (1 << devc->mixer_reroute[i]); } } } n = 0; for (i = 0; i < 32; i++) /* Count selected device bits 
*/ if (mask & (1 << i)) n++; spin_lock_irqsave(&devc->lock,flags); if (!soundpro) { if (n == 0) mask = SOUND_MASK_MIC; else if (n != 1) { /* Too many devices selected */ mask &= ~devc->recmask; /* Filter out active settings */ n = 0; for (i = 0; i < 32; i++) /* Count selected device bits */ if (mask & (1 << i)) n++; if (n != 1) mask = SOUND_MASK_MIC; } switch (mask) { case SOUND_MASK_MIC: recdev = 2; break; case SOUND_MASK_LINE: case SOUND_MASK_LINE3: recdev = 0; break; case SOUND_MASK_CD: case SOUND_MASK_LINE1: recdev = 1; break; case SOUND_MASK_IMIX: recdev = 3; break; default: mask = SOUND_MASK_MIC; recdev = 2; } recdev <<= 6; ad_write(devc, 0, (ad_read(devc, 0) & 0x3f) | recdev); ad_write(devc, 1, (ad_read(devc, 1) & 0x3f) | recdev); } else { /* soundpro */ unsigned char val; int set_rec_bit; int j; for (i = 0; i < 32; i++) { /* For each bit */ if ((devc->supported_rec_devices & (1 << i)) == 0) continue; /* Device not supported */ for (j = LEFT_CHN; j <= RIGHT_CHN; j++) { if (devc->mix_devices[i][j].nbits == 0) /* Inexistent channel */ continue; /* * This is tricky: * set_rec_bit becomes 1 if the corresponding bit in mask is set * then it gets flipped if the polarity is inverse */ set_rec_bit = ((mask & (1 << i)) != 0) ^ devc->mix_devices[i][j].recpol; val = ad_read(devc, devc->mix_devices[i][j].recreg); val &= ~(1 << devc->mix_devices[i][j].recpos); val |= (set_rec_bit << devc->mix_devices[i][j].recpos); ad_write(devc, devc->mix_devices[i][j].recreg, val); } } } spin_unlock_irqrestore(&devc->lock,flags); /* Rename the mixer bits back if necessary */ for (i = 0; i < 32; i++) { if (devc->mixer_reroute[i] != i) { if (mask & (1 << devc->mixer_reroute[i])) { mask &= ~(1 << devc->mixer_reroute[i]); mask |= (1 << i); } } } devc->recmask = mask; return mask; } static void oss_change_bits(ad1848_info *devc, unsigned char *regval, unsigned char *muteval, int dev, int chn, int newval) { unsigned char mask; int shift; int mute; int mutemask; int set_mute_bit; set_mute_bit 
= (newval == 0) ^ devc->mix_devices[dev][chn].mutepol; if (devc->mix_devices[dev][chn].polarity == 1) /* Reverse */ newval = 100 - newval; mask = (1 << devc->mix_devices[dev][chn].nbits) - 1; shift = devc->mix_devices[dev][chn].bitpos; if (devc->mix_devices[dev][chn].mutepos == 8) { /* if there is no mute bit */ mute = 0; /* No mute bit; do nothing special */ mutemask = ~0; /* No mute bit; do nothing special */ } else { mute = (set_mute_bit << devc->mix_devices[dev][chn].mutepos); mutemask = ~(1 << devc->mix_devices[dev][chn].mutepos); } newval = (int) ((newval * mask) + 50) / 100; /* Scale it */ *regval &= ~(mask << shift); /* Clear bits */ *regval |= (newval & mask) << shift; /* Set new value */ *muteval &= mutemask; *muteval |= mute; } static int ad1848_mixer_get(ad1848_info * devc, int dev) { if (!((1 << dev) & devc->supported_devices)) return -EINVAL; dev = devc->mixer_reroute[dev]; return devc->levels[dev]; } static void ad1848_mixer_set_channel(ad1848_info *devc, int dev, int value, int channel) { int regoffs, muteregoffs; unsigned char val, muteval; unsigned long flags; regoffs = devc->mix_devices[dev][channel].regno; muteregoffs = devc->mix_devices[dev][channel].mutereg; val = ad_read(devc, regoffs); if (muteregoffs != regoffs) { muteval = ad_read(devc, muteregoffs); oss_change_bits(devc, &val, &muteval, dev, channel, value); } else oss_change_bits(devc, &val, &val, dev, channel, value); spin_lock_irqsave(&devc->lock,flags); ad_write(devc, regoffs, val); devc->saved_regs[regoffs] = val; if (muteregoffs != regoffs) { ad_write(devc, muteregoffs, muteval); devc->saved_regs[muteregoffs] = muteval; } spin_unlock_irqrestore(&devc->lock,flags); } static int ad1848_mixer_set(ad1848_info * devc, int dev, int value) { int left = value & 0x000000ff; int right = (value & 0x0000ff00) >> 8; int retvol; if (dev > 31) return -EINVAL; if (!(devc->supported_devices & (1 << dev))) return -EINVAL; dev = devc->mixer_reroute[dev]; if (devc->mix_devices[dev][LEFT_CHN].nbits == 
0) return -EINVAL; if (left > 100) left = 100; if (right > 100) right = 100; if (devc->mix_devices[dev][RIGHT_CHN].nbits == 0) /* Mono control */ right = left; retvol = left | (right << 8); /* Scale volumes */ left = mix_cvt[left]; right = mix_cvt[right]; devc->levels[dev] = retvol; /* * Set the left channel */ ad1848_mixer_set_channel(devc, dev, left, LEFT_CHN); /* * Set the right channel */ if (devc->mix_devices[dev][RIGHT_CHN].nbits == 0) goto out; ad1848_mixer_set_channel(devc, dev, right, RIGHT_CHN); out: return retvol; } static void ad1848_mixer_reset(ad1848_info * devc) { int i; char name[32]; unsigned long flags; devc->mix_devices = &(ad1848_mix_devices[0]); sprintf(name, "%s_%d", devc->chip_name, nr_ad1848_devs); for (i = 0; i < 32; i++) devc->mixer_reroute[i] = i; devc->supported_rec_devices = MODE1_REC_DEVICES; switch (devc->model) { case MD_4231: case MD_4231A: case MD_1845: case MD_1845_SSCAPE: devc->supported_devices = MODE2_MIXER_DEVICES; break; case MD_C930: devc->supported_devices = C930_MIXER_DEVICES; devc->mix_devices = &(c930_mix_devices[0]); break; case MD_IWAVE: devc->supported_devices = MODE3_MIXER_DEVICES; devc->mix_devices = &(iwave_mix_devices[0]); break; case MD_42xB: case MD_4239: devc->mix_devices = &(cs42xb_mix_devices[0]); devc->supported_devices = MODE3_MIXER_DEVICES; break; case MD_4232: case MD_4235: case MD_4236: devc->supported_devices = MODE3_MIXER_DEVICES; break; case MD_1848: if (soundpro) { devc->supported_devices = SPRO_MIXER_DEVICES; devc->supported_rec_devices = SPRO_REC_DEVICES; devc->mix_devices = &(spro_mix_devices[0]); break; } default: devc->supported_devices = MODE1_MIXER_DEVICES; } devc->orig_devices = devc->supported_devices; devc->orig_rec_devices = devc->supported_rec_devices; devc->levels = load_mixer_volumes(name, default_mixer_levels, 1); for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (devc->supported_devices & (1 << i)) ad1848_mixer_set(devc, i, devc->levels[i]); } ad1848_set_recmask(devc, SOUND_MASK_MIC); 
devc->mixer_output_port = devc->levels[31] | AUDIO_HEADPHONE | AUDIO_LINE_OUT; spin_lock_irqsave(&devc->lock,flags); if (!soundpro) { if (devc->mixer_output_port & AUDIO_SPEAKER) ad_write(devc, 26, ad_read(devc, 26) & ~0x40); /* Unmute mono out */ else ad_write(devc, 26, ad_read(devc, 26) | 0x40); /* Mute mono out */ } else { /* * From the "wouldn't it be nice if the mixer API had (better) * support for custom stuff" category */ /* Enable surround mode and SB16 mixer */ ad_write(devc, 16, 0x60); } spin_unlock_irqrestore(&devc->lock,flags); } static int ad1848_mixer_ioctl(int dev, unsigned int cmd, void __user *arg) { ad1848_info *devc = mixer_devs[dev]->devc; int val; if (cmd == SOUND_MIXER_PRIVATE1) { if (get_user(val, (int __user *)arg)) return -EFAULT; if (val != 0xffff) { unsigned long flags; val &= (AUDIO_SPEAKER | AUDIO_HEADPHONE | AUDIO_LINE_OUT); devc->mixer_output_port = val; val |= AUDIO_HEADPHONE | AUDIO_LINE_OUT; /* Always on */ devc->mixer_output_port = val; spin_lock_irqsave(&devc->lock,flags); if (val & AUDIO_SPEAKER) ad_write(devc, 26, ad_read(devc, 26) & ~0x40); /* Unmute mono out */ else ad_write(devc, 26, ad_read(devc, 26) | 0x40); /* Mute mono out */ spin_unlock_irqrestore(&devc->lock,flags); } val = devc->mixer_output_port; return put_user(val, (int __user *)arg); } if (cmd == SOUND_MIXER_PRIVATE2) { if (get_user(val, (int __user *)arg)) return -EFAULT; return(ad1848_control(AD1848_MIXER_REROUTE, val)); } if (((cmd >> 8) & 0xff) == 'M') { if (_SIOC_DIR(cmd) & _SIOC_WRITE) { switch (cmd & 0xff) { case SOUND_MIXER_RECSRC: if (get_user(val, (int __user *)arg)) return -EFAULT; val = ad1848_set_recmask(devc, val); break; default: if (get_user(val, (int __user *)arg)) return -EFAULT; val = ad1848_mixer_set(devc, cmd & 0xff, val); break; } return put_user(val, (int __user *)arg); } else { switch (cmd & 0xff) { /* * Return parameters */ case SOUND_MIXER_RECSRC: val = devc->recmask; break; case SOUND_MIXER_DEVMASK: val = devc->supported_devices; break; 
				/*
				 * Tail of ad1848_mixer_ioctl(): remaining read-only mixer
				 * queries (the function opens on an earlier source line).
				 */
				case SOUND_MIXER_STEREODEVS:
					val = devc->supported_devices;
					/* Only the OPTi 82C930 model keeps speaker/IMIX here */
					if (devc->model != MD_C930)
						val &= ~(SOUND_MASK_SPEAKER | SOUND_MASK_IMIX);
					break;

				case SOUND_MIXER_RECMASK:
					val = devc->supported_rec_devices;
					break;

				case SOUND_MIXER_CAPS:
					val=SOUND_CAP_EXCL_INPUT;	/* One recording source at a time */
					break;

				default:
					val = ad1848_mixer_get(devc, cmd & 0xff);
					break;
			}
			return put_user(val, (int __user *)arg);
		}
	}
	else
		return -EINVAL;
}

/*
 * ad1848_set_speed - select the sampling rate of an audio device.
 * @dev: OSS audio device number
 * @arg: requested rate in Hz; arg <= 0 only queries the current rate
 *
 * Returns the rate actually programmed: the nearest speed_table[] entry,
 * or the clamped request for AD1845-class chips which take the rate
 * directly (see the fs &= 0xf0 handling in the prepare_for_* paths).
 */
static int ad1848_set_speed(int dev, int arg)
{
	ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
	ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;
	/*
	 * The sampling speed is encoded in the least significant nibble of I8. The
	 * LSB selects the clock source (0=24.576 MHz, 1=16.9344 MHz) and other
	 * three bits select the divisor (indirectly):
	 *
	 * The available speeds are in the following table. Keep the speeds in
	 * the increasing order.
	 */
	typedef struct
	{
		int speed;
		unsigned char bits;	/* I8 low-nibble encoding: (divisor << 1) | clock */
	} speed_struct;

	static speed_struct speed_table[] =
	{
		{5510, (0 << 1) | 1},
		{5510, (0 << 1) | 1},
		{6620, (7 << 1) | 1},
		{8000, (0 << 1) | 0},
		{9600, (7 << 1) | 0},
		{11025, (1 << 1) | 1},
		{16000, (1 << 1) | 0},
		{18900, (2 << 1) | 1},
		{22050, (3 << 1) | 1},
		{27420, (2 << 1) | 0},
		{32000, (3 << 1) | 0},
		{33075, (6 << 1) | 1},
		{37800, (4 << 1) | 1},
		{44100, (5 << 1) | 1},
		{48000, (6 << 1) | 0}
	};

	int i, n, selected = -1;

	n = sizeof(speed_table) / sizeof(speed_struct);

	if (arg <= 0)
		return portc->speed;	/* Query only */

	if (devc->model == MD_1845 || devc->model == MD_1845_SSCAPE)	/* AD1845 has different timer than others */
	{
		/* AD1845 takes the rate as a 16-bit value (I22/I23); clamp it */
		if (arg < 4000)
			arg = 4000;
		if (arg > 50000)
			arg = 50000;
		portc->speed = arg;
		/* bits of the 8 kHz entry; the rate-select nibble is masked
		 * off again for AD1845 in prepare_for_input/output */
		portc->speed_bits = speed_table[3].bits;
		return portc->speed;
	}
	/* Clamp out-of-range requests to the table limits */
	if (arg < speed_table[0].speed)
		selected = 0;
	if (arg > speed_table[n - 1].speed)
		selected = n - 1;

	/* Otherwise pick the nearest table entry (table is sorted ascending) */
	for (i = 1 /*really */ ; selected == -1 && i < n; i++)
	{
		if (speed_table[i].speed == arg)
			selected = i;
		else if (speed_table[i].speed > arg)
		{
			int diff1, diff2;

			diff1 = arg - speed_table[i - 1].speed;
			diff2 = speed_table[i].speed - arg;

			if (diff1 < diff2)
				selected = i - 1;
			else
				selected = i;
		}
	}

	if (selected == -1)
	{
		printk(KERN_WARNING "ad1848: Can't find speed???\n");
		selected = 3;	/* Fall back to the 8 kHz entry */
	}
	portc->speed = speed_table[selected].speed;
	portc->speed_bits = speed_table[selected].bits;
	return portc->speed;
}

/*
 * ad1848_set_channels - select mono (1) or stereo (2) operation.
 * Any other value leaves the setting unchanged; the current channel
 * count is returned in all cases.
 */
static short ad1848_set_channels(int dev, short arg)
{
	ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;

	if (arg != 1 && arg != 2)
		return portc->channels;

	portc->channels = arg;
	return arg;
}

/*
 * ad1848_set_bits - select the sample format (AFMT_* constant).
 * @arg: requested format; 0 only queries the current format.
 *
 * Formats outside the detected model's ad_format_mask[] fall back to
 * AFMT_U8. On success the I8 format-select bits are cached in
 * portc->format_bits and the accepted format is returned.
 */
static unsigned int ad1848_set_bits(int dev, unsigned int arg)
{
	ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
	ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;

	/* Map AFMT_* to its I8 encoding; a zero entry (other than AFMT_U8
	 * itself) marks a format the hardware cannot produce. */
	static struct format_tbl
	{
		int format;
		unsigned char bits;
	} format2bits[] =
	{
		{0, 0},
		{AFMT_MU_LAW, 1},
		{AFMT_A_LAW, 3},
		{AFMT_IMA_ADPCM, 5},
		{AFMT_U8, 0},
		{AFMT_S16_LE, 2},
		{AFMT_S16_BE, 6},
		{AFMT_S8, 0},
		{AFMT_U16_LE, 0},
		{AFMT_U16_BE, 0}
	};
	int i, n = sizeof(format2bits) / sizeof(struct format_tbl);

	if (arg == 0)
		return portc->audio_format;	/* Query only */

	if (!(arg & ad_format_mask[devc->model]))
		arg = AFMT_U8;

	portc->audio_format = arg;

	for (i = 0; i < n; i++)
		if (format2bits[i].format == arg)
		{
			if ((portc->format_bits = format2bits[i].bits) == 0)
				return portc->audio_format = AFMT_U8;	/* Was not supported */

			return arg;
		}
	/* Still hanging here.
Something must be terribly wrong */ portc->format_bits = 0; return portc->audio_format = AFMT_U8; } static struct audio_driver ad1848_audio_driver = { .owner = THIS_MODULE, .open = ad1848_open, .close = ad1848_close, .output_block = ad1848_output_block, .start_input = ad1848_start_input, .prepare_for_input = ad1848_prepare_for_input, .prepare_for_output = ad1848_prepare_for_output, .halt_io = ad1848_halt, .halt_input = ad1848_halt_input, .halt_output = ad1848_halt_output, .trigger = ad1848_trigger, .set_speed = ad1848_set_speed, .set_bits = ad1848_set_bits, .set_channels = ad1848_set_channels }; static struct mixer_operations ad1848_mixer_operations = { .owner = THIS_MODULE, .id = "SOUNDPORT", .name = "AD1848/CS4248/CS4231", .ioctl = ad1848_mixer_ioctl }; static int ad1848_open(int dev, int mode) { ad1848_info *devc; ad1848_port_info *portc; unsigned long flags; if (dev < 0 || dev >= num_audiodevs) return -ENXIO; devc = (ad1848_info *) audio_devs[dev]->devc; portc = (ad1848_port_info *) audio_devs[dev]->portc; /* here we don't have to protect against intr */ spin_lock(&devc->lock); if (portc->open_mode || (devc->open_mode & mode)) { spin_unlock(&devc->lock); return -EBUSY; } devc->dual_dma = 0; if (audio_devs[dev]->flags & DMA_DUPLEX) { devc->dual_dma = 1; } devc->intr_active = 0; devc->audio_mode = 0; devc->open_mode |= mode; portc->open_mode = mode; spin_unlock(&devc->lock); ad1848_trigger(dev, 0); if (mode & OPEN_READ) devc->record_dev = dev; if (mode & OPEN_WRITE) devc->playback_dev = dev; /* * Mute output until the playback really starts. This decreases clicking (hope so). 
*/ spin_lock_irqsave(&devc->lock,flags); ad_mute(devc); spin_unlock_irqrestore(&devc->lock,flags); return 0; } static void ad1848_close(int dev) { unsigned long flags; ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc; ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc; DEB(printk("ad1848_close(void)\n")); devc->intr_active = 0; ad1848_halt(dev); spin_lock_irqsave(&devc->lock,flags); devc->audio_mode = 0; devc->open_mode &= ~portc->open_mode; portc->open_mode = 0; ad_unmute(devc); spin_unlock_irqrestore(&devc->lock,flags); } static void ad1848_output_block(int dev, unsigned long buf, int count, int intrflag) { unsigned long flags, cnt; ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc; ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc; cnt = count; if (portc->audio_format == AFMT_IMA_ADPCM) { cnt /= 4; } else { if (portc->audio_format & (AFMT_S16_LE | AFMT_S16_BE)) /* 16 bit data */ cnt >>= 1; } if (portc->channels > 1) cnt >>= 1; cnt--; if ((devc->audio_mode & PCM_ENABLE_OUTPUT) && (audio_devs[dev]->flags & DMA_AUTOMODE) && intrflag && cnt == devc->xfer_count) { devc->audio_mode |= PCM_ENABLE_OUTPUT; devc->intr_active = 1; return; /* * Auto DMA mode on. 
No need to react */ } spin_lock_irqsave(&devc->lock,flags); ad_write(devc, 15, (unsigned char) (cnt & 0xff)); ad_write(devc, 14, (unsigned char) ((cnt >> 8) & 0xff)); devc->xfer_count = cnt; devc->audio_mode |= PCM_ENABLE_OUTPUT; devc->intr_active = 1; spin_unlock_irqrestore(&devc->lock,flags); } static void ad1848_start_input(int dev, unsigned long buf, int count, int intrflag) { unsigned long flags, cnt; ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc; ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc; cnt = count; if (portc->audio_format == AFMT_IMA_ADPCM) { cnt /= 4; } else { if (portc->audio_format & (AFMT_S16_LE | AFMT_S16_BE)) /* 16 bit data */ cnt >>= 1; } if (portc->channels > 1) cnt >>= 1; cnt--; if ((devc->audio_mode & PCM_ENABLE_INPUT) && (audio_devs[dev]->flags & DMA_AUTOMODE) && intrflag && cnt == devc->xfer_count) { devc->audio_mode |= PCM_ENABLE_INPUT; devc->intr_active = 1; return; /* * Auto DMA mode on. No need to react */ } spin_lock_irqsave(&devc->lock,flags); if (devc->model == MD_1848) { ad_write(devc, 15, (unsigned char) (cnt & 0xff)); ad_write(devc, 14, (unsigned char) ((cnt >> 8) & 0xff)); } else { ad_write(devc, 31, (unsigned char) (cnt & 0xff)); ad_write(devc, 30, (unsigned char) ((cnt >> 8) & 0xff)); } ad_unmute(devc); devc->xfer_count = cnt; devc->audio_mode |= PCM_ENABLE_INPUT; devc->intr_active = 1; spin_unlock_irqrestore(&devc->lock,flags); } static int ad1848_prepare_for_output(int dev, int bsize, int bcount) { int timeout; unsigned char fs, old_fs, tmp = 0; unsigned long flags; ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc; ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc; ad_mute(devc); spin_lock_irqsave(&devc->lock,flags); fs = portc->speed_bits | (portc->format_bits << 5); if (portc->channels > 1) fs |= 0x10; ad_enter_MCE(devc); /* Enables changes to the format select reg */ if (devc->model == MD_1845 || devc->model == MD_1845_SSCAPE) /* Use alternate speed select 
registers */ { fs &= 0xf0; /* Mask off the rate select bits */ ad_write(devc, 22, (portc->speed >> 8) & 0xff); /* Speed MSB */ ad_write(devc, 23, portc->speed & 0xff); /* Speed LSB */ } old_fs = ad_read(devc, 8); if (devc->model == MD_4232 || devc->model >= MD_4236) { tmp = ad_read(devc, 16); ad_write(devc, 16, tmp | 0x30); } if (devc->model == MD_IWAVE) ad_write(devc, 17, 0xc2); /* Disable variable frequency select */ ad_write(devc, 8, fs); /* * Write to I8 starts resynchronization. Wait until it completes. */ timeout = 0; while (timeout < 100 && inb(devc->base) != 0x80) timeout++; timeout = 0; while (timeout < 10000 && inb(devc->base) == 0x80) timeout++; if (devc->model >= MD_4232) ad_write(devc, 16, tmp & ~0x30); ad_leave_MCE(devc); /* * Starts the calibration process. */ spin_unlock_irqrestore(&devc->lock,flags); devc->xfer_count = 0; #ifndef EXCLUDE_TIMERS if (dev == timer_installed && devc->timer_running) if ((fs & 0x01) != (old_fs & 0x01)) { ad1848_tmr_reprogram(dev); } #endif ad1848_halt_output(dev); return 0; } static int ad1848_prepare_for_input(int dev, int bsize, int bcount) { int timeout; unsigned char fs, old_fs, tmp = 0; unsigned long flags; ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc; ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc; if (devc->audio_mode) return 0; spin_lock_irqsave(&devc->lock,flags); fs = portc->speed_bits | (portc->format_bits << 5); if (portc->channels > 1) fs |= 0x10; ad_enter_MCE(devc); /* Enables changes to the format select reg */ if ((devc->model == MD_1845) || (devc->model == MD_1845_SSCAPE)) /* Use alternate speed select registers */ { fs &= 0xf0; /* Mask off the rate select bits */ ad_write(devc, 22, (portc->speed >> 8) & 0xff); /* Speed MSB */ ad_write(devc, 23, portc->speed & 0xff); /* Speed LSB */ } if (devc->model == MD_4232) { tmp = ad_read(devc, 16); ad_write(devc, 16, tmp | 0x30); } if (devc->model == MD_IWAVE) ad_write(devc, 17, 0xc2); /* Disable variable frequency select */ /* 
* If mode >= 2 (CS4231), set I28. It's the capture format register. */ if (devc->model != MD_1848) { old_fs = ad_read(devc, 28); ad_write(devc, 28, fs); /* * Write to I28 starts resynchronization. Wait until it completes. */ timeout = 0; while (timeout < 100 && inb(devc->base) != 0x80) timeout++; timeout = 0; while (timeout < 10000 && inb(devc->base) == 0x80) timeout++; if (devc->model != MD_1848 && devc->model != MD_1845 && devc->model != MD_1845_SSCAPE) { /* * CS4231 compatible devices don't have separate sampling rate selection * register for recording an playback. The I8 register is shared so we have to * set the speed encoding bits of it too. */ unsigned char tmp = portc->speed_bits | (ad_read(devc, 8) & 0xf0); ad_write(devc, 8, tmp); /* * Write to I8 starts resynchronization. Wait until it completes. */ timeout = 0; while (timeout < 100 && inb(devc->base) != 0x80) timeout++; timeout = 0; while (timeout < 10000 && inb(devc->base) == 0x80) timeout++; } } else { /* For AD1848 set I8. */ old_fs = ad_read(devc, 8); ad_write(devc, 8, fs); /* * Write to I8 starts resynchronization. Wait until it completes. */ timeout = 0; while (timeout < 100 && inb(devc->base) != 0x80) timeout++; timeout = 0; while (timeout < 10000 && inb(devc->base) == 0x80) timeout++; } if (devc->model == MD_4232) ad_write(devc, 16, tmp & ~0x30); ad_leave_MCE(devc); /* * Starts the calibration process. 
*/ spin_unlock_irqrestore(&devc->lock,flags); devc->xfer_count = 0; #ifndef EXCLUDE_TIMERS if (dev == timer_installed && devc->timer_running) { if ((fs & 0x01) != (old_fs & 0x01)) { ad1848_tmr_reprogram(dev); } } #endif ad1848_halt_input(dev); return 0; } static void ad1848_halt(int dev) { ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc; ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc; unsigned char bits = ad_read(devc, 9); if (bits & 0x01 && (portc->open_mode & OPEN_WRITE)) ad1848_halt_output(dev); if (bits & 0x02 && (portc->open_mode & OPEN_READ)) ad1848_halt_input(dev); devc->audio_mode = 0; } static void ad1848_halt_input(int dev) { ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc; unsigned long flags; if (!(ad_read(devc, 9) & 0x02)) return; /* Capture not enabled */ spin_lock_irqsave(&devc->lock,flags); ad_mute(devc); { int tmout; if(!isa_dma_bridge_buggy) disable_dma(audio_devs[dev]->dmap_in->dma); for (tmout = 0; tmout < 100000; tmout++) if (ad_read(devc, 11) & 0x10) break; ad_write(devc, 9, ad_read(devc, 9) & ~0x02); /* Stop capture */ if(!isa_dma_bridge_buggy) enable_dma(audio_devs[dev]->dmap_in->dma); devc->audio_mode &= ~PCM_ENABLE_INPUT; } outb(0, io_Status(devc)); /* Clear interrupt status */ outb(0, io_Status(devc)); /* Clear interrupt status */ devc->audio_mode &= ~PCM_ENABLE_INPUT; spin_unlock_irqrestore(&devc->lock,flags); } static void ad1848_halt_output(int dev) { ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc; unsigned long flags; if (!(ad_read(devc, 9) & 0x01)) return; /* Playback not enabled */ spin_lock_irqsave(&devc->lock,flags); ad_mute(devc); { int tmout; if(!isa_dma_bridge_buggy) disable_dma(audio_devs[dev]->dmap_out->dma); for (tmout = 0; tmout < 100000; tmout++) if (ad_read(devc, 11) & 0x10) break; ad_write(devc, 9, ad_read(devc, 9) & ~0x01); /* Stop playback */ if(!isa_dma_bridge_buggy) enable_dma(audio_devs[dev]->dmap_out->dma); devc->audio_mode &= ~PCM_ENABLE_OUTPUT; } outb((0), 
io_Status(devc)); /* Clear interrupt status */ outb((0), io_Status(devc)); /* Clear interrupt status */ devc->audio_mode &= ~PCM_ENABLE_OUTPUT; spin_unlock_irqrestore(&devc->lock,flags); } static void ad1848_trigger(int dev, int state) { ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc; ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc; unsigned long flags; unsigned char tmp, old; spin_lock_irqsave(&devc->lock,flags); state &= devc->audio_mode; tmp = old = ad_read(devc, 9); if (portc->open_mode & OPEN_READ) { if (state & PCM_ENABLE_INPUT) tmp |= 0x02; else tmp &= ~0x02; } if (portc->open_mode & OPEN_WRITE) { if (state & PCM_ENABLE_OUTPUT) tmp |= 0x01; else tmp &= ~0x01; } /* ad_mute(devc); */ if (tmp != old) { ad_write(devc, 9, tmp); ad_unmute(devc); } spin_unlock_irqrestore(&devc->lock,flags); } static void ad1848_init_hw(ad1848_info * devc) { int i; int *init_values; /* * Initial values for the indirect registers of CS4248/AD1848. */ static int init_values_a[] = { 0xa8, 0xa8, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x0c, 0x02, 0x00, 0x8a, 0x01, 0x00, 0x00, /* Positions 16 to 31 just for CS4231/2 and ad1845 */ 0x80, 0x00, 0x10, 0x10, 0x00, 0x00, 0x1f, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static int init_values_b[] = { /* Values for the newer chips Some of the register initialization values were changed. In order to get rid of the click that preceded PCM playback, calibration was disabled on the 10th byte. On that same byte, dual DMA was enabled; on the 11th byte, ADC dithering was enabled, since that is theoretically desirable; on the 13th byte, Mode 3 was selected, to enable access to extended registers. 
*/ 0xa8, 0xa8, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0xe0, 0x01, 0x00, 0x00, 0x80, 0x00, 0x10, 0x10, 0x00, 0x00, 0x1f, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; /* * Select initialisation data */ init_values = init_values_a; if(devc->model >= MD_4236) init_values = init_values_b; for (i = 0; i < 16; i++) ad_write(devc, i, init_values[i]); ad_mute(devc); /* Initialize some variables */ ad_unmute(devc); /* Leave it unmuted now */ if (devc->model > MD_1848) { if (devc->model == MD_1845_SSCAPE) ad_write(devc, 12, ad_read(devc, 12) | 0x50); else ad_write(devc, 12, ad_read(devc, 12) | 0x40); /* Mode2 = enabled */ if (devc->model == MD_IWAVE) ad_write(devc, 12, 0x6c); /* Select codec mode 3 */ if (devc->model != MD_1845_SSCAPE) for (i = 16; i < 32; i++) ad_write(devc, i, init_values[i]); if (devc->model == MD_IWAVE) ad_write(devc, 16, 0x30); /* Playback and capture counters enabled */ } if (devc->model > MD_1848) { if (devc->audio_flags & DMA_DUPLEX) ad_write(devc, 9, ad_read(devc, 9) & ~0x04); /* Dual DMA mode */ else ad_write(devc, 9, ad_read(devc, 9) | 0x04); /* Single DMA mode */ if (devc->model == MD_1845 || devc->model == MD_1845_SSCAPE) ad_write(devc, 27, ad_read(devc, 27) | 0x08); /* Alternate freq select enabled */ if (devc->model == MD_IWAVE) { /* Some magic Interwave specific initialization */ ad_write(devc, 12, 0x6c); /* Select codec mode 3 */ ad_write(devc, 16, 0x30); /* Playback and capture counters enabled */ ad_write(devc, 17, 0xc2); /* Alternate feature enable */ } } else { devc->audio_flags &= ~DMA_DUPLEX; ad_write(devc, 9, ad_read(devc, 9) | 0x04); /* Single DMA mode */ if (soundpro) ad_write(devc, 12, ad_read(devc, 12) | 0x40); /* Mode2 = enabled */ } outb((0), io_Status(devc)); /* Clear pending interrupts */ /* * Toggle the MCE bit. It completes the initialization phase. 
*/ ad_enter_MCE(devc); /* In case the bit was off */ ad_leave_MCE(devc); ad1848_mixer_reset(devc); } int ad1848_detect(struct resource *ports, int *ad_flags, int *osp) { unsigned char tmp; ad1848_info *devc = &adev_info[nr_ad1848_devs]; unsigned char tmp1 = 0xff, tmp2 = 0xff; int optiC930 = 0; /* OPTi 82C930 flag */ int interwave = 0; int ad1847_flag = 0; int cs4248_flag = 0; int sscape_flag = 0; int io_base = ports->start; int i; DDB(printk("ad1848_detect(%x)\n", io_base)); if (ad_flags) { if (*ad_flags == 0x12345678) { interwave = 1; *ad_flags = 0; } if (*ad_flags == 0x87654321) { sscape_flag = 1; *ad_flags = 0; } if (*ad_flags == 0x12345677) { cs4248_flag = 1; *ad_flags = 0; } } if (nr_ad1848_devs >= MAX_AUDIO_DEV) { printk(KERN_ERR "ad1848 - Too many audio devices\n"); return 0; } spin_lock_init(&devc->lock); devc->base = io_base; devc->irq_ok = 0; devc->timer_running = 0; devc->MCE_bit = 0x40; devc->irq = 0; devc->open_mode = 0; devc->chip_name = devc->name = "AD1848"; devc->model = MD_1848; /* AD1848 or CS4248 */ devc->levels = NULL; devc->debug_flag = 0; /* * Check that the I/O address is in use. * * The bit 0x80 of the base I/O port is known to be 0 after the * chip has performed its power on initialization. Just assume * this has happened before the OS is starting. * * If the I/O address is unused, it typically returns 0xff. */ if (inb(devc->base) == 0xff) { DDB(printk("ad1848_detect: The base I/O address appears to be dead\n")); } /* * Wait for the device to stop initialization */ DDB(printk("ad1848_detect() - step 0\n")); for (i = 0; i < 10000000; i++) { unsigned char x = inb(devc->base); if (x == 0xff || !(x & 0x80)) break; } DDB(printk("ad1848_detect() - step A\n")); if (inb(devc->base) == 0x80) /* Not ready. 
Let's wait */ ad_leave_MCE(devc); if ((inb(devc->base) & 0x80) != 0x00) /* Not a AD1848 */ { DDB(printk("ad1848 detect error - step A (%02x)\n", (int) inb(devc->base))); return 0; } /* * Test if it's possible to change contents of the indirect registers. * Registers 0 and 1 are ADC volume registers. The bit 0x10 is read only * so try to avoid using it. */ DDB(printk("ad1848_detect() - step B\n")); ad_write(devc, 0, 0xaa); ad_write(devc, 1, 0x45); /* 0x55 with bit 0x10 clear */ if ((tmp1 = ad_read(devc, 0)) != 0xaa || (tmp2 = ad_read(devc, 1)) != 0x45) { if (tmp2 == 0x65) /* AD1847 has couple of bits hardcoded to 1 */ ad1847_flag = 1; else { DDB(printk("ad1848 detect error - step B (%x/%x)\n", tmp1, tmp2)); return 0; } } DDB(printk("ad1848_detect() - step C\n")); ad_write(devc, 0, 0x45); ad_write(devc, 1, 0xaa); if ((tmp1 = ad_read(devc, 0)) != 0x45 || (tmp2 = ad_read(devc, 1)) != 0xaa) { if (tmp2 == 0x8a) /* AD1847 has few bits hardcoded to 1 */ ad1847_flag = 1; else { DDB(printk("ad1848 detect error - step C (%x/%x)\n", tmp1, tmp2)); return 0; } } /* * The indirect register I12 has some read only bits. Let's * try to change them. */ DDB(printk("ad1848_detect() - step D\n")); tmp = ad_read(devc, 12); ad_write(devc, 12, (~tmp) & 0x0f); if ((tmp & 0x0f) != ((tmp1 = ad_read(devc, 12)) & 0x0f)) { DDB(printk("ad1848 detect error - step D (%x)\n", tmp1)); return 0; } /* * NOTE! Last 4 bits of the reg I12 tell the chip revision. * 0x01=RevB and 0x0A=RevC. */ /* * The original AD1848/CS4248 has just 15 indirect registers. This means * that I0 and I16 should return the same value (etc.). * However this doesn't work with CS4248. Actually it seems to be impossible * to detect if the chip is a CS4231 or CS4248. * Ensure that the Mode2 enable bit of I12 is 0. Otherwise this test fails * with CS4231. */ /* * OPTi 82C930 has mode2 control bit in another place. This test will fail * with it. Accept this situation as a possible indication of this chip. 
*/ DDB(printk("ad1848_detect() - step F\n")); ad_write(devc, 12, 0); /* Mode2=disabled */ for (i = 0; i < 16; i++) { if ((tmp1 = ad_read(devc, i)) != (tmp2 = ad_read(devc, i + 16))) { DDB(printk("ad1848 detect step F(%d/%x/%x) - OPTi chip???\n", i, tmp1, tmp2)); if (!ad1847_flag) optiC930 = 1; break; } } /* * Try to switch the chip to mode2 (CS4231) by setting the MODE2 bit (0x40). * The bit 0x80 is always 1 in CS4248 and CS4231. */ DDB(printk("ad1848_detect() - step G\n")); if (ad_flags && *ad_flags == 400) *ad_flags = 0; else ad_write(devc, 12, 0x40); /* Set mode2, clear 0x80 */ if (ad_flags) *ad_flags = 0; tmp1 = ad_read(devc, 12); if (tmp1 & 0x80) { if (ad_flags) *ad_flags |= AD_F_CS4248; devc->chip_name = "CS4248"; /* Our best knowledge just now */ } if (optiC930 || (tmp1 & 0xc0) == (0x80 | 0x40)) { /* * CS4231 detected - is it? * * Verify that setting I0 doesn't change I16. */ DDB(printk("ad1848_detect() - step H\n")); ad_write(devc, 16, 0); /* Set I16 to known value */ ad_write(devc, 0, 0x45); if ((tmp1 = ad_read(devc, 16)) != 0x45) /* No change -> CS4231? */ { ad_write(devc, 0, 0xaa); if ((tmp1 = ad_read(devc, 16)) == 0xaa) /* Rotten bits? */ { DDB(printk("ad1848 detect error - step H(%x)\n", tmp1)); return 0; } /* * Verify that some bits of I25 are read only. */ DDB(printk("ad1848_detect() - step I\n")); tmp1 = ad_read(devc, 25); /* Original bits */ ad_write(devc, 25, ~tmp1); /* Invert all bits */ if ((ad_read(devc, 25) & 0xe7) == (tmp1 & 0xe7)) { int id; /* * It's at least CS4231 */ devc->chip_name = "CS4231"; devc->model = MD_4231; /* * It could be an AD1845 or CS4231A as well. * CS4231 and AD1845 report the same revision info in I25 * while the CS4231A reports different. */ id = ad_read(devc, 25); if ((id & 0xe7) == 0x80) /* Device busy??? */ id = ad_read(devc, 25); if ((id & 0xe7) == 0x80) /* Device still busy??? 
*/ id = ad_read(devc, 25); DDB(printk("ad1848_detect() - step J (%02x/%02x)\n", id, ad_read(devc, 25))); if ((id & 0xe7) == 0x80) { /* * It must be a CS4231 or AD1845. The register I23 of * CS4231 is undefined and it appears to be read only. * AD1845 uses I23 for setting sample rate. Assume * the chip is AD1845 if I23 is changeable. */ unsigned char tmp = ad_read(devc, 23); ad_write(devc, 23, ~tmp); if (interwave) { devc->model = MD_IWAVE; devc->chip_name = "IWave"; } else if (ad_read(devc, 23) != tmp) /* AD1845 ? */ { devc->chip_name = "AD1845"; devc->model = MD_1845; } else if (cs4248_flag) { if (ad_flags) *ad_flags |= AD_F_CS4248; devc->chip_name = "CS4248"; devc->model = MD_1848; ad_write(devc, 12, ad_read(devc, 12) & ~0x40); /* Mode2 off */ } ad_write(devc, 23, tmp); /* Restore */ } else { switch (id & 0x1f) { case 3: /* CS4236/CS4235/CS42xB/CS4239 */ { int xid; ad_write(devc, 12, ad_read(devc, 12) | 0x60); /* switch to mode 3 */ ad_write(devc, 23, 0x9c); /* select extended register 25 */ xid = inb(io_Indexed_Data(devc)); ad_write(devc, 12, ad_read(devc, 12) & ~0x60); /* back to mode 0 */ switch (xid & 0x1f) { case 0x00: devc->chip_name = "CS4237B(B)"; devc->model = MD_42xB; break; case 0x08: /* Seems to be a 4238 ?? 
*/ devc->chip_name = "CS4238"; devc->model = MD_42xB; break; case 0x09: devc->chip_name = "CS4238B"; devc->model = MD_42xB; break; case 0x0b: devc->chip_name = "CS4236B"; devc->model = MD_4236; break; case 0x10: devc->chip_name = "CS4237B"; devc->model = MD_42xB; break; case 0x1d: devc->chip_name = "CS4235"; devc->model = MD_4235; break; case 0x1e: devc->chip_name = "CS4239"; devc->model = MD_4239; break; default: printk("Chip ident is %X.\n", xid&0x1F); devc->chip_name = "CS42xx"; devc->model = MD_4232; break; } } break; case 2: /* CS4232/CS4232A */ devc->chip_name = "CS4232"; devc->model = MD_4232; break; case 0: if ((id & 0xe0) == 0xa0) { devc->chip_name = "CS4231A"; devc->model = MD_4231A; } else { devc->chip_name = "CS4321"; devc->model = MD_4231; } break; default: /* maybe */ DDB(printk("ad1848: I25 = %02x/%02x\n", ad_read(devc, 25), ad_read(devc, 25) & 0xe7)); if (optiC930) { devc->chip_name = "82C930"; devc->model = MD_C930; } else { devc->chip_name = "CS4231"; devc->model = MD_4231; } } } } ad_write(devc, 25, tmp1); /* Restore bits */ DDB(printk("ad1848_detect() - step K\n")); } } else if (tmp1 == 0x0a) { /* * Is it perhaps a SoundPro CMI8330? * If so, then we should be able to change indirect registers * greater than I15 after activating MODE2, even though reading * back I12 does not show it. 
*/ /* * Let's try comparing register values */ for (i = 0; i < 16; i++) { if ((tmp1 = ad_read(devc, i)) != (tmp2 = ad_read(devc, i + 16))) { DDB(printk("ad1848 detect step H(%d/%x/%x) - SoundPro chip?\n", i, tmp1, tmp2)); soundpro = 1; devc->chip_name = "SoundPro CMI 8330"; break; } } } DDB(printk("ad1848_detect() - step L\n")); if (ad_flags) { if (devc->model != MD_1848) *ad_flags |= AD_F_CS4231; } DDB(printk("ad1848_detect() - Detected OK\n")); if (devc->model == MD_1848 && ad1847_flag) devc->chip_name = "AD1847"; if (sscape_flag == 1) devc->model = MD_1845_SSCAPE; return 1; } int ad1848_init (char *name, struct resource *ports, int irq, int dma_playback, int dma_capture, int share_dma, int *osp, struct module *owner) { /* * NOTE! If irq < 0, there is another driver which has allocated the IRQ * so that this driver doesn't need to allocate/deallocate it. * The actually used IRQ is ABS(irq). */ int my_dev; char dev_name[100]; int e; ad1848_info *devc = &adev_info[nr_ad1848_devs]; ad1848_port_info *portc = NULL; devc->irq = (irq > 0) ? 
irq : 0; devc->open_mode = 0; devc->timer_ticks = 0; devc->dma1 = dma_playback; devc->dma2 = dma_capture; devc->subtype = cfg.card_subtype; devc->audio_flags = DMA_AUTOMODE; devc->playback_dev = devc->record_dev = 0; if (name != NULL) devc->name = name; if (name != NULL && name[0] != 0) sprintf(dev_name, "%s (%s)", name, devc->chip_name); else sprintf(dev_name, "Generic audio codec (%s)", devc->chip_name); rename_region(ports, devc->name); conf_printf2(dev_name, devc->base, devc->irq, dma_playback, dma_capture); if (devc->model == MD_1848 || devc->model == MD_C930) devc->audio_flags |= DMA_HARDSTOP; if (devc->model > MD_1848) { if (devc->dma1 == devc->dma2 || devc->dma2 == -1 || devc->dma1 == -1) devc->audio_flags &= ~DMA_DUPLEX; else devc->audio_flags |= DMA_DUPLEX; } portc = kmalloc(sizeof(ad1848_port_info), GFP_KERNEL); if(portc==NULL) { release_region(devc->base, 4); return -1; } if ((my_dev = sound_install_audiodrv(AUDIO_DRIVER_VERSION, dev_name, &ad1848_audio_driver, sizeof(struct audio_driver), devc->audio_flags, ad_format_mask[devc->model], devc, dma_playback, dma_capture)) < 0) { release_region(devc->base, 4); kfree(portc); return -1; } audio_devs[my_dev]->portc = portc; audio_devs[my_dev]->mixer_dev = -1; if (owner) audio_devs[my_dev]->d->owner = owner; memset((char *) portc, 0, sizeof(*portc)); nr_ad1848_devs++; ad1848_init_hw(devc); if (irq > 0) { devc->dev_no = my_dev; if (request_irq(devc->irq, adintr, 0, devc->name, (void *)(long)my_dev) < 0) { printk(KERN_WARNING "ad1848: Unable to allocate IRQ\n"); /* Don't free it either then.. 
*/ devc->irq = 0; } if (capabilities[devc->model].flags & CAP_F_TIMER) { #ifndef CONFIG_SMP int x; unsigned char tmp = ad_read(devc, 16); #endif devc->timer_ticks = 0; ad_write(devc, 21, 0x00); /* Timer MSB */ ad_write(devc, 20, 0x10); /* Timer LSB */ #ifndef CONFIG_SMP ad_write(devc, 16, tmp | 0x40); /* Enable timer */ for (x = 0; x < 100000 && devc->timer_ticks == 0; x++); ad_write(devc, 16, tmp & ~0x40); /* Disable timer */ if (devc->timer_ticks == 0) printk(KERN_WARNING "ad1848: Interrupt test failed (IRQ%d)\n", irq); else { DDB(printk("Interrupt test OK\n")); devc->irq_ok = 1; } #else devc->irq_ok = 1; #endif } else devc->irq_ok = 1; /* Couldn't test. assume it's OK */ } else if (irq < 0) irq2dev[-irq] = devc->dev_no = my_dev; #ifndef EXCLUDE_TIMERS if ((capabilities[devc->model].flags & CAP_F_TIMER) && devc->irq_ok) ad1848_tmr_install(my_dev); #endif if (!share_dma) { if (sound_alloc_dma(dma_playback, devc->name)) printk(KERN_WARNING "ad1848.c: Can't allocate DMA%d\n", dma_playback); if (dma_capture != dma_playback) if (sound_alloc_dma(dma_capture, devc->name)) printk(KERN_WARNING "ad1848.c: Can't allocate DMA%d\n", dma_capture); } if ((e = sound_install_mixer(MIXER_DRIVER_VERSION, dev_name, &ad1848_mixer_operations, sizeof(struct mixer_operations), devc)) >= 0) { audio_devs[my_dev]->mixer_dev = e; if (owner) mixer_devs[e]->owner = owner; } return my_dev; } int ad1848_control(int cmd, int arg) { ad1848_info *devc; unsigned long flags; if (nr_ad1848_devs < 1) return -ENODEV; devc = &adev_info[nr_ad1848_devs - 1]; switch (cmd) { case AD1848_SET_XTAL: /* Change clock frequency of AD1845 (only ) */ if (devc->model != MD_1845 && devc->model != MD_1845_SSCAPE) return -EINVAL; spin_lock_irqsave(&devc->lock,flags); ad_enter_MCE(devc); ad_write(devc, 29, (ad_read(devc, 29) & 0x1f) | (arg << 5)); ad_leave_MCE(devc); spin_unlock_irqrestore(&devc->lock,flags); break; case AD1848_MIXER_REROUTE: { int o = (arg >> 8) & 0xff; int n = arg & 0xff; if (o < 0 || o >= 
SOUND_MIXER_NRDEVICES) return -EINVAL; if (!(devc->supported_devices & (1 << o)) && !(devc->supported_rec_devices & (1 << o))) return -EINVAL; if (n == SOUND_MIXER_NONE) { /* Just hide this control */ ad1848_mixer_set(devc, o, 0); /* Shut up it */ devc->supported_devices &= ~(1 << o); devc->supported_rec_devices &= ~(1 << o); break; } /* Make the mixer control identified by o to appear as n */ if (n < 0 || n >= SOUND_MIXER_NRDEVICES) return -EINVAL; devc->mixer_reroute[n] = o; /* Rename the control */ if (devc->supported_devices & (1 << o)) devc->supported_devices |= (1 << n); if (devc->supported_rec_devices & (1 << o)) devc->supported_rec_devices |= (1 << n); devc->supported_devices &= ~(1 << o); devc->supported_rec_devices &= ~(1 << o); } break; } return 0; } void ad1848_unload(int io_base, int irq, int dma_playback, int dma_capture, int share_dma) { int i, mixer, dev = 0; ad1848_info *devc = NULL; for (i = 0; devc == NULL && i < nr_ad1848_devs; i++) { if (adev_info[i].base == io_base) { devc = &adev_info[i]; dev = devc->dev_no; } } if (devc != NULL) { kfree(audio_devs[dev]->portc); release_region(devc->base, 4); if (!share_dma) { if (devc->irq > 0) /* There is no point in freeing irq, if it wasn't allocated */ free_irq(devc->irq, (void *)(long)devc->dev_no); sound_free_dma(dma_playback); if (dma_playback != dma_capture) sound_free_dma(dma_capture); } mixer = audio_devs[devc->dev_no]->mixer_dev; if(mixer>=0) sound_unload_mixerdev(mixer); nr_ad1848_devs--; for ( ; i < nr_ad1848_devs ; i++) adev_info[i] = adev_info[i+1]; } else printk(KERN_ERR "ad1848: Can't find device to be unloaded. 
Base=%x\n", io_base); } static irqreturn_t adintr(int irq, void *dev_id) { unsigned char status; ad1848_info *devc; int dev; int alt_stat = 0xff; unsigned char c930_stat = 0; int cnt = 0; dev = (long)dev_id; devc = (ad1848_info *) audio_devs[dev]->devc; interrupt_again: /* Jump back here if int status doesn't reset */ status = inb(io_Status(devc)); if (status == 0x80) printk(KERN_DEBUG "adintr: Why?\n"); if (devc->model == MD_1848) outb((0), io_Status(devc)); /* Clear interrupt status */ if (status & 0x01) { if (devc->model == MD_C930) { /* 82C930 has interrupt status register in MAD16 register MC11 */ spin_lock(&devc->lock); /* 0xe0e is C930 address port * 0xe0f is C930 data port */ outb(11, 0xe0e); c930_stat = inb(0xe0f); outb((~c930_stat), 0xe0f); spin_unlock(&devc->lock); alt_stat = (c930_stat << 2) & 0x30; } else if (devc->model != MD_1848) { spin_lock(&devc->lock); alt_stat = ad_read(devc, 24); ad_write(devc, 24, ad_read(devc, 24) & ~alt_stat); /* Selective ack */ spin_unlock(&devc->lock); } if ((devc->open_mode & OPEN_READ) && (devc->audio_mode & PCM_ENABLE_INPUT) && (alt_stat & 0x20)) { DMAbuf_inputintr(devc->record_dev); } if ((devc->open_mode & OPEN_WRITE) && (devc->audio_mode & PCM_ENABLE_OUTPUT) && (alt_stat & 0x10)) { DMAbuf_outputintr(devc->playback_dev, 1); } if (devc->model != MD_1848 && (alt_stat & 0x40)) /* Timer interrupt */ { devc->timer_ticks++; #ifndef EXCLUDE_TIMERS if (timer_installed == dev && devc->timer_running) sound_timer_interrupt(); #endif } } /* * Sometimes playback or capture interrupts occur while a timer interrupt * is being handled. The interrupt will not be retriggered if we don't * handle it now. Check if an interrupt is still pending and restart * the handler in this case. */ if (inb(io_Status(devc)) & 0x01 && cnt++ < 4) { goto interrupt_again; } return IRQ_HANDLED; } /* * Experimental initialization sequence for the integrated sound system * of the Compaq Deskpro M. 
*/ static int init_deskpro_m(struct address_info *hw_config) { unsigned char tmp; if ((tmp = inb(0xc44)) == 0xff) { DDB(printk("init_deskpro_m: Dead port 0xc44\n")); return 0; } outb(0x10, 0xc44); outb(0x40, 0xc45); outb(0x00, 0xc46); outb(0xe8, 0xc47); outb(0x14, 0xc44); outb(0x40, 0xc45); outb(0x00, 0xc46); outb(0xe8, 0xc47); outb(0x10, 0xc44); return 1; } /* * Experimental initialization sequence for the integrated sound system * of Compaq Deskpro XL. */ static int init_deskpro(struct address_info *hw_config) { unsigned char tmp; if ((tmp = inb(0xc44)) == 0xff) { DDB(printk("init_deskpro: Dead port 0xc44\n")); return 0; } outb((tmp | 0x04), 0xc44); /* Select bank 1 */ if (inb(0xc44) != 0x04) { DDB(printk("init_deskpro: Invalid bank1 signature in port 0xc44\n")); return 0; } /* * OK. It looks like a Deskpro so let's proceed. */ /* * I/O port 0xc44 Audio configuration register. * * bits 0xc0: Audio revision bits * 0x00 = Compaq Business Audio * 0x40 = MS Sound System Compatible (reset default) * 0x80 = Reserved * 0xc0 = Reserved * bit 0x20: No Wait State Enable * 0x00 = Disabled (reset default, DMA mode) * 0x20 = Enabled (programmed I/O mode) * bit 0x10: MS Sound System Decode Enable * 0x00 = Decoding disabled (reset default) * 0x10 = Decoding enabled * bit 0x08: FM Synthesis Decode Enable * 0x00 = Decoding Disabled (reset default) * 0x08 = Decoding enabled * bit 0x04 Bank select * 0x00 = Bank 0 * 0x04 = Bank 1 * bits 0x03 MSS Base address * 0x00 = 0x530 (reset default) * 0x01 = 0x604 * 0x02 = 0xf40 * 0x03 = 0xe80 */ #ifdef DEBUGXL /* Debug printing */ printk("Port 0xc44 (before): "); outb((tmp & ~0x04), 0xc44); printk("%02x ", inb(0xc44)); outb((tmp | 0x04), 0xc44); printk("%02x\n", inb(0xc44)); #endif /* Set bank 1 of the register */ tmp = 0x58; /* MSS Mode, MSS&FM decode enabled */ switch (hw_config->io_base) { case 0x530: tmp |= 0x00; break; case 0x604: tmp |= 0x01; break; case 0xf40: tmp |= 0x02; break; case 0xe80: tmp |= 0x03; break; default: 
DDB(printk("init_deskpro: Invalid MSS port %x\n", hw_config->io_base)); return 0; } outb((tmp & ~0x04), 0xc44); /* Write to bank=0 */ #ifdef DEBUGXL /* Debug printing */ printk("Port 0xc44 (after): "); outb((tmp & ~0x04), 0xc44); /* Select bank=0 */ printk("%02x ", inb(0xc44)); outb((tmp | 0x04), 0xc44); /* Select bank=1 */ printk("%02x\n", inb(0xc44)); #endif /* * I/O port 0xc45 FM Address Decode/MSS ID Register. * * bank=0, bits 0xfe: FM synthesis Decode Compare bits 7:1 (default=0x88) * bank=0, bit 0x01: SBIC Power Control Bit * 0x00 = Powered up * 0x01 = Powered down * bank=1, bits 0xfc: MSS ID (default=0x40) */ #ifdef DEBUGXL /* Debug printing */ printk("Port 0xc45 (before): "); outb((tmp & ~0x04), 0xc44); /* Select bank=0 */ printk("%02x ", inb(0xc45)); outb((tmp | 0x04), 0xc44); /* Select bank=1 */ printk("%02x\n", inb(0xc45)); #endif outb((tmp & ~0x04), 0xc44); /* Select bank=0 */ outb((0x88), 0xc45); /* FM base 7:0 = 0x88 */ outb((tmp | 0x04), 0xc44); /* Select bank=1 */ outb((0x10), 0xc45); /* MSS ID = 0x10 (MSS port returns 0x04) */ #ifdef DEBUGXL /* Debug printing */ printk("Port 0xc45 (after): "); outb((tmp & ~0x04), 0xc44); /* Select bank=0 */ printk("%02x ", inb(0xc45)); outb((tmp | 0x04), 0xc44); /* Select bank=1 */ printk("%02x\n", inb(0xc45)); #endif /* * I/O port 0xc46 FM Address Decode/Address ASIC Revision Register. 
* * bank=0, bits 0xff: FM synthesis Decode Compare bits 15:8 (default=0x03) * bank=1, bits 0xff: Audio addressing ASIC id */ #ifdef DEBUGXL /* Debug printing */ printk("Port 0xc46 (before): "); outb((tmp & ~0x04), 0xc44); /* Select bank=0 */ printk("%02x ", inb(0xc46)); outb((tmp | 0x04), 0xc44); /* Select bank=1 */ printk("%02x\n", inb(0xc46)); #endif outb((tmp & ~0x04), 0xc44); /* Select bank=0 */ outb((0x03), 0xc46); /* FM base 15:8 = 0x03 */ outb((tmp | 0x04), 0xc44); /* Select bank=1 */ outb((0x11), 0xc46); /* ASIC ID = 0x11 */ #ifdef DEBUGXL /* Debug printing */ printk("Port 0xc46 (after): "); outb((tmp & ~0x04), 0xc44); /* Select bank=0 */ printk("%02x ", inb(0xc46)); outb((tmp | 0x04), 0xc44); /* Select bank=1 */ printk("%02x\n", inb(0xc46)); #endif /* * I/O port 0xc47 FM Address Decode Register. * * bank=0, bits 0xff: Decode enable selection for various FM address bits * bank=1, bits 0xff: Reserved */ #ifdef DEBUGXL /* Debug printing */ printk("Port 0xc47 (before): "); outb((tmp & ~0x04), 0xc44); /* Select bank=0 */ printk("%02x ", inb(0xc47)); outb((tmp | 0x04), 0xc44); /* Select bank=1 */ printk("%02x\n", inb(0xc47)); #endif outb((tmp & ~0x04), 0xc44); /* Select bank=0 */ outb((0x7c), 0xc47); /* FM decode enable bits = 0x7c */ outb((tmp | 0x04), 0xc44); /* Select bank=1 */ outb((0x00), 0xc47); /* Reserved bank1 = 0x00 */ #ifdef DEBUGXL /* Debug printing */ printk("Port 0xc47 (after): "); outb((tmp & ~0x04), 0xc44); /* Select bank=0 */ printk("%02x ", inb(0xc47)); outb((tmp | 0x04), 0xc44); /* Select bank=1 */ printk("%02x\n", inb(0xc47)); #endif /* * I/O port 0xc6f = Audio Disable Function Register */ #ifdef DEBUGXL printk("Port 0xc6f (before) = %02x\n", inb(0xc6f)); #endif outb((0x80), 0xc6f); #ifdef DEBUGXL printk("Port 0xc6f (after) = %02x\n", inb(0xc6f)); #endif return 1; } int probe_ms_sound(struct address_info *hw_config, struct resource *ports) { unsigned char tmp; DDB(printk("Entered probe_ms_sound(%x, %d)\n", hw_config->io_base, 
hw_config->card_subtype)); if (hw_config->card_subtype == 1) /* Has no IRQ/DMA registers */ { /* check_opl3(0x388, hw_config); */ return ad1848_detect(ports, NULL, hw_config->osp); } if (deskpro_xl && hw_config->card_subtype == 2) /* Compaq Deskpro XL */ { if (!init_deskpro(hw_config)) return 0; } if (deskpro_m) /* Compaq Deskpro M */ { if (!init_deskpro_m(hw_config)) return 0; } /* * Check if the IO port returns valid signature. The original MS Sound * system returns 0x04 while some cards (AudioTrix Pro for example) * return 0x00 or 0x0f. */ if ((tmp = inb(hw_config->io_base + 3)) == 0xff) /* Bus float */ { int ret; DDB(printk("I/O address is inactive (%x)\n", tmp)); if (!(ret = ad1848_detect(ports, NULL, hw_config->osp))) return 0; return 1; } DDB(printk("MSS signature = %x\n", tmp & 0x3f)); if ((tmp & 0x3f) != 0x04 && (tmp & 0x3f) != 0x0f && (tmp & 0x3f) != 0x00) { int ret; MDB(printk(KERN_ERR "No MSS signature detected on port 0x%x (0x%x)\n", hw_config->io_base, (int) inb(hw_config->io_base + 3))); DDB(printk("Trying to detect codec anyway but IRQ/DMA may not work\n")); if (!(ret = ad1848_detect(ports, NULL, hw_config->osp))) return 0; hw_config->card_subtype = 1; return 1; } if ((hw_config->irq != 5) && (hw_config->irq != 7) && (hw_config->irq != 9) && (hw_config->irq != 10) && (hw_config->irq != 11) && (hw_config->irq != 12)) { printk(KERN_ERR "MSS: Bad IRQ %d\n", hw_config->irq); return 0; } if (hw_config->dma != 0 && hw_config->dma != 1 && hw_config->dma != 3) { printk(KERN_ERR "MSS: Bad DMA %d\n", hw_config->dma); return 0; } /* * Check that DMA0 is not in use with a 8 bit board. 
*/ if (hw_config->dma == 0 && inb(hw_config->io_base + 3) & 0x80) { printk(KERN_ERR "MSS: Can't use DMA0 with a 8 bit card/slot\n"); return 0; } if (hw_config->irq > 7 && hw_config->irq != 9 && inb(hw_config->io_base + 3) & 0x80) { printk(KERN_ERR "MSS: Can't use IRQ%d with a 8 bit card/slot\n", hw_config->irq); return 0; } return ad1848_detect(ports, NULL, hw_config->osp); } void attach_ms_sound(struct address_info *hw_config, struct resource *ports, struct module *owner) { static signed char interrupt_bits[12] = { -1, -1, -1, -1, -1, 0x00, -1, 0x08, -1, 0x10, 0x18, 0x20 }; signed char bits; char dma2_bit = 0; static char dma_bits[4] = { 1, 2, 0, 3 }; int config_port = hw_config->io_base + 0; int version_port = hw_config->io_base + 3; int dma = hw_config->dma; int dma2 = hw_config->dma2; if (hw_config->card_subtype == 1) /* Has no IRQ/DMA registers */ { hw_config->slots[0] = ad1848_init("MS Sound System", ports, hw_config->irq, hw_config->dma, hw_config->dma2, 0, hw_config->osp, owner); return; } /* * Set the IRQ and DMA addresses. */ bits = interrupt_bits[hw_config->irq]; if (bits == -1) { printk(KERN_ERR "MSS: Bad IRQ %d\n", hw_config->irq); release_region(ports->start, 4); release_region(ports->start - 4, 4); return; } outb((bits | 0x40), config_port); if ((inb(version_port) & 0x40) == 0) printk(KERN_ERR "[MSS: IRQ Conflict?]\n"); /* * Handle the capture DMA channel */ if (dma2 != -1 && dma2 != dma) { if (!((dma == 0 && dma2 == 1) || (dma == 1 && dma2 == 0) || (dma == 3 && dma2 == 0))) { /* Unsupported combination. 
Try to swap channels */ int tmp = dma; dma = dma2; dma2 = tmp; } if ((dma == 0 && dma2 == 1) || (dma == 1 && dma2 == 0) || (dma == 3 && dma2 == 0)) { dma2_bit = 0x04; /* Enable capture DMA */ } else { printk(KERN_WARNING "MSS: Invalid capture DMA\n"); dma2 = dma; } } else { dma2 = dma; } hw_config->dma = dma; hw_config->dma2 = dma2; outb((bits | dma_bits[dma] | dma2_bit), config_port); /* Write IRQ+DMA setup */ hw_config->slots[0] = ad1848_init("MS Sound System", ports, hw_config->irq, dma, dma2, 0, hw_config->osp, THIS_MODULE); } void unload_ms_sound(struct address_info *hw_config) { ad1848_unload(hw_config->io_base + 4, hw_config->irq, hw_config->dma, hw_config->dma2, 0); sound_unload_audiodev(hw_config->slots[0]); release_region(hw_config->io_base, 4); } #ifndef EXCLUDE_TIMERS /* * Timer stuff (for /dev/music). */ static unsigned int current_interval; static unsigned int ad1848_tmr_start(int dev, unsigned int usecs) { unsigned long flags; ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc; unsigned long xtal_nsecs; /* nanoseconds per xtal oscillator tick */ unsigned long divider; spin_lock_irqsave(&devc->lock,flags); /* * Length of the timer interval (in nanoseconds) depends on the * selected crystal oscillator. Check this from bit 0x01 of I8. * * AD1845 has just one oscillator which has cycle time of 10.050 us * (when a 24.576 MHz xtal oscillator is used). * * Convert requested interval to nanoseconds before computing * the timer divider. 
*/ if (devc->model == MD_1845 || devc->model == MD_1845_SSCAPE) xtal_nsecs = 10050; else if (ad_read(devc, 8) & 0x01) xtal_nsecs = 9920; else xtal_nsecs = 9969; divider = (usecs * 1000 + xtal_nsecs / 2) / xtal_nsecs; if (divider < 100) /* Don't allow shorter intervals than about 1ms */ divider = 100; if (divider > 65535) /* Overflow check */ divider = 65535; ad_write(devc, 21, (divider >> 8) & 0xff); /* Set upper bits */ ad_write(devc, 20, divider & 0xff); /* Set lower bits */ ad_write(devc, 16, ad_read(devc, 16) | 0x40); /* Start the timer */ devc->timer_running = 1; spin_unlock_irqrestore(&devc->lock,flags); return current_interval = (divider * xtal_nsecs + 500) / 1000; } static void ad1848_tmr_reprogram(int dev) { /* * Audio driver has changed sampling rate so that a different xtal * oscillator was selected. We have to reprogram the timer rate. */ ad1848_tmr_start(dev, current_interval); sound_timer_syncinterval(current_interval); } static void ad1848_tmr_disable(int dev) { unsigned long flags; ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc; spin_lock_irqsave(&devc->lock,flags); ad_write(devc, 16, ad_read(devc, 16) & ~0x40); devc->timer_running = 0; spin_unlock_irqrestore(&devc->lock,flags); } static void ad1848_tmr_restart(int dev) { unsigned long flags; ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc; if (current_interval == 0) return; spin_lock_irqsave(&devc->lock,flags); ad_write(devc, 16, ad_read(devc, 16) | 0x40); devc->timer_running = 1; spin_unlock_irqrestore(&devc->lock,flags); } static struct sound_lowlev_timer ad1848_tmr = { 0, 2, ad1848_tmr_start, ad1848_tmr_disable, ad1848_tmr_restart }; static int ad1848_tmr_install(int dev) { if (timer_installed != -1) return 0; /* Don't install another timer */ timer_installed = ad1848_tmr.dev = dev; sound_timer_init(&ad1848_tmr, audio_devs[dev]->name); return 1; } #endif /* EXCLUDE_TIMERS */ EXPORT_SYMBOL(ad1848_detect); EXPORT_SYMBOL(ad1848_init); EXPORT_SYMBOL(ad1848_unload); 
EXPORT_SYMBOL(ad1848_control); EXPORT_SYMBOL(probe_ms_sound); EXPORT_SYMBOL(attach_ms_sound); EXPORT_SYMBOL(unload_ms_sound); static int __initdata io = -1; static int __initdata irq = -1; static int __initdata dma = -1; static int __initdata dma2 = -1; static int __initdata type = 0; module_param(io, int, 0); /* I/O for a raw AD1848 card */ module_param(irq, int, 0); /* IRQ to use */ module_param(dma, int, 0); /* First DMA channel */ module_param(dma2, int, 0); /* Second DMA channel */ module_param(type, int, 0); /* Card type */ module_param(deskpro_xl, bool, 0); /* Special magic for Deskpro XL boxen */ module_param(deskpro_m, bool, 0); /* Special magic for Deskpro M box */ module_param(soundpro, bool, 0); /* More special magic for SoundPro chips */ #ifdef CONFIG_PNP module_param(isapnp, int, 0); module_param(isapnpjump, int, 0); module_param(reverse, bool, 0); MODULE_PARM_DESC(isapnp, "When set to 0, Plug & Play support will be disabled"); MODULE_PARM_DESC(isapnpjump, "Jumps to a specific slot in the driver's PnP table. 
Use the source, Luke."); MODULE_PARM_DESC(reverse, "When set to 1, will reverse ISAPnP search order"); static struct pnp_dev *ad1848_dev = NULL; /* Please add new entries at the end of the table */ static struct { char *name; unsigned short card_vendor, card_device, vendor, function; short mss_io, irq, dma, dma2; /* index into isapnp table */ int type; } ad1848_isapnp_list[] __initdata = { {"CMI 8330 SoundPRO", ISAPNP_VENDOR('C','M','I'), ISAPNP_DEVICE(0x0001), ISAPNP_VENDOR('@','@','@'), ISAPNP_FUNCTION(0x0001), 0, 0, 0,-1, 0}, {"CS4232 based card", ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('C','S','C'), ISAPNP_FUNCTION(0x0000), 0, 0, 0, 1, 0}, {"CS4232 based card", ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('C','S','C'), ISAPNP_FUNCTION(0x0100), 0, 0, 0, 1, 0}, {"OPL3-SA2 WSS mode", ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('Y','M','H'), ISAPNP_FUNCTION(0x0021), 1, 0, 0, 1, 1}, {"Advanced Gravis InterWave Audio", ISAPNP_VENDOR('G','R','V'), ISAPNP_DEVICE(0x0001), ISAPNP_VENDOR('G','R','V'), ISAPNP_FUNCTION(0x0000), 0, 0, 0, 1, 0}, {NULL} }; static struct isapnp_device_id id_table[] __devinitdata = { { ISAPNP_VENDOR('C','M','I'), ISAPNP_DEVICE(0x0001), ISAPNP_VENDOR('@','@','@'), ISAPNP_FUNCTION(0x0001), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('C','S','C'), ISAPNP_FUNCTION(0x0000), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('C','S','C'), ISAPNP_FUNCTION(0x0100), 0 }, /* The main driver for this card is opl3sa2 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('Y','M','H'), ISAPNP_FUNCTION(0x0021), 0 }, */ { ISAPNP_VENDOR('G','R','V'), ISAPNP_DEVICE(0x0001), ISAPNP_VENDOR('G','R','V'), ISAPNP_FUNCTION(0x0000), 0 }, {0} }; MODULE_DEVICE_TABLE(isapnp, id_table); static struct pnp_dev *activate_dev(char *devname, char *resname, struct pnp_dev *dev) { int err; err = pnp_device_attach(dev); if (err < 0) return(NULL); if((err = pnp_activate_dev(dev)) < 0) { printk(KERN_ERR "ad1848: %s %s config failed (out of resources?)[%d]\n", devname, resname, 
err); pnp_device_detach(dev); return(NULL); } audio_activated = 1; return(dev); } static struct pnp_dev __init *ad1848_init_generic(struct pnp_card *bus, struct address_info *hw_config, int slot) { /* Configure Audio device */ if((ad1848_dev = pnp_find_dev(bus, ad1848_isapnp_list[slot].vendor, ad1848_isapnp_list[slot].function, NULL))) { if((ad1848_dev = activate_dev(ad1848_isapnp_list[slot].name, "ad1848", ad1848_dev))) { hw_config->io_base = pnp_port_start(ad1848_dev, ad1848_isapnp_list[slot].mss_io); hw_config->irq = pnp_irq(ad1848_dev, ad1848_isapnp_list[slot].irq); hw_config->dma = pnp_dma(ad1848_dev, ad1848_isapnp_list[slot].dma); if(ad1848_isapnp_list[slot].dma2 != -1) hw_config->dma2 = pnp_dma(ad1848_dev, ad1848_isapnp_list[slot].dma2); else hw_config->dma2 = -1; hw_config->card_subtype = ad1848_isapnp_list[slot].type; } else return(NULL); } else return(NULL); return(ad1848_dev); } static int __init ad1848_isapnp_init(struct address_info *hw_config, struct pnp_card *bus, int slot) { char *busname = bus->name[0] ? bus->name : ad1848_isapnp_list[slot].name; /* Initialize this baby. */ if(ad1848_init_generic(bus, hw_config, slot)) { /* We got it. */ printk(KERN_NOTICE "ad1848: PnP reports '%s' at i/o %#x, irq %d, dma %d, %d\n", busname, hw_config->io_base, hw_config->irq, hw_config->dma, hw_config->dma2); return 1; } return 0; } static int __init ad1848_isapnp_probe(struct address_info *hw_config) { static int first = 1; int i; /* Count entries in sb_isapnp_list */ for (i = 0; ad1848_isapnp_list[i].card_vendor != 0; i++); i--; /* Check and adjust isapnpjump */ if( isapnpjump < 0 || isapnpjump > i) { isapnpjump = reverse ? i : 0; printk(KERN_ERR "ad1848: Valid range for isapnpjump is 0-%d. 
Adjusted to %d.\n", i, isapnpjump); } if(!first || !reverse) i = isapnpjump; first = 0; while(ad1848_isapnp_list[i].card_vendor != 0) { static struct pnp_card *bus = NULL; while ((bus = pnp_find_card( ad1848_isapnp_list[i].card_vendor, ad1848_isapnp_list[i].card_device, bus))) { if(ad1848_isapnp_init(hw_config, bus, i)) { isapnpjump = i; /* start next search from here */ return 0; } } i += reverse ? -1 : 1; } return -ENODEV; } #endif static int __init init_ad1848(void) { printk(KERN_INFO "ad1848/cs4248 codec driver Copyright (C) by Hannu Savolainen 1993-1996\n"); #ifdef CONFIG_PNP if(isapnp && (ad1848_isapnp_probe(&cfg) < 0) ) { printk(KERN_NOTICE "ad1848: No ISAPnP cards found, trying standard ones...\n"); isapnp = 0; } #endif if(io != -1) { struct resource *ports; if( isapnp == 0 ) { if(irq == -1 || dma == -1) { printk(KERN_WARNING "ad1848: must give I/O , IRQ and DMA.\n"); return -EINVAL; } cfg.irq = irq; cfg.io_base = io; cfg.dma = dma; cfg.dma2 = dma2; cfg.card_subtype = type; } ports = request_region(io + 4, 4, "ad1848"); if (!ports) return -EBUSY; if (!request_region(io, 4, "WSS config")) { release_region(io + 4, 4); return -EBUSY; } if (!probe_ms_sound(&cfg, ports)) { release_region(io + 4, 4); release_region(io, 4); return -ENODEV; } attach_ms_sound(&cfg, ports, THIS_MODULE); loaded = 1; } return 0; } static void __exit cleanup_ad1848(void) { if(loaded) unload_ms_sound(&cfg); #ifdef CONFIG_PNP if(ad1848_dev){ if(audio_activated) pnp_device_detach(ad1848_dev); } #endif } module_init(init_ad1848); module_exit(cleanup_ad1848); #ifndef MODULE static int __init setup_ad1848(char *str) { /* io, irq, dma, dma2, type */ int ints[6]; str = get_options(str, ARRAY_SIZE(ints), ints); io = ints[1]; irq = ints[2]; dma = ints[3]; dma2 = ints[4]; type = ints[5]; return 1; } __setup("ad1848=", setup_ad1848); #endif MODULE_LICENSE("GPL");
gpl-2.0
thicklizard/Sprint-M9
kernel/crash_dump.c
8152
1265
#include <linux/kernel.h> #include <linux/crash_dump.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/export.h> /* * If we have booted due to a crash, max_pfn will be a very low value. We need * to know the amount of memory that the previous kernel used. */ unsigned long saved_max_pfn; /* * stores the physical address of elf header of crash image * * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by * is_kdump_kernel() to determine if we are booting after a panic. Hence put * it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE. */ unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; /* * stores the size of elf header of crash image */ unsigned long long elfcorehdr_size; /* * elfcorehdr= specifies the location of elf core header stored by the crashed * kernel. This option will be passed by kexec loader to the capture kernel. * * Syntax: elfcorehdr=[size[KMG]@]offset[KMG] */ static int __init setup_elfcorehdr(char *arg) { char *end; if (!arg) return -EINVAL; elfcorehdr_addr = memparse(arg, &end); if (*end == '@') { elfcorehdr_size = elfcorehdr_addr; elfcorehdr_addr = memparse(end + 1, &end); } return end > arg ? 0 : -EINVAL; } early_param("elfcorehdr", setup_elfcorehdr);
gpl-2.0
anbulang/sctp-cmt
drivers/staging/line6/podhd.c
8408
3456
/* * Line6 Pod HD * * Copyright (C) 2011 Stefan Hajnoczi <stefanha@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #include <sound/core.h> #include <sound/pcm.h> #include "audio.h" #include "driver.h" #include "pcm.h" #include "podhd.h" #define PODHD_BYTES_PER_FRAME 6 /* 24bit audio (stereo) */ static struct snd_ratden podhd_ratden = { .num_min = 48000, .num_max = 48000, .num_step = 1, .den = 1, }; static struct line6_pcm_properties podhd_pcm_properties = { .snd_line6_playback_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | #ifdef CONFIG_PM SNDRV_PCM_INFO_RESUME | #endif SNDRV_PCM_INFO_SYNC_START), .formats = SNDRV_PCM_FMTBIT_S24_3LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 60000, .period_bytes_min = 64, .period_bytes_max = 8192, .periods_min = 1, .periods_max = 1024}, .snd_line6_capture_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | #ifdef CONFIG_PM SNDRV_PCM_INFO_RESUME | #endif SNDRV_PCM_INFO_SYNC_START), .formats = SNDRV_PCM_FMTBIT_S24_3LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 60000, .period_bytes_min = 64, .period_bytes_max = 8192, .periods_min = 1, .periods_max = 1024}, .snd_line6_rates = { .nrats = 1, .rats = &podhd_ratden}, .bytes_per_frame = PODHD_BYTES_PER_FRAME }; /* POD HD destructor. */ static void podhd_destruct(struct usb_interface *interface) { struct usb_line6_podhd *podhd = usb_get_intfdata(interface); if (podhd == NULL) return; line6_cleanup_audio(&podhd->line6); } /* Try to init POD HD device. 
*/ static int podhd_try_init(struct usb_interface *interface, struct usb_line6_podhd *podhd) { int err; struct usb_line6 *line6 = &podhd->line6; if ((interface == NULL) || (podhd == NULL)) return -ENODEV; /* initialize audio system: */ err = line6_init_audio(line6); if (err < 0) return err; /* initialize MIDI subsystem: */ err = line6_init_midi(line6); if (err < 0) return err; /* initialize PCM subsystem: */ err = line6_init_pcm(line6, &podhd_pcm_properties); if (err < 0) return err; /* register USB audio system: */ err = line6_register_audio(line6); return err; } /* Init POD HD device (and clean up in case of failure). */ int line6_podhd_init(struct usb_interface *interface, struct usb_line6_podhd *podhd) { int err = podhd_try_init(interface, podhd); if (err < 0) podhd_destruct(interface); return err; } /* POD HD device disconnected. */ void line6_podhd_disconnect(struct usb_interface *interface) { struct usb_line6_podhd *podhd; if (interface == NULL) return; podhd = usb_get_intfdata(interface); if (podhd != NULL) { struct snd_line6_pcm *line6pcm = podhd->line6.line6pcm; if (line6pcm != NULL) line6_pcm_disconnect(line6pcm); } podhd_destruct(interface); }
gpl-2.0
Amperific/kernel_tuna_4.3
fs/freevxfs/vxfs_immed.c
13528
3556
/* * Copyright (c) 2000-2001 Christoph Hellwig. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Veritas filesystem driver - support for 'immed' inodes. */ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/namei.h> #include "vxfs.h" #include "vxfs_extern.h" #include "vxfs_inode.h" static void * vxfs_immed_follow_link(struct dentry *, struct nameidata *); static int vxfs_immed_readpage(struct file *, struct page *); /* * Inode operations for immed symlinks. * * Unliked all other operations we do not go through the pagecache, * but do all work directly on the inode. 
*/ const struct inode_operations vxfs_immed_symlink_iops = { .readlink = generic_readlink, .follow_link = vxfs_immed_follow_link, }; /* * Address space operations for immed files and directories. */ const struct address_space_operations vxfs_immed_aops = { .readpage = vxfs_immed_readpage, }; /** * vxfs_immed_follow_link - follow immed symlink * @dp: dentry for the link * @np: pathname lookup data for the current path walk * * Description: * vxfs_immed_follow_link restarts the pathname lookup with * the data obtained from @dp. * * Returns: * Zero on success, else a negative error code. */ static void * vxfs_immed_follow_link(struct dentry *dp, struct nameidata *np) { struct vxfs_inode_info *vip = VXFS_INO(dp->d_inode); nd_set_link(np, vip->vii_immed.vi_immed); return NULL; } /** * vxfs_immed_readpage - read part of an immed inode into pagecache * @file: file context (unused) * @page: page frame to fill in. * * Description: * vxfs_immed_readpage reads a part of the immed area of the * file that hosts @pp into the pagecache. * * Returns: * Zero on success, else a negative error code. * * Locking status: * @page is locked and will be unlocked. */ static int vxfs_immed_readpage(struct file *fp, struct page *pp) { struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host); u_int64_t offset = (u_int64_t)pp->index << PAGE_CACHE_SHIFT; caddr_t kaddr; kaddr = kmap(pp); memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_CACHE_SIZE); kunmap(pp); flush_dcache_page(pp); SetPageUptodate(pp); unlock_page(pp); return 0; }
gpl-2.0
sh95119/linux
drivers/crypto/qat/qat_common/qat_asym_algs.c
217
17830
/* This file is provided under a dual BSD/GPLv2 license. When using or redistributing this file, you may do so under either license. GPL LICENSE SUMMARY Copyright(c) 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Contact Information: qat-linux@intel.com BSD LICENSE Copyright(c) 2014 Intel Corporation. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/module.h> #include <crypto/internal/rsa.h> #include <crypto/internal/akcipher.h> #include <crypto/akcipher.h> #include <linux/dma-mapping.h> #include <linux/fips.h> #include "qat_rsakey-asn1.h" #include "icp_qat_fw_pke.h" #include "adf_accel_devices.h" #include "adf_transport.h" #include "adf_common_drv.h" #include "qat_crypto.h" static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; struct qat_rsa_input_params { union { struct { dma_addr_t m; dma_addr_t e; dma_addr_t n; } enc; struct { dma_addr_t c; dma_addr_t d; dma_addr_t n; } dec; u64 in_tab[8]; }; } __packed __aligned(64); struct qat_rsa_output_params { union { struct { dma_addr_t c; } enc; struct { dma_addr_t m; } dec; u64 out_tab[8]; }; } __packed __aligned(64); struct qat_rsa_ctx { char *n; char *e; char *d; dma_addr_t dma_n; dma_addr_t dma_e; dma_addr_t dma_d; unsigned int key_sz; struct qat_crypto_instance *inst; } __packed __aligned(64); struct qat_rsa_request { struct qat_rsa_input_params in; struct qat_rsa_output_params out; dma_addr_t phy_in; dma_addr_t phy_out; char *src_align; struct icp_qat_fw_pke_request req; struct qat_rsa_ctx *ctx; int err; } __aligned(64); static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp) { struct akcipher_request *areq = (void *)(__force long)resp->opaque; struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64); struct device *dev = &GET_DEV(req->ctx->inst->accel_dev); int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( 
resp->pke_resp_hdr.comn_resp_flags); char *ptr = areq->dst; err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL; if (req->src_align) dma_free_coherent(dev, req->ctx->key_sz, req->src_align, req->in.enc.m); else dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz, DMA_TO_DEVICE); dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz, DMA_FROM_DEVICE); dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); dma_unmap_single(dev, req->phy_out, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); areq->dst_len = req->ctx->key_sz; /* Need to set the corect length of the output */ while (!(*ptr) && areq->dst_len) { areq->dst_len--; ptr++; } if (areq->dst_len != req->ctx->key_sz) memmove(areq->dst, ptr, areq->dst_len); akcipher_request_complete(areq, err); } void qat_alg_asym_callback(void *_resp) { struct icp_qat_fw_pke_resp *resp = _resp; qat_rsa_cb(resp); } #define PKE_RSA_EP_512 0x1c161b21 #define PKE_RSA_EP_1024 0x35111bf7 #define PKE_RSA_EP_1536 0x4d111cdc #define PKE_RSA_EP_2048 0x6e111dba #define PKE_RSA_EP_3072 0x7d111ea3 #define PKE_RSA_EP_4096 0xa5101f7e static unsigned long qat_rsa_enc_fn_id(unsigned int len) { unsigned int bitslen = len << 3; switch (bitslen) { case 512: return PKE_RSA_EP_512; case 1024: return PKE_RSA_EP_1024; case 1536: return PKE_RSA_EP_1536; case 2048: return PKE_RSA_EP_2048; case 3072: return PKE_RSA_EP_3072; case 4096: return PKE_RSA_EP_4096; default: return 0; }; } #define PKE_RSA_DP1_512 0x1c161b3c #define PKE_RSA_DP1_1024 0x35111c12 #define PKE_RSA_DP1_1536 0x4d111cf7 #define PKE_RSA_DP1_2048 0x6e111dda #define PKE_RSA_DP1_3072 0x7d111ebe #define PKE_RSA_DP1_4096 0xa5101f98 static unsigned long qat_rsa_dec_fn_id(unsigned int len) { unsigned int bitslen = len << 3; switch (bitslen) { case 512: return PKE_RSA_DP1_512; case 1024: return PKE_RSA_DP1_1024; case 1536: return PKE_RSA_DP1_1536; case 2048: return PKE_RSA_DP1_2048; case 3072: return PKE_RSA_DP1_3072; case 4096: return 
PKE_RSA_DP1_4096; default: return 0; }; } static int qat_rsa_enc(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); struct qat_rsa_request *qat_req = PTR_ALIGN(akcipher_request_ctx(req), 64); struct icp_qat_fw_pke_request *msg = &qat_req->req; int ret, ctr = 0; if (unlikely(!ctx->n || !ctx->e)) return -EINVAL; if (req->dst_len < ctx->key_sz) { req->dst_len = ctx->key_sz; return -EOVERFLOW; } memset(msg, '\0', sizeof(*msg)); ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, ICP_QAT_FW_COMN_REQ_FLAG_SET); msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz); if (unlikely(!msg->pke_hdr.cd_pars.func_id)) return -EINVAL; qat_req->ctx = ctx; msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; msg->pke_hdr.comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, QAT_COMN_CD_FLD_TYPE_64BIT_ADR); qat_req->in.enc.e = ctx->dma_e; qat_req->in.enc.n = ctx->dma_n; ret = -ENOMEM; /* * src can be of any size in valid range, but HW expects it to be the * same as modulo n so in case it is different we need to allocate a * new buf and copy src data. * In other case we just need to map the user provided buffer. 
*/ if (req->src_len < ctx->key_sz) { int shift = ctx->key_sz - req->src_len; qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, &qat_req->in.enc.m, GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret; memcpy(qat_req->src_align + shift, req->src, req->src_len); } else { qat_req->src_align = NULL; qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len, DMA_TO_DEVICE); } qat_req->in.in_tab[3] = 0; qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len, DMA_FROM_DEVICE); qat_req->out.out_tab[1] = 0; qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); if (unlikely((!qat_req->src_align && dma_mapping_error(dev, qat_req->in.enc.m)) || dma_mapping_error(dev, qat_req->out.enc.c) || dma_mapping_error(dev, qat_req->phy_in) || dma_mapping_error(dev, qat_req->phy_out))) goto unmap; msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; msg->pke_mid.opaque = (uint64_t)(__force long)req; msg->input_param_count = 3; msg->output_param_count = 1; do { ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); } while (ret == -EBUSY && ctr++ < 100); if (!ret) return -EINPROGRESS; unmap: if (qat_req->src_align) dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, qat_req->in.enc.m); else if (!dma_mapping_error(dev, qat_req->in.enc.m)) dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz, DMA_TO_DEVICE); if (!dma_mapping_error(dev, qat_req->out.enc.c)) dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz, DMA_FROM_DEVICE); if (!dma_mapping_error(dev, qat_req->phy_in)) dma_unmap_single(dev, qat_req->phy_in, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); if (!dma_mapping_error(dev, qat_req->phy_out)) dma_unmap_single(dev, qat_req->phy_out, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); return ret; } static int qat_rsa_dec(struct 
akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); struct qat_rsa_request *qat_req = PTR_ALIGN(akcipher_request_ctx(req), 64); struct icp_qat_fw_pke_request *msg = &qat_req->req; int ret, ctr = 0; if (unlikely(!ctx->n || !ctx->d)) return -EINVAL; if (req->dst_len < ctx->key_sz) { req->dst_len = ctx->key_sz; return -EOVERFLOW; } memset(msg, '\0', sizeof(*msg)); ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, ICP_QAT_FW_COMN_REQ_FLAG_SET); msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz); if (unlikely(!msg->pke_hdr.cd_pars.func_id)) return -EINVAL; qat_req->ctx = ctx; msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; msg->pke_hdr.comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, QAT_COMN_CD_FLD_TYPE_64BIT_ADR); qat_req->in.dec.d = ctx->dma_d; qat_req->in.dec.n = ctx->dma_n; ret = -ENOMEM; /* * src can be of any size in valid range, but HW expects it to be the * same as modulo n so in case it is different we need to allocate a * new buf and copy src data. * In other case we just need to map the user provided buffer. 
*/ if (req->src_len < ctx->key_sz) { int shift = ctx->key_sz - req->src_len; qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, &qat_req->in.dec.c, GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret; memcpy(qat_req->src_align + shift, req->src, req->src_len); } else { qat_req->src_align = NULL; qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len, DMA_TO_DEVICE); } qat_req->in.in_tab[3] = 0; qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len, DMA_FROM_DEVICE); qat_req->out.out_tab[1] = 0; qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); if (unlikely((!qat_req->src_align && dma_mapping_error(dev, qat_req->in.dec.c)) || dma_mapping_error(dev, qat_req->out.dec.m) || dma_mapping_error(dev, qat_req->phy_in) || dma_mapping_error(dev, qat_req->phy_out))) goto unmap; msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; msg->pke_mid.opaque = (uint64_t)(__force long)req; msg->input_param_count = 3; msg->output_param_count = 1; do { ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); } while (ret == -EBUSY && ctr++ < 100); if (!ret) return -EINPROGRESS; unmap: if (qat_req->src_align) dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, qat_req->in.dec.c); else if (!dma_mapping_error(dev, qat_req->in.dec.c)) dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz, DMA_TO_DEVICE); if (!dma_mapping_error(dev, qat_req->out.dec.m)) dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz, DMA_FROM_DEVICE); if (!dma_mapping_error(dev, qat_req->phy_in)) dma_unmap_single(dev, qat_req->phy_in, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); if (!dma_mapping_error(dev, qat_req->phy_out)) dma_unmap_single(dev, qat_req->phy_out, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); return ret; } int qat_rsa_get_n(void 
*context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct qat_rsa_ctx *ctx = context; struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; int ret; while (!*ptr && vlen) { ptr++; vlen--; } ctx->key_sz = vlen; ret = -EINVAL; /* In FIPS mode only allow key size 2K & 3K */ if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) { pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); goto err; } /* invalid key size provided */ if (!qat_rsa_enc_fn_id(ctx->key_sz)) goto err; ret = -ENOMEM; ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); if (!ctx->n) goto err; memcpy(ctx->n, ptr, ctx->key_sz); return 0; err: ctx->key_sz = 0; ctx->n = NULL; return ret; } int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct qat_rsa_ctx *ctx = context; struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; while (!*ptr && vlen) { ptr++; vlen--; } if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) { ctx->e = NULL; return -EINVAL; } ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); if (!ctx->e) { ctx->e = NULL; return -ENOMEM; } memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen); return 0; } int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct qat_rsa_ctx *ctx = context; struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; int ret; while (!*ptr && vlen) { ptr++; vlen--; } ret = -EINVAL; if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) goto err; /* In FIPS mode only allow key size 2K & 3K */ if (fips_enabled && (vlen != 256 && vlen != 384)) { pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); goto err; } ret = -ENOMEM; ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); if (!ctx->n) goto 
err; memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen); return 0; err: ctx->d = NULL; return ret; } static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct device *dev = &GET_DEV(ctx->inst->accel_dev); int ret; /* Free the old key if any */ if (ctx->n) dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); if (ctx->e) dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); if (ctx->d) { memset(ctx->d, '\0', ctx->key_sz); dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); } ctx->n = NULL; ctx->e = NULL; ctx->d = NULL; ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen); if (ret < 0) goto free; if (!ctx->n || !ctx->e) { /* invalid key provided */ ret = -EINVAL; goto free; } return 0; free: if (ctx->d) { memset(ctx->d, '\0', ctx->key_sz); dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); ctx->d = NULL; } if (ctx->e) { dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); ctx->e = NULL; } if (ctx->n) { dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); ctx->n = NULL; ctx->key_sz = 0; } return ret; } static int qat_rsa_init_tfm(struct crypto_akcipher *tfm) { struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct qat_crypto_instance *inst = qat_crypto_get_instance_node(get_current_node()); if (!inst) return -EINVAL; ctx->key_sz = 0; ctx->inst = inst; return 0; } static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm) { struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct device *dev = &GET_DEV(ctx->inst->accel_dev); if (ctx->n) dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); if (ctx->e) dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); if (ctx->d) { memset(ctx->d, '\0', ctx->key_sz); dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); } qat_crypto_put_instance(ctx->inst); ctx->n = NULL; ctx->d = NULL; ctx->d = NULL; } static struct akcipher_alg rsa = { .encrypt = qat_rsa_enc, .decrypt = qat_rsa_dec, .sign = 
qat_rsa_dec, .verify = qat_rsa_enc, .setkey = qat_rsa_setkey, .init = qat_rsa_init_tfm, .exit = qat_rsa_exit_tfm, .reqsize = sizeof(struct qat_rsa_request) + 64, .base = { .cra_name = "rsa", .cra_driver_name = "qat-rsa", .cra_priority = 1000, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct qat_rsa_ctx), }, }; int qat_asym_algs_register(void) { int ret = 0; mutex_lock(&algs_lock); if (++active_devs == 1) { rsa.base.cra_flags = 0; ret = crypto_register_akcipher(&rsa); } mutex_unlock(&algs_lock); return ret; } void qat_asym_algs_unregister(void) { mutex_lock(&algs_lock); if (--active_devs == 0) crypto_unregister_akcipher(&rsa); mutex_unlock(&algs_lock); }
gpl-2.0
kbukin1/pnotify-linux-4.1.6
drivers/dma/s3c24xx-dma.c
217
38407
/*
 * S3C24XX DMA handling
 *
 * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
 *
 * based on amba-pl08x.c
 *
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
 * that can be routed to any of the 4 to 8 hardware-channels.
 *
 * Therefore on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * Open items:
 * - bursts
 */

#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-s3c24xx.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MAX_DMA_CHANNELS	8

/* Per-channel register offsets and bit definitions. */
#define S3C24XX_DISRC			0x00
#define S3C24XX_DISRCC			0x04
#define S3C24XX_DISRCC_INC_INCREMENT	0
#define S3C24XX_DISRCC_INC_FIXED	BIT(0)
#define S3C24XX_DISRCC_LOC_AHB		0
#define S3C24XX_DISRCC_LOC_APB		BIT(1)

#define S3C24XX_DIDST			0x08
#define S3C24XX_DIDSTC			0x0c
#define S3C24XX_DIDSTC_INC_INCREMENT	0
#define S3C24XX_DIDSTC_INC_FIXED	BIT(0)
#define S3C24XX_DIDSTC_LOC_AHB		0
#define S3C24XX_DIDSTC_LOC_APB		BIT(1)
#define S3C24XX_DIDSTC_INT_TC0		0
#define S3C24XX_DIDSTC_INT_RELOAD	BIT(2)

#define S3C24XX_DCON			0x10

#define S3C24XX_DCON_TC_MASK		0xfffff
#define S3C24XX_DCON_DSZ_BYTE		(0 << 20)
#define S3C24XX_DCON_DSZ_HALFWORD	(1 << 20)
#define S3C24XX_DCON_DSZ_WORD		(2 << 20)
#define S3C24XX_DCON_DSZ_MASK		(3 << 20)
#define S3C24XX_DCON_DSZ_SHIFT		20
#define S3C24XX_DCON_AUTORELOAD		0
#define S3C24XX_DCON_NORELOAD		BIT(22)
#define S3C24XX_DCON_HWTRIG		BIT(23)
#define S3C24XX_DCON_HWSRC_SHIFT	24
#define S3C24XX_DCON_SERV_SINGLE	0
#define S3C24XX_DCON_SERV_WHOLE		BIT(27)
#define S3C24XX_DCON_TSZ_UNIT		0
#define S3C24XX_DCON_TSZ_BURST4		BIT(28)
#define S3C24XX_DCON_INT		BIT(29)
#define S3C24XX_DCON_SYNC_PCLK		0
#define S3C24XX_DCON_SYNC_HCLK		BIT(30)
#define S3C24XX_DCON_DEMAND		0
#define S3C24XX_DCON_HANDSHAKE		BIT(31)

#define S3C24XX_DSTAT			0x14
#define S3C24XX_DSTAT_STAT_BUSY		BIT(20)
#define S3C24XX_DSTAT_CURRTC_MASK	0xfffff

#define S3C24XX_DMASKTRIG		0x20
#define S3C24XX_DMASKTRIG_SWTRIG	BIT(0)
#define S3C24XX_DMASKTRIG_ON		BIT(1)
#define S3C24XX_DMASKTRIG_STOP		BIT(2)

#define S3C24XX_DMAREQSEL		0x24
#define S3C24XX_DMAREQSEL_HW		BIT(0)

/*
 * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
 * for a DMA source. Instead only specific channels are valid.
 * All of these SoCs have 4 physical channels and the number of request
 * source bits is 3. Additionally we also need 1 bit to mark the channel
 * as valid.
 * Therefore we separate the chansel element of the channel data into 4
 * parts of 4 bits each, to hold the information if the channel is valid
 * and the hw request source to use.
 *
 * Example:
 * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
 * For it the chansel field would look like
 *
 * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
 * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
 * ((BIT(3) | 2) << 0 * 4)   // channel 0, with request source 2
 */
#define S3C24XX_CHANSEL_WIDTH		4
#define S3C24XX_CHANSEL_VALID		BIT(3)
#define S3C24XX_CHANSEL_REQ_MASK	7

/*
 * struct soc_data - vendor-specific config parameters for individual SoCs
 * @stride: spacing between the registers of each channel
 * @has_reqsel: does the controller use the newer requestselection mechanism
 * @has_clocks: are controllable dma-clocks present
 */
struct soc_data {
	int stride;
	bool has_reqsel;
	bool has_clocks;
};

/*
 * enum s3c24xx_dma_chan_state - holds the virtual channel states
 * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
 * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum s3c24xx_dma_chan_state {
	S3C24XX_DMA_CHAN_IDLE,
	S3C24XX_DMA_CHAN_RUNNING,
	S3C24XX_DMA_CHAN_WAITING,
};

/*
 * struct s3c24xx_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct s3c24xx_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/*
 * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @at: sg currently being transfered
 * @width: transfer width
 * @disrcc: value for source control register
 * @didstc: value for destination control register
 * @dcon: base value for dcon register
 * @cyclic: indicate cyclic transfer
 */
struct s3c24xx_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	struct list_head *at;
	u8 width;
	u32 disrcc;
	u32 didstc;
	u32 dcon;
	bool cyclic;
};

struct s3c24xx_dma_chan;

/*
 * struct s3c24xx_dma_phy - holder for the physical channels
 * @id: physical index to this channel
 * @valid: does the channel have all required elements
 * @base: virtual memory base (remapped) for the this channel
 * @irq: interrupt for this channel
 * @clk: clock for this channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: virtual channel currently being served by this physicalchannel
 * @host: a pointer to the host (internal use)
 */
struct s3c24xx_dma_phy {
	unsigned int			id;
	bool				valid;
	void __iomem			*base;
	int				irq;
	struct clk			*clk;
	spinlock_t			lock;
	struct s3c24xx_dma_chan		*serving;
	struct s3c24xx_dma_engine	*host;
};

/*
 * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
 * @id: the id of the channel
 * @name: name of the channel
 * @vc: wrappped virtual channel
 * @phy: the physical channel utilized by this channel, if there is one
 * @runtime_addr: address for RX/TX according to the runtime config
 * @at: active transaction on this channel
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 */
struct s3c24xx_dma_chan {
	int id;
	const char *name;
	struct virt_dma_chan vc;
	struct s3c24xx_dma_phy *phy;
	struct dma_slave_config cfg;
	struct s3c24xx_txd *at;
	struct s3c24xx_dma_engine *host;
	enum s3c24xx_dma_chan_state state;
	bool slave;
};

/*
 * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
 * @pdev: the corresponding platform device
 * @pdata: platform data passed in from the platform/machine
 * @base: virtual memory base (remapped)
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @phy_chans: array of data for the physical channels
 */
struct s3c24xx_dma_engine {
	struct platform_device			*pdev;
	const struct s3c24xx_dma_platdata	*pdata;
	struct soc_data				*sdata;
	void __iomem				*base;
	struct dma_device			slave;
	struct dma_device			memcpy;
	struct s3c24xx_dma_phy			*phy_chans;
};

/*
 * Physical channel handling
 */

/*
 * Check whether a certain channel is busy or not.
 */
static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
{
	unsigned int val = readl(phy->base + S3C24XX_DSTAT);

	return val & S3C24XX_DSTAT_STAT_BUSY;
}

/*
 * Decide whether a given physical channel may serve this virtual channel.
 * Memcpy channels and reqsel-capable controllers accept any phy; older
 * controllers consult the per-channel chansel validity nibble.
 */
static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
				  struct s3c24xx_dma_phy *phy)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
	int phyvalid;

	/* every phy is valid for memcopy channels */
	if (!s3cchan->slave)
		return true;

	/* On newer variants all phys can be used for all virtual channels */
	if (s3cdma->sdata->has_reqsel)
		return true;

	phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
	return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static
struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_dma_channel *cdata;
	struct s3c24xx_dma_phy *phy = NULL;
	unsigned long flags;
	int i;
	int ret;

	/* NOTE(review): cdata is assigned here but not read afterwards -
	 * s3c24xx_dma_phy_valid() re-derives it; looks like dead code. */
	if (s3cchan->slave)
		cdata = &pdata->channels[s3cchan->id];

	/* Scan all phys and claim the first free one valid for this vc. */
	for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
		phy = &s3cdma->phy_chans[i];

		if (!phy->valid)
			continue;

		if (!s3c24xx_dma_phy_valid(s3cchan, phy))
			continue;

		spin_lock_irqsave(&phy->lock, flags);

		if (!phy->serving) {
			phy->serving = s3cchan;
			spin_unlock_irqrestore(&phy->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&phy->lock, flags);
	}

	/* No physical channel available, cope with it */
	if (i == s3cdma->pdata->num_phy_channels) {
		dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
		return NULL;
	}

	/* start the phy clock */
	if (s3cdma->sdata->has_clocks) {
		ret = clk_enable(phy->clk);
		if (ret) {
			dev_err(&s3cdma->pdev->dev,
				"could not enable clock for channel %d, err %d\n",
				phy->id, ret);
			phy->serving = NULL;
			return NULL;
		}
	}

	return phy;
}

/*
 * Mark the physical channel as free.
 *
 * This drops the link between the physical and virtual channel.
 */
static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
{
	struct s3c24xx_dma_engine *s3cdma = phy->host;

	if (s3cdma->sdata->has_clocks)
		clk_disable(phy->clk);

	phy->serving = NULL;
}

/*
 * Stops the channel by writing the stop bit.
 * This should not be used for an on-going transfer, but as a method of
 * shutting down a channel (eg, when it's no longer used) or terminating a
 * transfer.
 */
static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
{
	writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
}

/*
 * Virtual channel handling
 */

static inline
struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
}

/*
 * Bytes remaining in the currently active sg: the hardware transfer
 * counter (in units of the transfer width) times the width.
 */
static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	struct s3c24xx_txd *txd = s3cchan->at;
	u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;

	return tc * txd->width;
}

/* Store the slave config; only valid for slave (non-memcpy) channels. */
static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	unsigned long flags;
	int ret = 0;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);

	if (!s3cchan->slave) {
		ret = -EINVAL;
		goto out;
	}

	s3cchan->cfg = *config;

out:
	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
	return ret;
}

/*
 * Transfer handling
 */

static inline
struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct s3c24xx_txd, vd.tx);
}

/* Allocate a txd preset to interrupt-on-completion, no auto-reload. */
static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
{
	struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		INIT_LIST_HEAD(&txd->dsg_list);
		txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
	}

	return txd;
}

/* Free a txd and all of its child sg entries. */
static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
{
	struct s3c24xx_sg *dsg, *_dsg;

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

/*
 * Program the physical channel registers for the sg that txd->at points
 * at and kick the transfer (software-triggered for memcpy channels,
 * hardware-triggered for slave channels).
 */
static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
				       struct s3c24xx_txd *txd)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
	u32 dcon = txd->dcon;
	u32 val;

	/* transfer-size and -count from len and width */
	switch (txd->width) {
	case 1:
		dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
		break;
	case 2:
		dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
		break;
	case 4:
		dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
		break;
	}

	if (s3cchan->slave) {
		struct s3c24xx_dma_channel *cdata =
					&pdata->channels[s3cchan->id];

		if (s3cdma->sdata->has_reqsel) {
			writel_relaxed((cdata->chansel << 1) |
							S3C24XX_DMAREQSEL_HW,
					phy->base + S3C24XX_DMAREQSEL);
		} else {
			int csel = cdata->chansel >> (phy->id *
							S3C24XX_CHANSEL_WIDTH);

			csel &= S3C24XX_CHANSEL_REQ_MASK;
			dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
			dcon |= S3C24XX_DCON_HWTRIG;
		}
	} else {
		if (s3cdma->sdata->has_reqsel)
			writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
	}

	writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
	writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
	writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
	writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
	writel_relaxed(dcon, phy->base + S3C24XX_DCON);

	val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
	val &= ~S3C24XX_DMASKTRIG_STOP;
	val |= S3C24XX_DMASKTRIG_ON;

	/* trigger the dma operation for memcpy transfers */
	if (!s3cchan->slave)
		val |= S3C24XX_DMASKTRIG_SWTRIG;

	writel(val, phy->base + S3C24XX_DMASKTRIG);
}

/*
 * Set the initial DMA register values and start first sg.
 */
static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);

	list_del(&txd->vd.node);

	s3cchan->at = txd;

	/* Wait for channel inactive */
	while (s3c24xx_dma_phy_busy(phy))
		cpu_relax();

	/* point to the first element of the sg list */
	txd->at = txd->dsg_list.next;
	s3c24xx_dma_start_next_sg(s3cchan, txd);
}

/* Move all not-yet-issued descriptors of a vc onto a list and free them. */
static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
				struct s3c24xx_dma_chan *s3cchan)
{
	LIST_HEAD(head);

	vchan_get_all_descriptors(&s3cchan->vc, &head);
	vchan_dma_desc_free_list(&s3cchan->vc, &head);
}

/*
 * Try to allocate a physical channel.  When successful, assign it to
 * this virtual channel, and initiate the next descriptor.  The
 * virtual channel lock must be held at this point.
 */
static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_phy *phy;

	phy = s3c24xx_dma_get_phy(s3cchan);
	if (!phy) {
		dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
			s3cchan->name);
		s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
		return;
	}

	dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
		phy->id, s3cchan->name);

	s3cchan->phy = phy;
	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;

	s3c24xx_dma_start_next_txd(s3cchan);
}

/* Hand an already-held phy directly to another waiting virtual channel. */
static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
	struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;

	dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
		phy->id, s3cchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	phy->serving = s3cchan;
	s3cchan->phy = phy;
	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
	s3c24xx_dma_start_next_txd(s3cchan);
}

/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_chan *p, *next;

retry:
	next = NULL;

	/* Find a waiting virtual channel for the next transfer. */
	list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
		if (p->state == S3C24XX_DMA_CHAN_WAITING) {
			next = p;
			break;
		}

	if (!next) {
		list_for_each_entry(p, &s3cdma->slave.channels,
				    vc.chan.device_node)
			if (p->state == S3C24XX_DMA_CHAN_WAITING &&
				      s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
				next = p;
				break;
			}
	}

	/* Ensure that the physical channel is stopped */
	s3c24xx_dma_terminate_phy(s3cchan->phy);

	if (next) {
		bool success;

		/*
		 * Eww.  We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == S3C24XX_DMA_CHAN_WAITING;
		if (success)
			s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		s3c24xx_dma_put_phy(s3cchan->phy);
	}

	s3cchan->phy = NULL;
	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
}

/* Descriptor destructor: unmap memcpy buffers, then free the txd. */
static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
{
	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);

	if (!s3cchan->slave)
		dma_descriptor_unmap(&vd->tx);

	s3c24xx_dma_free_txd(txd);
}

static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
{
	struct s3c24xx_dma_phy *phy = data;
	struct s3c24xx_dma_chan *s3cchan = phy->serving;
	struct s3c24xx_txd *txd;

	dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);

	/*
	 * Interrupts happen to
	 * notify the completion of a transfer and the
	 * channel should have moved into its stop state already on its own.
	 * Therefore interrupts on channels not bound to a virtual channel
	 * should never happen. Nevertheless send a terminate command to the
	 * channel if the unlikely case happens.
	 */
	if (unlikely(!s3cchan)) {
		dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
			phy->id);

		s3c24xx_dma_terminate_phy(phy);
		return IRQ_HANDLED;
	}

	spin_lock(&s3cchan->vc.lock);
	txd = s3cchan->at;
	if (txd) {
		/* when more sg's are in this txd, start the next one */
		if (!list_is_last(txd->at, &txd->dsg_list)) {
			txd->at = txd->at->next;
			if (txd->cyclic)
				vchan_cyclic_callback(&txd->vd);
			s3c24xx_dma_start_next_sg(s3cchan, txd);
		} else if (!txd->cyclic) {
			s3cchan->at = NULL;
			vchan_cookie_complete(&txd->vd);

			/*
			 * And start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&s3cchan->vc))
				s3c24xx_dma_start_next_txd(s3cchan);
			else
				s3c24xx_dma_phy_free(s3cchan);
		} else {
			vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset at beginning */
			txd->at = txd->dsg_list.next;
			s3c24xx_dma_start_next_sg(s3cchan, txd);
		}
	}
	spin_unlock(&s3cchan->vc.lock);

	return IRQ_HANDLED;
}

/*
 * The DMA ENGINE API
 */

/* Stop the channel, drop the active txd and flush all queued descriptors. */
static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);

	if (!s3cchan->phy && !s3cchan->at) {
		dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
			s3cchan->id);
		ret = -EINVAL;
		goto unlock;
	}

	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;

	/* Mark physical channel as free */
	if (s3cchan->phy)
		s3c24xx_dma_phy_free(s3cchan);

	/* Dequeue current job */
	if (s3cchan->at) {
		s3c24xx_dma_desc_free(&s3cchan->at->vd);
		s3cchan->at = NULL;
	}

	/* Dequeue jobs not yet fired as well */
	s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
unlock:
	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

	return ret;
}

static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}

/*
 * Report transfer status and, when a txstate is supplied, the residue:
 * for a not-yet-started descriptor the sum of all its sg lengths, for the
 * running one the remaining sg's plus the hardware's current counter.
 */
static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_txd *txd;
	struct s3c24xx_sg *dsg;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
		return ret;
	}

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate) {
		spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
		return ret;
	}

	vd = vchan_find_desc(&s3cchan->vc, cookie);
	if (vd) {
		/* On the issued list, so hasn't been processed yet */
		txd = to_s3c24xx_txd(&vd->tx);

		list_for_each_entry(dsg, &txd->dsg_list, node)
			bytes += dsg->len;
	} else {
		/*
		 * Currently running, so sum over the pending sg's and
		 * the currently active one.
*/ txd = s3cchan->at; dsg = list_entry(txd->at, struct s3c24xx_sg, node); list_for_each_entry_from(dsg, &txd->dsg_list, node) bytes += dsg->len; bytes += s3c24xx_dma_getbytes_chan(s3cchan); } spin_unlock_irqrestore(&s3cchan->vc.lock, flags); /* * This cookie not complete yet * Get number of bytes left in the active transactions and queue */ dma_set_residue(txstate, bytes); /* Whether waiting or running, we're in progress */ return ret; } /* * Initialize a descriptor to be used by memcpy submit */ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy( struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); struct s3c24xx_dma_engine *s3cdma = s3cchan->host; struct s3c24xx_txd *txd; struct s3c24xx_sg *dsg; int src_mod, dest_mod; dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %d bytes from %s\n", len, s3cchan->name); if ((len & S3C24XX_DCON_TC_MASK) != len) { dev_err(&s3cdma->pdev->dev, "memcpy size %d to large\n", len); return NULL; } txd = s3c24xx_dma_get_txd(); if (!txd) return NULL; dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT); if (!dsg) { s3c24xx_dma_free_txd(txd); return NULL; } list_add_tail(&dsg->node, &txd->dsg_list); dsg->src_addr = src; dsg->dst_addr = dest; dsg->len = len; /* * Determine a suitable transfer width. * The DMA controller cannot fetch/store information which is not * naturally aligned on the bus, i.e., a 4 byte fetch must start at * an address divisible by 4 - more generally addr % width must be 0. */ src_mod = src % 4; dest_mod = dest % 4; switch (len % 4) { case 0: txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1; break; case 2: txd->width = ((src_mod == 2 || src_mod == 0) && (dest_mod == 2 || dest_mod == 0)) ? 
2 : 1; break; default: txd->width = 1; break; } txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT; txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT; txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK | S3C24XX_DCON_SERV_WHOLE; return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); } static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, enum dma_transfer_direction direction, unsigned long flags) { struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); struct s3c24xx_dma_engine *s3cdma = s3cchan->host; const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; struct s3c24xx_txd *txd; struct s3c24xx_sg *dsg; unsigned sg_len; dma_addr_t slave_addr; u32 hwcfg = 0; int i; dev_dbg(&s3cdma->pdev->dev, "prepare cyclic transaction of %zu bytes with period %zu from %s\n", size, period, s3cchan->name); if (!is_slave_direction(direction)) { dev_err(&s3cdma->pdev->dev, "direction %d unsupported\n", direction); return NULL; } txd = s3c24xx_dma_get_txd(); if (!txd) return NULL; txd->cyclic = 1; if (cdata->handshake) txd->dcon |= S3C24XX_DCON_HANDSHAKE; switch (cdata->bus) { case S3C24XX_DMA_APB: txd->dcon |= S3C24XX_DCON_SYNC_PCLK; hwcfg |= S3C24XX_DISRCC_LOC_APB; break; case S3C24XX_DMA_AHB: txd->dcon |= S3C24XX_DCON_SYNC_HCLK; hwcfg |= S3C24XX_DISRCC_LOC_AHB; break; } /* * Always assume our peripheral desintation is a fixed * address in memory. */ hwcfg |= S3C24XX_DISRCC_INC_FIXED; /* * Individual dma operations are requested by the slave, * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE). 
*/ txd->dcon |= S3C24XX_DCON_SERV_SINGLE; if (direction == DMA_MEM_TO_DEV) { txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT; txd->didstc = hwcfg; slave_addr = s3cchan->cfg.dst_addr; txd->width = s3cchan->cfg.dst_addr_width; } else { txd->disrcc = hwcfg; txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT; slave_addr = s3cchan->cfg.src_addr; txd->width = s3cchan->cfg.src_addr_width; } sg_len = size / period; for (i = 0; i < sg_len; i++) { dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT); if (!dsg) { s3c24xx_dma_free_txd(txd); return NULL; } list_add_tail(&dsg->node, &txd->dsg_list); dsg->len = period; /* Check last period length */ if (i == sg_len - 1) dsg->len = size - period * i; if (direction == DMA_MEM_TO_DEV) { dsg->src_addr = addr + period * i; dsg->dst_addr = slave_addr; } else { /* DMA_DEV_TO_MEM */ dsg->src_addr = slave_addr; dsg->dst_addr = addr + period * i; } } return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); } static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); struct s3c24xx_dma_engine *s3cdma = s3cchan->host; const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; struct s3c24xx_txd *txd; struct s3c24xx_sg *dsg; struct scatterlist *sg; dma_addr_t slave_addr; u32 hwcfg = 0; int tmp; dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n", sg_dma_len(sgl), s3cchan->name); txd = s3c24xx_dma_get_txd(); if (!txd) return NULL; if (cdata->handshake) txd->dcon |= S3C24XX_DCON_HANDSHAKE; switch (cdata->bus) { case S3C24XX_DMA_APB: txd->dcon |= S3C24XX_DCON_SYNC_PCLK; hwcfg |= S3C24XX_DISRCC_LOC_APB; break; case S3C24XX_DMA_AHB: txd->dcon |= S3C24XX_DCON_SYNC_HCLK; hwcfg |= S3C24XX_DISRCC_LOC_AHB; break; } /* * Always assume our 
peripheral desintation is a fixed * address in memory. */ hwcfg |= S3C24XX_DISRCC_INC_FIXED; /* * Individual dma operations are requested by the slave, * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE). */ txd->dcon |= S3C24XX_DCON_SERV_SINGLE; if (direction == DMA_MEM_TO_DEV) { txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT; txd->didstc = hwcfg; slave_addr = s3cchan->cfg.dst_addr; txd->width = s3cchan->cfg.dst_addr_width; } else if (direction == DMA_DEV_TO_MEM) { txd->disrcc = hwcfg; txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT; slave_addr = s3cchan->cfg.src_addr; txd->width = s3cchan->cfg.src_addr_width; } else { s3c24xx_dma_free_txd(txd); dev_err(&s3cdma->pdev->dev, "direction %d unsupported\n", direction); return NULL; } for_each_sg(sgl, sg, sg_len, tmp) { dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT); if (!dsg) { s3c24xx_dma_free_txd(txd); return NULL; } list_add_tail(&dsg->node, &txd->dsg_list); dsg->len = sg_dma_len(sg); if (direction == DMA_MEM_TO_DEV) { dsg->src_addr = sg_dma_address(sg); dsg->dst_addr = slave_addr; } else { /* DMA_DEV_TO_MEM */ dsg->src_addr = slave_addr; dsg->dst_addr = sg_dma_address(sg); } } return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); } /* * Slave transactions callback to the slave device to allow * synchronization of slave DMA signals with the DMAC enable */ static void s3c24xx_dma_issue_pending(struct dma_chan *chan) { struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); unsigned long flags; spin_lock_irqsave(&s3cchan->vc.lock, flags); if (vchan_issue_pending(&s3cchan->vc)) { if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING) s3c24xx_dma_phy_alloc_and_start(s3cchan); } spin_unlock_irqrestore(&s3cchan->vc.lock, flags); } /* * Bringup and teardown */ /* * Initialise the DMAC memcpy/slave channels. 
* Make a local wrapper to hold required data */ static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma, struct dma_device *dmadev, unsigned int channels, bool slave) { struct s3c24xx_dma_chan *chan; int i; INIT_LIST_HEAD(&dmadev->channels); /* * Register as many many memcpy as we have physical channels, * we won't always be able to use all but the code will have * to cope with that situation. */ for (i = 0; i < channels; i++) { chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL); if (!chan) { dev_err(dmadev->dev, "%s no memory for channel\n", __func__); return -ENOMEM; } chan->id = i; chan->host = s3cdma; chan->state = S3C24XX_DMA_CHAN_IDLE; if (slave) { chan->slave = true; chan->name = kasprintf(GFP_KERNEL, "slave%d", i); if (!chan->name) return -ENOMEM; } else { chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); if (!chan->name) return -ENOMEM; } dev_dbg(dmadev->dev, "initialize virtual channel \"%s\"\n", chan->name); chan->vc.desc_free = s3c24xx_dma_desc_free; vchan_init(&chan->vc, dmadev); } dev_info(dmadev->dev, "initialized %d virtual %s channels\n", i, slave ? 
"slave" : "memcpy"); return i; } static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev) { struct s3c24xx_dma_chan *chan = NULL; struct s3c24xx_dma_chan *next; list_for_each_entry_safe(chan, next, &dmadev->channels, vc.chan.device_node) list_del(&chan->vc.chan.device_node); } /* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */ static struct soc_data soc_s3c2410 = { .stride = 0x40, .has_reqsel = false, .has_clocks = false, }; /* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */ static struct soc_data soc_s3c2412 = { .stride = 0x40, .has_reqsel = true, .has_clocks = true, }; /* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */ static struct soc_data soc_s3c2443 = { .stride = 0x100, .has_reqsel = true, .has_clocks = true, }; static struct platform_device_id s3c24xx_dma_driver_ids[] = { { .name = "s3c2410-dma", .driver_data = (kernel_ulong_t)&soc_s3c2410, }, { .name = "s3c2412-dma", .driver_data = (kernel_ulong_t)&soc_s3c2412, }, { .name = "s3c2443-dma", .driver_data = (kernel_ulong_t)&soc_s3c2443, }, { }, }; static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev) { return (struct soc_data *) platform_get_device_id(pdev)->driver_data; } static int s3c24xx_dma_probe(struct platform_device *pdev) { const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev); struct s3c24xx_dma_engine *s3cdma; struct soc_data *sdata; struct resource *res; int ret; int i; if (!pdata) { dev_err(&pdev->dev, "platform data missing\n"); return -ENODEV; } /* Basic sanity check */ if (pdata->num_phy_channels > MAX_DMA_CHANNELS) { dev_err(&pdev->dev, "to many dma channels %d, max %d\n", pdata->num_phy_channels, MAX_DMA_CHANNELS); return -EINVAL; } sdata = s3c24xx_dma_get_soc_data(pdev); if (!sdata) return -EINVAL; s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL); if (!s3cdma) return -ENOMEM; s3cdma->pdev = pdev; s3cdma->pdata = pdata; s3cdma->sdata = sdata; res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); s3cdma->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(s3cdma->base)) return PTR_ERR(s3cdma->base); s3cdma->phy_chans = devm_kzalloc(&pdev->dev, sizeof(struct s3c24xx_dma_phy) * pdata->num_phy_channels, GFP_KERNEL); if (!s3cdma->phy_chans) return -ENOMEM; /* acquire irqs and clocks for all physical channels */ for (i = 0; i < pdata->num_phy_channels; i++) { struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; char clk_name[6]; phy->id = i; phy->base = s3cdma->base + (i * sdata->stride); phy->host = s3cdma; phy->irq = platform_get_irq(pdev, i); if (phy->irq < 0) { dev_err(&pdev->dev, "failed to get irq %d, err %d\n", i, phy->irq); continue; } ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq, 0, pdev->name, phy); if (ret) { dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n", i, ret); continue; } if (sdata->has_clocks) { sprintf(clk_name, "dma.%d", i); phy->clk = devm_clk_get(&pdev->dev, clk_name); if (IS_ERR(phy->clk) && sdata->has_clocks) { dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n", i, PTR_ERR(phy->clk)); continue; } ret = clk_prepare(phy->clk); if (ret) { dev_err(&pdev->dev, "clock for phy %d failed, error %d\n", i, ret); continue; } } spin_lock_init(&phy->lock); phy->valid = true; dev_dbg(&pdev->dev, "physical channel %d is %s\n", i, s3c24xx_dma_phy_busy(phy) ? 
"BUSY" : "FREE"); } /* Initialize memcpy engine */ dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask); dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask); s3cdma->memcpy.dev = &pdev->dev; s3cdma->memcpy.device_free_chan_resources = s3c24xx_dma_free_chan_resources; s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status; s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config; s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all; /* Initialize slave engine for SoC internal dedicated peripherals */ dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask); dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask); s3cdma->slave.dev = &pdev->dev; s3cdma->slave.device_free_chan_resources = s3c24xx_dma_free_chan_resources; s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status; s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config; s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all; /* Register as many memcpy channels as there are physical channels */ ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy, pdata->num_phy_channels, false); if (ret <= 0) { dev_warn(&pdev->dev, "%s failed to enumerate memcpy channels - %d\n", __func__, ret); goto err_memcpy; } /* Register slave channels */ ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave, pdata->num_channels, true); if (ret <= 0) { dev_warn(&pdev->dev, "%s failed to enumerate slave channels - %d\n", __func__, ret); goto err_slave; } ret = dma_async_device_register(&s3cdma->memcpy); if (ret) { dev_warn(&pdev->dev, "%s failed to register memcpy as an async device - %d\n", __func__, ret); goto err_memcpy_reg; } ret = 
dma_async_device_register(&s3cdma->slave); if (ret) { dev_warn(&pdev->dev, "%s failed to register slave as an async device - %d\n", __func__, ret); goto err_slave_reg; } platform_set_drvdata(pdev, s3cdma); dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n", pdata->num_phy_channels); return 0; err_slave_reg: dma_async_device_unregister(&s3cdma->memcpy); err_memcpy_reg: s3c24xx_dma_free_virtual_channels(&s3cdma->slave); err_slave: s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy); err_memcpy: if (sdata->has_clocks) for (i = 0; i < pdata->num_phy_channels; i++) { struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; if (phy->valid) clk_unprepare(phy->clk); } return ret; } static int s3c24xx_dma_remove(struct platform_device *pdev) { const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev); struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev); struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev); int i; dma_async_device_unregister(&s3cdma->slave); dma_async_device_unregister(&s3cdma->memcpy); s3c24xx_dma_free_virtual_channels(&s3cdma->slave); s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy); if (sdata->has_clocks) for (i = 0; i < pdata->num_phy_channels; i++) { struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; if (phy->valid) clk_unprepare(phy->clk); } return 0; } static struct platform_driver s3c24xx_dma_driver = { .driver = { .name = "s3c24xx-dma", }, .id_table = s3c24xx_dma_driver_ids, .probe = s3c24xx_dma_probe, .remove = s3c24xx_dma_remove, }; module_platform_driver(s3c24xx_dma_driver); bool s3c24xx_dma_filter(struct dma_chan *chan, void *param) { struct s3c24xx_dma_chan *s3cchan; if (chan->device->dev->driver != &s3c24xx_dma_driver.driver) return false; s3cchan = to_s3c24xx_dma_chan(chan); return s3cchan->id == (int)param; } EXPORT_SYMBOL(s3c24xx_dma_filter); MODULE_DESCRIPTION("S3C24XX DMA Driver"); MODULE_AUTHOR("Heiko Stuebner"); MODULE_LICENSE("GPL v2");
gpl-2.0
qoo00783/linux
drivers/crypto/qat/qat_common/qat_asym_algs.c
217
17830
/* This file is provided under a dual BSD/GPLv2 license. When using or redistributing this file, you may do so under either license. GPL LICENSE SUMMARY Copyright(c) 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Contact Information: qat-linux@intel.com BSD LICENSE Copyright(c) 2014 Intel Corporation. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/module.h> #include <crypto/internal/rsa.h> #include <crypto/internal/akcipher.h> #include <crypto/akcipher.h> #include <linux/dma-mapping.h> #include <linux/fips.h> #include "qat_rsakey-asn1.h" #include "icp_qat_fw_pke.h" #include "adf_accel_devices.h" #include "adf_transport.h" #include "adf_common_drv.h" #include "qat_crypto.h" static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; struct qat_rsa_input_params { union { struct { dma_addr_t m; dma_addr_t e; dma_addr_t n; } enc; struct { dma_addr_t c; dma_addr_t d; dma_addr_t n; } dec; u64 in_tab[8]; }; } __packed __aligned(64); struct qat_rsa_output_params { union { struct { dma_addr_t c; } enc; struct { dma_addr_t m; } dec; u64 out_tab[8]; }; } __packed __aligned(64); struct qat_rsa_ctx { char *n; char *e; char *d; dma_addr_t dma_n; dma_addr_t dma_e; dma_addr_t dma_d; unsigned int key_sz; struct qat_crypto_instance *inst; } __packed __aligned(64); struct qat_rsa_request { struct qat_rsa_input_params in; struct qat_rsa_output_params out; dma_addr_t phy_in; dma_addr_t phy_out; char *src_align; struct icp_qat_fw_pke_request req; struct qat_rsa_ctx *ctx; int err; } __aligned(64); static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp) { struct akcipher_request *areq = (void *)(__force long)resp->opaque; struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64); struct device *dev = &GET_DEV(req->ctx->inst->accel_dev); int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( 
resp->pke_resp_hdr.comn_resp_flags); char *ptr = areq->dst; err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL; if (req->src_align) dma_free_coherent(dev, req->ctx->key_sz, req->src_align, req->in.enc.m); else dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz, DMA_TO_DEVICE); dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz, DMA_FROM_DEVICE); dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); dma_unmap_single(dev, req->phy_out, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); areq->dst_len = req->ctx->key_sz; /* Need to set the corect length of the output */ while (!(*ptr) && areq->dst_len) { areq->dst_len--; ptr++; } if (areq->dst_len != req->ctx->key_sz) memmove(areq->dst, ptr, areq->dst_len); akcipher_request_complete(areq, err); } void qat_alg_asym_callback(void *_resp) { struct icp_qat_fw_pke_resp *resp = _resp; qat_rsa_cb(resp); } #define PKE_RSA_EP_512 0x1c161b21 #define PKE_RSA_EP_1024 0x35111bf7 #define PKE_RSA_EP_1536 0x4d111cdc #define PKE_RSA_EP_2048 0x6e111dba #define PKE_RSA_EP_3072 0x7d111ea3 #define PKE_RSA_EP_4096 0xa5101f7e static unsigned long qat_rsa_enc_fn_id(unsigned int len) { unsigned int bitslen = len << 3; switch (bitslen) { case 512: return PKE_RSA_EP_512; case 1024: return PKE_RSA_EP_1024; case 1536: return PKE_RSA_EP_1536; case 2048: return PKE_RSA_EP_2048; case 3072: return PKE_RSA_EP_3072; case 4096: return PKE_RSA_EP_4096; default: return 0; }; } #define PKE_RSA_DP1_512 0x1c161b3c #define PKE_RSA_DP1_1024 0x35111c12 #define PKE_RSA_DP1_1536 0x4d111cf7 #define PKE_RSA_DP1_2048 0x6e111dda #define PKE_RSA_DP1_3072 0x7d111ebe #define PKE_RSA_DP1_4096 0xa5101f98 static unsigned long qat_rsa_dec_fn_id(unsigned int len) { unsigned int bitslen = len << 3; switch (bitslen) { case 512: return PKE_RSA_DP1_512; case 1024: return PKE_RSA_DP1_1024; case 1536: return PKE_RSA_DP1_1536; case 2048: return PKE_RSA_DP1_2048; case 3072: return PKE_RSA_DP1_3072; case 4096: return 
PKE_RSA_DP1_4096; default: return 0; }; } static int qat_rsa_enc(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); struct qat_rsa_request *qat_req = PTR_ALIGN(akcipher_request_ctx(req), 64); struct icp_qat_fw_pke_request *msg = &qat_req->req; int ret, ctr = 0; if (unlikely(!ctx->n || !ctx->e)) return -EINVAL; if (req->dst_len < ctx->key_sz) { req->dst_len = ctx->key_sz; return -EOVERFLOW; } memset(msg, '\0', sizeof(*msg)); ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, ICP_QAT_FW_COMN_REQ_FLAG_SET); msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz); if (unlikely(!msg->pke_hdr.cd_pars.func_id)) return -EINVAL; qat_req->ctx = ctx; msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; msg->pke_hdr.comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, QAT_COMN_CD_FLD_TYPE_64BIT_ADR); qat_req->in.enc.e = ctx->dma_e; qat_req->in.enc.n = ctx->dma_n; ret = -ENOMEM; /* * src can be of any size in valid range, but HW expects it to be the * same as modulo n so in case it is different we need to allocate a * new buf and copy src data. * In other case we just need to map the user provided buffer. 
*/ if (req->src_len < ctx->key_sz) { int shift = ctx->key_sz - req->src_len; qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, &qat_req->in.enc.m, GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret; memcpy(qat_req->src_align + shift, req->src, req->src_len); } else { qat_req->src_align = NULL; qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len, DMA_TO_DEVICE); } qat_req->in.in_tab[3] = 0; qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len, DMA_FROM_DEVICE); qat_req->out.out_tab[1] = 0; qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); if (unlikely((!qat_req->src_align && dma_mapping_error(dev, qat_req->in.enc.m)) || dma_mapping_error(dev, qat_req->out.enc.c) || dma_mapping_error(dev, qat_req->phy_in) || dma_mapping_error(dev, qat_req->phy_out))) goto unmap; msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; msg->pke_mid.opaque = (uint64_t)(__force long)req; msg->input_param_count = 3; msg->output_param_count = 1; do { ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); } while (ret == -EBUSY && ctr++ < 100); if (!ret) return -EINPROGRESS; unmap: if (qat_req->src_align) dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, qat_req->in.enc.m); else if (!dma_mapping_error(dev, qat_req->in.enc.m)) dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz, DMA_TO_DEVICE); if (!dma_mapping_error(dev, qat_req->out.enc.c)) dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz, DMA_FROM_DEVICE); if (!dma_mapping_error(dev, qat_req->phy_in)) dma_unmap_single(dev, qat_req->phy_in, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); if (!dma_mapping_error(dev, qat_req->phy_out)) dma_unmap_single(dev, qat_req->phy_out, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); return ret; } static int qat_rsa_dec(struct 
akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); struct qat_rsa_request *qat_req = PTR_ALIGN(akcipher_request_ctx(req), 64); struct icp_qat_fw_pke_request *msg = &qat_req->req; int ret, ctr = 0; if (unlikely(!ctx->n || !ctx->d)) return -EINVAL; if (req->dst_len < ctx->key_sz) { req->dst_len = ctx->key_sz; return -EOVERFLOW; } memset(msg, '\0', sizeof(*msg)); ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, ICP_QAT_FW_COMN_REQ_FLAG_SET); msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz); if (unlikely(!msg->pke_hdr.cd_pars.func_id)) return -EINVAL; qat_req->ctx = ctx; msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; msg->pke_hdr.comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, QAT_COMN_CD_FLD_TYPE_64BIT_ADR); qat_req->in.dec.d = ctx->dma_d; qat_req->in.dec.n = ctx->dma_n; ret = -ENOMEM; /* * src can be of any size in valid range, but HW expects it to be the * same as modulo n so in case it is different we need to allocate a * new buf and copy src data. * In other case we just need to map the user provided buffer. 
*/ if (req->src_len < ctx->key_sz) { int shift = ctx->key_sz - req->src_len; qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, &qat_req->in.dec.c, GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret; memcpy(qat_req->src_align + shift, req->src, req->src_len); } else { qat_req->src_align = NULL; qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len, DMA_TO_DEVICE); } qat_req->in.in_tab[3] = 0; qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len, DMA_FROM_DEVICE); qat_req->out.out_tab[1] = 0; qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); if (unlikely((!qat_req->src_align && dma_mapping_error(dev, qat_req->in.dec.c)) || dma_mapping_error(dev, qat_req->out.dec.m) || dma_mapping_error(dev, qat_req->phy_in) || dma_mapping_error(dev, qat_req->phy_out))) goto unmap; msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; msg->pke_mid.opaque = (uint64_t)(__force long)req; msg->input_param_count = 3; msg->output_param_count = 1; do { ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); } while (ret == -EBUSY && ctr++ < 100); if (!ret) return -EINPROGRESS; unmap: if (qat_req->src_align) dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, qat_req->in.dec.c); else if (!dma_mapping_error(dev, qat_req->in.dec.c)) dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz, DMA_TO_DEVICE); if (!dma_mapping_error(dev, qat_req->out.dec.m)) dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz, DMA_FROM_DEVICE); if (!dma_mapping_error(dev, qat_req->phy_in)) dma_unmap_single(dev, qat_req->phy_in, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); if (!dma_mapping_error(dev, qat_req->phy_out)) dma_unmap_single(dev, qat_req->phy_out, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); return ret; } int qat_rsa_get_n(void 
*context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct qat_rsa_ctx *ctx = context; struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; int ret; while (!*ptr && vlen) { ptr++; vlen--; } ctx->key_sz = vlen; ret = -EINVAL; /* In FIPS mode only allow key size 2K & 3K */ if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) { pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); goto err; } /* invalid key size provided */ if (!qat_rsa_enc_fn_id(ctx->key_sz)) goto err; ret = -ENOMEM; ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); if (!ctx->n) goto err; memcpy(ctx->n, ptr, ctx->key_sz); return 0; err: ctx->key_sz = 0; ctx->n = NULL; return ret; } int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct qat_rsa_ctx *ctx = context; struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; while (!*ptr && vlen) { ptr++; vlen--; } if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) { ctx->e = NULL; return -EINVAL; } ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); if (!ctx->e) { ctx->e = NULL; return -ENOMEM; } memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen); return 0; } int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct qat_rsa_ctx *ctx = context; struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; int ret; while (!*ptr && vlen) { ptr++; vlen--; } ret = -EINVAL; if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) goto err; /* In FIPS mode only allow key size 2K & 3K */ if (fips_enabled && (vlen != 256 && vlen != 384)) { pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); goto err; } ret = -ENOMEM; ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); if (!ctx->n) goto 
err; memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen); return 0; err: ctx->d = NULL; return ret; } static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct device *dev = &GET_DEV(ctx->inst->accel_dev); int ret; /* Free the old key if any */ if (ctx->n) dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); if (ctx->e) dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); if (ctx->d) { memset(ctx->d, '\0', ctx->key_sz); dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); } ctx->n = NULL; ctx->e = NULL; ctx->d = NULL; ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen); if (ret < 0) goto free; if (!ctx->n || !ctx->e) { /* invalid key provided */ ret = -EINVAL; goto free; } return 0; free: if (ctx->d) { memset(ctx->d, '\0', ctx->key_sz); dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); ctx->d = NULL; } if (ctx->e) { dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); ctx->e = NULL; } if (ctx->n) { dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); ctx->n = NULL; ctx->key_sz = 0; } return ret; } static int qat_rsa_init_tfm(struct crypto_akcipher *tfm) { struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct qat_crypto_instance *inst = qat_crypto_get_instance_node(get_current_node()); if (!inst) return -EINVAL; ctx->key_sz = 0; ctx->inst = inst; return 0; } static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm) { struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct device *dev = &GET_DEV(ctx->inst->accel_dev); if (ctx->n) dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); if (ctx->e) dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); if (ctx->d) { memset(ctx->d, '\0', ctx->key_sz); dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); } qat_crypto_put_instance(ctx->inst); ctx->n = NULL; ctx->d = NULL; ctx->d = NULL; } static struct akcipher_alg rsa = { .encrypt = qat_rsa_enc, .decrypt = qat_rsa_dec, .sign = 
qat_rsa_dec, .verify = qat_rsa_enc, .setkey = qat_rsa_setkey, .init = qat_rsa_init_tfm, .exit = qat_rsa_exit_tfm, .reqsize = sizeof(struct qat_rsa_request) + 64, .base = { .cra_name = "rsa", .cra_driver_name = "qat-rsa", .cra_priority = 1000, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct qat_rsa_ctx), }, }; int qat_asym_algs_register(void) { int ret = 0; mutex_lock(&algs_lock); if (++active_devs == 1) { rsa.base.cra_flags = 0; ret = crypto_register_akcipher(&rsa); } mutex_unlock(&algs_lock); return ret; } void qat_asym_algs_unregister(void) { mutex_lock(&algs_lock); if (--active_devs == 0) crypto_unregister_akcipher(&rsa); mutex_unlock(&algs_lock); }
gpl-2.0
vanloswang/linux
drivers/staging/most/aim-cdev/cdev.c
217
13211
/* * cdev.c - Application interfacing module for character devices * * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * This file is licensed under GPLv2. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/cdev.h> #include <linux/kfifo.h> #include <linux/uaccess.h> #include <linux/idr.h> #include "mostcore.h" static dev_t aim_devno; static struct class *aim_class; static struct ida minor_id; static unsigned int major; struct aim_channel { wait_queue_head_t wq; struct cdev cdev; struct device *dev; struct mutex io_mutex; struct most_interface *iface; struct most_channel_config *cfg; unsigned int channel_id; dev_t devno; bool keep_mbo; unsigned int mbo_offs; struct mbo *stacked_mbo; DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *)); atomic_t access_ref; struct list_head list; }; #define to_channel(d) container_of(d, struct aim_channel, cdev) static struct list_head channel_list; static spinlock_t ch_list_lock; static struct aim_channel *get_channel(struct most_interface *iface, int id) { struct aim_channel *channel, *tmp; unsigned long flags; int found_channel = 0; spin_lock_irqsave(&ch_list_lock, flags); list_for_each_entry_safe(channel, tmp, &channel_list, list) { if ((channel->iface == iface) && (channel->channel_id == id)) { found_channel = 1; break; } } spin_unlock_irqrestore(&ch_list_lock, flags); if (!found_channel) return NULL; return channel; } /** * aim_open - implements the syscall to open the device * @inode: inode pointer * @filp: file pointer * * This stores the channel pointer in the private data field of * the file structure and activates the channel within 
the core. */ static int aim_open(struct inode *inode, struct file *filp) { struct aim_channel *channel; int ret; channel = to_channel(inode->i_cdev); filp->private_data = channel; if (((channel->cfg->direction == MOST_CH_RX) && ((filp->f_flags & O_ACCMODE) != O_RDONLY)) || ((channel->cfg->direction == MOST_CH_TX) && ((filp->f_flags & O_ACCMODE) != O_WRONLY))) { pr_info("WARN: Access flags mismatch\n"); return -EACCES; } if (!atomic_inc_and_test(&channel->access_ref)) { pr_info("WARN: Device is busy\n"); atomic_dec(&channel->access_ref); return -EBUSY; } ret = most_start_channel(channel->iface, channel->channel_id); if (ret) atomic_dec(&channel->access_ref); return ret; } /** * aim_close - implements the syscall to close the device * @inode: inode pointer * @filp: file pointer * * This stops the channel within the core. */ static int aim_close(struct inode *inode, struct file *filp) { int ret; struct mbo *mbo; struct aim_channel *channel = to_channel(inode->i_cdev); mutex_lock(&channel->io_mutex); if (!channel->dev) { mutex_unlock(&channel->io_mutex); atomic_dec(&channel->access_ref); device_destroy(aim_class, channel->devno); cdev_del(&channel->cdev); kfifo_free(&channel->fifo); list_del(&channel->list); ida_simple_remove(&minor_id, MINOR(channel->devno)); wake_up_interruptible(&channel->wq); kfree(channel); return 0; } mutex_unlock(&channel->io_mutex); while (0 != kfifo_out((struct kfifo *)&channel->fifo, &mbo, 1)) most_put_mbo(mbo); if (channel->keep_mbo == true) most_put_mbo(channel->stacked_mbo); ret = most_stop_channel(channel->iface, channel->channel_id); atomic_dec(&channel->access_ref); wake_up_interruptible(&channel->wq); return ret; } /** * aim_write - implements the syscall to write to the device * @filp: file pointer * @buf: pointer to user buffer * @count: number of bytes to write * @offset: offset from where to start writing */ static ssize_t aim_write(struct file *filp, const char __user *buf, size_t count, loff_t *offset) { int ret, err; size_t 
actual_len = 0; size_t max_len = 0; ssize_t retval; struct mbo *mbo; struct aim_channel *channel = filp->private_data; mutex_lock(&channel->io_mutex); if (unlikely(!channel->dev)) { mutex_unlock(&channel->io_mutex); return -EPIPE; } mutex_unlock(&channel->io_mutex); mbo = most_get_mbo(channel->iface, channel->channel_id); if (!mbo && channel->dev) { if ((filp->f_flags & O_NONBLOCK)) return -EAGAIN; if (wait_event_interruptible( channel->wq, (mbo = most_get_mbo(channel->iface, channel->channel_id)) || (channel->dev == NULL))) return -ERESTARTSYS; } mutex_lock(&channel->io_mutex); if (unlikely(!channel->dev)) { mutex_unlock(&channel->io_mutex); err = -EPIPE; goto error; } mutex_unlock(&channel->io_mutex); max_len = channel->cfg->buffer_size; actual_len = min(count, max_len); mbo->buffer_length = actual_len; retval = copy_from_user(mbo->virt_address, buf, mbo->buffer_length); if (retval) { err = -EIO; goto error; } ret = most_submit_mbo(mbo); if (ret) { pr_info("submitting MBO to core failed\n"); err = ret; goto error; } return actual_len - retval; error: if (mbo) most_put_mbo(mbo); return err; } /** * aim_read - implements the syscall to read from the device * @filp: file pointer * @buf: pointer to user buffer * @count: number of bytes to read * @offset: offset from where to start reading */ static ssize_t aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset) { ssize_t retval; size_t not_copied, proc_len; struct mbo *mbo; struct aim_channel *channel = filp->private_data; if (channel->keep_mbo == true) { mbo = channel->stacked_mbo; channel->keep_mbo = false; goto start_copy; } while ((0 == kfifo_out(&channel->fifo, &mbo, 1)) && (channel->dev != NULL)) { if (filp->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(channel->wq, (!kfifo_is_empty(&channel->fifo) || (channel->dev == NULL)))) return -ERESTARTSYS; } start_copy: /* make sure we don't submit to gone devices */ mutex_lock(&channel->io_mutex); if (unlikely(!channel->dev)) { 
mutex_unlock(&channel->io_mutex); return -EIO; } if (count < mbo->processed_length) channel->keep_mbo = true; proc_len = min((int)count, (int)(mbo->processed_length - channel->mbo_offs)); not_copied = copy_to_user(buf, mbo->virt_address + channel->mbo_offs, proc_len); retval = not_copied ? proc_len - not_copied : proc_len; if (channel->keep_mbo == true) { channel->mbo_offs = retval; channel->stacked_mbo = mbo; } else { most_put_mbo(mbo); channel->mbo_offs = 0; } mutex_unlock(&channel->io_mutex); return retval; } /** * Initialization of struct file_operations */ static const struct file_operations channel_fops = { .owner = THIS_MODULE, .read = aim_read, .write = aim_write, .open = aim_open, .release = aim_close, }; /** * aim_disconnect_channel - disconnect a channel * @iface: pointer to interface instance * @channel_id: channel index * * This frees allocated memory and removes the cdev that represents this * channel in user space. */ static int aim_disconnect_channel(struct most_interface *iface, int channel_id) { struct aim_channel *channel; unsigned long flags; if (!iface) { pr_info("Bad interface pointer\n"); return -EINVAL; } channel = get_channel(iface, channel_id); if (channel == NULL) return -ENXIO; mutex_lock(&channel->io_mutex); channel->dev = NULL; mutex_unlock(&channel->io_mutex); if (atomic_read(&channel->access_ref)) { device_destroy(aim_class, channel->devno); cdev_del(&channel->cdev); kfifo_free(&channel->fifo); ida_simple_remove(&minor_id, MINOR(channel->devno)); spin_lock_irqsave(&ch_list_lock, flags); list_del(&channel->list); spin_unlock_irqrestore(&ch_list_lock, flags); kfree(channel); } else { wake_up_interruptible(&channel->wq); } return 0; } /** * aim_rx_completion - completion handler for rx channels * @mbo: pointer to buffer object that has completed * * This searches for the channel linked to this MBO and stores it in the local * fifo buffer. 
*/ static int aim_rx_completion(struct mbo *mbo) { struct aim_channel *channel; if (!mbo) return -EINVAL; channel = get_channel(mbo->ifp, mbo->hdm_channel_id); if (channel == NULL) return -ENXIO; kfifo_in(&channel->fifo, &mbo, 1); #ifdef DEBUG_MESG if (kfifo_is_full(&channel->fifo)) pr_info("WARN: Fifo is full\n"); #endif wake_up_interruptible(&channel->wq); return 0; } /** * aim_tx_completion - completion handler for tx channels * @iface: pointer to interface instance * @channel_id: channel index/ID * * This wakes sleeping processes in the wait-queue. */ static int aim_tx_completion(struct most_interface *iface, int channel_id) { struct aim_channel *channel; if (!iface) { pr_info("Bad interface pointer\n"); return -EINVAL; } if ((channel_id < 0) || (channel_id >= iface->num_channels)) { pr_info("Channel ID out of range\n"); return -EINVAL; } channel = get_channel(iface, channel_id); if (channel == NULL) return -ENXIO; wake_up_interruptible(&channel->wq); return 0; } static struct most_aim cdev_aim; /** * aim_probe - probe function of the driver module * @iface: pointer to interface instance * @channel_id: channel index/ID * @cfg: pointer to actual channel configuration * @parent: pointer to kobject (needed for sysfs hook-up) * @name: name of the device to be created * * This allocates achannel object and creates the device node in /dev * * Returns 0 on success or error code otherwise. 
*/ static int aim_probe(struct most_interface *iface, int channel_id, struct most_channel_config *cfg, struct kobject *parent, char *name) { struct aim_channel *channel; unsigned long cl_flags; int retval; int current_minor; if ((!iface) || (!cfg) || (!parent) || (!name)) { pr_info("Probing AIM with bad arguments"); return -EINVAL; } channel = get_channel(iface, channel_id); if (channel) return -EEXIST; current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL); if (current_minor < 0) return current_minor; channel = kzalloc(sizeof(*channel), GFP_KERNEL); if (!channel) { pr_info("failed to alloc channel object\n"); retval = -ENOMEM; goto error_alloc_channel; } channel->devno = MKDEV(major, current_minor); cdev_init(&channel->cdev, &channel_fops); channel->cdev.owner = THIS_MODULE; cdev_add(&channel->cdev, channel->devno, 1); channel->iface = iface; channel->cfg = cfg; channel->channel_id = channel_id; channel->mbo_offs = 0; atomic_set(&channel->access_ref, -1); INIT_KFIFO(channel->fifo); retval = kfifo_alloc(&channel->fifo, cfg->num_buffers, GFP_KERNEL); if (retval) { pr_info("failed to alloc channel kfifo"); goto error_alloc_kfifo; } init_waitqueue_head(&channel->wq); mutex_init(&channel->io_mutex); spin_lock_irqsave(&ch_list_lock, cl_flags); list_add_tail(&channel->list, &channel_list); spin_unlock_irqrestore(&ch_list_lock, cl_flags); channel->dev = device_create(aim_class, NULL, channel->devno, NULL, "%s", name); retval = IS_ERR(channel->dev); if (retval) { pr_info("failed to create new device node %s\n", name); goto error_create_device; } kobject_uevent(&channel->dev->kobj, KOBJ_ADD); return 0; error_create_device: kfifo_free(&channel->fifo); list_del(&channel->list); error_alloc_kfifo: cdev_del(&channel->cdev); kfree(channel); error_alloc_channel: ida_simple_remove(&minor_id, current_minor); return retval; } static struct most_aim cdev_aim = { .name = "cdev", .probe_channel = aim_probe, .disconnect_channel = aim_disconnect_channel, .rx_completion = 
aim_rx_completion, .tx_completion = aim_tx_completion, }; static int __init mod_init(void) { pr_info("init()\n"); INIT_LIST_HEAD(&channel_list); spin_lock_init(&ch_list_lock); ida_init(&minor_id); if (alloc_chrdev_region(&aim_devno, 0, 50, "cdev") < 0) return -EIO; major = MAJOR(aim_devno); aim_class = class_create(THIS_MODULE, "most_cdev_aim"); if (IS_ERR(aim_class)) { pr_err("no udev support\n"); goto free_cdev; } if (most_register_aim(&cdev_aim)) goto dest_class; return 0; dest_class: class_destroy(aim_class); free_cdev: unregister_chrdev_region(aim_devno, 1); return -EIO; } static void __exit mod_exit(void) { struct aim_channel *channel, *tmp; pr_info("exit module\n"); most_deregister_aim(&cdev_aim); list_for_each_entry_safe(channel, tmp, &channel_list, list) { device_destroy(aim_class, channel->devno); cdev_del(&channel->cdev); kfifo_free(&channel->fifo); list_del(&channel->list); ida_simple_remove(&minor_id, MINOR(channel->devno)); kfree(channel); } class_destroy(aim_class); unregister_chrdev_region(aim_devno, 1); ida_destroy(&minor_id); } module_init(mod_init); module_exit(mod_exit); MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("character device AIM for mostcore");
gpl-2.0
TheGreatSega/Rush-Kernel
fs/afs/flock.c
1497
16179
/* AFS file locking support * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/smp_lock.h> #include "internal.h" #define AFS_LOCK_GRANTED 0 #define AFS_LOCK_PENDING 1 static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl); static void afs_fl_release_private(struct file_lock *fl); static struct workqueue_struct *afs_lock_manager; static DEFINE_MUTEX(afs_lock_manager_mutex); static const struct file_lock_operations afs_lock_ops = { .fl_copy_lock = afs_fl_copy_lock, .fl_release_private = afs_fl_release_private, }; /* * initialise the lock manager thread if it isn't already running */ static int afs_init_lock_manager(void) { int ret; ret = 0; if (!afs_lock_manager) { mutex_lock(&afs_lock_manager_mutex); if (!afs_lock_manager) { afs_lock_manager = create_singlethread_workqueue("kafs_lockd"); if (!afs_lock_manager) ret = -ENOMEM; } mutex_unlock(&afs_lock_manager_mutex); } return ret; } /* * destroy the lock manager thread if it's running */ void __exit afs_kill_lock_manager(void) { if (afs_lock_manager) destroy_workqueue(afs_lock_manager); } /* * if the callback is broken on this vnode, then the lock may now be available */ void afs_lock_may_be_available(struct afs_vnode *vnode) { _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode); queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0); } /* * the lock will time out in 5 minutes unless we extend it, so schedule * extension in a bit less than that time */ static void afs_schedule_lock_extension(struct afs_vnode *vnode) { queue_delayed_work(afs_lock_manager, &vnode->lock_work, AFS_LOCKWAIT * HZ / 2); } /* * grant one or more locks (readlocks are allowed to jump the queue if the * 
first lock in the queue is itself a readlock) * - the caller must hold the vnode lock */ static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl) { struct file_lock *p, *_p; list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); if (fl->fl_type == F_RDLCK) { list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) { if (p->fl_type == F_RDLCK) { p->fl_u.afs.state = AFS_LOCK_GRANTED; list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks); wake_up(&p->fl_wait); } } } } /* * do work for a lock, including: * - probing for a lock we're waiting on but didn't get immediately * - extending a lock that's close to timing out */ void afs_lock_work(struct work_struct *work) { struct afs_vnode *vnode = container_of(work, struct afs_vnode, lock_work.work); struct file_lock *fl; afs_lock_type_t type; struct key *key; int ret; _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode); spin_lock(&vnode->lock); if (test_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) { _debug("unlock"); spin_unlock(&vnode->lock); /* attempt to release the server lock; if it fails, we just * wait 5 minutes and it'll time out anyway */ ret = afs_vnode_release_lock(vnode, vnode->unlock_key); if (ret < 0) printk(KERN_WARNING "AFS:" " Failed to release lock on {%x:%x} error %d\n", vnode->fid.vid, vnode->fid.vnode, ret); spin_lock(&vnode->lock); key_put(vnode->unlock_key); vnode->unlock_key = NULL; clear_bit(AFS_VNODE_UNLOCKING, &vnode->flags); } /* if we've got a lock, then it must be time to extend that lock as AFS * locks time out after 5 minutes */ if (!list_empty(&vnode->granted_locks)) { _debug("extend"); if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags)) BUG(); fl = list_entry(vnode->granted_locks.next, struct file_lock, fl_u.afs.link); key = key_get(fl->fl_file->private_data); spin_unlock(&vnode->lock); ret = afs_vnode_extend_lock(vnode, key); clear_bit(AFS_VNODE_LOCKING, &vnode->flags); key_put(key); switch (ret) { case 0: afs_schedule_lock_extension(vnode); break; 
default: /* ummm... we failed to extend the lock - retry * extension shortly */ printk(KERN_WARNING "AFS:" " Failed to extend lock on {%x:%x} error %d\n", vnode->fid.vid, vnode->fid.vnode, ret); queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 10); break; } _leave(" [extend]"); return; } /* if we don't have a granted lock, then we must've been called back by * the server, and so if might be possible to get a lock we're * currently waiting for */ if (!list_empty(&vnode->pending_locks)) { _debug("get"); if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags)) BUG(); fl = list_entry(vnode->pending_locks.next, struct file_lock, fl_u.afs.link); key = key_get(fl->fl_file->private_data); type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; spin_unlock(&vnode->lock); ret = afs_vnode_set_lock(vnode, key, type); clear_bit(AFS_VNODE_LOCKING, &vnode->flags); switch (ret) { case -EWOULDBLOCK: _debug("blocked"); break; case 0: _debug("acquired"); if (type == AFS_LOCK_READ) set_bit(AFS_VNODE_READLOCKED, &vnode->flags); else set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags); ret = AFS_LOCK_GRANTED; default: spin_lock(&vnode->lock); /* the pending lock may have been withdrawn due to a * signal */ if (list_entry(vnode->pending_locks.next, struct file_lock, fl_u.afs.link) == fl) { fl->fl_u.afs.state = ret; if (ret == AFS_LOCK_GRANTED) afs_grant_locks(vnode, fl); else list_del_init(&fl->fl_u.afs.link); wake_up(&fl->fl_wait); spin_unlock(&vnode->lock); } else { _debug("withdrawn"); clear_bit(AFS_VNODE_READLOCKED, &vnode->flags); clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags); spin_unlock(&vnode->lock); afs_vnode_release_lock(vnode, key); if (!list_empty(&vnode->pending_locks)) afs_lock_may_be_available(vnode); } break; } key_put(key); _leave(" [pend]"); return; } /* looks like the lock request was withdrawn on a signal */ spin_unlock(&vnode->lock); _leave(" [no locks]"); } /* * pass responsibility for the unlocking of a vnode on the server to the * manager thread, 
lest a pending signal in the calling thread interrupt * AF_RXRPC * - the caller must hold the vnode lock */ static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key) { cancel_delayed_work(&vnode->lock_work); if (!test_and_clear_bit(AFS_VNODE_READLOCKED, &vnode->flags) && !test_and_clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags)) BUG(); if (test_and_set_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) BUG(); vnode->unlock_key = key_get(key); afs_lock_may_be_available(vnode); } /* * request a lock on a file on the server */ static int afs_do_setlk(struct file *file, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host); afs_lock_type_t type; struct key *key = file->private_data; int ret; _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type); /* only whole-file locks are supported */ if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX) return -EINVAL; ret = afs_init_lock_manager(); if (ret < 0) return ret; fl->fl_ops = &afs_lock_ops; INIT_LIST_HEAD(&fl->fl_u.afs.link); fl->fl_u.afs.state = AFS_LOCK_PENDING; type = (fl->fl_type == F_RDLCK) ? 
AFS_LOCK_READ : AFS_LOCK_WRITE; lock_kernel(); /* make sure we've got a callback on this file and that our view of the * data version is up to date */ ret = afs_vnode_fetch_status(vnode, NULL, key); if (ret < 0) goto error; if (vnode->status.lock_count != 0 && !(fl->fl_flags & FL_SLEEP)) { ret = -EAGAIN; goto error; } spin_lock(&vnode->lock); /* if we've already got a readlock on the server then we can instantly * grant another readlock, irrespective of whether there are any * pending writelocks */ if (type == AFS_LOCK_READ && vnode->flags & (1 << AFS_VNODE_READLOCKED)) { _debug("instant readlock"); ASSERTCMP(vnode->flags & ((1 << AFS_VNODE_LOCKING) | (1 << AFS_VNODE_WRITELOCKED)), ==, 0); ASSERT(!list_empty(&vnode->granted_locks)); goto sharing_existing_lock; } /* if there's no-one else with a lock on this vnode, then we need to * ask the server for a lock */ if (list_empty(&vnode->pending_locks) && list_empty(&vnode->granted_locks)) { _debug("not locked"); ASSERTCMP(vnode->flags & ((1 << AFS_VNODE_LOCKING) | (1 << AFS_VNODE_READLOCKED) | (1 << AFS_VNODE_WRITELOCKED)), ==, 0); list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks); set_bit(AFS_VNODE_LOCKING, &vnode->flags); spin_unlock(&vnode->lock); ret = afs_vnode_set_lock(vnode, key, type); clear_bit(AFS_VNODE_LOCKING, &vnode->flags); switch (ret) { case 0: _debug("acquired"); goto acquired_server_lock; case -EWOULDBLOCK: _debug("would block"); spin_lock(&vnode->lock); ASSERT(list_empty(&vnode->granted_locks)); ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link); goto wait; default: spin_lock(&vnode->lock); list_del_init(&fl->fl_u.afs.link); spin_unlock(&vnode->lock); goto error; } } /* otherwise, we need to wait for a local lock to become available */ _debug("wait local"); list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks); wait: if (!(fl->fl_flags & FL_SLEEP)) { _debug("noblock"); ret = -EAGAIN; goto abort_attempt; } spin_unlock(&vnode->lock); /* now we need to sleep and wait for the lock 
manager thread to get the * lock from the server */ _debug("sleep"); ret = wait_event_interruptible(fl->fl_wait, fl->fl_u.afs.state <= AFS_LOCK_GRANTED); if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) { ret = fl->fl_u.afs.state; if (ret < 0) goto error; spin_lock(&vnode->lock); goto given_lock; } /* we were interrupted, but someone may still be in the throes of * giving us the lock */ _debug("intr"); ASSERTCMP(ret, ==, -ERESTARTSYS); spin_lock(&vnode->lock); if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) { ret = fl->fl_u.afs.state; if (ret < 0) { spin_unlock(&vnode->lock); goto error; } goto given_lock; } abort_attempt: /* we aren't going to get the lock, either because we're unwilling to * wait, or because some signal happened */ _debug("abort"); if (list_empty(&vnode->granted_locks) && vnode->pending_locks.next == &fl->fl_u.afs.link) { if (vnode->pending_locks.prev != &fl->fl_u.afs.link) { /* kick the next pending lock into having a go */ list_del_init(&fl->fl_u.afs.link); afs_lock_may_be_available(vnode); } } else { list_del_init(&fl->fl_u.afs.link); } spin_unlock(&vnode->lock); goto error; acquired_server_lock: /* we've acquired a server lock, but it needs to be renewed after 5 * mins */ spin_lock(&vnode->lock); afs_schedule_lock_extension(vnode); if (type == AFS_LOCK_READ) set_bit(AFS_VNODE_READLOCKED, &vnode->flags); else set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags); sharing_existing_lock: /* the lock has been granted as far as we're concerned... */ fl->fl_u.afs.state = AFS_LOCK_GRANTED; list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); given_lock: /* ... 
but we do still need to get the VFS's blessing */ ASSERT(!(vnode->flags & (1 << AFS_VNODE_LOCKING))); ASSERT((vnode->flags & ((1 << AFS_VNODE_READLOCKED) | (1 << AFS_VNODE_WRITELOCKED))) != 0); ret = posix_lock_file(file, fl, NULL); if (ret < 0) goto vfs_rejected_lock; spin_unlock(&vnode->lock); /* again, make sure we've got a callback on this file and, again, make * sure that our view of the data version is up to date (we ignore * errors incurred here and deal with the consequences elsewhere) */ afs_vnode_fetch_status(vnode, NULL, key); error: unlock_kernel(); _leave(" = %d", ret); return ret; vfs_rejected_lock: /* the VFS rejected the lock we just obtained, so we have to discard * what we just got */ _debug("vfs refused %d", ret); list_del_init(&fl->fl_u.afs.link); if (list_empty(&vnode->granted_locks)) afs_defer_unlock(vnode, key); goto abort_attempt; } /* * unlock on a file on the server */ static int afs_do_unlk(struct file *file, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host); struct key *key = file->private_data; int ret; _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type); /* only whole-file unlocks are supported */ if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX) return -EINVAL; fl->fl_ops = &afs_lock_ops; INIT_LIST_HEAD(&fl->fl_u.afs.link); fl->fl_u.afs.state = AFS_LOCK_PENDING; spin_lock(&vnode->lock); ret = posix_lock_file(file, fl, NULL); if (ret < 0) { spin_unlock(&vnode->lock); _leave(" = %d [vfs]", ret); return ret; } /* discard the server lock only if all granted locks are gone */ if (list_empty(&vnode->granted_locks)) afs_defer_unlock(vnode, key); spin_unlock(&vnode->lock); _leave(" = 0"); return 0; } /* * return information about a lock we currently hold, if indeed we hold one */ static int afs_do_getlk(struct file *file, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host); struct key *key = file->private_data; int ret, lock_count; _enter(""); fl->fl_type = 
F_UNLCK; mutex_lock(&vnode->vfs_inode.i_mutex); /* check local lock records first */ ret = 0; posix_test_lock(file, fl); if (fl->fl_type == F_UNLCK) { /* no local locks; consult the server */ ret = afs_vnode_fetch_status(vnode, NULL, key); if (ret < 0) goto error; lock_count = vnode->status.lock_count; if (lock_count) { if (lock_count > 0) fl->fl_type = F_RDLCK; else fl->fl_type = F_WRLCK; fl->fl_start = 0; fl->fl_end = OFFSET_MAX; } } error: mutex_unlock(&vnode->vfs_inode.i_mutex); _leave(" = %d [%hd]", ret, fl->fl_type); return ret; } /* * manage POSIX locks on a file */ int afs_lock(struct file *file, int cmd, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode); _enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}", vnode->fid.vid, vnode->fid.vnode, cmd, fl->fl_type, fl->fl_flags, (long long) fl->fl_start, (long long) fl->fl_end); /* AFS doesn't support mandatory locks */ if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK) return -ENOLCK; if (IS_GETLK(cmd)) return afs_do_getlk(file, fl); if (fl->fl_type == F_UNLCK) return afs_do_unlk(file, fl); return afs_do_setlk(file, fl); } /* * manage FLOCK locks on a file */ int afs_flock(struct file *file, int cmd, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode); _enter("{%x:%u},%d,{t=%x,fl=%x}", vnode->fid.vid, vnode->fid.vnode, cmd, fl->fl_type, fl->fl_flags); /* * No BSD flocks over NFS allowed. * Note: we could try to fake a POSIX lock request here by * using ((u32) filp | 0x80000000) or some such as the pid. * Not sure whether that would be unique, though, or whether * that would break in other places. 
*/ if (!(fl->fl_flags & FL_FLOCK)) return -ENOLCK; /* we're simulating flock() locks using posix locks on the server */ fl->fl_owner = (fl_owner_t) file; fl->fl_start = 0; fl->fl_end = OFFSET_MAX; if (fl->fl_type == F_UNLCK) return afs_do_unlk(file, fl); return afs_do_setlk(file, fl); } /* * the POSIX lock management core VFS code copies the lock record and adds the * copy into its own list, so we need to add that copy to the vnode's lock * queue in the same place as the original (which will be deleted shortly * after) */ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl) { _enter(""); list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link); } /* * need to remove this lock from the vnode queue when it's removed from the * VFS's list */ static void afs_fl_release_private(struct file_lock *fl) { _enter(""); list_del_init(&fl->fl_u.afs.link); }
gpl-2.0
iConsole/Console-OS_kernel_common
arch/arm/mach-mv78xx0/common.c
1497
10942
/* * arch/arm/mach-mv78xx0/common.c * * Core functions for Marvell MV78xx0 SoCs * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/serial_8250.h> #include <linux/ata_platform.h> #include <linux/clk-provider.h> #include <linux/ethtool.h> #include <asm/hardware/cache-feroceon-l2.h> #include <asm/mach/map.h> #include <asm/mach/time.h> #include <mach/mv78xx0.h> #include <mach/bridge-regs.h> #include <linux/platform_data/usb-ehci-orion.h> #include <linux/platform_data/mtd-orion_nand.h> #include <plat/time.h> #include <plat/common.h> #include <plat/addr-map.h> #include "common.h" static int get_tclk(void); /***************************************************************************** * Common bits ****************************************************************************/ int mv78xx0_core_index(void) { u32 extra; /* * Read Extra Features register. */ __asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (extra)); return !!(extra & 0x00004000); } static int get_hclk(void) { int hclk; /* * HCLK tick rate is configured by DEV_D[7:5] pins. */ switch ((readl(SAMPLE_AT_RESET_LOW) >> 5) & 7) { case 0: hclk = 166666667; break; case 1: hclk = 200000000; break; case 2: hclk = 266666667; break; case 3: hclk = 333333333; break; case 4: hclk = 400000000; break; default: panic("unknown HCLK PLL setting: %.8x\n", readl(SAMPLE_AT_RESET_LOW)); } return hclk; } static void get_pclk_l2clk(int hclk, int core_index, int *pclk, int *l2clk) { u32 cfg; /* * Core #0 PCLK/L2CLK is configured by bits [13:8], core #1 * PCLK/L2CLK by bits [19:14]. 
*/ if (core_index == 0) { cfg = (readl(SAMPLE_AT_RESET_LOW) >> 8) & 0x3f; } else { cfg = (readl(SAMPLE_AT_RESET_LOW) >> 14) & 0x3f; } /* * Bits [11:8] ([17:14] for core #1) configure the PCLK:HCLK * ratio (1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6). */ *pclk = ((u64)hclk * (2 + (cfg & 0xf))) >> 1; /* * Bits [13:12] ([19:18] for core #1) configure the PCLK:L2CLK * ratio (1, 2, 3). */ *l2clk = *pclk / (((cfg >> 4) & 3) + 1); } static int get_tclk(void) { int tclk_freq; /* * TCLK tick rate is configured by DEV_A[2:0] strap pins. */ switch ((readl(SAMPLE_AT_RESET_HIGH) >> 6) & 7) { case 1: tclk_freq = 166666667; break; case 3: tclk_freq = 200000000; break; default: panic("unknown TCLK PLL setting: %.8x\n", readl(SAMPLE_AT_RESET_HIGH)); } return tclk_freq; } /***************************************************************************** * I/O Address Mapping ****************************************************************************/ static struct map_desc mv78xx0_io_desc[] __initdata = { { .virtual = (unsigned long) MV78XX0_CORE_REGS_VIRT_BASE, .pfn = 0, .length = MV78XX0_CORE_REGS_SIZE, .type = MT_DEVICE, }, { .virtual = (unsigned long) MV78XX0_REGS_VIRT_BASE, .pfn = __phys_to_pfn(MV78XX0_REGS_PHYS_BASE), .length = MV78XX0_REGS_SIZE, .type = MT_DEVICE, }, }; void __init mv78xx0_map_io(void) { unsigned long phys; /* * Map the right set of per-core registers depending on * which core we are running on. 
*/ if (mv78xx0_core_index() == 0) { phys = MV78XX0_CORE0_REGS_PHYS_BASE; } else { phys = MV78XX0_CORE1_REGS_PHYS_BASE; } mv78xx0_io_desc[0].pfn = __phys_to_pfn(phys); iotable_init(mv78xx0_io_desc, ARRAY_SIZE(mv78xx0_io_desc)); } /***************************************************************************** * CLK tree ****************************************************************************/ static struct clk *tclk; static void __init clk_init(void) { tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT, get_tclk()); orion_clkdev_init(tclk); } /***************************************************************************** * EHCI ****************************************************************************/ void __init mv78xx0_ehci0_init(void) { orion_ehci_init(USB0_PHYS_BASE, IRQ_MV78XX0_USB_0, EHCI_PHY_NA); } /***************************************************************************** * EHCI1 ****************************************************************************/ void __init mv78xx0_ehci1_init(void) { orion_ehci_1_init(USB1_PHYS_BASE, IRQ_MV78XX0_USB_1); } /***************************************************************************** * EHCI2 ****************************************************************************/ void __init mv78xx0_ehci2_init(void) { orion_ehci_2_init(USB2_PHYS_BASE, IRQ_MV78XX0_USB_2); } /***************************************************************************** * GE00 ****************************************************************************/ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data) { orion_ge00_init(eth_data, GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM, IRQ_MV78XX0_GE_ERR, MV643XX_TX_CSUM_DEFAULT_LIMIT); } /***************************************************************************** * GE01 ****************************************************************************/ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data) { orion_ge01_init(eth_data, GE01_PHYS_BASE, 
IRQ_MV78XX0_GE01_SUM, NO_IRQ, MV643XX_TX_CSUM_DEFAULT_LIMIT); } /***************************************************************************** * GE10 ****************************************************************************/ void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data) { u32 dev, rev; /* * On the Z0, ge10 and ge11 are internally connected back * to back, and not brought out. */ mv78xx0_pcie_id(&dev, &rev); if (dev == MV78X00_Z0_DEV_ID) { eth_data->phy_addr = MV643XX_ETH_PHY_NONE; eth_data->speed = SPEED_1000; eth_data->duplex = DUPLEX_FULL; } orion_ge10_init(eth_data, GE10_PHYS_BASE, IRQ_MV78XX0_GE10_SUM, NO_IRQ); } /***************************************************************************** * GE11 ****************************************************************************/ void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data) { u32 dev, rev; /* * On the Z0, ge10 and ge11 are internally connected back * to back, and not brought out. 
*/ mv78xx0_pcie_id(&dev, &rev); if (dev == MV78X00_Z0_DEV_ID) { eth_data->phy_addr = MV643XX_ETH_PHY_NONE; eth_data->speed = SPEED_1000; eth_data->duplex = DUPLEX_FULL; } orion_ge11_init(eth_data, GE11_PHYS_BASE, IRQ_MV78XX0_GE11_SUM, NO_IRQ); } /***************************************************************************** * I2C ****************************************************************************/ void __init mv78xx0_i2c_init(void) { orion_i2c_init(I2C_0_PHYS_BASE, IRQ_MV78XX0_I2C_0, 8); orion_i2c_1_init(I2C_1_PHYS_BASE, IRQ_MV78XX0_I2C_1, 8); } /***************************************************************************** * SATA ****************************************************************************/ void __init mv78xx0_sata_init(struct mv_sata_platform_data *sata_data) { orion_sata_init(sata_data, SATA_PHYS_BASE, IRQ_MV78XX0_SATA); } /***************************************************************************** * UART0 ****************************************************************************/ void __init mv78xx0_uart0_init(void) { orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE, IRQ_MV78XX0_UART_0, tclk); } /***************************************************************************** * UART1 ****************************************************************************/ void __init mv78xx0_uart1_init(void) { orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE, IRQ_MV78XX0_UART_1, tclk); } /***************************************************************************** * UART2 ****************************************************************************/ void __init mv78xx0_uart2_init(void) { orion_uart2_init(UART2_VIRT_BASE, UART2_PHYS_BASE, IRQ_MV78XX0_UART_2, tclk); } /***************************************************************************** * UART3 ****************************************************************************/ void __init mv78xx0_uart3_init(void) { orion_uart3_init(UART3_VIRT_BASE, UART3_PHYS_BASE, IRQ_MV78XX0_UART_3, tclk); } 
/***************************************************************************** * Time handling ****************************************************************************/ void __init mv78xx0_init_early(void) { orion_time_set_base(TIMER_VIRT_BASE); if (mv78xx0_core_index() == 0) mvebu_mbus_init("marvell,mv78xx0-mbus", BRIDGE_WINS_CPU0_BASE, BRIDGE_WINS_SZ, DDR_WINDOW_CPU0_BASE, DDR_WINDOW_CPU_SZ); else mvebu_mbus_init("marvell,mv78xx0-mbus", BRIDGE_WINS_CPU1_BASE, BRIDGE_WINS_SZ, DDR_WINDOW_CPU1_BASE, DDR_WINDOW_CPU_SZ); } void __init_refok mv78xx0_timer_init(void) { orion_time_init(BRIDGE_VIRT_BASE, BRIDGE_INT_TIMER1_CLR, IRQ_MV78XX0_TIMER_1, get_tclk()); } /***************************************************************************** * General ****************************************************************************/ static char * __init mv78xx0_id(void) { u32 dev, rev; mv78xx0_pcie_id(&dev, &rev); if (dev == MV78X00_Z0_DEV_ID) { if (rev == MV78X00_REV_Z0) return "MV78X00-Z0"; else return "MV78X00-Rev-Unsupported"; } else if (dev == MV78100_DEV_ID) { if (rev == MV78100_REV_A0) return "MV78100-A0"; else if (rev == MV78100_REV_A1) return "MV78100-A1"; else return "MV78100-Rev-Unsupported"; } else if (dev == MV78200_DEV_ID) { if (rev == MV78100_REV_A0) return "MV78200-A0"; else return "MV78200-Rev-Unsupported"; } else { return "Device-Unknown"; } } static int __init is_l2_writethrough(void) { return !!(readl(CPU_CONTROL) & L2_WRITETHROUGH); } void __init mv78xx0_init(void) { int core_index; int hclk; int pclk; int l2clk; core_index = mv78xx0_core_index(); hclk = get_hclk(); get_pclk_l2clk(hclk, core_index, &pclk, &l2clk); printk(KERN_INFO "%s ", mv78xx0_id()); printk("core #%d, ", core_index); printk("PCLK = %dMHz, ", (pclk + 499999) / 1000000); printk("L2 = %dMHz, ", (l2clk + 499999) / 1000000); printk("HCLK = %dMHz, ", (hclk + 499999) / 1000000); printk("TCLK = %dMHz\n", (get_tclk() + 499999) / 1000000); #ifdef CONFIG_CACHE_FEROCEON_L2 
feroceon_l2_init(is_l2_writethrough()); #endif /* Setup root of clk tree */ clk_init(); } void mv78xx0_restart(enum reboot_mode mode, const char *cmd) { /* * Enable soft reset to assert RSTOUTn. */ writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK); /* * Assert soft reset. */ writel(SOFT_RESET, SYSTEM_SOFT_RESET); while (1) ; }
gpl-2.0
boulzordev/android_kernel_motorola_msm8916
arch/s390/mm/pageattr.c
2009
2938
/* * Copyright IBM Corp. 2011 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> */ #include <linux/hugetlb.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/cacheflush.h> #include <asm/pgtable.h> #include <asm/page.h> static inline unsigned long sske_frame(unsigned long addr, unsigned char skey) { asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0" : [addr] "+a" (addr) : [skey] "d" (skey)); return addr; } void storage_key_init_range(unsigned long start, unsigned long end) { unsigned long boundary, size; while (start < end) { if (MACHINE_HAS_EDAT1) { /* set storage keys for a 1MB frame */ size = 1UL << 20; boundary = (start + size) & ~(size - 1); if (boundary <= end) { do { start = sske_frame(start, PAGE_DEFAULT_KEY); } while (start < boundary); continue; } } page_set_storage_key(start, PAGE_DEFAULT_KEY, 0); start += PAGE_SIZE; } } static pte_t *walk_page_table(unsigned long addr) { pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; pgdp = pgd_offset_k(addr); if (pgd_none(*pgdp)) return NULL; pudp = pud_offset(pgdp, addr); if (pud_none(*pudp) || pud_large(*pudp)) return NULL; pmdp = pmd_offset(pudp, addr); if (pmd_none(*pmdp) || pmd_large(*pmdp)) return NULL; ptep = pte_offset_kernel(pmdp, addr); if (pte_none(*ptep)) return NULL; return ptep; } static void change_page_attr(unsigned long addr, int numpages, pte_t (*set) (pte_t)) { pte_t *ptep, pte; int i; for (i = 0; i < numpages; i++) { ptep = walk_page_table(addr); if (WARN_ON_ONCE(!ptep)) break; pte = *ptep; pte = set(pte); __ptep_ipte(addr, ptep); *ptep = pte; addr += PAGE_SIZE; } } int set_memory_ro(unsigned long addr, int numpages) { change_page_attr(addr, numpages, pte_wrprotect); return 0; } int set_memory_rw(unsigned long addr, int numpages) { change_page_attr(addr, numpages, pte_mkwrite); return 0; } /* not possible */ int set_memory_nx(unsigned long addr, int numpages) { return 0; } int set_memory_x(unsigned long addr, int numpages) { return 0; } #ifdef CONFIG_DEBUG_PAGEALLOC void 
kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long address; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; int i; for (i = 0; i < numpages; i++) { address = page_to_phys(page + i); pgd = pgd_offset_k(address); pud = pud_offset(pgd, address); pmd = pmd_offset(pud, address); pte = pte_offset_kernel(pmd, address); if (!enable) { __ptep_ipte(address, pte); pte_val(*pte) = _PAGE_TYPE_EMPTY; continue; } pte_val(*pte) = __pa(address); } } #ifdef CONFIG_HIBERNATION bool kernel_page_present(struct page *page) { unsigned long addr; int cc; addr = page_to_phys(page); asm volatile( " lra %1,0(%1)\n" " ipm %0\n" " srl %0,28" : "=d" (cc), "+a" (addr) : : "cc"); return cc == 0; } #endif /* CONFIG_HIBERNATION */ #endif /* CONFIG_DEBUG_PAGEALLOC */
gpl-2.0
matthew-l-weber/linux-3-10-rc1-moxart
drivers/ata/pata_hpt37x.c
2777
26122
/* * Libata driver for the highpoint 37x and 30x UDMA66 ATA controllers. * * This driver is heavily based upon: * * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003 * * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> * Portions Copyright (C) 2001 Sun Microsystems, Inc. * Portions Copyright (C) 2003 Red Hat Inc * Portions Copyright (C) 2005-2010 MontaVista Software, Inc. * * TODO * Look into engine reset on timeout errors. Should not be required. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_hpt37x" #define DRV_VERSION "0.6.23" struct hpt_clock { u8 xfer_speed; u32 timing; }; struct hpt_chip { const char *name; unsigned int base; struct hpt_clock const *clocks[4]; }; /* key for bus clock timings * bit * 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA. * cycles = value + 1 * 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA. * cycles = value + 1 * 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file * register access. * 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file * register access. * 18:20 udma_cycle_time. Clock cycles for UDMA xfer. * 21 CLK frequency for UDMA: 0=ATA clock, 1=dual ATA clock. * 22:24 pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer. * 25:27 cmd_pre_high_time. Time to initialize 1st PIO cycle for task file * register access. * 28 UDMA enable. * 29 DMA enable. * 30 PIO_MST enable. If set, the chip is in bus master mode during * PIO xfer. * 31 FIFO enable. Only for PIO. */ static struct hpt_clock hpt37x_timings_33[] = { { XFER_UDMA_6, 0x12446231 }, /* 0x12646231 ?? 
*/ { XFER_UDMA_5, 0x12446231 }, { XFER_UDMA_4, 0x12446231 }, { XFER_UDMA_3, 0x126c6231 }, { XFER_UDMA_2, 0x12486231 }, { XFER_UDMA_1, 0x124c6233 }, { XFER_UDMA_0, 0x12506297 }, { XFER_MW_DMA_2, 0x22406c31 }, { XFER_MW_DMA_1, 0x22406c33 }, { XFER_MW_DMA_0, 0x22406c97 }, { XFER_PIO_4, 0x06414e31 }, { XFER_PIO_3, 0x06414e42 }, { XFER_PIO_2, 0x06414e53 }, { XFER_PIO_1, 0x06814e93 }, { XFER_PIO_0, 0x06814ea7 } }; static struct hpt_clock hpt37x_timings_50[] = { { XFER_UDMA_6, 0x12848242 }, { XFER_UDMA_5, 0x12848242 }, { XFER_UDMA_4, 0x12ac8242 }, { XFER_UDMA_3, 0x128c8242 }, { XFER_UDMA_2, 0x120c8242 }, { XFER_UDMA_1, 0x12148254 }, { XFER_UDMA_0, 0x121882ea }, { XFER_MW_DMA_2, 0x22808242 }, { XFER_MW_DMA_1, 0x22808254 }, { XFER_MW_DMA_0, 0x228082ea }, { XFER_PIO_4, 0x0a81f442 }, { XFER_PIO_3, 0x0a81f443 }, { XFER_PIO_2, 0x0a81f454 }, { XFER_PIO_1, 0x0ac1f465 }, { XFER_PIO_0, 0x0ac1f48a } }; static struct hpt_clock hpt37x_timings_66[] = { { XFER_UDMA_6, 0x1c869c62 }, { XFER_UDMA_5, 0x1cae9c62 }, /* 0x1c8a9c62 */ { XFER_UDMA_4, 0x1c8a9c62 }, { XFER_UDMA_3, 0x1c8e9c62 }, { XFER_UDMA_2, 0x1c929c62 }, { XFER_UDMA_1, 0x1c9a9c62 }, { XFER_UDMA_0, 0x1c829c62 }, { XFER_MW_DMA_2, 0x2c829c62 }, { XFER_MW_DMA_1, 0x2c829c66 }, { XFER_MW_DMA_0, 0x2c829d2e }, { XFER_PIO_4, 0x0c829c62 }, { XFER_PIO_3, 0x0c829c84 }, { XFER_PIO_2, 0x0c829ca6 }, { XFER_PIO_1, 0x0d029d26 }, { XFER_PIO_0, 0x0d029d5e } }; static const struct hpt_chip hpt370 = { "HPT370", 48, { hpt37x_timings_33, NULL, NULL, NULL } }; static const struct hpt_chip hpt370a = { "HPT370A", 48, { hpt37x_timings_33, NULL, hpt37x_timings_50, NULL } }; static const struct hpt_chip hpt372 = { "HPT372", 55, { hpt37x_timings_33, NULL, hpt37x_timings_50, hpt37x_timings_66 } }; static const struct hpt_chip hpt302 = { "HPT302", 66, { hpt37x_timings_33, NULL, hpt37x_timings_50, hpt37x_timings_66 } }; static const struct hpt_chip hpt371 = { "HPT371", 66, { hpt37x_timings_33, NULL, hpt37x_timings_50, hpt37x_timings_66 } }; static const struct 
hpt_chip hpt372a = { "HPT372A", 66, { hpt37x_timings_33, NULL, hpt37x_timings_50, hpt37x_timings_66 } }; static const struct hpt_chip hpt374 = { "HPT374", 48, { hpt37x_timings_33, NULL, NULL, NULL } }; /** * hpt37x_find_mode - reset the hpt37x bus * @ap: ATA port * @speed: transfer mode * * Return the 32bit register programming information for this channel * that matches the speed provided. */ static u32 hpt37x_find_mode(struct ata_port *ap, int speed) { struct hpt_clock *clocks = ap->host->private_data; while (clocks->xfer_speed) { if (clocks->xfer_speed == speed) return clocks->timing; clocks++; } BUG(); return 0xffffffffU; /* silence compiler warning */ } static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char * const list[]) { unsigned char model_num[ATA_ID_PROD_LEN + 1]; int i = 0; ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); while (list[i] != NULL) { if (!strcmp(list[i], model_num)) { pr_warn("%s is not supported for %s\n", modestr, list[i]); return 1; } i++; } return 0; } static const char * const bad_ata33[] = { "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2", "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2", "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4", "Maxtor 90510D4", "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2", "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4", "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2", NULL }; static const char * const bad_ata100_5[] = { "IBM-DTLA-307075", "IBM-DTLA-307060", "IBM-DTLA-307045", "IBM-DTLA-307030", "IBM-DTLA-307020", "IBM-DTLA-307015", "IBM-DTLA-305040", "IBM-DTLA-305030", 
"IBM-DTLA-305020", "IC35L010AVER07-0", "IC35L020AVER07-0", "IC35L030AVER07-0", "IC35L040AVER07-0", "IC35L060AVER07-0", "WDC AC310200R", NULL }; /** * hpt370_filter - mode selection filter * @adev: ATA device * * Block UDMA on devices that cause trouble with this controller. */ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask) { if (adev->class == ATA_DEV_ATA) { if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) mask &= ~ATA_MASK_UDMA; if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) mask &= ~(0xE0 << ATA_SHIFT_UDMA); } return mask; } /** * hpt370a_filter - mode selection filter * @adev: ATA device * * Block UDMA on devices that cause trouble with this controller. */ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask) { if (adev->class == ATA_DEV_ATA) { if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) mask &= ~(0xE0 << ATA_SHIFT_UDMA); } return mask; } /** * hpt372_filter - mode selection filter * @adev: ATA device * @mask: mode mask * * The Marvell bridge chips used on the HighPoint SATA cards do not seem * to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes... 
*/ static unsigned long hpt372_filter(struct ata_device *adev, unsigned long mask) { if (ata_id_is_sata(adev->id)) mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA); return mask; } /** * hpt37x_cable_detect - Detect the cable type * @ap: ATA port to detect on * * Return the cable type attached to this port */ static int hpt37x_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 scr2, ata66; pci_read_config_byte(pdev, 0x5B, &scr2); pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01); udelay(10); /* debounce */ /* Cable register now active */ pci_read_config_byte(pdev, 0x5A, &ata66); /* Restore state */ pci_write_config_byte(pdev, 0x5B, scr2); if (ata66 & (2 >> ap->port_no)) return ATA_CBL_PATA40; else return ATA_CBL_PATA80; } /** * hpt374_fn1_cable_detect - Detect the cable type * @ap: ATA port to detect on * * Return the cable type attached to this port */ static int hpt374_fn1_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); unsigned int mcrbase = 0x50 + 4 * ap->port_no; u16 mcr3; u8 ata66; /* Do the extra channel work */ pci_read_config_word(pdev, mcrbase + 2, &mcr3); /* Set bit 15 of 0x52 to enable TCBLID as input */ pci_write_config_word(pdev, mcrbase + 2, mcr3 | 0x8000); pci_read_config_byte(pdev, 0x5A, &ata66); /* Reset TCBLID/FCBLID to output */ pci_write_config_word(pdev, mcrbase + 2, mcr3); if (ata66 & (2 >> ap->port_no)) return ATA_CBL_PATA40; else return ATA_CBL_PATA80; } /** * hpt37x_pre_reset - reset the hpt37x bus * @link: ATA link to reset * @deadline: deadline jiffies for the operation * * Perform the initial reset handling for the HPT37x. 
*/ static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline) { struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits hpt37x_enable_bits[] = { { 0x50, 1, 0x04, 0x04 }, { 0x54, 1, 0x04, 0x04 } }; if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no])) return -ENOENT; /* Reset the state machine */ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); udelay(100); return ata_sff_prereset(link, deadline); } static void hpt370_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 addr1, addr2; u32 reg, timing, mask; u8 fast; addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); addr2 = 0x51 + 4 * ap->port_no; /* Fast interrupt prediction disable, hold off interrupt disable */ pci_read_config_byte(pdev, addr2, &fast); fast &= ~0x02; fast |= 0x01; pci_write_config_byte(pdev, addr2, fast); /* Determine timing mask and find matching mode entry */ if (mode < XFER_MW_DMA_0) mask = 0xcfc3ffff; else if (mode < XFER_UDMA_0) mask = 0x31c001ff; else mask = 0x303c0000; timing = hpt37x_find_mode(ap, mode); pci_read_config_dword(pdev, addr1, &reg); reg = (reg & ~mask) | (timing & mask); pci_write_config_dword(pdev, addr1, reg); } /** * hpt370_set_piomode - PIO setup * @ap: ATA interface * @adev: device on the interface * * Perform PIO mode setup. */ static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev) { hpt370_set_mode(ap, adev, adev->pio_mode); } /** * hpt370_set_dmamode - DMA timing setup * @ap: ATA interface * @adev: Device being configured * * Set up the channel for MWDMA or UDMA modes. */ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev) { hpt370_set_mode(ap, adev, adev->dma_mode); } /** * hpt370_bmdma_end - DMA engine stop * @qc: ATA command * * Work around the HPT370 DMA engine. 
*/ static void hpt370_bmdma_stop(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); void __iomem *bmdma = ap->ioaddr.bmdma_addr; u8 dma_stat = ioread8(bmdma + ATA_DMA_STATUS); u8 dma_cmd; if (dma_stat & ATA_DMA_ACTIVE) { udelay(20); dma_stat = ioread8(bmdma + ATA_DMA_STATUS); } if (dma_stat & ATA_DMA_ACTIVE) { /* Clear the engine */ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); udelay(10); /* Stop DMA */ dma_cmd = ioread8(bmdma + ATA_DMA_CMD); iowrite8(dma_cmd & ~ATA_DMA_START, bmdma + ATA_DMA_CMD); /* Clear Error */ dma_stat = ioread8(bmdma + ATA_DMA_STATUS); iowrite8(dma_stat | ATA_DMA_INTR | ATA_DMA_ERR, bmdma + ATA_DMA_STATUS); /* Clear the engine */ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); udelay(10); } ata_bmdma_stop(qc); } static void hpt372_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 addr1, addr2; u32 reg, timing, mask; u8 fast; addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); addr2 = 0x51 + 4 * ap->port_no; /* Fast interrupt prediction disable, hold off interrupt disable */ pci_read_config_byte(pdev, addr2, &fast); fast &= ~0x07; pci_write_config_byte(pdev, addr2, fast); /* Determine timing mask and find matching mode entry */ if (mode < XFER_MW_DMA_0) mask = 0xcfc3ffff; else if (mode < XFER_UDMA_0) mask = 0x31c001ff; else mask = 0x303c0000; timing = hpt37x_find_mode(ap, mode); pci_read_config_dword(pdev, addr1, &reg); reg = (reg & ~mask) | (timing & mask); pci_write_config_dword(pdev, addr1, reg); } /** * hpt372_set_piomode - PIO setup * @ap: ATA interface * @adev: device on the interface * * Perform PIO mode setup. */ static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev) { hpt372_set_mode(ap, adev, adev->pio_mode); } /** * hpt372_set_dmamode - DMA timing setup * @ap: ATA interface * @adev: Device being configured * * Set up the channel for MWDMA or UDMA modes. 
*/ static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev) { hpt372_set_mode(ap, adev, adev->dma_mode); } /** * hpt37x_bmdma_end - DMA engine stop * @qc: ATA command * * Clean up after the HPT372 and later DMA engine */ static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); int mscreg = 0x50 + 4 * ap->port_no; u8 bwsr_stat, msc_stat; pci_read_config_byte(pdev, 0x6A, &bwsr_stat); pci_read_config_byte(pdev, mscreg, &msc_stat); if (bwsr_stat & (1 << ap->port_no)) pci_write_config_byte(pdev, mscreg, msc_stat | 0x30); ata_bmdma_stop(qc); } static struct scsi_host_template hpt37x_sht = { ATA_BMDMA_SHT(DRV_NAME), }; /* * Configuration for HPT370 */ static struct ata_port_operations hpt370_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_stop = hpt370_bmdma_stop, .mode_filter = hpt370_filter, .cable_detect = hpt37x_cable_detect, .set_piomode = hpt370_set_piomode, .set_dmamode = hpt370_set_dmamode, .prereset = hpt37x_pre_reset, }; /* * Configuration for HPT370A. Close to 370 but less filters */ static struct ata_port_operations hpt370a_port_ops = { .inherits = &hpt370_port_ops, .mode_filter = hpt370a_filter, }; /* * Configuration for HPT371 and HPT302. Slightly different PIO and DMA * mode setting functionality. */ static struct ata_port_operations hpt302_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_stop = hpt37x_bmdma_stop, .cable_detect = hpt37x_cable_detect, .set_piomode = hpt372_set_piomode, .set_dmamode = hpt372_set_dmamode, .prereset = hpt37x_pre_reset, }; /* * Configuration for HPT372. Mode setting works like 371 and 302 * but we have a mode filter. */ static struct ata_port_operations hpt372_port_ops = { .inherits = &hpt302_port_ops, .mode_filter = hpt372_filter, }; /* * Configuration for HPT374. Mode setting and filtering works like 372 * but we have a different cable detection procedure for function 1. 
*/ static struct ata_port_operations hpt374_fn1_port_ops = { .inherits = &hpt372_port_ops, .cable_detect = hpt374_fn1_cable_detect, }; /** * hpt37x_clock_slot - Turn timing to PC clock entry * @freq: Reported frequency timing * @base: Base timing * * Turn the timing data intoa clock slot (0 for 33, 1 for 40, 2 for 50 * and 3 for 66Mhz) */ static int hpt37x_clock_slot(unsigned int freq, unsigned int base) { unsigned int f = (base * freq) / 192; /* Mhz */ if (f < 40) return 0; /* 33Mhz slot */ if (f < 45) return 1; /* 40Mhz slot */ if (f < 55) return 2; /* 50Mhz slot */ return 3; /* 60Mhz slot */ } /** * hpt37x_calibrate_dpll - Calibrate the DPLL loop * @dev: PCI device * * Perform a calibration cycle on the HPT37x DPLL. Returns 1 if this * succeeds */ static int hpt37x_calibrate_dpll(struct pci_dev *dev) { u8 reg5b; u32 reg5c; int tries; for (tries = 0; tries < 0x5000; tries++) { udelay(50); pci_read_config_byte(dev, 0x5b, &reg5b); if (reg5b & 0x80) { /* See if it stays set */ for (tries = 0; tries < 0x1000; tries++) { pci_read_config_byte(dev, 0x5b, &reg5b); /* Failed ? */ if ((reg5b & 0x80) == 0) return 0; } /* Turn off tuning, we have the DPLL set */ pci_read_config_dword(dev, 0x5c, &reg5c); pci_write_config_dword(dev, 0x5c, reg5c & ~0x100); return 1; } } /* Never went stable */ return 0; } static u32 hpt374_read_freq(struct pci_dev *pdev) { u32 freq; unsigned long io_base = pci_resource_start(pdev, 4); if (PCI_FUNC(pdev->devfn) & 1) { struct pci_dev *pdev_0; pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1); /* Someone hot plugged the controller on us ? */ if (pdev_0 == NULL) return 0; io_base = pci_resource_start(pdev_0, 4); freq = inl(io_base + 0x90); pci_dev_put(pdev_0); } else freq = inl(io_base + 0x90); return freq; } /** * hpt37x_init_one - Initialise an HPT37X/302 * @dev: PCI device * @id: Entry in match table * * Initialise an HPT37x device. There are some interesting complications * here. Firstly the chip may report 366 and be one of several variants. 
* Secondly all the timings depend on the clock for the chip which we must * detect and look up * * This is the known chip mappings. It may be missing a couple of later * releases. * * Chip version PCI Rev Notes * HPT366 4 (HPT366) 0 Other driver * HPT366 4 (HPT366) 1 Other driver * HPT368 4 (HPT366) 2 Other driver * HPT370 4 (HPT366) 3 UDMA100 * HPT370A 4 (HPT366) 4 UDMA100 * HPT372 4 (HPT366) 5 UDMA133 (1) * HPT372N 4 (HPT366) 6 Other driver * HPT372A 5 (HPT372) 1 UDMA133 (1) * HPT372N 5 (HPT372) 2 Other driver * HPT302 6 (HPT302) 1 UDMA133 * HPT302N 6 (HPT302) 2 Other driver * HPT371 7 (HPT371) * UDMA133 * HPT374 8 (HPT374) * UDMA133 4 channel * HPT372N 9 (HPT372N) * Other driver * * (1) UDMA133 support depends on the bus clock */ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) { /* HPT370 - UDMA100 */ static const struct ata_port_info info_hpt370 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &hpt370_port_ops }; /* HPT370A - UDMA100 */ static const struct ata_port_info info_hpt370a = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &hpt370a_port_ops }; /* HPT370 - UDMA66 */ static const struct ata_port_info info_hpt370_33 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, .port_ops = &hpt370_port_ops }; /* HPT370A - UDMA66 */ static const struct ata_port_info info_hpt370a_33 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, .port_ops = &hpt370a_port_ops }; /* HPT372 - UDMA133 */ static const struct ata_port_info info_hpt372 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &hpt372_port_ops }; /* HPT371, 302 - UDMA133 */ static const struct ata_port_info info_hpt302 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, 
.mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &hpt302_port_ops }; /* HPT374 - UDMA100, function 1 uses different cable_detect method */ static const struct ata_port_info info_hpt374_fn0 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &hpt372_port_ops }; static const struct ata_port_info info_hpt374_fn1 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &hpt374_fn1_port_ops }; static const int MHz[4] = { 33, 40, 50, 66 }; void *private_data = NULL; const struct ata_port_info *ppi[] = { NULL, NULL }; u8 rev = dev->revision; u8 irqmask; u8 mcr1; u32 freq; int prefer_dpll = 1; unsigned long iobase = pci_resource_start(dev, 4); const struct hpt_chip *chip_table; int clock_slot; int rc; rc = pcim_enable_device(dev); if (rc) return rc; switch (dev->device) { case PCI_DEVICE_ID_TTI_HPT366: /* May be a later chip in disguise. Check */ /* Older chips are in the HPT366 driver. Ignore them */ if (rev < 3) return -ENODEV; /* N series chips have their own driver. 
Ignore */ if (rev == 6) return -ENODEV; switch (rev) { case 3: ppi[0] = &info_hpt370; chip_table = &hpt370; prefer_dpll = 0; break; case 4: ppi[0] = &info_hpt370a; chip_table = &hpt370a; prefer_dpll = 0; break; case 5: ppi[0] = &info_hpt372; chip_table = &hpt372; break; default: pr_err("Unknown HPT366 subtype, please report (%d)\n", rev); return -ENODEV; } break; case PCI_DEVICE_ID_TTI_HPT372: /* 372N if rev >= 2 */ if (rev >= 2) return -ENODEV; ppi[0] = &info_hpt372; chip_table = &hpt372a; break; case PCI_DEVICE_ID_TTI_HPT302: /* 302N if rev > 1 */ if (rev > 1) return -ENODEV; ppi[0] = &info_hpt302; /* Check this */ chip_table = &hpt302; break; case PCI_DEVICE_ID_TTI_HPT371: if (rev > 1) return -ENODEV; ppi[0] = &info_hpt302; chip_table = &hpt371; /* * Single channel device, master is not present but the BIOS * (or us for non x86) must mark it absent */ pci_read_config_byte(dev, 0x50, &mcr1); mcr1 &= ~0x04; pci_write_config_byte(dev, 0x50, mcr1); break; case PCI_DEVICE_ID_TTI_HPT374: chip_table = &hpt374; if (!(PCI_FUNC(dev->devfn) & 1)) *ppi = &info_hpt374_fn0; else *ppi = &info_hpt374_fn1; break; default: pr_err("PCI table is bogus, please report (%d)\n", dev->device); return -ENODEV; } /* Ok so this is a chip we support */ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78); pci_write_config_byte(dev, PCI_MIN_GNT, 0x08); pci_write_config_byte(dev, PCI_MAX_LAT, 0x08); pci_read_config_byte(dev, 0x5A, &irqmask); irqmask &= ~0x10; pci_write_config_byte(dev, 0x5a, irqmask); /* * default to pci clock. make sure MA15/16 are set to output * to prevent drives having problems with 40-pin cables. Needed * for some drives such as IBM-DTLA which will not enter ready * state on reset when PDIAG is a input. */ pci_write_config_byte(dev, 0x5b, 0x23); /* * HighPoint does this for HPT372A. * NOTE: This register is only writeable via I/O space. 
*/ if (chip_table == &hpt372a) outb(0x0e, iobase + 0x9c); /* * Some devices do not let this value be accessed via PCI space * according to the old driver. In addition we must use the value * from FN 0 on the HPT374. */ if (chip_table == &hpt374) { freq = hpt374_read_freq(dev); if (freq == 0) return -ENODEV; } else freq = inl(iobase + 0x90); if ((freq >> 12) != 0xABCDE) { int i; u8 sr; u32 total = 0; pr_warn("BIOS has not set timing clocks\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { pci_read_config_byte(dev, 0x78, &sr); total += sr & 0x1FF; udelay(15); } freq = total / 128; } freq &= 0x1FF; /* * Turn the frequency check into a band and then find a timing * table to match it. */ clock_slot = hpt37x_clock_slot(freq, chip_table->base); if (chip_table->clocks[clock_slot] == NULL || prefer_dpll) { /* * We need to try PLL mode instead * * For non UDMA133 capable devices we should * use a 50MHz DPLL by choice */ unsigned int f_low, f_high; int dpll, adjust; /* Compute DPLL */ dpll = (ppi[0]->udma_mask & 0xC0) ? 3 : 2; f_low = (MHz[clock_slot] * 48) / MHz[dpll]; f_high = f_low + 2; if (clock_slot > 1) f_high += 2; /* Select the DPLL clock. */ pci_write_config_byte(dev, 0x5b, 0x21); pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100); for (adjust = 0; adjust < 8; adjust++) { if (hpt37x_calibrate_dpll(dev)) break; /* * See if it'll settle at a fractionally * different clock */ if (adjust & 1) f_low -= adjust >> 1; else f_high += adjust >> 1; pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100); } if (adjust == 8) { pr_err("DPLL did not stabilize!\n"); return -ENODEV; } if (dpll == 3) private_data = (void *)hpt37x_timings_66; else private_data = (void *)hpt37x_timings_50; pr_info("bus clock %dMHz, using %dMHz DPLL\n", MHz[clock_slot], MHz[dpll]); } else { private_data = (void *)chip_table->clocks[clock_slot]; /* * Perform a final fixup. 
Note that we will have used the * DPLL on the HPT372 which means we don't have to worry * about lack of UDMA133 support on lower clocks */ if (clock_slot < 2 && ppi[0] == &info_hpt370) ppi[0] = &info_hpt370_33; if (clock_slot < 2 && ppi[0] == &info_hpt370a) ppi[0] = &info_hpt370a_33; pr_info("%s using %dMHz bus clock\n", chip_table->name, MHz[clock_slot]); } /* Now kick off ATA set up */ return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0); } static const struct pci_device_id hpt37x[] = { { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT374), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), }, { }, }; static struct pci_driver hpt37x_pci_driver = { .name = DRV_NAME, .id_table = hpt37x, .probe = hpt37x_init_one, .remove = ata_pci_remove_one }; module_pci_driver(hpt37x_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for the Highpoint HPT37x/30x"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, hpt37x); MODULE_VERSION(DRV_VERSION);
gpl-2.0
smaeul/kernel_samsung_aries
drivers/mtd/devices/pmc551.c
3033
25878
/* * PMC551 PCI Mezzanine Ram Device * * Author: * Mark Ferrell <mferrell@mvista.com> * Copyright 1999,2000 Nortel Networks * * License: * As part of this driver was derived from the slram.c driver it * falls under the same license, which is GNU General Public * License v2 * * Description: * This driver is intended to support the PMC551 PCI Ram device * from Ramix Inc. The PMC551 is a PMC Mezzanine module for * cPCI embedded systems. The device contains a single SROM * that initially programs the V370PDC chipset onboard the * device, and various banks of DRAM/SDRAM onboard. This driver * implements this PCI Ram device as an MTD (Memory Technology * Device) so that it can be used to hold a file system, or for * added swap space in embedded systems. Since the memory on * this board isn't as fast as main memory we do not try to hook * it into main memory as that would simply reduce performance * on the system. Using it as a block device allows us to use * it as high speed swap or for a high speed disk device of some * sort. Which becomes very useful on diskless systems in the * embedded market I might add. * * Notes: * Due to what I assume is more buggy SROM, the 64M PMC551 I * have available claims that all 4 of its DRAM banks have 64MiB * of ram configured (making a grand total of 256MiB onboard). * This is slightly annoying since the BAR0 size reflects the * aperture size, not the dram size, and the V370PDC supplies no * other method for memory size discovery. This problem is * mostly only relevant when compiled as a module, as the * unloading of the module with an aperture size smaller than * the ram will cause the driver to detect the onboard memory * size to be equal to the aperture size when the module is * reloaded. Soooo, to help, the module supports an msize * option to allow the specification of the onboard memory, and * an asize option, to allow the specification of the aperture * size. 
The aperture must be equal to or less then the memory * size, the driver will correct this if you screw it up. This * problem is not relevant for compiled in drivers as compiled * in drivers only init once. * * Credits: * Saeed Karamooz <saeed@ramix.com> of Ramix INC. for the * initial example code of how to initialize this device and for * help with questions I had concerning operation of the device. * * Most of the MTD code for this driver was originally written * for the slram.o module in the MTD drivers package which * allows the mapping of system memory into an MTD device. * Since the PMC551 memory module is accessed in the same * fashion as system memory, the slram.c code became a very nice * fit to the needs of this driver. All we added was PCI * detection/initialization to the driver and automatically figure * out the size via the PCI detection.o, later changes by Corey * Minyard set up the card to utilize a 1M sliding apature. * * Corey Minyard <minyard@nortelnetworks.com> * * Modified driver to utilize a sliding aperture instead of * mapping all memory into kernel space which turned out to * be very wasteful. * * Located a bug in the SROM's initialization sequence that * made the memory unusable, added a fix to code to touch up * the DRAM some. * * Bugs/FIXMEs: * * MUST fix the init function to not spin on a register * waiting for it to set .. this does not safely handle busted * devices that never reset the register correctly which will * cause the system to hang w/ a reboot being the only chance at * recover. [sort of fixed, could be better] * * Add I2C handling of the SROM so we can read the SROM's information * about the aperture size. This should always accurately reflect the * onboard memory size. * * Comb the init routine. It's still a bit cludgy on a few things. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <asm/uaccess.h> #include <linux/types.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/ioctl.h> #include <asm/io.h> #include <asm/system.h> #include <linux/pci.h> #include <linux/mtd/mtd.h> #include <linux/mtd/pmc551.h> static struct mtd_info *pmc551list; static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr) { struct mypriv *priv = mtd->priv; u32 soff_hi, soff_lo; /* start address offset hi/lo */ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */ unsigned long end; u_char *ptr; size_t retlen; #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_erase(pos:%ld, len:%ld)\n", (long)instr->addr, (long)instr->len); #endif end = instr->addr + instr->len - 1; /* Is it past the end? */ if (end > mtd->size) { #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n", (long)end, (long)mtd->size); #endif return -EINVAL; } eoff_hi = end & ~(priv->asize - 1); soff_hi = instr->addr & ~(priv->asize - 1); eoff_lo = end & (priv->asize - 1); soff_lo = instr->addr & (priv->asize - 1); pmc551_point(mtd, instr->addr, instr->len, &retlen, (void **)&ptr, NULL); if (soff_hi == eoff_hi || mtd->size == priv->asize) { /* The whole thing fits within one access, so just one shot will do it. */ memset(ptr, 0xff, instr->len); } else { /* We have to do multiple writes to get all the data written. 
*/ while (soff_hi != eoff_hi) { #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_erase() soff_hi: %ld, " "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi); #endif memset(ptr, 0xff, priv->asize); if (soff_hi + priv->asize >= mtd->size) { goto out; } soff_hi += priv->asize; pmc551_point(mtd, (priv->base_map0 | soff_hi), priv->asize, &retlen, (void **)&ptr, NULL); } memset(ptr, 0xff, eoff_lo); } out: instr->state = MTD_ERASE_DONE; #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_erase() done\n"); #endif mtd_erase_callback(instr); return 0; } static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, void **virt, resource_size_t *phys) { struct mypriv *priv = mtd->priv; u32 soff_hi; u32 soff_lo; #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len); #endif if (from + len > mtd->size) { #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n", (long)from + len, (long)mtd->size); #endif return -EINVAL; } /* can we return a physical address with this driver? 
*/ if (phys) return -EINVAL; soff_hi = from & ~(priv->asize - 1); soff_lo = from & (priv->asize - 1); /* Cheap hack optimization */ if (priv->curr_map0 != from) { pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0, (priv->base_map0 | soff_hi)); priv->curr_map0 = soff_hi; } *virt = priv->start + soff_lo; *retlen = len; return 0; } static void pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len) { #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_unpoint()\n"); #endif } static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf) { struct mypriv *priv = mtd->priv; u32 soff_hi, soff_lo; /* start address offset hi/lo */ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */ unsigned long end; u_char *ptr; u_char *copyto = buf; #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_read(pos:%ld, len:%ld) asize: %ld\n", (long)from, (long)len, (long)priv->asize); #endif end = from + len - 1; /* Is it past the end? */ if (end > mtd->size) { #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n", (long)end, (long)mtd->size); #endif return -EINVAL; } soff_hi = from & ~(priv->asize - 1); eoff_hi = end & ~(priv->asize - 1); soff_lo = from & (priv->asize - 1); eoff_lo = end & (priv->asize - 1); pmc551_point(mtd, from, len, retlen, (void **)&ptr, NULL); if (soff_hi == eoff_hi) { /* The whole thing fits within one access, so just one shot will do it. */ memcpy(copyto, ptr, len); copyto += len; } else { /* We have to do multiple writes to get all the data written. 
*/ while (soff_hi != eoff_hi) { #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_read() soff_hi: %ld, " "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi); #endif memcpy(copyto, ptr, priv->asize); copyto += priv->asize; if (soff_hi + priv->asize >= mtd->size) { goto out; } soff_hi += priv->asize; pmc551_point(mtd, soff_hi, priv->asize, retlen, (void **)&ptr, NULL); } memcpy(copyto, ptr, eoff_lo); copyto += eoff_lo; } out: #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_read() done\n"); #endif *retlen = copyto - buf; return 0; } static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf) { struct mypriv *priv = mtd->priv; u32 soff_hi, soff_lo; /* start address offset hi/lo */ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */ unsigned long end; u_char *ptr; const u_char *copyfrom = buf; #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_write(pos:%ld, len:%ld) asize:%ld\n", (long)to, (long)len, (long)priv->asize); #endif end = to + len - 1; /* Is it past the end? or did the u32 wrap? */ if (end > mtd->size) { #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, " "size: %ld, to: %ld)\n", (long)end, (long)mtd->size, (long)to); #endif return -EINVAL; } soff_hi = to & ~(priv->asize - 1); eoff_hi = end & ~(priv->asize - 1); soff_lo = to & (priv->asize - 1); eoff_lo = end & (priv->asize - 1); pmc551_point(mtd, to, len, retlen, (void **)&ptr, NULL); if (soff_hi == eoff_hi) { /* The whole thing fits within one access, so just one shot will do it. */ memcpy(ptr, copyfrom, len); copyfrom += len; } else { /* We have to do multiple writes to get all the data written. 
*/ while (soff_hi != eoff_hi) { #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_write() soff_hi: %ld, " "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi); #endif memcpy(ptr, copyfrom, priv->asize); copyfrom += priv->asize; if (soff_hi >= mtd->size) { goto out; } soff_hi += priv->asize; pmc551_point(mtd, soff_hi, priv->asize, retlen, (void **)&ptr, NULL); } memcpy(ptr, copyfrom, eoff_lo); copyfrom += eoff_lo; } out: #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_write() done\n"); #endif *retlen = copyfrom - buf; return 0; } /* * Fixup routines for the V370PDC * PCI device ID 0x020011b0 * * This function basically kick starts the DRAM oboard the card and gets it * ready to be used. Before this is done the device reads VERY erratic, so * much that it can crash the Linux 2.2.x series kernels when a user cat's * /proc/pci .. though that is mainly a kernel bug in handling the PCI DEVSEL * register. FIXME: stop spinning on registers .. must implement a timeout * mechanism * returns the size of the memory region found. 
*/ static u32 fixup_pmc551(struct pci_dev *dev) { #ifdef CONFIG_MTD_PMC551_BUGFIX u32 dram_data; #endif u32 size, dcmd, cfg, dtmp; u16 cmd, tmp, i; u8 bcmd, counter; /* Sanity Check */ if (!dev) { return -ENODEV; } /* * Attempt to reset the card * FIXME: Stop Spinning registers */ counter = 0; /* unlock registers */ pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, 0xA5); /* read in old data */ pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd); /* bang the reset line up and down for a few */ for (i = 0; i < 10; i++) { counter = 0; bcmd &= ~0x80; while (counter++ < 100) { pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd); } counter = 0; bcmd |= 0x80; while (counter++ < 100) { pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd); } } bcmd |= (0x40 | 0x20); pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd); /* * Take care and turn off the memory on the device while we * tweak the configurations */ pci_read_config_word(dev, PCI_COMMAND, &cmd); tmp = cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY); pci_write_config_word(dev, PCI_COMMAND, tmp); /* * Disable existing aperture before probing memory size */ pci_read_config_dword(dev, PMC551_PCI_MEM_MAP0, &dcmd); dtmp = (dcmd | PMC551_PCI_MEM_MAP_ENABLE | PMC551_PCI_MEM_MAP_REG_EN); pci_write_config_dword(dev, PMC551_PCI_MEM_MAP0, dtmp); /* * Grab old BAR0 config so that we can figure out memory size * This is another bit of kludge going on. The reason for the * redundancy is I am hoping to retain the original configuration * previously assigned to the card by the BIOS or some previous * fixup routine in the kernel. So we read the old config into cfg, * then write all 1's to the memory space, read back the result into * "size", and then write back all the old config. 
*/ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &cfg); #ifndef CONFIG_MTD_PMC551_BUGFIX pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, ~0); pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &size); size = (size & PCI_BASE_ADDRESS_MEM_MASK); size &= ~(size - 1); pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, cfg); #else /* * Get the size of the memory by reading all the DRAM size values * and adding them up. * * KLUDGE ALERT: the boards we are using have invalid column and * row mux values. We fix them here, but this will break other * memory configurations. */ pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dram_data); size = PMC551_DRAM_BLK_GET_SIZE(dram_data); dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); pci_write_config_dword(dev, PMC551_DRAM_BLK0, dram_data); pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dram_data); size += PMC551_DRAM_BLK_GET_SIZE(dram_data); dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); pci_write_config_dword(dev, PMC551_DRAM_BLK1, dram_data); pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dram_data); size += PMC551_DRAM_BLK_GET_SIZE(dram_data); dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); pci_write_config_dword(dev, PMC551_DRAM_BLK2, dram_data); pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dram_data); size += PMC551_DRAM_BLK_GET_SIZE(dram_data); dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); pci_write_config_dword(dev, PMC551_DRAM_BLK3, dram_data); /* * Oops .. 
something went wrong */ if ((size &= PCI_BASE_ADDRESS_MEM_MASK) == 0) { return -ENODEV; } #endif /* CONFIG_MTD_PMC551_BUGFIX */ if ((cfg & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) { return -ENODEV; } /* * Precharge Dram */ pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0400); pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x00bf); /* * Wait until command has gone through * FIXME: register spinning issue */ do { pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd); if (counter++ > 100) break; } while ((PCI_COMMAND_IO) & cmd); /* * Turn on auto refresh * The loop is taken directly from Ramix's example code. I assume that * this must be held high for some duration of time, but I can find no * documentation refrencing the reasons why. */ for (i = 1; i <= 8; i++) { pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0df); /* * Make certain command has gone through * FIXME: register spinning issue */ counter = 0; do { pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd); if (counter++ > 100) break; } while ((PCI_COMMAND_IO) & cmd); } pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0020); pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0ff); /* * Wait until command completes * FIXME: register spinning issue */ counter = 0; do { pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd); if (counter++ > 100) break; } while ((PCI_COMMAND_IO) & cmd); pci_read_config_dword(dev, PMC551_DRAM_CFG, &dcmd); dcmd |= 0x02000000; pci_write_config_dword(dev, PMC551_DRAM_CFG, dcmd); /* * Check to make certain fast back-to-back, if not * then set it so */ pci_read_config_word(dev, PCI_STATUS, &cmd); if ((cmd & PCI_COMMAND_FAST_BACK) == 0) { cmd |= PCI_COMMAND_FAST_BACK; pci_write_config_word(dev, PCI_STATUS, cmd); } /* * Check to make certain the DEVSEL is set correctly, this device * has a tendency to assert DEVSEL and TRDY when a write is performed * to the memory when memory is read-only */ if ((cmd & PCI_STATUS_DEVSEL_MASK) != 0x0) { cmd &= ~PCI_STATUS_DEVSEL_MASK; pci_write_config_word(dev, 
PCI_STATUS, cmd); } /* * Set to be prefetchable and put everything back based on old cfg. * it's possible that the reset of the V370PDC nuked the original * setup */ /* cfg |= PCI_BASE_ADDRESS_MEM_PREFETCH; pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg ); */ /* * Turn PCI memory and I/O bus access back on */ pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_MEMORY | PCI_COMMAND_IO); #ifdef CONFIG_MTD_PMC551_DEBUG /* * Some screen fun */ printk(KERN_DEBUG "pmc551: %d%sB (0x%x) of %sprefetchable memory at " "0x%llx\n", (size < 1024) ? size : (size < 1048576) ? size >> 10 : size >> 20, (size < 1024) ? "" : (size < 1048576) ? "Ki" : "Mi", size, ((dcmd & (0x1 << 3)) == 0) ? "non-" : "", (unsigned long long)pci_resource_start(dev, 0)); /* * Check to see the state of the memory */ pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dcmd); printk(KERN_DEBUG "pmc551: DRAM_BLK0 Flags: %s,%s\n" "pmc551: DRAM_BLK0 Size: %d at %d\n" "pmc551: DRAM_BLK0 Row MUX: %d, Col MUX: %d\n", (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO", (((0x1 << 0) & dcmd) == 0) ? "Off" : "On", PMC551_DRAM_BLK_GET_SIZE(dcmd), ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7), ((dcmd >> 9) & 0xF)); pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dcmd); printk(KERN_DEBUG "pmc551: DRAM_BLK1 Flags: %s,%s\n" "pmc551: DRAM_BLK1 Size: %d at %d\n" "pmc551: DRAM_BLK1 Row MUX: %d, Col MUX: %d\n", (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO", (((0x1 << 0) & dcmd) == 0) ? "Off" : "On", PMC551_DRAM_BLK_GET_SIZE(dcmd), ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7), ((dcmd >> 9) & 0xF)); pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dcmd); printk(KERN_DEBUG "pmc551: DRAM_BLK2 Flags: %s,%s\n" "pmc551: DRAM_BLK2 Size: %d at %d\n" "pmc551: DRAM_BLK2 Row MUX: %d, Col MUX: %d\n", (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO", (((0x1 << 0) & dcmd) == 0) ? 
"Off" : "On", PMC551_DRAM_BLK_GET_SIZE(dcmd), ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7), ((dcmd >> 9) & 0xF)); pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dcmd); printk(KERN_DEBUG "pmc551: DRAM_BLK3 Flags: %s,%s\n" "pmc551: DRAM_BLK3 Size: %d at %d\n" "pmc551: DRAM_BLK3 Row MUX: %d, Col MUX: %d\n", (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO", (((0x1 << 0) & dcmd) == 0) ? "Off" : "On", PMC551_DRAM_BLK_GET_SIZE(dcmd), ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7), ((dcmd >> 9) & 0xF)); pci_read_config_word(dev, PCI_COMMAND, &cmd); printk(KERN_DEBUG "pmc551: Memory Access %s\n", (((0x1 << 1) & cmd) == 0) ? "off" : "on"); printk(KERN_DEBUG "pmc551: I/O Access %s\n", (((0x1 << 0) & cmd) == 0) ? "off" : "on"); pci_read_config_word(dev, PCI_STATUS, &cmd); printk(KERN_DEBUG "pmc551: Devsel %s\n", ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x000) ? "Fast" : ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x200) ? "Medium" : ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x400) ? "Slow" : "Invalid"); printk(KERN_DEBUG "pmc551: %sFast Back-to-Back\n", ((PCI_COMMAND_FAST_BACK & cmd) == 0) ? "Not " : ""); pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd); printk(KERN_DEBUG "pmc551: EEPROM is under %s control\n" "pmc551: System Control Register is %slocked to PCI access\n" "pmc551: System Control Register is %slocked to EEPROM access\n", (bcmd & 0x1) ? "software" : "hardware", (bcmd & 0x20) ? "" : "un", (bcmd & 0x40) ? 
"" : "un"); #endif return size; } /* * Kernel version specific module stuffages */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mark Ferrell <mferrell@mvista.com>"); MODULE_DESCRIPTION(PMC551_VERSION); /* * Stuff these outside the ifdef so as to not bust compiled in driver support */ static int msize = 0; static int asize = 0; module_param(msize, int, 0); MODULE_PARM_DESC(msize, "memory size in MiB [1 - 1024]"); module_param(asize, int, 0); MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1-1024]"); /* * PMC551 Card Initialization */ static int __init init_pmc551(void) { struct pci_dev *PCI_Device = NULL; struct mypriv *priv; int found = 0; struct mtd_info *mtd; u32 length = 0; if (msize) { msize = (1 << (ffs(msize) - 1)) << 20; if (msize > (1 << 30)) { printk(KERN_NOTICE "pmc551: Invalid memory size [%d]\n", msize); return -EINVAL; } } if (asize) { asize = (1 << (ffs(asize) - 1)) << 20; if (asize > (1 << 30)) { printk(KERN_NOTICE "pmc551: Invalid aperture size " "[%d]\n", asize); return -EINVAL; } } printk(KERN_INFO PMC551_VERSION); /* * PCU-bus chipset probe. */ for (;;) { if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI, PCI_DEVICE_ID_V3_SEMI_V370PDC, PCI_Device)) == NULL) { break; } printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%llx\n", (unsigned long long)pci_resource_start(PCI_Device, 0)); /* * The PMC551 device acts VERY weird if you don't init it * first. i.e. it will not correctly report devsel. If for * some reason the sdram is in a wrote-protected state the * device will DEVSEL when it is written to causing problems * with the oldproc.c driver in * some kernels (2.2.*) */ if ((length = fixup_pmc551(PCI_Device)) <= 0) { printk(KERN_NOTICE "pmc551: Cannot init SDRAM\n"); break; } /* * This is needed until the driver is capable of reading the * onboard I2C SROM to discover the "real" memory size. 
*/ if (msize) { length = msize; printk(KERN_NOTICE "pmc551: Using specified memory " "size 0x%x\n", length); } else { msize = length; } mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); if (!mtd) { printk(KERN_NOTICE "pmc551: Cannot allocate new MTD " "device.\n"); break; } priv = kzalloc(sizeof(struct mypriv), GFP_KERNEL); if (!priv) { printk(KERN_NOTICE "pmc551: Cannot allocate new MTD " "device.\n"); kfree(mtd); break; } mtd->priv = priv; priv->dev = PCI_Device; if (asize > length) { printk(KERN_NOTICE "pmc551: reducing aperture size to " "fit %dM\n", length >> 20); priv->asize = asize = length; } else if (asize == 0 || asize == length) { printk(KERN_NOTICE "pmc551: Using existing aperture " "size %dM\n", length >> 20); priv->asize = asize = length; } else { printk(KERN_NOTICE "pmc551: Using specified aperture " "size %dM\n", asize >> 20); priv->asize = asize; } priv->start = pci_iomap(PCI_Device, 0, priv->asize); if (!priv->start) { printk(KERN_NOTICE "pmc551: Unable to map IO space\n"); kfree(mtd->priv); kfree(mtd); break; } #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551: setting aperture to %d\n", ffs(priv->asize >> 20) - 1); #endif priv->base_map0 = (PMC551_PCI_MEM_MAP_REG_EN | PMC551_PCI_MEM_MAP_ENABLE | (ffs(priv->asize >> 20) - 1) << 4); priv->curr_map0 = priv->base_map0; pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0, priv->curr_map0); #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551: aperture set to %d\n", (priv->base_map0 & 0xF0) >> 4); #endif mtd->size = msize; mtd->flags = MTD_CAP_RAM; mtd->erase = pmc551_erase; mtd->read = pmc551_read; mtd->write = pmc551_write; mtd->point = pmc551_point; mtd->unpoint = pmc551_unpoint; mtd->type = MTD_RAM; mtd->name = "PMC551 RAM board"; mtd->erasesize = 0x10000; mtd->writesize = 1; mtd->owner = THIS_MODULE; if (mtd_device_register(mtd, NULL, 0)) { printk(KERN_NOTICE "pmc551: Failed to register new device\n"); pci_iounmap(PCI_Device, priv->start); kfree(mtd->priv); kfree(mtd); break; } /* 
Keep a reference as the mtd_device_register worked */ pci_dev_get(PCI_Device); printk(KERN_NOTICE "Registered pmc551 memory device.\n"); printk(KERN_NOTICE "Mapped %dMiB of memory from 0x%p to 0x%p\n", priv->asize >> 20, priv->start, priv->start + priv->asize); printk(KERN_NOTICE "Total memory is %d%sB\n", (length < 1024) ? length : (length < 1048576) ? length >> 10 : length >> 20, (length < 1024) ? "" : (length < 1048576) ? "Ki" : "Mi"); priv->nextpmc551 = pmc551list; pmc551list = mtd; found++; } /* Exited early, reference left over */ if (PCI_Device) pci_dev_put(PCI_Device); if (!pmc551list) { printk(KERN_NOTICE "pmc551: not detected\n"); return -ENODEV; } else { printk(KERN_NOTICE "pmc551: %d pmc551 devices loaded\n", found); return 0; } } /* * PMC551 Card Cleanup */ static void __exit cleanup_pmc551(void) { int found = 0; struct mtd_info *mtd; struct mypriv *priv; while ((mtd = pmc551list)) { priv = mtd->priv; pmc551list = priv->nextpmc551; if (priv->start) { printk(KERN_DEBUG "pmc551: unmapping %dMiB starting at " "0x%p\n", priv->asize >> 20, priv->start); pci_iounmap(priv->dev, priv->start); } pci_dev_put(priv->dev); kfree(mtd->priv); mtd_device_unregister(mtd); kfree(mtd); found++; } printk(KERN_NOTICE "pmc551: %d pmc551 devices unloaded\n", found); } module_init(init_pmc551); module_exit(cleanup_pmc551);
gpl-2.0
david-visteon/linux-3.0.101
net/sched/sch_prio.c
3033
8541
/* * net/sched/sch_prio.c Simple 3-band priority "scheduler". * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * Fixes: 19990609: J Hadi Salim <hadi@nortelnetworks.com>: * Init -- EINVAL when opt undefined */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <net/netlink.h> #include <net/pkt_sched.h> struct prio_sched_data { int bands; struct tcf_proto *filter_list; u8 prio2band[TC_PRIO_MAX+1]; struct Qdisc *queues[TCQ_PRIO_BANDS]; }; static struct Qdisc * prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) { struct prio_sched_data *q = qdisc_priv(sch); u32 band = skb->priority; struct tcf_result res; int err; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; if (TC_H_MAJ(skb->priority) != sch->handle) { err = tc_classify(skb, q->filter_list, &res); #ifdef CONFIG_NET_CLS_ACT switch (err) { case TC_ACT_STOLEN: case TC_ACT_QUEUED: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; } #endif if (!q->filter_list || err < 0) { if (TC_H_MAJ(band)) band = 0; return q->queues[q->prio2band[band & TC_PRIO_MAX]]; } band = res.classid; } band = TC_H_MIN(band) - 1; if (band >= q->bands) return q->queues[q->prio2band[0]]; return q->queues[band]; } static int prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct Qdisc *qdisc; int ret; qdisc = prio_classify(skb, sch, &ret); #ifdef CONFIG_NET_CLS_ACT if (qdisc == NULL) { if (ret & __NET_XMIT_BYPASS) sch->qstats.drops++; kfree_skb(skb); return ret; } #endif ret = qdisc_enqueue(skb, qdisc); if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; return NET_XMIT_SUCCESS; } if (net_xmit_drop_count(ret)) 
sch->qstats.drops++; return ret; } static struct sk_buff *prio_peek(struct Qdisc *sch) { struct prio_sched_data *q = qdisc_priv(sch); int prio; for (prio = 0; prio < q->bands; prio++) { struct Qdisc *qdisc = q->queues[prio]; struct sk_buff *skb = qdisc->ops->peek(qdisc); if (skb) return skb; } return NULL; } static struct sk_buff *prio_dequeue(struct Qdisc *sch) { struct prio_sched_data *q = qdisc_priv(sch); int prio; for (prio = 0; prio < q->bands; prio++) { struct Qdisc *qdisc = q->queues[prio]; struct sk_buff *skb = qdisc_dequeue_peeked(qdisc); if (skb) { qdisc_bstats_update(sch, skb); sch->q.qlen--; return skb; } } return NULL; } static unsigned int prio_drop(struct Qdisc *sch) { struct prio_sched_data *q = qdisc_priv(sch); int prio; unsigned int len; struct Qdisc *qdisc; for (prio = q->bands-1; prio >= 0; prio--) { qdisc = q->queues[prio]; if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) { sch->q.qlen--; return len; } } return 0; } static void prio_reset(struct Qdisc *sch) { int prio; struct prio_sched_data *q = qdisc_priv(sch); for (prio = 0; prio < q->bands; prio++) qdisc_reset(q->queues[prio]); sch->q.qlen = 0; } static void prio_destroy(struct Qdisc *sch) { int prio; struct prio_sched_data *q = qdisc_priv(sch); tcf_destroy_chain(&q->filter_list); for (prio = 0; prio < q->bands; prio++) qdisc_destroy(q->queues[prio]); } static int prio_tune(struct Qdisc *sch, struct nlattr *opt) { struct prio_sched_data *q = qdisc_priv(sch); struct tc_prio_qopt *qopt; int i; if (nla_len(opt) < sizeof(*qopt)) return -EINVAL; qopt = nla_data(opt); if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2) return -EINVAL; for (i = 0; i <= TC_PRIO_MAX; i++) { if (qopt->priomap[i] >= qopt->bands) return -EINVAL; } sch_tree_lock(sch); q->bands = qopt->bands; memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); for (i = q->bands; i < TCQ_PRIO_BANDS; i++) { struct Qdisc *child = q->queues[i]; q->queues[i] = &noop_qdisc; if (child != &noop_qdisc) { 
qdisc_tree_decrease_qlen(child, child->q.qlen); qdisc_destroy(child); } } sch_tree_unlock(sch); for (i = 0; i < q->bands; i++) { if (q->queues[i] == &noop_qdisc) { struct Qdisc *child, *old; child = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, TC_H_MAKE(sch->handle, i + 1)); if (child) { sch_tree_lock(sch); old = q->queues[i]; q->queues[i] = child; if (old != &noop_qdisc) { qdisc_tree_decrease_qlen(old, old->q.qlen); qdisc_destroy(old); } sch_tree_unlock(sch); } } } return 0; } static int prio_init(struct Qdisc *sch, struct nlattr *opt) { struct prio_sched_data *q = qdisc_priv(sch); int i; for (i = 0; i < TCQ_PRIO_BANDS; i++) q->queues[i] = &noop_qdisc; if (opt == NULL) { return -EINVAL; } else { int err; if ((err = prio_tune(sch, opt)) != 0) return err; } return 0; } static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) { struct prio_sched_data *q = qdisc_priv(sch); unsigned char *b = skb_tail_pointer(skb); struct tc_prio_qopt opt; opt.bands = q->bands; memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1); NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); return skb->len; nla_put_failure: nlmsg_trim(skb, b); return -1; } static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) { struct prio_sched_data *q = qdisc_priv(sch); unsigned long band = arg - 1; if (new == NULL) new = &noop_qdisc; sch_tree_lock(sch); *old = q->queues[band]; q->queues[band] = new; qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); qdisc_reset(*old); sch_tree_unlock(sch); return 0; } static struct Qdisc * prio_leaf(struct Qdisc *sch, unsigned long arg) { struct prio_sched_data *q = qdisc_priv(sch); unsigned long band = arg - 1; return q->queues[band]; } static unsigned long prio_get(struct Qdisc *sch, u32 classid) { struct prio_sched_data *q = qdisc_priv(sch); unsigned long band = TC_H_MIN(classid); if (band - 1 >= q->bands) return 0; return band; } static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid) { return 
prio_get(sch, classid); } static void prio_put(struct Qdisc *q, unsigned long cl) { } static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { struct prio_sched_data *q = qdisc_priv(sch); tcm->tcm_handle |= TC_H_MIN(cl); tcm->tcm_info = q->queues[cl-1]->handle; return 0; } static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct gnet_dump *d) { struct prio_sched_data *q = qdisc_priv(sch); struct Qdisc *cl_q; cl_q = q->queues[cl - 1]; cl_q->qstats.qlen = cl_q->q.qlen; if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 || gnet_stats_copy_queue(d, &cl_q->qstats) < 0) return -1; return 0; } static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg) { struct prio_sched_data *q = qdisc_priv(sch); int prio; if (arg->stop) return; for (prio = 0; prio < q->bands; prio++) { if (arg->count < arg->skip) { arg->count++; continue; } if (arg->fn(sch, prio + 1, arg) < 0) { arg->stop = 1; break; } arg->count++; } } static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl) { struct prio_sched_data *q = qdisc_priv(sch); if (cl) return NULL; return &q->filter_list; } static const struct Qdisc_class_ops prio_class_ops = { .graft = prio_graft, .leaf = prio_leaf, .get = prio_get, .put = prio_put, .walk = prio_walk, .tcf_chain = prio_find_tcf, .bind_tcf = prio_bind, .unbind_tcf = prio_put, .dump = prio_dump_class, .dump_stats = prio_dump_class_stats, }; static struct Qdisc_ops prio_qdisc_ops __read_mostly = { .next = NULL, .cl_ops = &prio_class_ops, .id = "prio", .priv_size = sizeof(struct prio_sched_data), .enqueue = prio_enqueue, .dequeue = prio_dequeue, .peek = prio_peek, .drop = prio_drop, .init = prio_init, .reset = prio_reset, .destroy = prio_destroy, .change = prio_tune, .dump = prio_dump, .owner = THIS_MODULE, }; static int __init prio_module_init(void) { return register_qdisc(&prio_qdisc_ops); } static void __exit prio_module_exit(void) { unregister_qdisc(&prio_qdisc_ops); } 
module_init(prio_module_init) module_exit(prio_module_exit) MODULE_LICENSE("GPL");
gpl-2.0
felixsch/linux
arch/arm/mach-omap1/board-fsample.c
3545
8847
/* * linux/arch/arm/mach-omap1/board-fsample.c * * Modified from board-perseus2.c * * Original OMAP730 support by Jean Pihet <j-pihet@ti.com> * Updated for 2.6 by Kevin Hilman <kjh@hilman.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/input.h> #include <linux/smc91x.h> #include <linux/omapfb.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/tc.h> #include <mach/mux.h> #include <mach/flash.h> #include <linux/platform_data/keypad-omap.h> #include <mach/hardware.h> #include "iomap.h" #include "common.h" #include "fpga.h" /* fsample is pretty close to p2-sample */ #define fsample_cpld_read(reg) __raw_readb(reg) #define fsample_cpld_write(val, reg) __raw_writeb(val, reg) #define FSAMPLE_CPLD_BASE 0xE8100000 #define FSAMPLE_CPLD_SIZE SZ_4K #define FSAMPLE_CPLD_START 0x05080000 #define FSAMPLE_CPLD_REG_A (FSAMPLE_CPLD_BASE + 0x00) #define FSAMPLE_CPLD_SWITCH (FSAMPLE_CPLD_BASE + 0x02) #define FSAMPLE_CPLD_UART (FSAMPLE_CPLD_BASE + 0x02) #define FSAMPLE_CPLD_REG_B (FSAMPLE_CPLD_BASE + 0x04) #define FSAMPLE_CPLD_VERSION (FSAMPLE_CPLD_BASE + 0x06) #define FSAMPLE_CPLD_SET_CLR (FSAMPLE_CPLD_BASE + 0x06) #define FSAMPLE_CPLD_BIT_BT_RESET 0 #define FSAMPLE_CPLD_BIT_LCD_RESET 1 #define FSAMPLE_CPLD_BIT_CAM_PWDN 2 #define FSAMPLE_CPLD_BIT_CHARGER_ENABLE 3 #define FSAMPLE_CPLD_BIT_SD_MMC_EN 4 #define FSAMPLE_CPLD_BIT_aGPS_PWREN 5 #define FSAMPLE_CPLD_BIT_BACKLIGHT 6 #define FSAMPLE_CPLD_BIT_aGPS_EN_RESET 7 #define FSAMPLE_CPLD_BIT_aGPS_SLEEPx_N 8 #define FSAMPLE_CPLD_BIT_OTG_RESET 9 #define fsample_cpld_set(bit) \ 
fsample_cpld_write((((bit) & 15) << 4) | 0x0f, FSAMPLE_CPLD_SET_CLR) #define fsample_cpld_clear(bit) \ fsample_cpld_write(0xf0 | ((bit) & 15), FSAMPLE_CPLD_SET_CLR) static const unsigned int fsample_keymap[] = { KEY(0, 0, KEY_UP), KEY(1, 0, KEY_RIGHT), KEY(2, 0, KEY_LEFT), KEY(3, 0, KEY_DOWN), KEY(4, 0, KEY_ENTER), KEY(0, 1, KEY_F10), KEY(1, 1, KEY_SEND), KEY(2, 1, KEY_END), KEY(3, 1, KEY_VOLUMEDOWN), KEY(4, 1, KEY_VOLUMEUP), KEY(5, 1, KEY_RECORD), KEY(0, 2, KEY_F9), KEY(1, 2, KEY_3), KEY(2, 2, KEY_6), KEY(3, 2, KEY_9), KEY(4, 2, KEY_KPDOT), KEY(0, 3, KEY_BACK), KEY(1, 3, KEY_2), KEY(2, 3, KEY_5), KEY(3, 3, KEY_8), KEY(4, 3, KEY_0), KEY(5, 3, KEY_KPSLASH), KEY(0, 4, KEY_HOME), KEY(1, 4, KEY_1), KEY(2, 4, KEY_4), KEY(3, 4, KEY_7), KEY(4, 4, KEY_KPASTERISK), KEY(5, 4, KEY_POWER), }; static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { [0] = { .start = H2P2_DBG_FPGA_ETHR_START, /* Physical */ .end = H2P2_DBG_FPGA_ETHR_START + 0xf, .flags = IORESOURCE_MEM, }, [1] = { .start = INT_7XX_MPU_EXT_NIRQ, .end = 0, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; static void __init fsample_init_smc91x(void) { __raw_writeb(1, H2P2_DBG_FPGA_LAN_RESET); mdelay(50); __raw_writeb(__raw_readb(H2P2_DBG_FPGA_LAN_RESET) & ~1, H2P2_DBG_FPGA_LAN_RESET); mdelay(50); } static struct mtd_partition nor_partitions[] = { /* bootloader (U-Boot, etc) in first sector */ { .name = "bootloader", .offset = 0, .size = SZ_128K, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, /* bootloader params in the next sector */ { .name = "params", .offset = MTDPART_OFS_APPEND, .size = SZ_128K, .mask_flags = 0, }, /* kernel */ { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = SZ_2M, .mask_flags = 0 }, /* rest of flash is a file system */ { .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, .mask_flags = 0 }, }; static struct 
physmap_flash_data nor_data = { .width = 2, .set_vpp = omap1_set_vpp, .parts = nor_partitions, .nr_parts = ARRAY_SIZE(nor_partitions), }; static struct resource nor_resource = { .start = OMAP_CS0_PHYS, .end = OMAP_CS0_PHYS + SZ_32M - 1, .flags = IORESOURCE_MEM, }; static struct platform_device nor_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &nor_data, }, .num_resources = 1, .resource = &nor_resource, }; #define FSAMPLE_NAND_RB_GPIO_PIN 62 static int nand_dev_ready(struct mtd_info *mtd) { return gpio_get_value(FSAMPLE_NAND_RB_GPIO_PIN); } static struct platform_nand_data nand_data = { .chip = { .nr_chips = 1, .chip_offset = 0, .options = NAND_SAMSUNG_LP_OPTIONS, }, .ctrl = { .cmd_ctrl = omap1_nand_cmd_ctl, .dev_ready = nand_dev_ready, }, }; static struct resource nand_resource = { .start = OMAP_CS3_PHYS, .end = OMAP_CS3_PHYS + SZ_4K - 1, .flags = IORESOURCE_MEM, }; static struct platform_device nand_device = { .name = "gen_nand", .id = 0, .dev = { .platform_data = &nand_data, }, .num_resources = 1, .resource = &nand_resource, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .dev = { .platform_data = &smc91x_info, }, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; static struct resource kp_resources[] = { [0] = { .start = INT_7XX_MPUIO_KEYPAD, .end = INT_7XX_MPUIO_KEYPAD, .flags = IORESOURCE_IRQ, }, }; static const struct matrix_keymap_data fsample_keymap_data = { .keymap = fsample_keymap, .keymap_size = ARRAY_SIZE(fsample_keymap), }; static struct omap_kp_platform_data kp_data = { .rows = 8, .cols = 8, .keymap_data = &fsample_keymap_data, .delay = 4, }; static struct platform_device kp_device = { .name = "omap-keypad", .id = -1, .dev = { .platform_data = &kp_data, }, .num_resources = ARRAY_SIZE(kp_resources), .resource = kp_resources, }; static struct platform_device *devices[] __initdata = { &nor_device, &nand_device, &smc91x_device, &kp_device, }; static struct omap_lcd_config 
fsample_lcd_config = { .ctrl_name = "internal", }; static void __init omap_fsample_init(void) { /* Early, board-dependent init */ /* * Hold GSM Reset until needed */ omap_writew(omap_readw(OMAP7XX_DSP_M_CTL) & ~1, OMAP7XX_DSP_M_CTL); /* * UARTs -> done automagically by 8250 driver */ /* * CSx timings, GPIO Mux ... setup */ /* Flash: CS0 timings setup */ omap_writel(0x0000fff3, OMAP7XX_FLASH_CFG_0); omap_writel(0x00000088, OMAP7XX_FLASH_ACFG_0); /* * Ethernet support through the debug board * CS1 timings setup */ omap_writel(0x0000fff3, OMAP7XX_FLASH_CFG_1); omap_writel(0x00000000, OMAP7XX_FLASH_ACFG_1); /* * Configure MPU_EXT_NIRQ IO in IO_CONF9 register, * It is used as the Ethernet controller interrupt */ omap_writel(omap_readl(OMAP7XX_IO_CONF_9) & 0x1FFFFFFF, OMAP7XX_IO_CONF_9); fsample_init_smc91x(); BUG_ON(gpio_request(FSAMPLE_NAND_RB_GPIO_PIN, "NAND ready") < 0); gpio_direction_input(FSAMPLE_NAND_RB_GPIO_PIN); omap_cfg_reg(L3_1610_FLASH_CS2B_OE); omap_cfg_reg(M8_1610_FLASH_CS2B_WE); /* Mux pins for keypad */ omap_cfg_reg(E2_7XX_KBR0); omap_cfg_reg(J7_7XX_KBR1); omap_cfg_reg(E1_7XX_KBR2); omap_cfg_reg(F3_7XX_KBR3); omap_cfg_reg(D2_7XX_KBR4); omap_cfg_reg(C2_7XX_KBC0); omap_cfg_reg(D3_7XX_KBC1); omap_cfg_reg(E4_7XX_KBC2); omap_cfg_reg(F4_7XX_KBC3); omap_cfg_reg(E3_7XX_KBC4); platform_add_devices(devices, ARRAY_SIZE(devices)); omap_serial_init(); omap_register_i2c_bus(1, 100, NULL, 0); omapfb_set_lcd_config(&fsample_lcd_config); } /* Only FPGA needs to be mapped here. 
All others are done with ioremap */ static struct map_desc omap_fsample_io_desc[] __initdata = { { .virtual = H2P2_DBG_FPGA_BASE, .pfn = __phys_to_pfn(H2P2_DBG_FPGA_START), .length = H2P2_DBG_FPGA_SIZE, .type = MT_DEVICE }, { .virtual = FSAMPLE_CPLD_BASE, .pfn = __phys_to_pfn(FSAMPLE_CPLD_START), .length = FSAMPLE_CPLD_SIZE, .type = MT_DEVICE } }; static void __init omap_fsample_map_io(void) { omap15xx_map_io(); iotable_init(omap_fsample_io_desc, ARRAY_SIZE(omap_fsample_io_desc)); } MACHINE_START(OMAP_FSAMPLE, "OMAP730 F-Sample") /* Maintainer: Brian Swetland <swetland@google.com> */ .atag_offset = 0x100, .map_io = omap_fsample_map_io, .init_early = omap1_init_early, .init_irq = omap1_init_irq, .init_machine = omap_fsample_init, .init_late = omap1_init_late, .init_time = omap1_timer_init, .restart = omap1_restart, MACHINE_END
gpl-2.0
laufersteppenwolf/android_kernel_htc_memul
arch/arm/mach-s5p64x0/mach-smdk6450.c
4825
7275
/* linux/arch/arm/mach-s5p64x0/mach-smdk6450.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/pwm_backlight.h> #include <linux/fb.h> #include <linux/mmc/host.h> #include <video/platform_lcd.h> #include <asm/hardware/vic.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <mach/map.h> #include <mach/regs-clock.h> #include <mach/i2c.h> #include <mach/regs-gpio.h> #include <plat/regs-serial.h> #include <plat/gpio-cfg.h> #include <plat/clock.h> #include <plat/devs.h> #include <plat/cpu.h> #include <plat/iic.h> #include <plat/pll.h> #include <plat/adc.h> #include <plat/ts.h> #include <plat/s5p-time.h> #include <plat/backlight.h> #include <plat/fb.h> #include <plat/regs-fb.h> #include <plat/sdhci.h> #include "common.h" #define SMDK6450_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \ S3C2410_UCON_RXILEVEL | \ S3C2410_UCON_TXIRQMODE | \ S3C2410_UCON_RXIRQMODE | \ S3C2410_UCON_RXFIFO_TOI | \ S3C2443_UCON_RXERR_IRQEN) #define SMDK6450_ULCON_DEFAULT S3C2410_LCON_CS8 #define SMDK6450_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \ S3C2440_UFCON_TXTRIG16 | \ S3C2410_UFCON_RXTRIG8) static struct s3c2410_uartcfg smdk6450_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, [1] = { .hwport = 1, .flags = 0, .ucon = 
SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, [2] = { .hwport = 2, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, [3] = { .hwport = 3, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, #if CONFIG_SERIAL_SAMSUNG_UARTS > 4 [4] = { .hwport = 4, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, #endif #if CONFIG_SERIAL_SAMSUNG_UARTS > 5 [5] = { .hwport = 5, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, #endif }; /* Frame Buffer */ static struct s3c_fb_pd_win smdk6450_fb_win0 = { .win_mode = { .left_margin = 8, .right_margin = 13, .upper_margin = 7, .lower_margin = 5, .hsync_len = 3, .vsync_len = 1, .xres = 800, .yres = 480, }, .max_bpp = 32, .default_bpp = 24, }; static struct s3c_fb_platdata smdk6450_lcd_pdata __initdata = { .win[0] = &smdk6450_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, .setup_gpio = s5p64x0_fb_gpio_setup_24bpp, }; /* LCD power controller */ static void smdk6450_lte480_reset_power(struct plat_lcd_data *pd, unsigned int power) { int err; if (power) { err = gpio_request(S5P6450_GPN(5), "GPN"); if (err) { printk(KERN_ERR "failed to request GPN for lcd reset\n"); return; } gpio_direction_output(S5P6450_GPN(5), 1); gpio_set_value(S5P6450_GPN(5), 0); gpio_set_value(S5P6450_GPN(5), 1); gpio_free(S5P6450_GPN(5)); } } static struct plat_lcd_data smdk6450_lcd_power_data = { .set_power = smdk6450_lte480_reset_power, }; static struct platform_device smdk6450_lcd_lte480wv = { .name = "platform-lcd", .dev.parent = &s3c_device_fb.dev, .dev.platform_data = &smdk6450_lcd_power_data, }; static struct platform_device *smdk6450_devices[] __initdata = { &s3c_device_adc, &s3c_device_rtc, &s3c_device_i2c0, 
&s3c_device_i2c1, &s3c_device_ts, &s3c_device_wdt, &samsung_asoc_dma, &s5p6450_device_iis0, &s3c_device_fb, &smdk6450_lcd_lte480wv, &s3c_device_hsmmc0, &s3c_device_hsmmc1, &s3c_device_hsmmc2, /* s5p6450_device_spi0 will be added */ }; static struct s3c_sdhci_platdata smdk6450_hsmmc0_pdata __initdata = { .cd_type = S3C_SDHCI_CD_NONE, }; static struct s3c_sdhci_platdata smdk6450_hsmmc1_pdata __initdata = { .cd_type = S3C_SDHCI_CD_NONE, #if defined(CONFIG_S5P64X0_SD_CH1_8BIT) .max_width = 8, .host_caps = MMC_CAP_8_BIT_DATA, #endif }; static struct s3c_sdhci_platdata smdk6450_hsmmc2_pdata __initdata = { .cd_type = S3C_SDHCI_CD_NONE, }; static struct s3c2410_platform_i2c s5p6450_i2c0_data __initdata = { .flags = 0, .slave_addr = 0x10, .frequency = 100*1000, .sda_delay = 100, .cfg_gpio = s5p6450_i2c0_cfg_gpio, }; static struct s3c2410_platform_i2c s5p6450_i2c1_data __initdata = { .flags = 0, .bus_num = 1, .slave_addr = 0x10, .frequency = 100*1000, .sda_delay = 100, .cfg_gpio = s5p6450_i2c1_cfg_gpio, }; static struct i2c_board_info smdk6450_i2c_devs0[] __initdata = { { I2C_BOARD_INFO("wm8580", 0x1b), }, { I2C_BOARD_INFO("24c08", 0x50), }, /* Samsung KS24C080C EEPROM */ }; static struct i2c_board_info smdk6450_i2c_devs1[] __initdata = { { I2C_BOARD_INFO("24c128", 0x57), },/* Samsung S524AD0XD1 EEPROM */ }; /* LCD Backlight data */ static struct samsung_bl_gpio_info smdk6450_bl_gpio_info = { .no = S5P6450_GPF(15), .func = S3C_GPIO_SFN(2), }; static struct platform_pwm_backlight_data smdk6450_bl_data = { .pwm_id = 1, }; static void __init smdk6450_map_io(void) { s5p64x0_init_io(NULL, 0); s3c24xx_init_clocks(19200000); s3c24xx_init_uarts(smdk6450_uartcfgs, ARRAY_SIZE(smdk6450_uartcfgs)); s5p_set_timer_source(S5P_PWM3, S5P_PWM4); } static void s5p6450_set_lcd_interface(void) { unsigned int cfg; /* select TFT LCD type (RGB I/F) */ cfg = __raw_readl(S5P64X0_SPCON0); cfg &= ~S5P64X0_SPCON0_LCD_SEL_MASK; cfg |= S5P64X0_SPCON0_LCD_SEL_RGB; __raw_writel(cfg, S5P64X0_SPCON0); } 
static void __init smdk6450_machine_init(void) { s3c24xx_ts_set_platdata(NULL); s3c_i2c0_set_platdata(&s5p6450_i2c0_data); s3c_i2c1_set_platdata(&s5p6450_i2c1_data); i2c_register_board_info(0, smdk6450_i2c_devs0, ARRAY_SIZE(smdk6450_i2c_devs0)); i2c_register_board_info(1, smdk6450_i2c_devs1, ARRAY_SIZE(smdk6450_i2c_devs1)); samsung_bl_set(&smdk6450_bl_gpio_info, &smdk6450_bl_data); s5p6450_set_lcd_interface(); s3c_fb_set_platdata(&smdk6450_lcd_pdata); s3c_sdhci0_set_platdata(&smdk6450_hsmmc0_pdata); s3c_sdhci1_set_platdata(&smdk6450_hsmmc1_pdata); s3c_sdhci2_set_platdata(&smdk6450_hsmmc2_pdata); platform_add_devices(smdk6450_devices, ARRAY_SIZE(smdk6450_devices)); } MACHINE_START(SMDK6450, "SMDK6450") /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */ .atag_offset = 0x100, .init_irq = s5p6450_init_irq, .handle_irq = vic_handle_irq, .map_io = smdk6450_map_io, .init_machine = smdk6450_machine_init, .timer = &s5p_timer, .restart = s5p64x0_restart, MACHINE_END
gpl-2.0
CyanogenMod/android_kernel_samsung_d2
arch/arm/mach-s3c64xx/mach-smartq7.c
4825
3809
/* * linux/arch/arm/mach-s3c64xx/mach-smartq7.c * * Copyright (C) 2010 Maurus Cuelenaere * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/fb.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/init.h> #include <linux/input.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <asm/hardware/vic.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/map.h> #include <mach/regs-gpio.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/fb.h> #include <plat/gpio-cfg.h> #include <plat/regs-fb-v4.h> #include "common.h" #include "mach-smartq.h" static struct gpio_led smartq7_leds[] = { { .name = "smartq7:red", .active_low = 1, .gpio = S3C64XX_GPN(8), }, { .name = "smartq7:green", .active_low = 1, .gpio = S3C64XX_GPN(9), }, }; static struct gpio_led_platform_data smartq7_led_data = { .num_leds = ARRAY_SIZE(smartq7_leds), .leds = smartq7_leds, }; static struct platform_device smartq7_leds_device = { .name = "leds-gpio", .id = -1, .dev.platform_data = &smartq7_led_data, }; /* Labels according to the SmartQ manual */ static struct gpio_keys_button smartq7_buttons[] = { { .gpio = S3C64XX_GPL(14), .code = KEY_POWER, .desc = "Power", .active_low = 1, .debounce_interval = 5, .type = EV_KEY, }, { .gpio = S3C64XX_GPN(2), .code = KEY_FN, .desc = "Function", .active_low = 1, .debounce_interval = 5, .type = EV_KEY, }, { .gpio = S3C64XX_GPN(3), .code = KEY_KPMINUS, .desc = "Minus", .active_low = 1, .debounce_interval = 5, .type = EV_KEY, }, { .gpio = S3C64XX_GPN(4), .code = KEY_KPPLUS, .desc = "Plus", .active_low = 1, .debounce_interval = 5, .type = EV_KEY, }, { .gpio = S3C64XX_GPN(12), .code = KEY_ENTER, .desc = "Enter", .active_low = 1, .debounce_interval = 5, .type = EV_KEY, }, { .gpio = S3C64XX_GPN(15), .code = KEY_ESC, .desc = "Cancel", .active_low = 1, 
.debounce_interval = 5, .type = EV_KEY, }, }; static struct gpio_keys_platform_data smartq7_buttons_data = { .buttons = smartq7_buttons, .nbuttons = ARRAY_SIZE(smartq7_buttons), }; static struct platform_device smartq7_buttons_device = { .name = "gpio-keys", .id = 0, .num_resources = 0, .dev = { .platform_data = &smartq7_buttons_data, } }; static struct s3c_fb_pd_win smartq7_fb_win0 = { .win_mode = { .left_margin = 3, .right_margin = 5, .upper_margin = 1, .lower_margin = 20, .hsync_len = 10, .vsync_len = 3, .xres = 800, .yres = 480, .refresh = 80, }, .max_bpp = 32, .default_bpp = 16, }; static struct s3c_fb_platdata smartq7_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, .win[0] = &smartq7_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC | VIDCON1_INV_VCLK, }; static struct platform_device *smartq7_devices[] __initdata = { &smartq7_leds_device, &smartq7_buttons_device, }; static void __init smartq7_machine_init(void) { s3c_fb_set_platdata(&smartq7_lcd_pdata); smartq_machine_init(); platform_add_devices(smartq7_devices, ARRAY_SIZE(smartq7_devices)); } MACHINE_START(SMARTQ7, "SmartQ 7") /* Maintainer: Maurus Cuelenaere <mcuelenaere AT gmail DOT com> */ .atag_offset = 0x100, .init_irq = s3c6410_init_irq, .handle_irq = vic_handle_irq, .map_io = smartq_map_io, .init_machine = smartq7_machine_init, .timer = &s3c24xx_timer, .restart = s3c64xx_restart, MACHINE_END
gpl-2.0
chacox/chaco_9195_cm-13.0
sound/pci/oxygen/xonar_wm87x6.c
5081
39077
/* * card driver for models with WM8776/WM8766 DACs (Xonar DS/HDAV1.3 Slim) * * Copyright (c) Clemens Ladisch <clemens@ladisch.de> * * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this driver; if not, see <http://www.gnu.org/licenses/>. */ /* * Xonar DS * -------- * * CMI8788: * * SPI 0 -> WM8766 (surround, center/LFE, back) * SPI 1 -> WM8776 (front, input) * * GPIO 4 <- headphone detect, 0 = plugged * GPIO 6 -> route input jack to mic-in (0) or line-in (1) * GPIO 7 -> enable output to front L/R speaker channels * GPIO 8 -> enable output to other speaker channels and front panel headphone * * WM8776: * * input 1 <- line * input 2 <- mic * input 3 <- front mic * input 4 <- aux */ /* * Xonar HDAV1.3 Slim * ------------------ * * CMI8788: * * I²C <-> WM8776 (addr 0011010) * * GPIO 0 -> disable HDMI output * GPIO 1 -> enable HP output * GPIO 6 -> firmware EEPROM I²C clock * GPIO 7 <-> firmware EEPROM I²C data * * UART <-> HDMI controller * * WM8776: * * input 1 <- mic * input 2 <- aux */ #include <linux/pci.h> #include <linux/delay.h> #include <sound/control.h> #include <sound/core.h> #include <sound/info.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/tlv.h> #include "xonar.h" #include "wm8776.h" #include "wm8766.h" #define GPIO_DS_HP_DETECT 0x0010 #define GPIO_DS_INPUT_ROUTE 0x0040 #define GPIO_DS_OUTPUT_FRONTLR 0x0080 #define GPIO_DS_OUTPUT_ENABLE 0x0100 #define GPIO_SLIM_HDMI_DISABLE 0x0001 #define GPIO_SLIM_OUTPUT_ENABLE 0x0002 #define GPIO_SLIM_FIRMWARE_CLK 0x0040 #define GPIO_SLIM_FIRMWARE_DATA 0x0080 
#define I2C_DEVICE_WM8776 0x34 /* 001101, 0, /W=0 */ #define LC_CONTROL_LIMITER 0x40000000 #define LC_CONTROL_ALC 0x20000000 struct xonar_wm87x6 { struct xonar_generic generic; u16 wm8776_regs[0x17]; u16 wm8766_regs[0x10]; struct snd_kcontrol *line_adcmux_control; struct snd_kcontrol *mic_adcmux_control; struct snd_kcontrol *lc_controls[13]; struct snd_jack *hp_jack; struct xonar_hdmi hdmi; }; static void wm8776_write_spi(struct oxygen *chip, unsigned int reg, unsigned int value) { oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER | OXYGEN_SPI_DATA_LENGTH_2 | OXYGEN_SPI_CLOCK_160 | (1 << OXYGEN_SPI_CODEC_SHIFT) | OXYGEN_SPI_CEN_LATCH_CLOCK_LO, (reg << 9) | value); } static void wm8776_write_i2c(struct oxygen *chip, unsigned int reg, unsigned int value) { oxygen_write_i2c(chip, I2C_DEVICE_WM8776, (reg << 1) | (value >> 8), value); } static void wm8776_write(struct oxygen *chip, unsigned int reg, unsigned int value) { struct xonar_wm87x6 *data = chip->model_data; if ((chip->model.function_flags & OXYGEN_FUNCTION_2WIRE_SPI_MASK) == OXYGEN_FUNCTION_SPI) wm8776_write_spi(chip, reg, value); else wm8776_write_i2c(chip, reg, value); if (reg < ARRAY_SIZE(data->wm8776_regs)) { if (reg >= WM8776_HPLVOL && reg <= WM8776_DACMASTER) value &= ~WM8776_UPDATE; data->wm8776_regs[reg] = value; } } static void wm8776_write_cached(struct oxygen *chip, unsigned int reg, unsigned int value) { struct xonar_wm87x6 *data = chip->model_data; if (reg >= ARRAY_SIZE(data->wm8776_regs) || value != data->wm8776_regs[reg]) wm8776_write(chip, reg, value); } static void wm8766_write(struct oxygen *chip, unsigned int reg, unsigned int value) { struct xonar_wm87x6 *data = chip->model_data; oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER | OXYGEN_SPI_DATA_LENGTH_2 | OXYGEN_SPI_CLOCK_160 | (0 << OXYGEN_SPI_CODEC_SHIFT) | OXYGEN_SPI_CEN_LATCH_CLOCK_LO, (reg << 9) | value); if (reg < ARRAY_SIZE(data->wm8766_regs)) { if ((reg >= WM8766_LDA1 && reg <= WM8766_RDA1) || (reg >= WM8766_LDA2 && reg <= WM8766_MASTDA)) value &= 
~WM8766_UPDATE; data->wm8766_regs[reg] = value; } } static void wm8766_write_cached(struct oxygen *chip, unsigned int reg, unsigned int value) { struct xonar_wm87x6 *data = chip->model_data; if (reg >= ARRAY_SIZE(data->wm8766_regs) || value != data->wm8766_regs[reg]) wm8766_write(chip, reg, value); } static void wm8776_registers_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; wm8776_write(chip, WM8776_RESET, 0); wm8776_write(chip, WM8776_PHASESWAP, WM8776_PH_MASK); wm8776_write(chip, WM8776_DACCTRL1, WM8776_DZCEN | WM8776_PL_LEFT_LEFT | WM8776_PL_RIGHT_RIGHT); wm8776_write(chip, WM8776_DACMUTE, chip->dac_mute ? WM8776_DMUTE : 0); wm8776_write(chip, WM8776_DACIFCTRL, WM8776_DACFMT_LJUST | WM8776_DACWL_24); wm8776_write(chip, WM8776_ADCIFCTRL, data->wm8776_regs[WM8776_ADCIFCTRL]); wm8776_write(chip, WM8776_MSTRCTRL, data->wm8776_regs[WM8776_MSTRCTRL]); wm8776_write(chip, WM8776_PWRDOWN, data->wm8776_regs[WM8776_PWRDOWN]); wm8776_write(chip, WM8776_HPLVOL, data->wm8776_regs[WM8776_HPLVOL]); wm8776_write(chip, WM8776_HPRVOL, data->wm8776_regs[WM8776_HPRVOL] | WM8776_UPDATE); wm8776_write(chip, WM8776_ADCLVOL, data->wm8776_regs[WM8776_ADCLVOL]); wm8776_write(chip, WM8776_ADCRVOL, data->wm8776_regs[WM8776_ADCRVOL]); wm8776_write(chip, WM8776_ADCMUX, data->wm8776_regs[WM8776_ADCMUX]); wm8776_write(chip, WM8776_DACLVOL, chip->dac_volume[0]); wm8776_write(chip, WM8776_DACRVOL, chip->dac_volume[1] | WM8776_UPDATE); } static void wm8766_registers_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; wm8766_write(chip, WM8766_RESET, 0); wm8766_write(chip, WM8766_DAC_CTRL, data->wm8766_regs[WM8766_DAC_CTRL]); wm8766_write(chip, WM8766_INT_CTRL, WM8766_FMT_LJUST | WM8766_IWL_24); wm8766_write(chip, WM8766_DAC_CTRL2, WM8766_ZCD | (chip->dac_mute ? 
WM8766_DMUTE_MASK : 0)); wm8766_write(chip, WM8766_LDA1, chip->dac_volume[2]); wm8766_write(chip, WM8766_RDA1, chip->dac_volume[3]); wm8766_write(chip, WM8766_LDA2, chip->dac_volume[4]); wm8766_write(chip, WM8766_RDA2, chip->dac_volume[5]); wm8766_write(chip, WM8766_LDA3, chip->dac_volume[6]); wm8766_write(chip, WM8766_RDA3, chip->dac_volume[7] | WM8766_UPDATE); } static void wm8776_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; data->wm8776_regs[WM8776_HPLVOL] = (0x79 - 60) | WM8776_HPZCEN; data->wm8776_regs[WM8776_HPRVOL] = (0x79 - 60) | WM8776_HPZCEN; data->wm8776_regs[WM8776_ADCIFCTRL] = WM8776_ADCFMT_LJUST | WM8776_ADCWL_24 | WM8776_ADCMCLK; data->wm8776_regs[WM8776_MSTRCTRL] = WM8776_ADCRATE_256 | WM8776_DACRATE_256; data->wm8776_regs[WM8776_PWRDOWN] = WM8776_HPPD; data->wm8776_regs[WM8776_ADCLVOL] = 0xa5 | WM8776_ZCA; data->wm8776_regs[WM8776_ADCRVOL] = 0xa5 | WM8776_ZCA; data->wm8776_regs[WM8776_ADCMUX] = 0x001; wm8776_registers_init(chip); } static void wm8766_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; data->wm8766_regs[WM8766_DAC_CTRL] = WM8766_PL_LEFT_LEFT | WM8766_PL_RIGHT_RIGHT; wm8766_registers_init(chip); } static void xonar_ds_handle_hp_jack(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; bool hp_plugged; unsigned int reg; mutex_lock(&chip->mutex); hp_plugged = !(oxygen_read16(chip, OXYGEN_GPIO_DATA) & GPIO_DS_HP_DETECT); oxygen_write16_masked(chip, OXYGEN_GPIO_DATA, hp_plugged ? 0 : GPIO_DS_OUTPUT_FRONTLR, GPIO_DS_OUTPUT_FRONTLR); reg = data->wm8766_regs[WM8766_DAC_CTRL] & ~WM8766_MUTEALL; if (hp_plugged) reg |= WM8766_MUTEALL; wm8766_write_cached(chip, WM8766_DAC_CTRL, reg); snd_jack_report(data->hp_jack, hp_plugged ? 
SND_JACK_HEADPHONE : 0); mutex_unlock(&chip->mutex); } static void xonar_ds_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; data->generic.anti_pop_delay = 300; data->generic.output_enable_bit = GPIO_DS_OUTPUT_ENABLE; wm8776_init(chip); wm8766_init(chip); oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_DS_INPUT_ROUTE | GPIO_DS_OUTPUT_FRONTLR); oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_DS_HP_DETECT); oxygen_set_bits16(chip, OXYGEN_GPIO_DATA, GPIO_DS_INPUT_ROUTE); oxygen_set_bits16(chip, OXYGEN_GPIO_INTERRUPT_MASK, GPIO_DS_HP_DETECT); chip->interrupt_mask |= OXYGEN_INT_GPIO; xonar_enable_output(chip); snd_jack_new(chip->card, "Headphone", SND_JACK_HEADPHONE, &data->hp_jack); xonar_ds_handle_hp_jack(chip); snd_component_add(chip->card, "WM8776"); snd_component_add(chip->card, "WM8766"); } static void xonar_hdav_slim_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; data->generic.anti_pop_delay = 300; data->generic.output_enable_bit = GPIO_SLIM_OUTPUT_ENABLE; wm8776_init(chip); oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_SLIM_HDMI_DISABLE | GPIO_SLIM_FIRMWARE_CLK | GPIO_SLIM_FIRMWARE_DATA); xonar_hdmi_init(chip, &data->hdmi); xonar_enable_output(chip); snd_component_add(chip->card, "WM8776"); } static void xonar_ds_cleanup(struct oxygen *chip) { xonar_disable_output(chip); wm8776_write(chip, WM8776_RESET, 0); } static void xonar_hdav_slim_cleanup(struct oxygen *chip) { xonar_hdmi_cleanup(chip); xonar_disable_output(chip); wm8776_write(chip, WM8776_RESET, 0); msleep(2); } static void xonar_ds_suspend(struct oxygen *chip) { xonar_ds_cleanup(chip); } static void xonar_hdav_slim_suspend(struct oxygen *chip) { xonar_hdav_slim_cleanup(chip); } static void xonar_ds_resume(struct oxygen *chip) { wm8776_registers_init(chip); wm8766_registers_init(chip); xonar_enable_output(chip); xonar_ds_handle_hp_jack(chip); } static void xonar_hdav_slim_resume(struct oxygen *chip) { struct xonar_wm87x6 *data = 
chip->model_data; wm8776_registers_init(chip); xonar_hdmi_resume(chip, &data->hdmi); xonar_enable_output(chip); } static void wm8776_adc_hardware_filter(unsigned int channel, struct snd_pcm_hardware *hardware) { if (channel == PCM_A) { hardware->rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000; hardware->rate_max = 96000; } } static void xonar_hdav_slim_hardware_filter(unsigned int channel, struct snd_pcm_hardware *hardware) { wm8776_adc_hardware_filter(channel, hardware); xonar_hdmi_pcm_hardware_filter(channel, hardware); } static void set_wm87x6_dac_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { } static void set_wm8776_adc_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { u16 reg; reg = WM8776_ADCRATE_256 | WM8776_DACRATE_256; if (params_rate(params) > 48000) reg |= WM8776_ADCOSR; wm8776_write_cached(chip, WM8776_MSTRCTRL, reg); } static void set_hdav_slim_dac_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { struct xonar_wm87x6 *data = chip->model_data; xonar_set_hdmi_params(chip, &data->hdmi, params); } static void update_wm8776_volume(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; u8 to_change; if (chip->dac_volume[0] == chip->dac_volume[1]) { if (chip->dac_volume[0] != data->wm8776_regs[WM8776_DACLVOL] || chip->dac_volume[1] != data->wm8776_regs[WM8776_DACRVOL]) { wm8776_write(chip, WM8776_DACMASTER, chip->dac_volume[0] | WM8776_UPDATE); data->wm8776_regs[WM8776_DACLVOL] = chip->dac_volume[0]; data->wm8776_regs[WM8776_DACRVOL] = chip->dac_volume[0]; } } else { to_change = (chip->dac_volume[0] != data->wm8776_regs[WM8776_DACLVOL]) << 0; to_change |= (chip->dac_volume[1] != data->wm8776_regs[WM8776_DACLVOL]) << 1; if (to_change & 1) wm8776_write(chip, WM8776_DACLVOL, chip->dac_volume[0] | ((to_change & 2) ? 
0 : WM8776_UPDATE)); if (to_change & 2) wm8776_write(chip, WM8776_DACRVOL, chip->dac_volume[1] | WM8776_UPDATE); } } static void update_wm87x6_volume(struct oxygen *chip) { static const u8 wm8766_regs[6] = { WM8766_LDA1, WM8766_RDA1, WM8766_LDA2, WM8766_RDA2, WM8766_LDA3, WM8766_RDA3, }; struct xonar_wm87x6 *data = chip->model_data; unsigned int i; u8 to_change; update_wm8776_volume(chip); if (chip->dac_volume[2] == chip->dac_volume[3] && chip->dac_volume[2] == chip->dac_volume[4] && chip->dac_volume[2] == chip->dac_volume[5] && chip->dac_volume[2] == chip->dac_volume[6] && chip->dac_volume[2] == chip->dac_volume[7]) { to_change = 0; for (i = 0; i < 6; ++i) if (chip->dac_volume[2] != data->wm8766_regs[wm8766_regs[i]]) to_change = 1; if (to_change) { wm8766_write(chip, WM8766_MASTDA, chip->dac_volume[2] | WM8766_UPDATE); for (i = 0; i < 6; ++i) data->wm8766_regs[wm8766_regs[i]] = chip->dac_volume[2]; } } else { to_change = 0; for (i = 0; i < 6; ++i) to_change |= (chip->dac_volume[2 + i] != data->wm8766_regs[wm8766_regs[i]]) << i; for (i = 0; i < 6; ++i) if (to_change & (1 << i)) wm8766_write(chip, wm8766_regs[i], chip->dac_volume[2 + i] | ((to_change & (0x3e << i)) ? 0 : WM8766_UPDATE)); } } static void update_wm8776_mute(struct oxygen *chip) { wm8776_write_cached(chip, WM8776_DACMUTE, chip->dac_mute ? WM8776_DMUTE : 0); } static void update_wm87x6_mute(struct oxygen *chip) { update_wm8776_mute(chip); wm8766_write_cached(chip, WM8766_DAC_CTRL2, WM8766_ZCD | (chip->dac_mute ? WM8766_DMUTE_MASK : 0)); } static void update_wm8766_center_lfe_mix(struct oxygen *chip, bool mixed) { struct xonar_wm87x6 *data = chip->model_data; unsigned int reg; /* * The WM8766 can mix left and right channels, but this setting * applies to all three stereo pairs. 
*/ reg = data->wm8766_regs[WM8766_DAC_CTRL] & ~(WM8766_PL_LEFT_MASK | WM8766_PL_RIGHT_MASK); if (mixed) reg |= WM8766_PL_LEFT_LRMIX | WM8766_PL_RIGHT_LRMIX; else reg |= WM8766_PL_LEFT_LEFT | WM8766_PL_RIGHT_RIGHT; wm8766_write_cached(chip, WM8766_DAC_CTRL, reg); } static void xonar_ds_gpio_changed(struct oxygen *chip) { xonar_ds_handle_hp_jack(chip); } static int wm8776_bit_switch_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; u16 bit = ctl->private_value & 0xffff; unsigned int reg_index = (ctl->private_value >> 16) & 0xff; bool invert = (ctl->private_value >> 24) & 1; value->value.integer.value[0] = ((data->wm8776_regs[reg_index] & bit) != 0) ^ invert; return 0; } static int wm8776_bit_switch_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; u16 bit = ctl->private_value & 0xffff; u16 reg_value; unsigned int reg_index = (ctl->private_value >> 16) & 0xff; bool invert = (ctl->private_value >> 24) & 1; int changed; mutex_lock(&chip->mutex); reg_value = data->wm8776_regs[reg_index] & ~bit; if (value->value.integer.value[0] ^ invert) reg_value |= bit; changed = reg_value != data->wm8776_regs[reg_index]; if (changed) wm8776_write(chip, reg_index, reg_value); mutex_unlock(&chip->mutex); return changed; } static int wm8776_field_enum_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const hld[16] = { "0 ms", "2.67 ms", "5.33 ms", "10.6 ms", "21.3 ms", "42.7 ms", "85.3 ms", "171 ms", "341 ms", "683 ms", "1.37 s", "2.73 s", "5.46 s", "10.9 s", "21.8 s", "43.7 s", }; static const char *const atk_lim[11] = { "0.25 ms", "0.5 ms", "1 ms", "2 ms", "4 ms", "8 ms", "16 ms", "32 ms", "64 ms", "128 ms", "256 ms", }; static const char *const atk_alc[11] = { "8.40 ms", "16.8 ms", "33.6 ms", "67.2 ms", "134 ms", "269 ms", "538 ms", "1.08 s", "2.15 
s", "4.3 s", "8.6 s", }; static const char *const dcy_lim[11] = { "1.2 ms", "2.4 ms", "4.8 ms", "9.6 ms", "19.2 ms", "38.4 ms", "76.8 ms", "154 ms", "307 ms", "614 ms", "1.23 s", }; static const char *const dcy_alc[11] = { "33.5 ms", "67.0 ms", "134 ms", "268 ms", "536 ms", "1.07 s", "2.14 s", "4.29 s", "8.58 s", "17.2 s", "34.3 s", }; static const char *const tranwin[8] = { "0 us", "62.5 us", "125 us", "250 us", "500 us", "1 ms", "2 ms", "4 ms", }; u8 max; const char *const *names; max = (ctl->private_value >> 12) & 0xf; switch ((ctl->private_value >> 24) & 0x1f) { case WM8776_ALCCTRL2: names = hld; break; case WM8776_ALCCTRL3: if (((ctl->private_value >> 20) & 0xf) == 0) { if (ctl->private_value & LC_CONTROL_LIMITER) names = atk_lim; else names = atk_alc; } else { if (ctl->private_value & LC_CONTROL_LIMITER) names = dcy_lim; else names = dcy_alc; } break; case WM8776_LIMITER: names = tranwin; break; default: return -ENXIO; } return snd_ctl_enum_info(info, 1, max + 1, names); } static int wm8776_field_volume_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 1; info->value.integer.min = (ctl->private_value >> 8) & 0xf; info->value.integer.max = (ctl->private_value >> 12) & 0xf; return 0; } static void wm8776_field_set_from_ctl(struct snd_kcontrol *ctl) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; unsigned int value, reg_index, mode; u8 min, max, shift; u16 mask, reg_value; bool invert; if ((data->wm8776_regs[WM8776_ALCCTRL1] & WM8776_LCSEL_MASK) == WM8776_LCSEL_LIMITER) mode = LC_CONTROL_LIMITER; else mode = LC_CONTROL_ALC; if (!(ctl->private_value & mode)) return; value = ctl->private_value & 0xf; min = (ctl->private_value >> 8) & 0xf; max = (ctl->private_value >> 12) & 0xf; mask = (ctl->private_value >> 16) & 0xf; shift = (ctl->private_value >> 20) & 0xf; reg_index = (ctl->private_value >> 24) & 0x1f; invert = (ctl->private_value >> 29) & 0x1; if 
(invert) value = max - (value - min); reg_value = data->wm8776_regs[reg_index]; reg_value &= ~(mask << shift); reg_value |= value << shift; wm8776_write_cached(chip, reg_index, reg_value); } static int wm8776_field_set(struct snd_kcontrol *ctl, unsigned int value) { struct oxygen *chip = ctl->private_data; u8 min, max; int changed; min = (ctl->private_value >> 8) & 0xf; max = (ctl->private_value >> 12) & 0xf; if (value < min || value > max) return -EINVAL; mutex_lock(&chip->mutex); changed = value != (ctl->private_value & 0xf); if (changed) { ctl->private_value = (ctl->private_value & ~0xf) | value; wm8776_field_set_from_ctl(ctl); } mutex_unlock(&chip->mutex); return changed; } static int wm8776_field_enum_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { value->value.enumerated.item[0] = ctl->private_value & 0xf; return 0; } static int wm8776_field_volume_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { value->value.integer.value[0] = ctl->private_value & 0xf; return 0; } static int wm8776_field_enum_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { return wm8776_field_set(ctl, value->value.enumerated.item[0]); } static int wm8776_field_volume_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { return wm8776_field_set(ctl, value->value.integer.value[0]); } static int wm8776_hp_vol_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 2; info->value.integer.min = 0x79 - 60; info->value.integer.max = 0x7f; return 0; } static int wm8776_hp_vol_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; mutex_lock(&chip->mutex); value->value.integer.value[0] = data->wm8776_regs[WM8776_HPLVOL] & WM8776_HPATT_MASK; value->value.integer.value[1] = data->wm8776_regs[WM8776_HPRVOL] & WM8776_HPATT_MASK; mutex_unlock(&chip->mutex); return 0; } static int 
wm8776_hp_vol_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; u8 to_update; mutex_lock(&chip->mutex); to_update = (value->value.integer.value[0] != (data->wm8776_regs[WM8776_HPLVOL] & WM8776_HPATT_MASK)) << 0; to_update |= (value->value.integer.value[1] != (data->wm8776_regs[WM8776_HPRVOL] & WM8776_HPATT_MASK)) << 1; if (value->value.integer.value[0] == value->value.integer.value[1]) { if (to_update) { wm8776_write(chip, WM8776_HPMASTER, value->value.integer.value[0] | WM8776_HPZCEN | WM8776_UPDATE); data->wm8776_regs[WM8776_HPLVOL] = value->value.integer.value[0] | WM8776_HPZCEN; data->wm8776_regs[WM8776_HPRVOL] = value->value.integer.value[0] | WM8776_HPZCEN; } } else { if (to_update & 1) wm8776_write(chip, WM8776_HPLVOL, value->value.integer.value[0] | WM8776_HPZCEN | ((to_update & 2) ? 0 : WM8776_UPDATE)); if (to_update & 2) wm8776_write(chip, WM8776_HPRVOL, value->value.integer.value[1] | WM8776_HPZCEN | WM8776_UPDATE); } mutex_unlock(&chip->mutex); return to_update != 0; } static int wm8776_input_mux_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; unsigned int mux_bit = ctl->private_value; value->value.integer.value[0] = !!(data->wm8776_regs[WM8776_ADCMUX] & mux_bit); return 0; } static int wm8776_input_mux_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; struct snd_kcontrol *other_ctl; unsigned int mux_bit = ctl->private_value; u16 reg; int changed; mutex_lock(&chip->mutex); reg = data->wm8776_regs[WM8776_ADCMUX]; if (value->value.integer.value[0]) { reg |= mux_bit; /* line-in and mic-in are exclusive */ mux_bit ^= 3; if (reg & mux_bit) { reg &= ~mux_bit; if (mux_bit == 1) other_ctl = data->line_adcmux_control; else other_ctl = 
data->mic_adcmux_control; snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &other_ctl->id); } } else reg &= ~mux_bit; changed = reg != data->wm8776_regs[WM8776_ADCMUX]; if (changed) { oxygen_write16_masked(chip, OXYGEN_GPIO_DATA, reg & 1 ? GPIO_DS_INPUT_ROUTE : 0, GPIO_DS_INPUT_ROUTE); wm8776_write(chip, WM8776_ADCMUX, reg); } mutex_unlock(&chip->mutex); return changed; } static int wm8776_input_vol_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 2; info->value.integer.min = 0xa5; info->value.integer.max = 0xff; return 0; } static int wm8776_input_vol_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; mutex_lock(&chip->mutex); value->value.integer.value[0] = data->wm8776_regs[WM8776_ADCLVOL] & WM8776_AGMASK; value->value.integer.value[1] = data->wm8776_regs[WM8776_ADCRVOL] & WM8776_AGMASK; mutex_unlock(&chip->mutex); return 0; } static int wm8776_input_vol_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; int changed = 0; mutex_lock(&chip->mutex); changed = (value->value.integer.value[0] != (data->wm8776_regs[WM8776_ADCLVOL] & WM8776_AGMASK)) || (value->value.integer.value[1] != (data->wm8776_regs[WM8776_ADCRVOL] & WM8776_AGMASK)); wm8776_write_cached(chip, WM8776_ADCLVOL, value->value.integer.value[0] | WM8776_ZCA); wm8776_write_cached(chip, WM8776_ADCRVOL, value->value.integer.value[1] | WM8776_ZCA); mutex_unlock(&chip->mutex); return changed; } static int wm8776_level_control_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[3] = { "None", "Peak Limiter", "Automatic Level Control" }; return snd_ctl_enum_info(info, 1, 3, names); } static int wm8776_level_control_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct 
oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; if (!(data->wm8776_regs[WM8776_ALCCTRL2] & WM8776_LCEN)) value->value.enumerated.item[0] = 0; else if ((data->wm8776_regs[WM8776_ALCCTRL1] & WM8776_LCSEL_MASK) == WM8776_LCSEL_LIMITER) value->value.enumerated.item[0] = 1; else value->value.enumerated.item[0] = 2; return 0; } static void activate_control(struct oxygen *chip, struct snd_kcontrol *ctl, unsigned int mode) { unsigned int access; if (ctl->private_value & mode) access = 0; else access = SNDRV_CTL_ELEM_ACCESS_INACTIVE; if ((ctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_INACTIVE) != access) { ctl->vd[0].access ^= SNDRV_CTL_ELEM_ACCESS_INACTIVE; snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &ctl->id); } } static int wm8776_level_control_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; unsigned int mode = 0, i; u16 ctrl1, ctrl2; int changed; if (value->value.enumerated.item[0] >= 3) return -EINVAL; mutex_lock(&chip->mutex); changed = value->value.enumerated.item[0] != ctl->private_value; if (changed) { ctl->private_value = value->value.enumerated.item[0]; ctrl1 = data->wm8776_regs[WM8776_ALCCTRL1]; ctrl2 = data->wm8776_regs[WM8776_ALCCTRL2]; switch (value->value.enumerated.item[0]) { default: wm8776_write_cached(chip, WM8776_ALCCTRL2, ctrl2 & ~WM8776_LCEN); break; case 1: wm8776_write_cached(chip, WM8776_ALCCTRL1, (ctrl1 & ~WM8776_LCSEL_MASK) | WM8776_LCSEL_LIMITER); wm8776_write_cached(chip, WM8776_ALCCTRL2, ctrl2 | WM8776_LCEN); mode = LC_CONTROL_LIMITER; break; case 2: wm8776_write_cached(chip, WM8776_ALCCTRL1, (ctrl1 & ~WM8776_LCSEL_MASK) | WM8776_LCSEL_ALC_STEREO); wm8776_write_cached(chip, WM8776_ALCCTRL2, ctrl2 | WM8776_LCEN); mode = LC_CONTROL_ALC; break; } for (i = 0; i < ARRAY_SIZE(data->lc_controls); ++i) activate_control(chip, data->lc_controls[i], mode); } mutex_unlock(&chip->mutex); return changed; } static int 
hpf_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[2] = { "None", "High-pass Filter" }; return snd_ctl_enum_info(info, 1, 2, names); } static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; value->value.enumerated.item[0] = !(data->wm8776_regs[WM8776_ADCIFCTRL] & WM8776_ADCHPD); return 0; } static int hpf_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; unsigned int reg; int changed; mutex_lock(&chip->mutex); reg = data->wm8776_regs[WM8776_ADCIFCTRL] & ~WM8776_ADCHPD; if (!value->value.enumerated.item[0]) reg |= WM8776_ADCHPD; changed = reg != data->wm8776_regs[WM8776_ADCIFCTRL]; if (changed) wm8776_write(chip, WM8776_ADCIFCTRL, reg); mutex_unlock(&chip->mutex); return changed; } #define WM8776_BIT_SWITCH(xname, reg, bit, invert, flags) { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = xname, \ .info = snd_ctl_boolean_mono_info, \ .get = wm8776_bit_switch_get, \ .put = wm8776_bit_switch_put, \ .private_value = ((reg) << 16) | (bit) | ((invert) << 24) | (flags), \ } #define _WM8776_FIELD_CTL(xname, reg, shift, initval, min, max, mask, flags) \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = xname, \ .private_value = (initval) | ((min) << 8) | ((max) << 12) | \ ((mask) << 16) | ((shift) << 20) | ((reg) << 24) | (flags) #define WM8776_FIELD_CTL_ENUM(xname, reg, shift, init, min, max, mask, flags) {\ _WM8776_FIELD_CTL(xname " Capture Enum", \ reg, shift, init, min, max, mask, flags), \ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \ SNDRV_CTL_ELEM_ACCESS_INACTIVE, \ .info = wm8776_field_enum_info, \ .get = wm8776_field_enum_get, \ .put = wm8776_field_enum_put, \ } #define WM8776_FIELD_CTL_VOLUME(a, b, c, d, e, f, g, h, tlv_p) { \ _WM8776_FIELD_CTL(a " Capture Volume", b, c, d, e, f, g, h), \ .access = 
SNDRV_CTL_ELEM_ACCESS_READWRITE | \ SNDRV_CTL_ELEM_ACCESS_INACTIVE | \ SNDRV_CTL_ELEM_ACCESS_TLV_READ, \ .info = wm8776_field_volume_info, \ .get = wm8776_field_volume_get, \ .put = wm8776_field_volume_put, \ .tlv = { .p = tlv_p }, \ } static const DECLARE_TLV_DB_SCALE(wm87x6_dac_db_scale, -6000, 50, 0); static const DECLARE_TLV_DB_SCALE(wm8776_adc_db_scale, -2100, 50, 0); static const DECLARE_TLV_DB_SCALE(wm8776_hp_db_scale, -6000, 100, 0); static const DECLARE_TLV_DB_SCALE(wm8776_lct_db_scale, -1600, 100, 0); static const DECLARE_TLV_DB_SCALE(wm8776_maxgain_db_scale, 0, 400, 0); static const DECLARE_TLV_DB_SCALE(wm8776_ngth_db_scale, -7800, 600, 0); static const DECLARE_TLV_DB_SCALE(wm8776_maxatten_lim_db_scale, -1200, 100, 0); static const DECLARE_TLV_DB_SCALE(wm8776_maxatten_alc_db_scale, -2100, 400, 0); static const struct snd_kcontrol_new ds_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Headphone Playback Volume", .info = wm8776_hp_vol_info, .get = wm8776_hp_vol_get, .put = wm8776_hp_vol_put, .tlv = { .p = wm8776_hp_db_scale }, }, WM8776_BIT_SWITCH("Headphone Playback Switch", WM8776_PWRDOWN, WM8776_HPPD, 1, 0), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Input Capture Volume", .info = wm8776_input_vol_info, .get = wm8776_input_vol_get, .put = wm8776_input_vol_put, .tlv = { .p = wm8776_adc_db_scale }, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Line Capture Switch", .info = snd_ctl_boolean_mono_info, .get = wm8776_input_mux_get, .put = wm8776_input_mux_put, .private_value = 1 << 0, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Mic Capture Switch", .info = snd_ctl_boolean_mono_info, .get = wm8776_input_mux_get, .put = wm8776_input_mux_put, .private_value = 1 << 1, }, WM8776_BIT_SWITCH("Front Mic Capture Switch", WM8776_ADCMUX, 1 << 2, 0, 0), WM8776_BIT_SWITCH("Aux Capture Switch", WM8776_ADCMUX, 1 << 3, 0, 0), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "ADC Filter Capture Enum", .info = hpf_info, .get = hpf_get, .put = 
hpf_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Level Control Capture Enum", .info = wm8776_level_control_info, .get = wm8776_level_control_get, .put = wm8776_level_control_put, .private_value = 0, }, }; static const struct snd_kcontrol_new hdav_slim_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "HDMI Playback Switch", .info = snd_ctl_boolean_mono_info, .get = xonar_gpio_bit_switch_get, .put = xonar_gpio_bit_switch_put, .private_value = GPIO_SLIM_HDMI_DISABLE | XONAR_GPIO_BIT_INVERT, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Headphone Playback Volume", .info = wm8776_hp_vol_info, .get = wm8776_hp_vol_get, .put = wm8776_hp_vol_put, .tlv = { .p = wm8776_hp_db_scale }, }, WM8776_BIT_SWITCH("Headphone Playback Switch", WM8776_PWRDOWN, WM8776_HPPD, 1, 0), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Input Capture Volume", .info = wm8776_input_vol_info, .get = wm8776_input_vol_get, .put = wm8776_input_vol_put, .tlv = { .p = wm8776_adc_db_scale }, }, WM8776_BIT_SWITCH("Mic Capture Switch", WM8776_ADCMUX, 1 << 0, 0, 0), WM8776_BIT_SWITCH("Aux Capture Switch", WM8776_ADCMUX, 1 << 1, 0, 0), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "ADC Filter Capture Enum", .info = hpf_info, .get = hpf_get, .put = hpf_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Level Control Capture Enum", .info = wm8776_level_control_info, .get = wm8776_level_control_get, .put = wm8776_level_control_put, .private_value = 0, }, }; static const struct snd_kcontrol_new lc_controls[] = { WM8776_FIELD_CTL_VOLUME("Limiter Threshold", WM8776_ALCCTRL1, 0, 11, 0, 15, 0xf, LC_CONTROL_LIMITER, wm8776_lct_db_scale), WM8776_FIELD_CTL_ENUM("Limiter Attack Time", WM8776_ALCCTRL3, 0, 2, 0, 10, 0xf, LC_CONTROL_LIMITER), WM8776_FIELD_CTL_ENUM("Limiter Decay Time", WM8776_ALCCTRL3, 4, 3, 0, 10, 0xf, LC_CONTROL_LIMITER), WM8776_FIELD_CTL_ENUM("Limiter Transient Window", WM8776_LIMITER, 4, 2, 0, 7, 0x7, LC_CONTROL_LIMITER), WM8776_FIELD_CTL_VOLUME("Limiter Maximum 
Attenuation", WM8776_LIMITER, 0, 6, 3, 12, 0xf, LC_CONTROL_LIMITER, wm8776_maxatten_lim_db_scale), WM8776_FIELD_CTL_VOLUME("ALC Target Level", WM8776_ALCCTRL1, 0, 11, 0, 15, 0xf, LC_CONTROL_ALC, wm8776_lct_db_scale), WM8776_FIELD_CTL_ENUM("ALC Attack Time", WM8776_ALCCTRL3, 0, 2, 0, 10, 0xf, LC_CONTROL_ALC), WM8776_FIELD_CTL_ENUM("ALC Decay Time", WM8776_ALCCTRL3, 4, 3, 0, 10, 0xf, LC_CONTROL_ALC), WM8776_FIELD_CTL_VOLUME("ALC Maximum Gain", WM8776_ALCCTRL1, 4, 7, 1, 7, 0x7, LC_CONTROL_ALC, wm8776_maxgain_db_scale), WM8776_FIELD_CTL_VOLUME("ALC Maximum Attenuation", WM8776_LIMITER, 0, 10, 10, 15, 0xf, LC_CONTROL_ALC, wm8776_maxatten_alc_db_scale), WM8776_FIELD_CTL_ENUM("ALC Hold Time", WM8776_ALCCTRL2, 0, 0, 0, 15, 0xf, LC_CONTROL_ALC), WM8776_BIT_SWITCH("Noise Gate Capture Switch", WM8776_NOISEGATE, WM8776_NGAT, 0, LC_CONTROL_ALC), WM8776_FIELD_CTL_VOLUME("Noise Gate Threshold", WM8776_NOISEGATE, 2, 0, 0, 7, 0x7, LC_CONTROL_ALC, wm8776_ngth_db_scale), }; static int add_lc_controls(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; unsigned int i; struct snd_kcontrol *ctl; int err; BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls)); for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) { ctl = snd_ctl_new1(&lc_controls[i], chip); if (!ctl) return -ENOMEM; err = snd_ctl_add(chip->card, ctl); if (err < 0) return err; data->lc_controls[i] = ctl; } return 0; } static int xonar_ds_mixer_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; unsigned int i; struct snd_kcontrol *ctl; int err; for (i = 0; i < ARRAY_SIZE(ds_controls); ++i) { ctl = snd_ctl_new1(&ds_controls[i], chip); if (!ctl) return -ENOMEM; err = snd_ctl_add(chip->card, ctl); if (err < 0) return err; if (!strcmp(ctl->id.name, "Line Capture Switch")) data->line_adcmux_control = ctl; else if (!strcmp(ctl->id.name, "Mic Capture Switch")) data->mic_adcmux_control = ctl; } if (!data->line_adcmux_control || !data->mic_adcmux_control) return -ENXIO; return 
add_lc_controls(chip); } static int xonar_hdav_slim_mixer_init(struct oxygen *chip) { unsigned int i; struct snd_kcontrol *ctl; int err; for (i = 0; i < ARRAY_SIZE(hdav_slim_controls); ++i) { ctl = snd_ctl_new1(&hdav_slim_controls[i], chip); if (!ctl) return -ENOMEM; err = snd_ctl_add(chip->card, ctl); if (err < 0) return err; } return add_lc_controls(chip); } static void dump_wm8776_registers(struct oxygen *chip, struct snd_info_buffer *buffer) { struct xonar_wm87x6 *data = chip->model_data; unsigned int i; snd_iprintf(buffer, "\nWM8776:\n00:"); for (i = 0; i < 0x10; ++i) snd_iprintf(buffer, " %03x", data->wm8776_regs[i]); snd_iprintf(buffer, "\n10:"); for (i = 0x10; i < 0x17; ++i) snd_iprintf(buffer, " %03x", data->wm8776_regs[i]); snd_iprintf(buffer, "\n"); } static void dump_wm87x6_registers(struct oxygen *chip, struct snd_info_buffer *buffer) { struct xonar_wm87x6 *data = chip->model_data; unsigned int i; dump_wm8776_registers(chip, buffer); snd_iprintf(buffer, "\nWM8766:\n00:"); for (i = 0; i < 0x10; ++i) snd_iprintf(buffer, " %03x", data->wm8766_regs[i]); snd_iprintf(buffer, "\n"); } static const struct oxygen_model model_xonar_ds = { .shortname = "Xonar DS", .longname = "Asus Virtuoso 66", .chip = "AV200", .init = xonar_ds_init, .mixer_init = xonar_ds_mixer_init, .cleanup = xonar_ds_cleanup, .suspend = xonar_ds_suspend, .resume = xonar_ds_resume, .pcm_hardware_filter = wm8776_adc_hardware_filter, .set_dac_params = set_wm87x6_dac_params, .set_adc_params = set_wm8776_adc_params, .update_dac_volume = update_wm87x6_volume, .update_dac_mute = update_wm87x6_mute, .update_center_lfe_mix = update_wm8766_center_lfe_mix, .gpio_changed = xonar_ds_gpio_changed, .dump_registers = dump_wm87x6_registers, .dac_tlv = wm87x6_dac_db_scale, .model_data_size = sizeof(struct xonar_wm87x6), .device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | CAPTURE_0_FROM_I2S_1 | CAPTURE_1_FROM_SPDIF, .dac_channels_pcm = 8, .dac_channels_mixer = 8, .dac_volume_min = 255 - 2*60, 
.dac_volume_max = 255, .function_flags = OXYGEN_FUNCTION_SPI, .dac_mclks = OXYGEN_MCLKS(256, 256, 128), .adc_mclks = OXYGEN_MCLKS(256, 256, 128), .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, }; static const struct oxygen_model model_xonar_hdav_slim = { .shortname = "Xonar HDAV1.3 Slim", .longname = "Asus Virtuoso 200", .chip = "AV200", .init = xonar_hdav_slim_init, .mixer_init = xonar_hdav_slim_mixer_init, .cleanup = xonar_hdav_slim_cleanup, .suspend = xonar_hdav_slim_suspend, .resume = xonar_hdav_slim_resume, .pcm_hardware_filter = xonar_hdav_slim_hardware_filter, .set_dac_params = set_hdav_slim_dac_params, .set_adc_params = set_wm8776_adc_params, .update_dac_volume = update_wm8776_volume, .update_dac_mute = update_wm8776_mute, .uart_input = xonar_hdmi_uart_input, .dump_registers = dump_wm8776_registers, .dac_tlv = wm87x6_dac_db_scale, .model_data_size = sizeof(struct xonar_wm87x6), .device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | CAPTURE_0_FROM_I2S_1 | CAPTURE_1_FROM_SPDIF, .dac_channels_pcm = 8, .dac_channels_mixer = 2, .dac_volume_min = 255 - 2*60, .dac_volume_max = 255, .function_flags = OXYGEN_FUNCTION_2WIRE, .dac_mclks = OXYGEN_MCLKS(256, 256, 128), .adc_mclks = OXYGEN_MCLKS(256, 256, 128), .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, }; int __devinit get_xonar_wm87x6_model(struct oxygen *chip, const struct pci_device_id *id) { switch (id->subdevice) { case 0x838e: chip->model = model_xonar_ds; break; case 0x835e: chip->model = model_xonar_hdav_slim; break; default: return -EINVAL; } return 0; }
gpl-2.0
jrfastab/Linux-Kernel-QOS
drivers/watchdog/pnx833x_wdt.c
7385
7288
/* * PNX833x Hardware Watchdog Driver * Copyright 2008 NXP Semiconductors * Daniel Laird <daniel.j.laird@nxp.com> * Andre McCurdy <andre.mccurdy@nxp.com> * * Heavily based upon - IndyDog 0.3 * A Hardware Watchdog Device for SGI IP22 * * (c) Copyright 2002 Guido Guenther <agx@sigxcpu.org>, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * based on softdog.c by Alan Cox <alan@redhat.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/init.h> #include <asm/mach-pnx833x/pnx833x.h> #define WATCHDOG_TIMEOUT 30 /* 30 sec Maximum timeout */ #define WATCHDOG_COUNT_FREQUENCY 68000000U /* Watchdog counts at 68MHZ. */ #define PNX_WATCHDOG_TIMEOUT (WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY) #define PNX_TIMEOUT_VALUE 2040000000U /** CONFIG block */ #define PNX833X_CONFIG (0x07000U) #define PNX833X_CONFIG_CPU_WATCHDOG (0x54) #define PNX833X_CONFIG_CPU_WATCHDOG_COMPARE (0x58) #define PNX833X_CONFIG_CPU_COUNTERS_CONTROL (0x1c) /** RESET block */ #define PNX833X_RESET (0x08000U) #define PNX833X_RESET_CONFIG (0x08) static int pnx833x_wdt_alive; /* Set default timeout in MHZ.*/ static int pnx833x_wdt_timeout = PNX_WATCHDOG_TIMEOUT; module_param(pnx833x_wdt_timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in Mhz. 
(68Mhz clock), default=" __MODULE_STRING(PNX_TIMEOUT_VALUE) "(30 seconds)."); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); #define START_DEFAULT 1 static int start_enabled = START_DEFAULT; module_param(start_enabled, int, 0); MODULE_PARM_DESC(start_enabled, "Watchdog is started on module insertion " "(default=" __MODULE_STRING(START_DEFAULT) ")"); static void pnx833x_wdt_start(void) { /* Enable watchdog causing reset. */ PNX833X_REG(PNX833X_RESET + PNX833X_RESET_CONFIG) |= 0x1; /* Set timeout.*/ PNX833X_REG(PNX833X_CONFIG + PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = pnx833x_wdt_timeout; /* Enable watchdog. */ PNX833X_REG(PNX833X_CONFIG + PNX833X_CONFIG_CPU_COUNTERS_CONTROL) |= 0x1; pr_info("Started watchdog timer\n"); } static void pnx833x_wdt_stop(void) { /* Disable watchdog causing reset. */ PNX833X_REG(PNX833X_RESET + PNX833X_CONFIG) &= 0xFFFFFFFE; /* Disable watchdog.*/ PNX833X_REG(PNX833X_CONFIG + PNX833X_CONFIG_CPU_COUNTERS_CONTROL) &= 0xFFFFFFFE; pr_info("Stopped watchdog timer\n"); } static void pnx833x_wdt_ping(void) { PNX833X_REG(PNX833X_CONFIG + PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = pnx833x_wdt_timeout; } /* * Allow only one person to hold it open */ static int pnx833x_wdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &pnx833x_wdt_alive)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); /* Activate timer */ if (!start_enabled) pnx833x_wdt_start(); pnx833x_wdt_ping(); pr_info("Started watchdog timer\n"); return nonseekable_open(inode, file); } static int pnx833x_wdt_release(struct inode *inode, struct file *file) { /* Shut off the timer. 
* Lock it in if it's a module and we defined ...NOWAYOUT */ if (!nowayout) pnx833x_wdt_stop(); /* Turn the WDT off */ clear_bit(0, &pnx833x_wdt_alive); return 0; } static ssize_t pnx833x_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) { /* Refresh the timer. */ if (len) pnx833x_wdt_ping(); return len; } static long pnx833x_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int options, new_timeout = 0; uint32_t timeout, timeout_left = 0; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, .firmware_version = 0, .identity = "Hardware Watchdog for PNX833x", }; switch (cmd) { default: return -ENOTTY; case WDIOC_GETSUPPORT: if (copy_to_user((struct watchdog_info *)arg, &ident, sizeof(ident))) return -EFAULT; return 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, (int *)arg); case WDIOC_SETOPTIONS: if (get_user(options, (int *)arg)) return -EFAULT; if (options & WDIOS_DISABLECARD) pnx833x_wdt_stop(); if (options & WDIOS_ENABLECARD) pnx833x_wdt_start(); return 0; case WDIOC_KEEPALIVE: pnx833x_wdt_ping(); return 0; case WDIOC_SETTIMEOUT: { if (get_user(new_timeout, (int *)arg)) return -EFAULT; pnx833x_wdt_timeout = new_timeout; PNX833X_REG(PNX833X_CONFIG + PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = new_timeout; return put_user(new_timeout, (int *)arg); } case WDIOC_GETTIMEOUT: timeout = PNX833X_REG(PNX833X_CONFIG + PNX833X_CONFIG_CPU_WATCHDOG_COMPARE); return put_user(timeout, (int *)arg); case WDIOC_GETTIMELEFT: timeout_left = PNX833X_REG(PNX833X_CONFIG + PNX833X_CONFIG_CPU_WATCHDOG); return put_user(timeout_left, (int *)arg); } } static int pnx833x_wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) pnx833x_wdt_stop(); /* Turn the WDT off */ return NOTIFY_DONE; } static const struct file_operations pnx833x_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = pnx833x_wdt_write, 
.unlocked_ioctl = pnx833x_wdt_ioctl, .open = pnx833x_wdt_open, .release = pnx833x_wdt_release, }; static struct miscdevice pnx833x_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &pnx833x_wdt_fops, }; static struct notifier_block pnx833x_wdt_notifier = { .notifier_call = pnx833x_wdt_notify_sys, }; static int __init watchdog_init(void) { int ret, cause; /* Lets check the reason for the reset.*/ cause = PNX833X_REG(PNX833X_RESET); /*If bit 31 is set then watchdog was cause of reset.*/ if (cause & 0x80000000) { pr_info("The system was previously reset due to the watchdog firing - please investigate...\n"); } ret = register_reboot_notifier(&pnx833x_wdt_notifier); if (ret) { pr_err("cannot register reboot notifier (err=%d)\n", ret); return ret; } ret = misc_register(&pnx833x_wdt_miscdev); if (ret) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); unregister_reboot_notifier(&pnx833x_wdt_notifier); return ret; } pr_info("Hardware Watchdog Timer for PNX833x: Version 0.1\n"); if (start_enabled) pnx833x_wdt_start(); return 0; } static void __exit watchdog_exit(void) { misc_deregister(&pnx833x_wdt_miscdev); unregister_reboot_notifier(&pnx833x_wdt_notifier); } module_init(watchdog_init); module_exit(watchdog_exit); MODULE_AUTHOR("Daniel Laird/Andre McCurdy"); MODULE_DESCRIPTION("Hardware Watchdog Device for PNX833x"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
micky387/kernel_samsung_trelte
drivers/watchdog/sbc_epx_c3.c
7385
5157
/* * SBC EPX C3 0.1 A Hardware Watchdog Device for the Winsystems EPX-C3 * single board computer * * (c) Copyright 2006 Calin A. Culianu <calin@ajvar.org>, All Rights * Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * based on softdog.c by Alan Cox <alan@lxorguk.ukuu.org.uk> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/uaccess.h> #include <linux/io.h> static int epx_c3_alive; #define WATCHDOG_TIMEOUT 1 /* 1 sec default timeout */ static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); #define EPXC3_WATCHDOG_CTL_REG 0x1ee /* write 1 to enable, 0 to disable */ #define EPXC3_WATCHDOG_PET_REG 0x1ef /* write anything to pet once enabled */ static void epx_c3_start(void) { outb(1, EPXC3_WATCHDOG_CTL_REG); } static void epx_c3_stop(void) { outb(0, EPXC3_WATCHDOG_CTL_REG); pr_info("Stopped watchdog timer\n"); } static void epx_c3_pet(void) { outb(1, EPXC3_WATCHDOG_PET_REG); } /* * Allow only one person to hold it open */ static int epx_c3_open(struct inode *inode, struct file *file) { if (epx_c3_alive) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); /* Activate timer */ epx_c3_start(); epx_c3_pet(); epx_c3_alive = 1; pr_info("Started watchdog timer\n"); return nonseekable_open(inode, file); } static int epx_c3_release(struct inode *inode, struct file *file) { /* Shut off the timer. 
* Lock it in if it's a module and we defined ...NOWAYOUT */ if (!nowayout) epx_c3_stop(); /* Turn the WDT off */ epx_c3_alive = 0; return 0; } static ssize_t epx_c3_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* Refresh the timer. */ if (len) epx_c3_pet(); return len; } static long epx_c3_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int options, retval = -EINVAL; int __user *argp = (void __user *)arg; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING, .firmware_version = 0, .identity = "Winsystems EPX-C3 H/W Watchdog", }; switch (cmd) { case WDIOC_GETSUPPORT: if (copy_to_user(argp, &ident, sizeof(ident))) return -EFAULT; return 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, argp); case WDIOC_SETOPTIONS: if (get_user(options, argp)) return -EFAULT; if (options & WDIOS_DISABLECARD) { epx_c3_stop(); retval = 0; } if (options & WDIOS_ENABLECARD) { epx_c3_start(); retval = 0; } return retval; case WDIOC_KEEPALIVE: epx_c3_pet(); return 0; case WDIOC_GETTIMEOUT: return put_user(WATCHDOG_TIMEOUT, argp); default: return -ENOTTY; } } static int epx_c3_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) epx_c3_stop(); /* Turn the WDT off */ return NOTIFY_DONE; } static const struct file_operations epx_c3_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = epx_c3_write, .unlocked_ioctl = epx_c3_ioctl, .open = epx_c3_open, .release = epx_c3_release, }; static struct miscdevice epx_c3_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &epx_c3_fops, }; static struct notifier_block epx_c3_notifier = { .notifier_call = epx_c3_notify_sys, }; static int __init watchdog_init(void) { int ret; if (!request_region(EPXC3_WATCHDOG_CTL_REG, 2, "epxc3_watchdog")) return -EBUSY; ret = register_reboot_notifier(&epx_c3_notifier); if (ret) { pr_err("cannot register reboot notifier (err=%d)\n", ret); goto out; 
} ret = misc_register(&epx_c3_miscdev); if (ret) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); unregister_reboot_notifier(&epx_c3_notifier); goto out; } pr_info("Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n"); return 0; out: release_region(EPXC3_WATCHDOG_CTL_REG, 2); return ret; } static void __exit watchdog_exit(void) { misc_deregister(&epx_c3_miscdev); unregister_reboot_notifier(&epx_c3_notifier); release_region(EPXC3_WATCHDOG_CTL_REG, 2); } module_init(watchdog_init); module_exit(watchdog_exit); MODULE_AUTHOR("Calin A. Culianu <calin@ajvar.org>"); MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. " "Note that there is no way to probe for this device -- " "so only use it if you are *sure* you are running on this specific " "SBC system from Winsystems! It writes to IO ports 0x1ee and 0x1ef!"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
BIBIMAINETTIDEV/imx6m300-linux-3.10.17
drivers/watchdog/indydog.c
7385
4893
/* * IndyDog 0.3 A Hardware Watchdog Device for SGI IP22 * * (c) Copyright 2002 Guido Guenther <agx@sigxcpu.org>, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * based on softdog.c by Alan Cox <alan@lxorguk.ukuu.org.uk> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/uaccess.h> #include <asm/sgi/mc.h> static unsigned long indydog_alive; static DEFINE_SPINLOCK(indydog_lock); #define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */ static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); static void indydog_start(void) { u32 mc_ctrl0; spin_lock(&indydog_lock); mc_ctrl0 = sgimc->cpuctrl0; mc_ctrl0 = sgimc->cpuctrl0 | SGIMC_CCTRL0_WDOG; sgimc->cpuctrl0 = mc_ctrl0; spin_unlock(&indydog_lock); } static void indydog_stop(void) { u32 mc_ctrl0; spin_lock(&indydog_lock); mc_ctrl0 = sgimc->cpuctrl0; mc_ctrl0 &= ~SGIMC_CCTRL0_WDOG; sgimc->cpuctrl0 = mc_ctrl0; spin_unlock(&indydog_lock); pr_info("Stopped watchdog timer\n"); } static void indydog_ping(void) { sgimc->watchdogt = 0; } /* * Allow only one person to hold it open */ static int indydog_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &indydog_alive)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); /* Activate timer */ indydog_start(); indydog_ping(); pr_info("Started watchdog timer\n"); return nonseekable_open(inode, file); } 
static int indydog_release(struct inode *inode, struct file *file) { /* Shut off the timer. * Lock it in if it's a module and we defined ...NOWAYOUT */ if (!nowayout) indydog_stop(); /* Turn the WDT off */ clear_bit(0, &indydog_alive); return 0; } static ssize_t indydog_write(struct file *file, const char *data, size_t len, loff_t *ppos) { /* Refresh the timer. */ if (len) indydog_ping(); return len; } static long indydog_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int options, retval = -EINVAL; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING, .firmware_version = 0, .identity = "Hardware Watchdog for SGI IP22", }; switch (cmd) { case WDIOC_GETSUPPORT: if (copy_to_user((struct watchdog_info *)arg, &ident, sizeof(ident))) return -EFAULT; return 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, (int *)arg); case WDIOC_SETOPTIONS: { if (get_user(options, (int *)arg)) return -EFAULT; if (options & WDIOS_DISABLECARD) { indydog_stop(); retval = 0; } if (options & WDIOS_ENABLECARD) { indydog_start(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: indydog_ping(); return 0; case WDIOC_GETTIMEOUT: return put_user(WATCHDOG_TIMEOUT, (int *)arg); default: return -ENOTTY; } } static int indydog_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) indydog_stop(); /* Turn the WDT off */ return NOTIFY_DONE; } static const struct file_operations indydog_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = indydog_write, .unlocked_ioctl = indydog_ioctl, .open = indydog_open, .release = indydog_release, }; static struct miscdevice indydog_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &indydog_fops, }; static struct notifier_block indydog_notifier = { .notifier_call = indydog_notify_sys, }; static int __init watchdog_init(void) { int ret; ret = register_reboot_notifier(&indydog_notifier); if (ret) { pr_err("cannot register 
reboot notifier (err=%d)\n", ret); return ret; } ret = misc_register(&indydog_miscdev); if (ret) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); unregister_reboot_notifier(&indydog_notifier); return ret; } pr_info("Hardware Watchdog Timer for SGI IP22: 0.3\n"); return 0; } static void __exit watchdog_exit(void) { misc_deregister(&indydog_miscdev); unregister_reboot_notifier(&indydog_notifier); } module_init(watchdog_init); module_exit(watchdog_exit); MODULE_AUTHOR("Guido Guenther <agx@sigxcpu.org>"); MODULE_DESCRIPTION("Hardware Watchdog Device for SGI IP22"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
friedrich420/S6_UniKernel_v2
drivers/net/wireless/orinoco/hermes.c
8153
20038
/* hermes.c * * Driver core for the "Hermes" wireless MAC controller, as used in * the Lucent Orinoco and Cabletron RoamAbout cards. It should also * work on the hfa3841 and hfa3842 MAC controller chips used in the * Prism II chipsets. * * This is not a complete driver, just low-level access routines for * the MAC controller itself. * * Based on the prism2 driver from Absolute Value Systems' linux-wlan * project, the Linux wvlan_cs driver, Lucent's HCF-Light * (wvlan_hcf.c) library, and the NetBSD wireless driver (in no * particular order). * * Copyright (C) 2000, David Gibson, Linuxcare Australia. * (C) Copyright David Gibson, IBM Corp. 2001-2003. * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License * at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and * limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU General Public License version 2 (the "GPL"), in * which case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the MPL, indicate your decision by * deleting the provisions above and replace them with the notice and * other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file * under either the MPL or the GPL. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include "hermes.h" /* These are maximum timeouts. 
Most often, card wil react much faster */ #define CMD_BUSY_TIMEOUT (100) /* In iterations of ~1us */ #define CMD_INIT_TIMEOUT (50000) /* in iterations of ~10us */ #define CMD_COMPL_TIMEOUT (20000) /* in iterations of ~10us */ #define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */ /* * AUX port access. To unlock the AUX port write the access keys to the * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL * register. Then read it and make sure it's HERMES_AUX_ENABLED. */ #define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */ #define HERMES_AUX_DISABLE 0x4000 /* Disable to auxiliary port access */ #define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */ #define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */ #define HERMES_AUX_PW0 0xFE01 #define HERMES_AUX_PW1 0xDC23 #define HERMES_AUX_PW2 0xBA45 /* HERMES_CMD_DOWNLD */ #define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD) #define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD) #define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD) #define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD) /* * Debugging helpers */ #define DMSG(stuff...) do {printk(KERN_DEBUG "hermes @ %p: " , hw->iobase); \ printk(stuff); } while (0) #undef HERMES_DEBUG #ifdef HERMES_DEBUG #include <stdarg.h> #define DEBUG(lvl, stuff...) if ((lvl) <= HERMES_DEBUG) DMSG(stuff) #else /* ! HERMES_DEBUG */ #define DEBUG(lvl, stuff...) do { } while (0) #endif /* ! HERMES_DEBUG */ static const struct hermes_ops hermes_ops_local; /* * Internal functions */ /* Issue a command to the chip. Waiting for it to complete is the caller's problem. Returns -EBUSY if the command register is busy, 0 on success. Callable from any context. 
*/ static int hermes_issue_cmd(struct hermes *hw, u16 cmd, u16 param0, u16 param1, u16 param2) { int k = CMD_BUSY_TIMEOUT; u16 reg; /* First wait for the command register to unbusy */ reg = hermes_read_regn(hw, CMD); while ((reg & HERMES_CMD_BUSY) && k) { k--; udelay(1); reg = hermes_read_regn(hw, CMD); } if (reg & HERMES_CMD_BUSY) return -EBUSY; hermes_write_regn(hw, PARAM2, param2); hermes_write_regn(hw, PARAM1, param1); hermes_write_regn(hw, PARAM0, param0); hermes_write_regn(hw, CMD, cmd); return 0; } /* * Function definitions */ /* For doing cmds that wipe the magic constant in SWSUPPORT0 */ static int hermes_doicmd_wait(struct hermes *hw, u16 cmd, u16 parm0, u16 parm1, u16 parm2, struct hermes_response *resp) { int err = 0; int k; u16 status, reg; err = hermes_issue_cmd(hw, cmd, parm0, parm1, parm2); if (err) return err; reg = hermes_read_regn(hw, EVSTAT); k = CMD_INIT_TIMEOUT; while ((!(reg & HERMES_EV_CMD)) && k) { k--; udelay(10); reg = hermes_read_regn(hw, EVSTAT); } hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC); if (!hermes_present(hw)) { DEBUG(0, "hermes @ 0x%x: Card removed during reset.\n", hw->iobase); err = -ENODEV; goto out; } if (!(reg & HERMES_EV_CMD)) { printk(KERN_ERR "hermes @ %p: " "Timeout waiting for card to reset (reg=0x%04x)!\n", hw->iobase, reg); err = -ETIMEDOUT; goto out; } status = hermes_read_regn(hw, STATUS); if (resp) { resp->status = status; resp->resp0 = hermes_read_regn(hw, RESP0); resp->resp1 = hermes_read_regn(hw, RESP1); resp->resp2 = hermes_read_regn(hw, RESP2); } hermes_write_regn(hw, EVACK, HERMES_EV_CMD); if (status & HERMES_STATUS_RESULT) err = -EIO; out: return err; } void hermes_struct_init(struct hermes *hw, void __iomem *address, int reg_spacing) { hw->iobase = address; hw->reg_spacing = reg_spacing; hw->inten = 0x0; hw->eeprom_pda = false; hw->ops = &hermes_ops_local; } EXPORT_SYMBOL(hermes_struct_init); static int hermes_init(struct hermes *hw) { u16 reg; int err = 0; int k; /* We don't want to be interrupted 
while resetting the chipset */ hw->inten = 0x0; hermes_write_regn(hw, INTEN, 0); hermes_write_regn(hw, EVACK, 0xffff); /* Normally it's a "can't happen" for the command register to be busy when we go to issue a command because we are serializing all commands. However we want to have some chance of resetting the card even if it gets into a stupid state, so we actually wait to see if the command register will unbusy itself here. */ k = CMD_BUSY_TIMEOUT; reg = hermes_read_regn(hw, CMD); while (k && (reg & HERMES_CMD_BUSY)) { if (reg == 0xffff) /* Special case - the card has probably been removed, so don't wait for the timeout */ return -ENODEV; k--; udelay(1); reg = hermes_read_regn(hw, CMD); } /* No need to explicitly handle the timeout - if we've timed out hermes_issue_cmd() will probably return -EBUSY below */ /* According to the documentation, EVSTAT may contain obsolete event occurrence information. We have to acknowledge it by writing EVACK. */ reg = hermes_read_regn(hw, EVSTAT); hermes_write_regn(hw, EVACK, reg); /* We don't use hermes_docmd_wait here, because the reset wipes the magic constant in SWSUPPORT0 away, and it gets confused */ err = hermes_doicmd_wait(hw, HERMES_CMD_INIT, 0, 0, 0, NULL); return err; } /* Issue a command to the chip, and (busy!) wait for it to * complete. * * Returns: * < 0 on internal error * 0 on success * > 0 on error returned by the firmware * * Callable from any context, but locking is your problem. 
 */
static int hermes_docmd_wait(struct hermes *hw, u16 cmd,
			     u16 parm0, struct hermes_response *resp)
{
	int err;
	int k;
	u16 reg;
	u16 status;

	err = hermes_issue_cmd(hw, cmd, parm0, 0, 0);
	if (err) {
		/* Distinguish "card gone" from a genuine busy error. */
		if (!hermes_present(hw)) {
			if (net_ratelimit())
				printk(KERN_WARNING "hermes @ %p: "
				       "Card removed while issuing command "
				       "0x%04x.\n", hw->iobase, cmd);
			err = -ENODEV;
		} else
			if (net_ratelimit())
				printk(KERN_ERR "hermes @ %p: "
				       "Error %d issuing command 0x%04x.\n",
				       hw->iobase, err, cmd);
		goto out;
	}

	/* Busy-poll EVSTAT until the command-complete event appears. */
	reg = hermes_read_regn(hw, EVSTAT);
	k = CMD_COMPL_TIMEOUT;
	while ((!(reg & HERMES_EV_CMD)) && k) {
		k--;
		udelay(10);
		reg = hermes_read_regn(hw, EVSTAT);
	}

	if (!hermes_present(hw)) {
		printk(KERN_WARNING "hermes @ %p: Card removed "
		       "while waiting for command 0x%04x completion.\n",
		       hw->iobase, cmd);
		err = -ENODEV;
		goto out;
	}

	if (!(reg & HERMES_EV_CMD)) {
		printk(KERN_ERR "hermes @ %p: Timeout waiting for "
		       "command 0x%04x completion.\n", hw->iobase, cmd);
		err = -ETIMEDOUT;
		goto out;
	}

	status = hermes_read_regn(hw, STATUS);
	if (resp) {
		resp->status = status;
		resp->resp0 = hermes_read_regn(hw, RESP0);
		resp->resp1 = hermes_read_regn(hw, RESP1);
		resp->resp2 = hermes_read_regn(hw, RESP2);
	}

	/* Acknowledge the event so the next command can be detected. */
	hermes_write_regn(hw, EVACK, HERMES_EV_CMD);

	if (status & HERMES_STATUS_RESULT)
		err = -EIO;

out:
	return err;
}

/* Ask the firmware to allocate a transmit frame buffer of 'size' bytes;
 * on success *fid receives the frame ID of the new buffer. */
static int hermes_allocate(struct hermes *hw, u16 size, u16 *fid)
{
	int err = 0;
	int k;
	u16 reg;

	if ((size < HERMES_ALLOC_LEN_MIN) || (size > HERMES_ALLOC_LEN_MAX))
		return -EINVAL;

	err = hermes_docmd_wait(hw, HERMES_CMD_ALLOC, size, NULL);
	if (err)
		return err;

	/* The allocation completes asynchronously; poll for EV_ALLOC. */
	reg = hermes_read_regn(hw, EVSTAT);
	k = ALLOC_COMPL_TIMEOUT;
	while ((!(reg & HERMES_EV_ALLOC)) && k) {
		k--;
		udelay(10);
		reg = hermes_read_regn(hw, EVSTAT);
	}

	if (!hermes_present(hw)) {
		printk(KERN_WARNING "hermes @ %p: "
		       "Card removed waiting for frame allocation.\n",
		       hw->iobase);
		return -ENODEV;
	}

	if (!(reg & HERMES_EV_ALLOC)) {
		printk(KERN_ERR "hermes @ %p: "
		       "Timeout waiting for frame allocation\n",
		       hw->iobase);
		return -ETIMEDOUT;
	}

	*fid = hermes_read_regn(hw, ALLOCFID);
	hermes_write_regn(hw, EVACK, HERMES_EV_ALLOC);

	return 0;
}

/* Set up a BAP to read a particular chunk of data from card's internal buffer.
 *
 * Returns:
 *     < 0 on internal failure (errno)
 *       0 on success
 *     > 0 on error
 * from firmware
 *
 * Callable from any context */
static int hermes_bap_seek(struct hermes *hw, int bap, u16 id, u16 offset)
{
	/* BAP0 and BAP1 each have their own select/offset register pair. */
	int sreg = bap ? HERMES_SELECT1 : HERMES_SELECT0;
	int oreg = bap ? HERMES_OFFSET1 : HERMES_OFFSET0;
	int k;
	u16 reg;

	/* Paranoia.. */
	if ((offset > HERMES_BAP_OFFSET_MAX) || (offset % 2))
		return -EINVAL;

	k = HERMES_BAP_BUSY_TIMEOUT;
	reg = hermes_read_reg(hw, oreg);
	while ((reg & HERMES_OFFSET_BUSY) && k) {
		k--;
		udelay(1);
		reg = hermes_read_reg(hw, oreg);
	}

	if (reg & HERMES_OFFSET_BUSY)
		return -ETIMEDOUT;

	/* Now we actually set up the transfer */
	hermes_write_reg(hw, sreg, id);
	hermes_write_reg(hw, oreg, offset);

	/* Wait for the BAP to be ready */
	k = HERMES_BAP_BUSY_TIMEOUT;
	reg = hermes_read_reg(hw, oreg);
	while ((reg & (HERMES_OFFSET_BUSY | HERMES_OFFSET_ERR)) && k) {
		k--;
		udelay(1);
		reg = hermes_read_reg(hw, oreg);
	}

	if (reg != offset) {
		printk(KERN_ERR "hermes @ %p: BAP%d offset %s: "
		       "reg=0x%x id=0x%x offset=0x%x\n", hw->iobase, bap,
		       (reg & HERMES_OFFSET_BUSY) ? "timeout" : "error",
		       reg, id, offset);

		if (reg & HERMES_OFFSET_BUSY)
			return -ETIMEDOUT;

		return -EIO;		/* error or wrong offset */
	}

	return 0;
}

/* Read a block of data from the chip's buffer, via the
 * BAP. Synchronization/serialization is the caller's problem.  len
 * must be even.
 *
 * Returns:
 *     < 0 on internal failure (errno)
 *       0 on success
 *     > 0 on error from firmware
 */
static int hermes_bap_pread(struct hermes *hw, int bap, void *buf, int len,
			    u16 id, u16 offset)
{
	int dreg = bap ?
HERMES_DATA1 : HERMES_DATA0;
	int err = 0;

	if ((len < 0) || (len % 2))
		return -EINVAL;

	err = hermes_bap_seek(hw, bap, id, offset);
	if (err)
		goto out;

	/* Actually do the transfer */
	hermes_read_words(hw, dreg, buf, len / 2);

out:
	return err;
}

/* Write a block of data to the chip's buffer, via the
 * BAP. Synchronization/serialization is the caller's problem.
 *
 * Returns:
 *     < 0 on internal failure (errno)
 *       0 on success
 *     > 0 on error from firmware
 */
static int hermes_bap_pwrite(struct hermes *hw, int bap, const void *buf,
			     int len, u16 id, u16 offset)
{
	int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
	int err = 0;

	if (len < 0)
		return -EINVAL;

	err = hermes_bap_seek(hw, bap, id, offset);
	if (err)
		goto out;

	/* Actually do the transfer */
	hermes_write_bytes(hw, dreg, buf, len);

out:
	return err;
}

/* Read a Length-Type-Value record from the card.
 *
 * If length is NULL, we ignore the length read from the card, and
 * read the entire buffer regardless. This is useful because some of
 * the configuration records appear to have incorrect lengths in
 * practice.
 *
 * Callable from user or bh context.  */
static int hermes_read_ltv(struct hermes *hw, int bap, u16 rid,
			   unsigned bufsize, u16 *length, void *buf)
{
	int err = 0;
	int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
	u16 rlength, rtype;
	unsigned nwords;

	if (bufsize % 2)
		return -EINVAL;

	err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS, rid, NULL);
	if (err)
		return err;

	err = hermes_bap_seek(hw, bap, rid, 0);
	if (err)
		return err;

	/* First data word is the record length (in 16-bit words). */
	rlength = hermes_read_reg(hw, dreg);

	if (!rlength)
		return -ENODATA;

	rtype = hermes_read_reg(hw, dreg);

	if (length)
		*length = rlength;

	if (rtype != rid)
		printk(KERN_WARNING "hermes @ %p: %s(): "
		       "rid (0x%04x) does not match type (0x%04x)\n",
		       hw->iobase, __func__, rid, rtype);
	if (HERMES_RECLEN_TO_BYTES(rlength) > bufsize)
		printk(KERN_WARNING "hermes @ %p: "
		       "Truncating LTV record from %d to %d bytes. "
		       "(rid=0x%04x, len=0x%04x)\n", hw->iobase,
		       HERMES_RECLEN_TO_BYTES(rlength), bufsize, rid, rlength);

	/* rlength counts the type word too, hence the -1. */
	nwords = min((unsigned)rlength - 1, bufsize / 2);
	hermes_read_words(hw, dreg, buf, nwords);

	return 0;
}

/* Write a Length-Type-Value record to the card; 'length' is in 16-bit
 * words and includes the type word. */
static int hermes_write_ltv(struct hermes *hw, int bap, u16 rid,
			    u16 length, const void *value)
{
	int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
	int err = 0;
	unsigned count;

	if (length == 0)
		return -EINVAL;

	err = hermes_bap_seek(hw, bap, rid, 0);
	if (err)
		return err;

	hermes_write_reg(hw, dreg, length);
	hermes_write_reg(hw, dreg, rid);

	count = length - 1;

	hermes_write_bytes(hw, dreg, value, count << 1);

	err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE,
				rid, NULL);

	return err;
}

/*** Hermes AUX control ***/

/* Program the AUX window to point at 'addr' (128-byte pages). */
static inline void hermes_aux_setaddr(struct hermes *hw, u32 addr)
{
	hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
	hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
}

/* Unlock (enabled=1) or relock (enabled=0) the AUX port; requires the
 * password written to PARAM0-2 first.  Polls up to ~200us for effect. */
static inline int hermes_aux_control(struct hermes *hw, int enabled)
{
	int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
	int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
	int i;

	/* Already open? */
	if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
		return 0;

	hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
	hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
	hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
	hermes_write_reg(hw, HERMES_CONTROL, action);

	for (i = 0; i < 20; i++) {
		udelay(10);
		if (hermes_read_reg(hw, HERMES_CONTROL) ==
		    desired_state)
			return 0;
	}

	return -EBUSY;
}

/*** Hermes programming ***/

/* About to start programming data (Hermes I)
 * offset is the entry point
 *
 * Spectrum_cs' Symbol fw does not require this
 * wl_lkm Agere fw does
 * Don't know about intersil */
static int hermesi_program_init(struct hermes *hw, u32 offset)
{
	int err;

	/* Disable interrupts?*/
	/*hw->inten = 0x0;*/
	/*hermes_write_regn(hw, INTEN, 0);*/
	/*hermes_set_irqmask(hw, 0);*/

	/* Acknowledge any outstanding command */
	hermes_write_regn(hw, EVACK, 0xFFFF);

	/* Using init_cmd_wait rather than cmd_wait */
	err = hw->ops->init_cmd_wait(hw,
				     0x0100 | HERMES_CMD_INIT,
				     0, 0, 0, NULL);
	if (err)
		return err;

	err = hw->ops->init_cmd_wait(hw,
				     0x0000 | HERMES_CMD_INIT,
				     0, 0, 0, NULL);
	if (err)
		return err;

	err = hermes_aux_control(hw, 1);
	pr_debug("AUX enable returned %d\n", err);

	if (err)
		return err;

	pr_debug("Enabling volatile, EP 0x%08x\n", offset);
	err = hw->ops->init_cmd_wait(hw,
				     HERMES_PROGRAM_ENABLE_VOLATILE,
				     offset & 0xFFFFu,
				     offset >> 16,
				     0,
				     NULL);
	pr_debug("PROGRAM_ENABLE returned %d\n", err);

	return err;
}

/* Done programming data (Hermes I)
 *
 * Spectrum_cs' Symbol fw does not require this
 * wl_lkm Agere fw does
 * Don't know about intersil */
static int hermesi_program_end(struct hermes *hw)
{
	struct hermes_response resp;
	int rc = 0;
	int err;

	rc = hw->ops->cmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);

	pr_debug("PROGRAM_DISABLE returned %d, "
		 "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
		 rc, resp.resp0, resp.resp1, resp.resp2);

	if ((rc == 0) &&
	    ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
		rc = -EIO;

	err = hermes_aux_control(hw, 0);
	pr_debug("AUX disable returned %d\n", err);

	/* Acknowledge any outstanding command */
	hermes_write_regn(hw, EVACK, 0xFFFF);

	/* Reinitialise, ignoring return */
	(void) hw->ops->init_cmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
				      0, 0, 0, NULL);

	return rc ? rc : err;
}

static int hermes_program_bytes(struct hermes *hw, const char *data,
				u32 addr, u32 len)
{
	/* wl lkm splits the programming into chunks of 2000 bytes.
	 * This restriction appears to come from USB. The PCMCIA
	 * adapters can program the whole lot in one go */
	hermes_aux_setaddr(hw, addr);
	hermes_write_bytes(hw, HERMES_AUXDATA, data, len);
	return 0;
}

/* Read PDA from the adapter */
static int hermes_read_pda(struct hermes *hw, __le16 *pda, u32 pda_addr,
			   u16 pda_len)
{
	int ret;
	u16 pda_size;
	u16 data_len = pda_len;
	__le16 *data = pda;

	if (hw->eeprom_pda) {
		/* PDA of spectrum symbol is in eeprom */

		/* Issue command to read EEPROM */
		ret = hw->ops->cmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
		if (ret)
			return ret;
	} else {
		/* wl_lkm does not include PDA size in the PDA area.
		 * We will pad the information into pda, so other routines
		 * don't have to be modified */
		pda[0] = cpu_to_le16(pda_len - 2);
			/* Includes CFG_PROD_DATA but not itself */
		pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
		data_len = pda_len - 4;
		data = pda + 2;
	}

	/* Open auxiliary port */
	ret = hermes_aux_control(hw, 1);
	pr_debug("AUX enable returned %d\n", ret);
	if (ret)
		return ret;

	/* Read PDA */
	hermes_aux_setaddr(hw, pda_addr);
	hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);

	/* Close aux port */
	ret = hermes_aux_control(hw, 0);
	pr_debug("AUX disable returned %d\n", ret);

	/* Check PDA length */
	pda_size = le16_to_cpu(pda[0]);
	pr_debug("Actual PDA length %d, Max allowed %d\n",
		 pda_size, pda_len);
	if (pda_size > pda_len)
		return -EINVAL;

	return 0;
}

/* Thin spinlock wrappers so bus-agnostic callers can lock via hw->ops. */
static void hermes_lock_irqsave(spinlock_t *lock,
				unsigned long *flags) __acquires(lock)
{
	spin_lock_irqsave(lock, *flags);
}

static void hermes_unlock_irqrestore(spinlock_t *lock,
				     unsigned long *flags) __releases(lock)
{
	spin_unlock_irqrestore(lock, *flags);
}

static void hermes_lock_irq(spinlock_t *lock) __acquires(lock)
{
	spin_lock_irq(lock);
}

static void hermes_unlock_irq(spinlock_t *lock) __releases(lock)
{
	spin_unlock_irq(lock);
}

/* Hermes operations for local buses */
static const struct hermes_ops hermes_ops_local = {
	.init = hermes_init,
	.cmd_wait = hermes_docmd_wait,
	.init_cmd_wait = hermes_doicmd_wait,
	.allocate = hermes_allocate,
	.read_ltv = hermes_read_ltv,
	.write_ltv = hermes_write_ltv,
	.bap_pread = hermes_bap_pread,
	.bap_pwrite = hermes_bap_pwrite,
	.read_pda = hermes_read_pda,
	.program_init = hermesi_program_init,
	.program_end = hermesi_program_end,
	.program = hermes_program_bytes,
	.lock_irqsave = hermes_lock_irqsave,
	.unlock_irqrestore = hermes_unlock_irqrestore,
	.lock_irq = hermes_lock_irq,
	.unlock_irq = hermes_unlock_irq,
};
gpl-2.0
xenon-cm/android_kernel_samsung_tuna
sound/core/oss/pcm_plugin.c
8153
21871
/*
 *  PCM Plug-In shared (kernel/library) code
 *  Copyright (c) 1999 by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) 2000 by Abramo Bagnara <abramo@alsa-project.org>
 *
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Library General Public License as
 *   published by the Free Software Foundation; either version 2 of
 *   the License, or (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Library General Public License for more details.
 *
 *   You should have received a copy of the GNU Library General Public
 *   License along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#if 0
#define PLUGIN_DEBUG
#endif

#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "pcm_plugin.h"

#define snd_pcm_plug_first(plug) ((plug)->runtime->oss.plugin_first)
#define snd_pcm_plug_last(plug) ((plug)->runtime->oss.plugin_last)

/*
 * because some cards might have rates "very close", we ignore
 * all "resampling" requests within +-5%
 */
static int rate_match(unsigned int src_rate, unsigned int dst_rate)
{
	unsigned int low = (src_rate * 95) / 100;
	unsigned int high = (src_rate * 105) / 100;
	return dst_rate >= low && dst_rate <= high;
}

/* Size (or re-size) a plugin's intermediate buffer for 'frames' frames and
 * lay out the per-channel area descriptors for the plugin's access mode. */
static int snd_pcm_plugin_alloc(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t frames)
{
	struct snd_pcm_plugin_format *format;
	ssize_t width;
	size_t size;
	unsigned int channel;
	struct snd_pcm_plugin_channel *c;

	/* Playback plugins buffer their source format; capture their dest. */
	if (plugin->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		format = &plugin->src_format;
	} else {
		format = &plugin->dst_format;
	}
	if ((width = snd_pcm_format_physical_width(format->format)) < 0)
		return width;
	/* size is in bits here; converted to bytes below. */
	size = frames * format->channels * width;
	if (snd_BUG_ON(size % 8))
		return -ENXIO;
	size /= 8;
	if (plugin->buf_frames < frames) {
		vfree(plugin->buf);
		plugin->buf = vmalloc(size);
		plugin->buf_frames = frames;
	}
	if (!plugin->buf) {
		plugin->buf_frames = 0;
		return -ENOMEM;
	}
	c = plugin->buf_channels;
	if (plugin->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED) {
		/* Interleaved: channels share one buffer, strided by frame. */
		for (channel = 0; channel < format->channels; channel++, c++) {
			c->frames = frames;
			c->enabled = 1;
			c->wanted = 0;
			c->area.addr = plugin->buf;
			c->area.first = channel * width;
			c->area.step = format->channels * width;
		}
	} else if (plugin->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
		/* Non-interleaved: each channel gets a contiguous slice. */
		if (snd_BUG_ON(size % format->channels))
			return -EINVAL;
		size /= format->channels;
		for (channel = 0; channel < format->channels; channel++, c++) {
			c->frames = frames;
			c->enabled = 1;
			c->wanted = 0;
			c->area.addr = plugin->buf + (channel * size);
			c->area.first = 0;
			c->area.step = width;
		}
	} else
		return -EINVAL;
	return 0;
}

/* Walk the plugin chain and size every intermediate buffer, converting the
 * frame count through each plugin's frame-rate conversion callback. */
int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
{
	int err;
	if (snd_BUG_ON(!snd_pcm_plug_first(plug)))
		return -ENXIO;
	if (snd_pcm_plug_stream(plug) == SNDRV_PCM_STREAM_PLAYBACK) {
		struct snd_pcm_plugin *plugin = snd_pcm_plug_first(plug);
		while (plugin->next) {
			if (plugin->dst_frames)
				frames = plugin->dst_frames(plugin, frames);
			if (snd_BUG_ON(frames <= 0))
				return -ENXIO;
			plugin = plugin->next;
			err = snd_pcm_plugin_alloc(plugin, frames);
			if (err < 0)
				return err;
		}
	} else {
		/* Capture: walk the chain backwards from the last plugin. */
		struct snd_pcm_plugin *plugin = snd_pcm_plug_last(plug);
		while (plugin->prev) {
			if (plugin->src_frames)
				frames = plugin->src_frames(plugin, frames);
			if (snd_BUG_ON(frames <= 0))
				return -ENXIO;
			plugin = plugin->prev;
			err = snd_pcm_plugin_alloc(plugin, frames);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

/* Default client_channels callback: expose the plugin's own buffer. */
snd_pcm_sframes_t snd_pcm_plugin_client_channels(struct snd_pcm_plugin *plugin,
					 snd_pcm_uframes_t frames,
					 struct snd_pcm_plugin_channel **channels)
{
	*channels = plugin->buf_channels;
	return frames;
}

/* Allocate and initialise a plugin descriptor ('extra' bytes of private
 * data are co-allocated after the struct); on success *ret owns it. */
int snd_pcm_plugin_build(struct snd_pcm_substream *plug,
			 const char *name,
			 struct snd_pcm_plugin_format *src_format,
			 struct snd_pcm_plugin_format *dst_format,
			 size_t extra,
			 struct snd_pcm_plugin **ret)
{
	struct snd_pcm_plugin *plugin;
	unsigned int channels;

	if (snd_BUG_ON(!plug))
		return -ENXIO;
	if (snd_BUG_ON(!src_format || !dst_format))
		return -ENXIO;
	plugin = kzalloc(sizeof(*plugin) + extra, GFP_KERNEL);
	if (plugin == NULL)
		return -ENOMEM;
	plugin->name = name;
	plugin->plug = plug;
	plugin->stream = snd_pcm_plug_stream(plug);
	plugin->access = SNDRV_PCM_ACCESS_RW_INTERLEAVED;
	plugin->src_format = *src_format;
	plugin->src_width = snd_pcm_format_physical_width(src_format->format);
	snd_BUG_ON(plugin->src_width <= 0);
	plugin->dst_format = *dst_format;
	plugin->dst_width = snd_pcm_format_physical_width(dst_format->format);
	snd_BUG_ON(plugin->dst_width <= 0);
	if (plugin->stream == SNDRV_PCM_STREAM_PLAYBACK)
		channels = src_format->channels;
	else
		channels = dst_format->channels;
	plugin->buf_channels = kcalloc(channels, sizeof(*plugin->buf_channels), GFP_KERNEL);
	if (plugin->buf_channels == NULL) {
		snd_pcm_plugin_free(plugin);
		return -ENOMEM;
	}
	plugin->client_channels = snd_pcm_plugin_client_channels;
	*ret = plugin;
	return 0;
}

int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin)
{
	if (!
plugin)
		return 0;
	if (plugin->private_free)
		plugin->private_free(plugin);
	kfree(plugin->buf_channels);
	vfree(plugin->buf);
	kfree(plugin);
	return 0;
}

/* Convert a driver-side frame count into the equivalent client-side
 * count by walking the chain from the slave end toward the client. */
snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug,
					 snd_pcm_uframes_t drv_frames)
{
	struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next;
	int stream = snd_pcm_plug_stream(plug);

	if (snd_BUG_ON(!plug))
		return -ENXIO;
	if (drv_frames == 0)
		return 0;
	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		plugin = snd_pcm_plug_last(plug);
		while (plugin && drv_frames > 0) {
			plugin_prev = plugin->prev;
			if (plugin->src_frames)
				drv_frames = plugin->src_frames(plugin, drv_frames);
			plugin = plugin_prev;
		}
	} else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
		plugin = snd_pcm_plug_first(plug);
		while (plugin && drv_frames > 0) {
			plugin_next = plugin->next;
			if (plugin->dst_frames)
				drv_frames = plugin->dst_frames(plugin, drv_frames);
			plugin = plugin_next;
		}
	} else
		snd_BUG();
	return drv_frames;
}

/* Convert a client-side frame count into the equivalent slave-side
 * count by walking the chain from the client end toward the slave. */
snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug,
					 snd_pcm_uframes_t clt_frames)
{
	struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next;
	snd_pcm_sframes_t frames;
	int stream = snd_pcm_plug_stream(plug);

	if (snd_BUG_ON(!plug))
		return -ENXIO;
	if (clt_frames == 0)
		return 0;
	frames = clt_frames;
	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		plugin = snd_pcm_plug_first(plug);
		while (plugin && frames > 0) {
			plugin_next = plugin->next;
			if (plugin->dst_frames) {
				frames = plugin->dst_frames(plugin, frames);
				if (frames < 0)
					return frames;
			}
			plugin = plugin_next;
		}
	} else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
		plugin = snd_pcm_plug_last(plug);
		while (plugin) {
			plugin_prev = plugin->prev;
			if (plugin->src_frames) {
				frames = plugin->src_frames(plugin, frames);
				if (frames < 0)
					return frames;
			}
			plugin = plugin_prev;
		}
	} else
		snd_BUG();
	return frames;
}

/* Test whether 'format' can be converted into something in 'mask':
 * any linear format can reach any other linear format (and mu-law). */
static int snd_pcm_plug_formats(struct snd_mask *mask,
				snd_pcm_format_t format)
{
	struct snd_mask formats = *mask;
	u64 linfmts = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S8 |
		       SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_S16_LE |
		       SNDRV_PCM_FMTBIT_U16_BE | SNDRV_PCM_FMTBIT_S16_BE |
		       SNDRV_PCM_FMTBIT_U24_LE | SNDRV_PCM_FMTBIT_S24_LE |
		       SNDRV_PCM_FMTBIT_U24_BE | SNDRV_PCM_FMTBIT_S24_BE |
		       SNDRV_PCM_FMTBIT_U24_3LE | SNDRV_PCM_FMTBIT_S24_3LE |
		       SNDRV_PCM_FMTBIT_U24_3BE | SNDRV_PCM_FMTBIT_S24_3BE |
		       SNDRV_PCM_FMTBIT_U32_LE | SNDRV_PCM_FMTBIT_S32_LE |
		       SNDRV_PCM_FMTBIT_U32_BE | SNDRV_PCM_FMTBIT_S32_BE);
	snd_mask_set(&formats, (__force int)SNDRV_PCM_FORMAT_MU_LAW);

	if (formats.bits[0] & (u32)linfmts)
		formats.bits[0] |= (u32)linfmts;
	if (formats.bits[1] & (u32)(linfmts >> 32))
		formats.bits[1] |= (u32)(linfmts >> 32);
	return snd_mask_test(&formats, (__force int)format);
}

/* Conversion targets in order of preference (16-bit first). */
static snd_pcm_format_t preferred_formats[] = {
	SNDRV_PCM_FORMAT_S16_LE,
	SNDRV_PCM_FORMAT_S16_BE,
	SNDRV_PCM_FORMAT_U16_LE,
	SNDRV_PCM_FORMAT_U16_BE,
	SNDRV_PCM_FORMAT_S24_3LE,
	SNDRV_PCM_FORMAT_S24_3BE,
	SNDRV_PCM_FORMAT_U24_3LE,
	SNDRV_PCM_FORMAT_U24_3BE,
	SNDRV_PCM_FORMAT_S24_LE,
	SNDRV_PCM_FORMAT_S24_BE,
	SNDRV_PCM_FORMAT_U24_LE,
	SNDRV_PCM_FORMAT_U24_BE,
	SNDRV_PCM_FORMAT_S32_LE,
	SNDRV_PCM_FORMAT_S32_BE,
	SNDRV_PCM_FORMAT_U32_LE,
	SNDRV_PCM_FORMAT_U32_BE,
	SNDRV_PCM_FORMAT_S8,
	SNDRV_PCM_FORMAT_U8
};

/* Pick the best slave format for 'format' out of 'format_mask'. */
snd_pcm_format_t snd_pcm_plug_slave_format(snd_pcm_format_t format,
					   struct snd_mask *format_mask)
{
	int i;

	if (snd_mask_test(format_mask, (__force int)format))
		return format;
	if (!snd_pcm_plug_formats(format_mask, format))
		return (__force snd_pcm_format_t)-EINVAL;
	if (snd_pcm_format_linear(format)) {
		unsigned int width = snd_pcm_format_width(format);
		int unsignd = snd_pcm_format_unsigned(format) > 0;
		int big = snd_pcm_format_big_endian(format) > 0;
		unsigned int badness, best = -1;
		snd_pcm_format_t best_format = (__force snd_pcm_format_t)-1;
		for (i = 0; i < ARRAY_SIZE(preferred_formats); i++) {
			snd_pcm_format_t f = preferred_formats[i];
			unsigned int w;
			if (!snd_mask_test(format_mask, (__force int)f))
				continue;
			w = snd_pcm_format_width(f);
			/* Prefer widening over narrowing (narrowing loses bits). */
			if (w >= width)
				badness = w - width;
			else
badness = width - w + 32; badness += snd_pcm_format_unsigned(f) != unsignd; badness += snd_pcm_format_big_endian(f) != big; if (badness < best) { best_format = f; best = badness; } } if ((__force int)best_format >= 0) return best_format; else return (__force snd_pcm_format_t)-EINVAL; } else { switch (format) { case SNDRV_PCM_FORMAT_MU_LAW: for (i = 0; i < ARRAY_SIZE(preferred_formats); ++i) { snd_pcm_format_t format1 = preferred_formats[i]; if (snd_mask_test(format_mask, (__force int)format1)) return format1; } default: return (__force snd_pcm_format_t)-EINVAL; } } } int snd_pcm_plug_format_plugins(struct snd_pcm_substream *plug, struct snd_pcm_hw_params *params, struct snd_pcm_hw_params *slave_params) { struct snd_pcm_plugin_format tmpformat; struct snd_pcm_plugin_format dstformat; struct snd_pcm_plugin_format srcformat; snd_pcm_access_t src_access, dst_access; struct snd_pcm_plugin *plugin = NULL; int err; int stream = snd_pcm_plug_stream(plug); int slave_interleaved = (params_channels(slave_params) == 1 || params_access(slave_params) == SNDRV_PCM_ACCESS_RW_INTERLEAVED); switch (stream) { case SNDRV_PCM_STREAM_PLAYBACK: dstformat.format = params_format(slave_params); dstformat.rate = params_rate(slave_params); dstformat.channels = params_channels(slave_params); srcformat.format = params_format(params); srcformat.rate = params_rate(params); srcformat.channels = params_channels(params); src_access = SNDRV_PCM_ACCESS_RW_INTERLEAVED; dst_access = (slave_interleaved ? SNDRV_PCM_ACCESS_RW_INTERLEAVED : SNDRV_PCM_ACCESS_RW_NONINTERLEAVED); break; case SNDRV_PCM_STREAM_CAPTURE: dstformat.format = params_format(params); dstformat.rate = params_rate(params); dstformat.channels = params_channels(params); srcformat.format = params_format(slave_params); srcformat.rate = params_rate(slave_params); srcformat.channels = params_channels(slave_params); src_access = (slave_interleaved ? 
SNDRV_PCM_ACCESS_RW_INTERLEAVED : SNDRV_PCM_ACCESS_RW_NONINTERLEAVED); dst_access = SNDRV_PCM_ACCESS_RW_INTERLEAVED; break; default: snd_BUG(); return -EINVAL; } tmpformat = srcformat; pdprintf("srcformat: format=%i, rate=%i, channels=%i\n", srcformat.format, srcformat.rate, srcformat.channels); pdprintf("dstformat: format=%i, rate=%i, channels=%i\n", dstformat.format, dstformat.rate, dstformat.channels); /* Format change (linearization) */ if (! rate_match(srcformat.rate, dstformat.rate) && ! snd_pcm_format_linear(srcformat.format)) { if (srcformat.format != SNDRV_PCM_FORMAT_MU_LAW) return -EINVAL; tmpformat.format = SNDRV_PCM_FORMAT_S16; err = snd_pcm_plugin_build_mulaw(plug, &srcformat, &tmpformat, &plugin); if (err < 0) return err; err = snd_pcm_plugin_append(plugin); if (err < 0) { snd_pcm_plugin_free(plugin); return err; } srcformat = tmpformat; src_access = dst_access; } /* channels reduction */ if (srcformat.channels > dstformat.channels) { tmpformat.channels = dstformat.channels; err = snd_pcm_plugin_build_route(plug, &srcformat, &tmpformat, &plugin); pdprintf("channels reduction: src=%i, dst=%i returns %i\n", srcformat.channels, tmpformat.channels, err); if (err < 0) return err; err = snd_pcm_plugin_append(plugin); if (err < 0) { snd_pcm_plugin_free(plugin); return err; } srcformat = tmpformat; src_access = dst_access; } /* rate resampling */ if (!rate_match(srcformat.rate, dstformat.rate)) { if (srcformat.format != SNDRV_PCM_FORMAT_S16) { /* convert to S16 for resampling */ tmpformat.format = SNDRV_PCM_FORMAT_S16; err = snd_pcm_plugin_build_linear(plug, &srcformat, &tmpformat, &plugin); if (err < 0) return err; err = snd_pcm_plugin_append(plugin); if (err < 0) { snd_pcm_plugin_free(plugin); return err; } srcformat = tmpformat; src_access = dst_access; } tmpformat.rate = dstformat.rate; err = snd_pcm_plugin_build_rate(plug, &srcformat, &tmpformat, &plugin); pdprintf("rate down resampling: src=%i, dst=%i returns %i\n", srcformat.rate, tmpformat.rate, err); 
if (err < 0) return err; err = snd_pcm_plugin_append(plugin); if (err < 0) { snd_pcm_plugin_free(plugin); return err; } srcformat = tmpformat; src_access = dst_access; } /* format change */ if (srcformat.format != dstformat.format) { tmpformat.format = dstformat.format; if (srcformat.format == SNDRV_PCM_FORMAT_MU_LAW || tmpformat.format == SNDRV_PCM_FORMAT_MU_LAW) { err = snd_pcm_plugin_build_mulaw(plug, &srcformat, &tmpformat, &plugin); } else if (snd_pcm_format_linear(srcformat.format) && snd_pcm_format_linear(tmpformat.format)) { err = snd_pcm_plugin_build_linear(plug, &srcformat, &tmpformat, &plugin); } else return -EINVAL; pdprintf("format change: src=%i, dst=%i returns %i\n", srcformat.format, tmpformat.format, err); if (err < 0) return err; err = snd_pcm_plugin_append(plugin); if (err < 0) { snd_pcm_plugin_free(plugin); return err; } srcformat = tmpformat; src_access = dst_access; } /* channels extension */ if (srcformat.channels < dstformat.channels) { tmpformat.channels = dstformat.channels; err = snd_pcm_plugin_build_route(plug, &srcformat, &tmpformat, &plugin); pdprintf("channels extension: src=%i, dst=%i returns %i\n", srcformat.channels, tmpformat.channels, err); if (err < 0) return err; err = snd_pcm_plugin_append(plugin); if (err < 0) { snd_pcm_plugin_free(plugin); return err; } srcformat = tmpformat; src_access = dst_access; } /* de-interleave */ if (src_access != dst_access) { err = snd_pcm_plugin_build_copy(plug, &srcformat, &tmpformat, &plugin); pdprintf("interleave change (copy: returns %i)\n", err); if (err < 0) return err; err = snd_pcm_plugin_append(plugin); if (err < 0) { snd_pcm_plugin_free(plugin); return err; } } return 0; } snd_pcm_sframes_t snd_pcm_plug_client_channels_buf(struct snd_pcm_substream *plug, char *buf, snd_pcm_uframes_t count, struct snd_pcm_plugin_channel **channels) { struct snd_pcm_plugin *plugin; struct snd_pcm_plugin_channel *v; struct snd_pcm_plugin_format *format; int width, nchannels, channel; int stream = 
snd_pcm_plug_stream(plug); if (snd_BUG_ON(!buf)) return -ENXIO; if (stream == SNDRV_PCM_STREAM_PLAYBACK) { plugin = snd_pcm_plug_first(plug); format = &plugin->src_format; } else { plugin = snd_pcm_plug_last(plug); format = &plugin->dst_format; } v = plugin->buf_channels; *channels = v; if ((width = snd_pcm_format_physical_width(format->format)) < 0) return width; nchannels = format->channels; if (snd_BUG_ON(plugin->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED && format->channels > 1)) return -ENXIO; for (channel = 0; channel < nchannels; channel++, v++) { v->frames = count; v->enabled = 1; v->wanted = (stream == SNDRV_PCM_STREAM_CAPTURE); v->area.addr = buf; v->area.first = channel * width; v->area.step = nchannels * width; } return count; } snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, struct snd_pcm_plugin_channel *src_channels, snd_pcm_uframes_t size) { struct snd_pcm_plugin *plugin, *next; struct snd_pcm_plugin_channel *dst_channels; int err; snd_pcm_sframes_t frames = size; plugin = snd_pcm_plug_first(plug); while (plugin && frames > 0) { if ((next = plugin->next) != NULL) { snd_pcm_sframes_t frames1 = frames; if (plugin->dst_frames) frames1 = plugin->dst_frames(plugin, frames); if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) { return err; } if (err != frames1) { frames = err; if (plugin->src_frames) frames = plugin->src_frames(plugin, frames1); } } else dst_channels = NULL; pdprintf("write plugin: %s, %li\n", plugin->name, frames); if ((frames = plugin->transfer(plugin, src_channels, dst_channels, frames)) < 0) return frames; src_channels = dst_channels; plugin = next; } return snd_pcm_plug_client_size(plug, frames); } snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, struct snd_pcm_plugin_channel *dst_channels_final, snd_pcm_uframes_t size) { struct snd_pcm_plugin *plugin, *next; struct snd_pcm_plugin_channel *src_channels, *dst_channels; snd_pcm_sframes_t frames = size; int err; 
frames = snd_pcm_plug_slave_size(plug, frames); if (frames < 0) return frames; src_channels = NULL; plugin = snd_pcm_plug_first(plug); while (plugin && frames > 0) { if ((next = plugin->next) != NULL) { if ((err = plugin->client_channels(plugin, frames, &dst_channels)) < 0) { return err; } frames = err; } else { dst_channels = dst_channels_final; } pdprintf("read plugin: %s, %li\n", plugin->name, frames); if ((frames = plugin->transfer(plugin, src_channels, dst_channels, frames)) < 0) return frames; plugin = next; src_channels = dst_channels; } return frames; } int snd_pcm_area_silence(const struct snd_pcm_channel_area *dst_area, size_t dst_offset, size_t samples, snd_pcm_format_t format) { /* FIXME: sub byte resolution and odd dst_offset */ unsigned char *dst; unsigned int dst_step; int width; const unsigned char *silence; if (!dst_area->addr) return 0; dst = dst_area->addr + (dst_area->first + dst_area->step * dst_offset) / 8; width = snd_pcm_format_physical_width(format); if (width <= 0) return -EINVAL; if (dst_area->step == (unsigned int) width && width >= 8) return snd_pcm_format_set_silence(format, dst, samples); silence = snd_pcm_format_silence_64(format); if (! 
silence) return -EINVAL; dst_step = dst_area->step / 8; if (width == 4) { /* Ima ADPCM */ int dstbit = dst_area->first % 8; int dstbit_step = dst_area->step % 8; while (samples-- > 0) { if (dstbit) *dst &= 0xf0; else *dst &= 0x0f; dst += dst_step; dstbit += dstbit_step; if (dstbit == 8) { dst++; dstbit = 0; } } } else { width /= 8; while (samples-- > 0) { memcpy(dst, silence, width); dst += dst_step; } } return 0; } int snd_pcm_area_copy(const struct snd_pcm_channel_area *src_area, size_t src_offset, const struct snd_pcm_channel_area *dst_area, size_t dst_offset, size_t samples, snd_pcm_format_t format) { /* FIXME: sub byte resolution and odd dst_offset */ char *src, *dst; int width; int src_step, dst_step; src = src_area->addr + (src_area->first + src_area->step * src_offset) / 8; if (!src_area->addr) return snd_pcm_area_silence(dst_area, dst_offset, samples, format); dst = dst_area->addr + (dst_area->first + dst_area->step * dst_offset) / 8; if (!dst_area->addr) return 0; width = snd_pcm_format_physical_width(format); if (width <= 0) return -EINVAL; if (src_area->step == (unsigned int) width && dst_area->step == (unsigned int) width && width >= 8) { size_t bytes = samples * width / 8; memcpy(dst, src, bytes); return 0; } src_step = src_area->step / 8; dst_step = dst_area->step / 8; if (width == 4) { /* Ima ADPCM */ int srcbit = src_area->first % 8; int srcbit_step = src_area->step % 8; int dstbit = dst_area->first % 8; int dstbit_step = dst_area->step % 8; while (samples-- > 0) { unsigned char srcval; if (srcbit) srcval = *src & 0x0f; else srcval = (*src & 0xf0) >> 4; if (dstbit) *dst = (*dst & 0xf0) | srcval; else *dst = (*dst & 0x0f) | (srcval << 4); src += src_step; srcbit += srcbit_step; if (srcbit == 8) { src++; srcbit = 0; } dst += dst_step; dstbit += dstbit_step; if (dstbit == 8) { dst++; dstbit = 0; } } } else { width /= 8; while (samples-- > 0) { memcpy(dst, src, width); src += src_step; dst += dst_step; } } return 0; }
gpl-2.0
whitemolecule/note8-molecule-kernel
tools/perf/arch/x86/util/dwarf-regs.c
9433
1795
/*
 * dwarf-regs.c : Mapping of DWARF debug register numbers into register names.
 * Extracted from probe-finder.c
 *
 * Written by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

/*
 * Only NULL is needed here; <libio.h> (used previously) was an internal
 * glibc header that has been removed from modern glibc releases.
 */
#include <stddef.h>
#include <dwarf-regs.h>

/*
 * Generic dwarf analysis helpers
 */

/* DWARF register number -> register name tables for kprobe-tracer. */
#define X86_32_MAX_REGS 8
const char *x86_32_regs_table[X86_32_MAX_REGS] = {
	"%ax",
	"%cx",
	"%dx",
	"%bx",
	"$stack",	/* Stack address instead of %sp */
	"%bp",
	"%si",
	"%di",
};

#define X86_64_MAX_REGS 16
const char *x86_64_regs_table[X86_64_MAX_REGS] = {
	"%ax",
	"%dx",
	"%cx",
	"%bx",
	"%si",
	"%di",
	"%bp",
	"%sp",
	"%r8",
	"%r9",
	"%r10",
	"%r11",
	"%r12",
	"%r13",
	"%r14",
	"%r15",
};

/* TODO: switching by dwarf address size */
#ifdef __x86_64__
#define ARCH_MAX_REGS X86_64_MAX_REGS
#define arch_regs_table x86_64_regs_table
#else
#define ARCH_MAX_REGS X86_32_MAX_REGS
#define arch_regs_table x86_32_regs_table
#endif

/*
 * Return architecture dependent register string (for kprobe-tracer),
 * or NULL when @n is out of range.
 *
 * BUGFIX: the bound check must be strict.  The table has exactly
 * ARCH_MAX_REGS entries (valid indices 0 .. ARCH_MAX_REGS-1), so the
 * original "n <= ARCH_MAX_REGS" allowed an out-of-bounds read at
 * n == ARCH_MAX_REGS.
 */
const char *get_arch_regstr(unsigned int n)
{
	return (n < ARCH_MAX_REGS) ? arch_regs_table[n] : NULL;
}
gpl-2.0
bkury/OpenPHT
lib/cpluff/libcpluff/context.c
218
13954
/*------------------------------------------------------------------------- * C-Pluff, a plug-in framework for C * Copyright 2007 Johannes Lehtinen * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*-----------------------------------------------------------------------*/ /** @file * Plug-in context implementation */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <stdarg.h> #include <string.h> #include "../kazlib/list.h" #include "cpluff.h" #include "util.h" #ifdef CP_THREADS #include "thread.h" #endif #include "internal.h" /* ------------------------------------------------------------------------ * Variables * ----------------------------------------------------------------------*/ /// Existing contexts static list_t *contexts = NULL; /* ------------------------------------------------------------------------ * Function definitions * ----------------------------------------------------------------------*/ // Generic static void free_plugin_env(cp_plugin_env_t *env) { assert(env != NULL); // Free environment data if (env->plugin_listeners != NULL) { cpi_unregister_plisteners(env->plugin_listeners, NULL); list_destroy(env->plugin_listeners); env->plugin_listeners = NULL; } if (env->loggers != NULL) { cpi_unregister_loggers(env->loggers, NULL); list_destroy(env->loggers); env->loggers = NULL; } if (env->plugin_dirs != NULL) { list_process(env->plugin_dirs, NULL, cpi_process_free_ptr); list_destroy(env->plugin_dirs); env->plugin_dirs = NULL; } if (env->infos != NULL) { assert(hash_isempty(env->infos)); hash_destroy(env->infos); env->infos = NULL; } if (env->plugins != NULL) { assert(hash_isempty(env->plugins)); hash_destroy(env->plugins); env->plugins = NULL; } if (env->started_plugins != NULL) { assert(list_isempty(env->started_plugins)); list_destroy(env->started_plugins); env->started_plugins = NULL; } if (env->ext_points != NULL) { assert(hash_isempty(env->ext_points)); hash_destroy(env->ext_points); } if (env->extensions != NULL) { assert(hash_isempty(env->extensions)); hash_destroy(env->extensions); } if (env->run_funcs != NULL) { assert(list_isempty(env->run_funcs)); list_destroy(env->run_funcs); } // Destroy mutex #ifdef CP_THREADS 
if (env->mutex != NULL) { cpi_destroy_mutex(env->mutex); } #endif // Free environment free(env); } CP_HIDDEN void cpi_free_context(cp_context_t *context) { assert(context != NULL); // Free environment if this is the client program context if (context->plugin == NULL && context->env != NULL) { free_plugin_env(context->env); } // Destroy symbol lists if (context->resolved_symbols != NULL) { assert(hash_isempty(context->resolved_symbols)); hash_destroy(context->resolved_symbols); } if (context->symbol_providers != NULL) { assert(hash_isempty(context->symbol_providers)); hash_destroy(context->symbol_providers); } // Free context free(context); } CP_HIDDEN cp_context_t * cpi_new_context(cp_plugin_t *plugin, cp_plugin_env_t *env, cp_status_t *error) { cp_context_t *context = NULL; cp_status_t status = CP_OK; assert(env != NULL); assert(error != NULL); do { // Allocate memory for the context if ((context = malloc(sizeof(cp_context_t))) == NULL) { status = CP_ERR_RESOURCE; break; } // Initialize context context->plugin = plugin; context->env = env; context->resolved_symbols = NULL; context->symbol_providers = NULL; } while (0); // Free context on error if (status != CP_OK && context != NULL) { free(context); context = NULL; } *error = status; return context; } CP_C_API cp_context_t * cp_create_context(cp_status_t *error) { cp_plugin_env_t *env = NULL; cp_context_t *context = NULL; cp_status_t status = CP_OK; // Initialize internal state do { // Allocate memory for the plug-in environment if ((env = malloc(sizeof(cp_plugin_env_t))) == NULL) { status = CP_ERR_RESOURCE; break; } // Initialize plug-in environment memset(env, 0, sizeof(cp_plugin_env_t)); #ifdef CP_THREADS env->mutex = cpi_create_mutex(); #endif env->argc = 0; env->argv = NULL; env->plugin_listeners = list_create(LISTCOUNT_T_MAX); env->loggers = list_create(LISTCOUNT_T_MAX); env->log_min_severity = CP_LOG_NONE; env->plugin_dirs = list_create(LISTCOUNT_T_MAX); env->infos = hash_create(HASHCOUNT_T_MAX, 
cpi_comp_ptr, cpi_hashfunc_ptr); env->plugins = hash_create(HASHCOUNT_T_MAX, (int (*)(const void *, const void *)) strcmp, NULL); env->started_plugins = list_create(LISTCOUNT_T_MAX); env->ext_points = hash_create(HASHCOUNT_T_MAX, (int (*)(const void *, const void *)) strcmp, NULL); env->extensions = hash_create(HASHCOUNT_T_MAX, (int (*)(const void *, const void *)) strcmp, NULL); env->run_funcs = list_create(LISTCOUNT_T_MAX); env->run_wait = NULL; if (env->plugin_listeners == NULL || env->loggers == NULL #ifdef CP_THREADS || env->mutex == NULL #endif || env->plugin_dirs == NULL || env->infos == NULL || env->plugins == NULL || env->started_plugins == NULL || env->ext_points == NULL || env->extensions == NULL || env->run_funcs == NULL) { status = CP_ERR_RESOURCE; break; } // Create the plug-in context if ((context = cpi_new_context(NULL, env, &status)) == NULL) { break; } env = NULL; // Create a context list, if necessary, and add context to the list cpi_lock_framework(); if (contexts == NULL) { if ((contexts = list_create(LISTCOUNT_T_MAX)) == NULL) { status = CP_ERR_RESOURCE; } } if (status == CP_OK) { lnode_t *node; if ((node = lnode_create(context)) == NULL) { status = CP_ERR_RESOURCE; } else { list_append(contexts, node); } } cpi_unlock_framework(); } while (0); // Release resources on failure if (status != CP_OK) { if (env != NULL) { free_plugin_env(env); } if (context != NULL) { cpi_free_context(context); } context = NULL; } // Return the final status if (error != NULL) { *error = status; } // Return the context (or NULL on failure) return context; } CP_C_API void cp_destroy_context(cp_context_t *context) { CHECK_NOT_NULL(context); if (context->plugin != NULL) { cpi_fatalf(_("Only the main program can destroy a plug-in context.")); } // Check invocation cpi_lock_context(context); cpi_check_invocation(context, CPI_CF_ANY, __func__); cpi_unlock_context(context); #ifdef CP_THREADS assert(context->env->mutex == NULL || !cpi_is_mutex_locked(context->env->mutex)); 
#else assert(!context->env->locked); #endif // Remove context from the context list cpi_lock_framework(); if (contexts != NULL) { lnode_t *node; if ((node = list_find(contexts, context, cpi_comp_ptr)) != NULL) { list_delete(contexts, node); lnode_destroy(node); } } cpi_unlock_framework(); // Unload all plug-ins cp_uninstall_plugins(context); // Release remaining information objects cpi_release_infos(context); // Free context cpi_free_context(context); } CP_HIDDEN void cpi_destroy_all_contexts(void) { cpi_lock_framework(); if (contexts != NULL) { lnode_t *node; while ((node = list_last(contexts)) != NULL) { cpi_unlock_framework(); cp_destroy_context(lnode_get(node)); cpi_lock_framework(); } list_destroy(contexts); contexts = NULL; } cpi_unlock_framework(); } // Plug-in directories CP_C_API cp_status_t cp_register_pcollection(cp_context_t *context, const char *dir) { char *d = NULL; lnode_t *node = NULL; cp_status_t status = CP_OK; CHECK_NOT_NULL(context); CHECK_NOT_NULL(dir); cpi_lock_context(context); cpi_check_invocation(context, CPI_CF_ANY, __func__); do { // Check if directory has already been registered if (list_find(context->env->plugin_dirs, dir, (int (*)(const void *, const void *)) strcmp) != NULL) { break; } // Allocate resources d = malloc(sizeof(char) * (strlen(dir) + 1)); node = lnode_create(d); if (d == NULL || node == NULL) { status = CP_ERR_RESOURCE; break; } // Register directory strcpy(d, dir); list_append(context->env->plugin_dirs, node); } while (0); // Report error or success if (status != CP_OK) { cpi_errorf(context, N_("The plug-in collection in path %s could not be registered due to insufficient memory."), dir); } else { cpi_debugf(context, N_("The plug-in collection in path %s was registered."), dir); } cpi_unlock_context(context); // Release resources on failure if (status != CP_OK) { if (d != NULL) { free(d); } if (node != NULL) { lnode_destroy(node); } } return status; } CP_C_API void cp_unregister_pcollection(cp_context_t *context, const 
char *dir) { char *d; lnode_t *node; CHECK_NOT_NULL(context); CHECK_NOT_NULL(dir); cpi_lock_context(context); cpi_check_invocation(context, CPI_CF_ANY, __func__); node = list_find(context->env->plugin_dirs, dir, (int (*)(const void *, const void *)) strcmp); if (node != NULL) { d = lnode_get(node); list_delete(context->env->plugin_dirs, node); lnode_destroy(node); free(d); } cpi_debugf(context, N_("The plug-in collection in path %s was unregistered."), dir); cpi_unlock_context(context); } CP_C_API void cp_unregister_pcollections(cp_context_t *context) { CHECK_NOT_NULL(context); cpi_lock_context(context); cpi_check_invocation(context, CPI_CF_ANY, __func__); list_process(context->env->plugin_dirs, NULL, cpi_process_free_ptr); cpi_debug(context, N_("All plug-in collections were unregistered.")); cpi_unlock_context(context); } // Startup arguments CP_C_API void cp_set_context_args(cp_context_t *ctx, char **argv) { int argc; CHECK_NOT_NULL(ctx); CHECK_NOT_NULL(argv); for (argc = 0; argv[argc] != NULL; argc++); if (argc < 1) { cpi_fatalf(_("At least one startup argument must be given in call to function %s."), __func__); } cpi_lock_context(ctx); ctx->env->argc = argc; ctx->env->argv = argv; cpi_unlock_context(ctx); } CP_C_API char **cp_get_context_args(cp_context_t *ctx, int *argc) { char **argv; CHECK_NOT_NULL(ctx); cpi_lock_context(ctx); if (argc != NULL) { *argc = ctx->env->argc; } argv = ctx->env->argv; cpi_unlock_context(ctx); return argv; } // Checking API call invocation CP_HIDDEN void cpi_check_invocation(cp_context_t *ctx, int funcmask, const char *func) { assert(ctx != NULL); assert(funcmask != 0); assert(func != NULL); assert(cpi_is_context_locked(ctx)); if ((funcmask & CPI_CF_LOGGER) &&ctx->env->in_logger_invocation) { cpi_fatalf(_("Function %s was called from within a logger invocation."), func); } if ((funcmask & CPI_CF_LISTENER) && ctx->env->in_event_listener_invocation) { cpi_fatalf(_("Function %s was called from within an event listener invocation."), 
func); } if ((funcmask & CPI_CF_START) && ctx->env->in_start_func_invocation) { cpi_fatalf(_("Function %s was called from within a plug-in start function invocation."), func); } if ((funcmask & CPI_CF_STOP) && ctx->env->in_stop_func_invocation) { cpi_fatalf(_("Function %s was called from within a plug-in stop function invocation."), func); } if (ctx->env->in_create_func_invocation) { cpi_fatalf(_("Function %s was called from within a plug-in create function invocation."), func); } if (ctx->env->in_destroy_func_invocation) { cpi_fatalf(_("Function %s was called from within a plug-in destroy function invocation."), func); } } // Locking #if defined(CP_THREADS) || !defined(NDEBUG) CP_HIDDEN void cpi_lock_context(cp_context_t *context) { #if defined(CP_THREADS) cpi_lock_mutex(context->env->mutex); #elif !defined(NDEBUG) context->env->locked++; #endif } CP_HIDDEN void cpi_unlock_context(cp_context_t *context) { #if defined(CP_THREADS) cpi_unlock_mutex(context->env->mutex); #elif !defined(NDEBUG) assert(context->env->locked > 0); context->env->locked--; #endif } CP_HIDDEN void cpi_wait_context(cp_context_t *context) { #if defined(CP_THREADS) cpi_wait_mutex(context->env->mutex); #elif !defined(NDEBUG) assert(context->env->locked > 0); assert(0); #endif } CP_HIDDEN void cpi_signal_context(cp_context_t *context) { #if defined(CP_THREADS) cpi_signal_mutex(context->env->mutex); #elif !defined(NDEBUG) assert(context->env->locked > 0); #endif } // Debug helpers CP_HIDDEN char *cpi_context_owner(cp_context_t *ctx, char *name, size_t size) { if (ctx->plugin != NULL) { /* TRANSLATORS: The context owner (when it is a plug-in) used in some strings. Search for "context owner" to find these strings. */ snprintf(name, size, _("Plug-in %s"), ctx->plugin->plugin->identifier); } else { /* TRANSLATORS: The context owner (when it is the main program) used in some strings. Search for "context owner" to find these strings. 
*/ strncpy(name, _("The main program"), size); } assert(size >= 4); strcpy(name + size - 4, "..."); return name; } #endif
gpl-2.0
aopp/android_kernel_nvidia_shieldtablet
drivers/iio/imu/inv_mpu/inv_compass/inv_ak89xx_core.c
218
15498
/* * Copyright (C) 2013 Invensense, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/sysfs.h> #include <linux/jiffies.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/kfifo.h> #include <linux/poll.h> #include <linux/miscdevice.h> #include <linux/spinlock.h> #include "inv_ak89xx_iio.h" #include "sysfs.h" #include "inv_test/inv_counters.h" static s64 get_time_ns(void) { struct timespec ts; ktime_get_ts(&ts); return timespec_to_ns(&ts); } /** * inv_serial_read() - Read one or more bytes from the device registers. * @st: Device driver instance. * @reg: First device register to be read from. * @length: Number of bytes to read. * @data: Data read from device. * NOTE: The slave register will not increment when reading from the FIFO. */ int inv_serial_read(struct inv_ak89xx_state_s *st, u8 reg, u16 length, u8 *data) { int result; INV_I2C_INC_COMPASSWRITE(3); INV_I2C_INC_COMPASSREAD(length); result = i2c_smbus_read_i2c_block_data(st->i2c, reg, length, data); if (result != length) { if (result < 0) return result; else return -EINVAL; } else { return 0; } } /** * inv_serial_single_write() - Write a byte to a device register. * @st: Device driver instance. * @reg: Device register to be written to. * @data: Byte to write to device. 
*/ int inv_serial_single_write(struct inv_ak89xx_state_s *st, u8 reg, u8 data) { u8 d[1]; d[0] = data; INV_I2C_INC_COMPASSWRITE(3); return i2c_smbus_write_i2c_block_data(st->i2c, reg, 1, d); } static int ak89xx_init(struct inv_ak89xx_state_s *st) { int result = 0; unsigned char serial_data[3]; result = inv_serial_single_write(st, AK89XX_REG_CNTL, AK89XX_CNTL_MODE_POWER_DOWN); if (result) { pr_err("%s, line=%d\n", __func__, __LINE__); return result; } /* Wait at least 100us */ udelay(100); result = inv_serial_single_write(st, AK89XX_REG_CNTL, AK89XX_CNTL_MODE_FUSE_ACCESS); if (result) { pr_err("%s, line=%d\n", __func__, __LINE__); return result; } /* Wait at least 200us */ udelay(200); result = inv_serial_read(st, AK89XX_FUSE_ASAX, 3, serial_data); if (result) { pr_err("%s, line=%d\n", __func__, __LINE__); return result; } st->asa[0] = serial_data[0]; st->asa[1] = serial_data[1]; st->asa[2] = serial_data[2]; result = inv_serial_single_write(st, AK89XX_REG_CNTL, AK89XX_CNTL_MODE_POWER_DOWN); if (result) { pr_err("%s, line=%d\n", __func__, __LINE__); return result; } udelay(100); return result; } int ak89xx_read(struct inv_ak89xx_state_s *st, short rawfixed[3]) { unsigned char regs[8]; unsigned char *stat = &regs[0]; unsigned char *stat2 = &regs[7]; int result = 0; int status = 0; result = inv_serial_read(st, AK89XX_REG_ST1, 8, regs); if (result) { pr_err("%s, line=%d\n", __func__, __LINE__); return result; } rawfixed[0] = (short)((regs[2]<<8) | regs[1]); rawfixed[1] = (short)((regs[4]<<8) | regs[3]); rawfixed[2] = (short)((regs[6]<<8) | regs[5]); /* * ST : data ready - * Measurement has been completed and data is ready to be read. */ if (*stat & 0x01) status = 0; /* * ST2 : data error - * occurs when data read is started outside of a readable period; * data read would not be correct. * Valid in continuous measurement mode only. 
* In single measurement mode this error should not occour but we * stil account for it and return an error, since the data would be * corrupted. * DERR bit is self-clearing when ST2 register is read. */ if (*stat2 & 0x04) status = 0x04; /* * ST2 : overflow - * the sum of the absolute values of all axis |X|+|Y|+|Z| < 2400uT. * This is likely to happen in presence of an external magnetic * disturbance; it indicates, the sensor data is incorrect and should * be ignored. * An error is returned. * HOFL bit clears when a new measurement starts. */ if (*stat2 & 0x08) status = 0x08; /* * ST : overrun - * the previous sample was not fetched and lost. * Valid in continuous measurement mode only. * In single measurement mode this error should not occour and we * don't consider this condition an error. * DOR bit is self-clearing when ST2 or any meas. data register is * read. */ if (*stat & 0x02) { /* status = INV_ERROR_COMPASS_DATA_UNDERFLOW; */ status = 0; } /* * trigger next measurement if: * - stat is non zero; * - if stat is zero and stat2 is non zero. * Won't trigger if data is not ready and there was no error. */ if (1) { unsigned char scale = 0; if (st->compass_id == COMPASS_ID_AK8963) scale = st->compass_scale; result = inv_serial_single_write(st, AK89XX_REG_CNTL, (scale << 4) | AK89XX_CNTL_MODE_SNG_MEASURE); if (result) { pr_err("%s, line=%d\n", __func__, __LINE__); return result; } } else pr_err("%s, no next measure(0x%x,0x%x)\n", __func__, *stat, *stat2); if (status) pr_err("%s, line=%d, status=%d\n", __func__, __LINE__, status); return status; } /** * ak89xx_read_raw() - read raw method. 
*/ static int ak89xx_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { struct inv_ak89xx_state_s *st = iio_priv(indio_dev); int scale = 0; switch (mask) { case 0: if (!(iio_buffer_enabled(indio_dev))) return -EINVAL; if (chan->type == IIO_MAGN) { *val = st->compass_data[chan->channel2 - IIO_MOD_X]; return IIO_VAL_INT; } return -EINVAL; case IIO_CHAN_INFO_SCALE: if (chan->type == IIO_MAGN) { if (st->compass_id == COMPASS_ID_AK8975) scale = 9830; else if (st->compass_id == COMPASS_ID_AK8972) scale = 19661; else if (st->compass_id == COMPASS_ID_AK8963) { if (st->compass_scale) scale = 4915; /* 16 bit */ else scale = 19661; /* 14 bit */ } scale *= (1L << 15); *val = scale; return IIO_VAL_INT; } return -EINVAL; default: return -EINVAL; } } static ssize_t ak89xx_value_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct inv_ak89xx_state_s *st = iio_priv(indio_dev); short c[3]; mutex_lock(&indio_dev->mlock); c[0] = st->compass_data[0]; c[1] = st->compass_data[1]; c[2] = st->compass_data[2]; mutex_unlock(&indio_dev->mlock); return sprintf(buf, "%d, %d, %d\n", c[0], c[1], c[2]); } static ssize_t ak89xx_scale_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct inv_ak89xx_state_s *st = iio_priv(indio_dev); int scale = 0; if (st->compass_id == COMPASS_ID_AK8975) scale = 9830; else if (st->compass_id == COMPASS_ID_AK8972) scale = 19661; else if (st->compass_id == COMPASS_ID_AK8963) { if (st->compass_scale) scale = 4915; /* 16 bit */ else scale = 19661; /* 14 bit */ } scale *= (1L << 15); return sprintf(buf, "%d\n", scale); } static ssize_t ak89xx_rate_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct inv_ak89xx_state_s *st = iio_priv(indio_dev); /* transform delay in ms to rate */ return sprintf(buf, 
"%d\n", (1000 / st->delay)); } /** * ak89xx_matrix_show() - show orientation matrix */ static ssize_t ak89xx_matrix_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); signed char *m; struct inv_ak89xx_state_s *st = iio_priv(indio_dev); m = st->plat_data.orientation; return sprintf(buf, "%d,%d,%d,%d,%d,%d,%d,%d,%d\n", m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]); } void set_ak89xx_enable(struct iio_dev *indio_dev, bool enable) { struct inv_ak89xx_state_s *st = iio_priv(indio_dev); int result = 0; unsigned char scale = 0; if (st->compass_id == COMPASS_ID_AK8963) scale = st->compass_scale; if (enable) { result = inv_serial_single_write(st, AK89XX_REG_CNTL, (scale << 4) | AK89XX_CNTL_MODE_SNG_MEASURE); if (result) pr_err("%s, line=%d\n", __func__, __LINE__); schedule_delayed_work(&st->work, msecs_to_jiffies(st->delay)); } else { cancel_delayed_work_sync(&st->work); result = inv_serial_single_write(st, AK89XX_REG_CNTL, (scale << 4) | AK89XX_CNTL_MODE_POWER_DOWN); if (result) pr_err("%s, line=%d\n", __func__, __LINE__); mdelay(1); /* wait at least 100us */ } } static ssize_t ak89xx_scale_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct inv_ak89xx_state_s *st = iio_priv(indio_dev); unsigned long data, result; result = kstrtoul(buf, 10, &data); if (result) return result; if (st->compass_id == COMPASS_ID_AK8963) st->compass_scale = !!data; return count; } static ssize_t ak89xx_rate_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long data; int error; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct inv_ak89xx_state_s *st = iio_priv(indio_dev); error = kstrtoul(buf, 10, &data); if (error) return error; /* transform rate to delay in ms */ data = 1000 / data; if (data > AK89XX_MAX_DELAY) data = AK89XX_MAX_DELAY; if (data < AK89XX_MIN_DELAY) data = 
AK89XX_MIN_DELAY; st->delay = (unsigned int) data; return count; } static void ak89xx_work_func(struct work_struct *work) { struct inv_ak89xx_state_s *st = container_of((struct delayed_work *)work, struct inv_ak89xx_state_s, work); struct iio_dev *indio_dev = iio_priv_to_dev(st); unsigned long delay = msecs_to_jiffies(st->delay); mutex_lock(&indio_dev->mlock); if (!(iio_buffer_enabled(indio_dev))) goto error_ret; st->timestamp = get_time_ns(); schedule_delayed_work(&st->work, delay); inv_read_ak89xx_fifo(indio_dev); INV_I2C_INC_COMPASSIRQ(); error_ret: mutex_unlock(&indio_dev->mlock); } static const struct iio_chan_spec compass_channels[] = { { .type = IIO_MAGN, .modified = 1, .channel2 = IIO_MOD_X, .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, .scan_index = INV_AK89XX_SCAN_MAGN_X, .scan_type = IIO_ST('s', 16, 16, 0) }, { .type = IIO_MAGN, .modified = 1, .channel2 = IIO_MOD_Y, .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, .scan_index = INV_AK89XX_SCAN_MAGN_Y, .scan_type = IIO_ST('s', 16, 16, 0) }, { .type = IIO_MAGN, .modified = 1, .channel2 = IIO_MOD_Z, .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, .scan_index = INV_AK89XX_SCAN_MAGN_Z, .scan_type = IIO_ST('s', 16, 16, 0) }, IIO_CHAN_SOFT_TIMESTAMP(INV_AK89XX_SCAN_TIMESTAMP) }; static DEVICE_ATTR(value, S_IRUGO, ak89xx_value_show, NULL); static DEVICE_ATTR(scale, S_IRUGO | S_IWUSR, ak89xx_scale_show, ak89xx_scale_store); static DEVICE_ATTR(sampling_frequency, S_IRUGO | S_IWUSR, ak89xx_rate_show, ak89xx_rate_store); static DEVICE_ATTR(compass_matrix, S_IRUGO, ak89xx_matrix_show, NULL); static struct attribute *inv_ak89xx_attributes[] = { &dev_attr_value.attr, &dev_attr_scale.attr, &dev_attr_sampling_frequency.attr, &dev_attr_compass_matrix.attr, NULL, }; static const struct attribute_group inv_attribute_group = { .name = "ak89xx", .attrs = inv_ak89xx_attributes }; static const struct iio_info ak89xx_info = { .driver_module = THIS_MODULE, .read_raw = &ak89xx_read_raw, .attrs = &inv_attribute_group, }; /*constant IIO 
attribute */ /** * inv_ak89xx_probe() - probe function. */ static int inv_ak89xx_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct inv_ak89xx_state_s *st; struct iio_dev *indio_dev; int result; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { result = -ENODEV; goto out_no_free; } indio_dev = iio_allocate_device(sizeof(*st)); if (indio_dev == NULL) { result = -ENOMEM; goto out_no_free; } st = iio_priv(indio_dev); st->i2c = client; st->sl_handle = client->adapter; st->plat_data = *(struct mpu_platform_data *)dev_get_platdata(&client->dev); st->i2c_addr = client->addr; st->delay = AK89XX_DEFAULT_DELAY; st->compass_id = id->driver_data; st->compass_scale = 0; i2c_set_clientdata(client, indio_dev); result = ak89xx_init(st); if (result) goto out_free; indio_dev->dev.parent = &client->dev; indio_dev->name = id->name; indio_dev->channels = compass_channels; indio_dev->num_channels = ARRAY_SIZE(compass_channels); indio_dev->info = &ak89xx_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->currentmode = INDIO_DIRECT_MODE; result = inv_ak89xx_configure_ring(indio_dev); if (result) goto out_free; result = iio_buffer_register(indio_dev, indio_dev->channels, indio_dev->num_channels); if (result) goto out_unreg_ring; result = inv_ak89xx_probe_trigger(indio_dev); if (result) goto out_remove_ring; result = iio_device_register(indio_dev); if (result) goto out_remove_trigger; INIT_DELAYED_WORK(&st->work, ak89xx_work_func); pr_info("%s: Probe name %s\n", __func__, id->name); return 0; out_remove_trigger: if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) inv_ak89xx_remove_trigger(indio_dev); out_remove_ring: iio_buffer_unregister(indio_dev); out_unreg_ring: inv_ak89xx_unconfigure_ring(indio_dev); out_free: iio_free_device(indio_dev); out_no_free: dev_err(&client->adapter->dev, "%s failed %d\n", __func__, result); return -EIO; } /** * inv_ak89xx_remove() - remove function. 
*/ static int inv_ak89xx_remove(struct i2c_client *client) { struct iio_dev *indio_dev = i2c_get_clientdata(client); struct inv_ak89xx_state_s *st = iio_priv(indio_dev); cancel_delayed_work_sync(&st->work); iio_device_unregister(indio_dev); inv_ak89xx_remove_trigger(indio_dev); iio_buffer_unregister(indio_dev); inv_ak89xx_unconfigure_ring(indio_dev); iio_free_device(indio_dev); dev_info(&client->adapter->dev, "inv-ak89xx-iio module removed.\n"); return 0; } static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; /* device id table is used to identify what device can be * supported by this driver */ static const struct i2c_device_id inv_ak89xx_id[] = { {"akm8975", COMPASS_ID_AK8975}, {"akm8972", COMPASS_ID_AK8972}, {"akm8963", COMPASS_ID_AK8963}, {} }; MODULE_DEVICE_TABLE(i2c, inv_ak89xx_id); static struct i2c_driver inv_ak89xx_driver = { .class = I2C_CLASS_HWMON, .probe = inv_ak89xx_probe, .remove = inv_ak89xx_remove, .id_table = inv_ak89xx_id, .driver = { .owner = THIS_MODULE, .name = "inv-ak89xx-iio", }, .address_list = normal_i2c, }; static int __init inv_ak89xx_init(void) { int result = i2c_add_driver(&inv_ak89xx_driver); if (result) { pr_err("%s failed\n", __func__); return result; } return 0; } static void __exit inv_ak89xx_exit(void) { i2c_del_driver(&inv_ak89xx_driver); } module_init(inv_ak89xx_init); module_exit(inv_ak89xx_exit); MODULE_AUTHOR("Invensense Corporation"); MODULE_DESCRIPTION("Invensense device driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("inv-ak89xx-iio");
gpl-2.0
iamroot12CD/linux
net/ipv4/proc.c
218
20524
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * This file implements the various access functions for the * PROC file system. It is mainly used for debugging and * statistics. * * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de> * Fred Baumgarten, <dc6iq@insu1.etec.uni-karlsruhe.de> * Erik Schoenfelder, <schoenfr@ibr.cs.tu-bs.de> * * Fixes: * Alan Cox : UDP sockets show the rxqueue/txqueue * using hint flag for the netinfo. * Pauline Middelink : identd support * Alan Cox : Make /proc safer. * Erik Schoenfelder : /proc/net/snmp * Alan Cox : Handle dead sockets properly. * Gerhard Koerting : Show both timers * Alan Cox : Allow inode to be NULL (kernel socket) * Andi Kleen : Add support for open_requests and * split functions for more readibility. * Andi Kleen : Add support for /proc/net/netstat * Arnaldo C. Melo : Convert to seq_file * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ #include <linux/types.h> #include <net/net_namespace.h> #include <net/icmp.h> #include <net/protocol.h> #include <net/tcp.h> #include <net/udp.h> #include <net/udplite.h> #include <linux/bottom_half.h> #include <linux/inetdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/export.h> #include <net/sock.h> #include <net/raw.h> /* * Report socket allocation statistics [mea@utu.fi] */ static int sockstat_seq_show(struct seq_file *seq, void *v) { struct net *net = seq->private; unsigned int frag_mem; int orphans, sockets; local_bh_disable(); orphans = percpu_counter_sum_positive(&tcp_orphan_count); sockets = proto_sockets_allocated_sum_positive(&tcp_prot); local_bh_enable(); socket_seq_show(seq); seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n", sock_prot_inuse_get(net, &tcp_prot), orphans, atomic_read(&tcp_death_row.tw_count), sockets, proto_memory_allocated(&tcp_prot)); seq_printf(seq, "UDP: inuse %d mem %ld\n", sock_prot_inuse_get(net, &udp_prot), proto_memory_allocated(&udp_prot)); seq_printf(seq, "UDPLITE: inuse %d\n", sock_prot_inuse_get(net, &udplite_prot)); seq_printf(seq, "RAW: inuse %d\n", sock_prot_inuse_get(net, &raw_prot)); frag_mem = ip_frag_mem(net); seq_printf(seq, "FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem); return 0; } static int sockstat_seq_open(struct inode *inode, struct file *file) { return single_open_net(inode, file, sockstat_seq_show); } static const struct file_operations sockstat_seq_fops = { .owner = THIS_MODULE, .open = sockstat_seq_open, .read = seq_read, .llseek = seq_lseek, .release = single_release_net, }; /* snmp items */ static const struct snmp_mib snmp4_ipstats_list[] = { SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INPKTS), SNMP_MIB_ITEM("InHdrErrors", IPSTATS_MIB_INHDRERRORS), SNMP_MIB_ITEM("InAddrErrors", IPSTATS_MIB_INADDRERRORS), SNMP_MIB_ITEM("ForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS), SNMP_MIB_ITEM("InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS), 
SNMP_MIB_ITEM("InDiscards", IPSTATS_MIB_INDISCARDS), SNMP_MIB_ITEM("InDelivers", IPSTATS_MIB_INDELIVERS), SNMP_MIB_ITEM("OutRequests", IPSTATS_MIB_OUTPKTS), SNMP_MIB_ITEM("OutDiscards", IPSTATS_MIB_OUTDISCARDS), SNMP_MIB_ITEM("OutNoRoutes", IPSTATS_MIB_OUTNOROUTES), SNMP_MIB_ITEM("ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT), SNMP_MIB_ITEM("ReasmReqds", IPSTATS_MIB_REASMREQDS), SNMP_MIB_ITEM("ReasmOKs", IPSTATS_MIB_REASMOKS), SNMP_MIB_ITEM("ReasmFails", IPSTATS_MIB_REASMFAILS), SNMP_MIB_ITEM("FragOKs", IPSTATS_MIB_FRAGOKS), SNMP_MIB_ITEM("FragFails", IPSTATS_MIB_FRAGFAILS), SNMP_MIB_ITEM("FragCreates", IPSTATS_MIB_FRAGCREATES), SNMP_MIB_SENTINEL }; /* Following items are displayed in /proc/net/netstat */ static const struct snmp_mib snmp4_ipextstats_list[] = { SNMP_MIB_ITEM("InNoRoutes", IPSTATS_MIB_INNOROUTES), SNMP_MIB_ITEM("InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS), SNMP_MIB_ITEM("InMcastPkts", IPSTATS_MIB_INMCASTPKTS), SNMP_MIB_ITEM("OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS), SNMP_MIB_ITEM("InBcastPkts", IPSTATS_MIB_INBCASTPKTS), SNMP_MIB_ITEM("OutBcastPkts", IPSTATS_MIB_OUTBCASTPKTS), SNMP_MIB_ITEM("InOctets", IPSTATS_MIB_INOCTETS), SNMP_MIB_ITEM("OutOctets", IPSTATS_MIB_OUTOCTETS), SNMP_MIB_ITEM("InMcastOctets", IPSTATS_MIB_INMCASTOCTETS), SNMP_MIB_ITEM("OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS), SNMP_MIB_ITEM("InBcastOctets", IPSTATS_MIB_INBCASTOCTETS), SNMP_MIB_ITEM("OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS), /* Non RFC4293 fields */ SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS), SNMP_MIB_ITEM("InNoECTPkts", IPSTATS_MIB_NOECTPKTS), SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS), SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS), SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS), SNMP_MIB_SENTINEL }; static const struct { const char *name; int index; } icmpmibmap[] = { { "DestUnreachs", ICMP_DEST_UNREACH }, { "TimeExcds", ICMP_TIME_EXCEEDED }, { "ParmProbs", ICMP_PARAMETERPROB }, { "SrcQuenchs", ICMP_SOURCE_QUENCH }, { "Redirects", ICMP_REDIRECT 
}, { "Echos", ICMP_ECHO }, { "EchoReps", ICMP_ECHOREPLY }, { "Timestamps", ICMP_TIMESTAMP }, { "TimestampReps", ICMP_TIMESTAMPREPLY }, { "AddrMasks", ICMP_ADDRESS }, { "AddrMaskReps", ICMP_ADDRESSREPLY }, { NULL, 0 } }; static const struct snmp_mib snmp4_tcp_list[] = { SNMP_MIB_ITEM("RtoAlgorithm", TCP_MIB_RTOALGORITHM), SNMP_MIB_ITEM("RtoMin", TCP_MIB_RTOMIN), SNMP_MIB_ITEM("RtoMax", TCP_MIB_RTOMAX), SNMP_MIB_ITEM("MaxConn", TCP_MIB_MAXCONN), SNMP_MIB_ITEM("ActiveOpens", TCP_MIB_ACTIVEOPENS), SNMP_MIB_ITEM("PassiveOpens", TCP_MIB_PASSIVEOPENS), SNMP_MIB_ITEM("AttemptFails", TCP_MIB_ATTEMPTFAILS), SNMP_MIB_ITEM("EstabResets", TCP_MIB_ESTABRESETS), SNMP_MIB_ITEM("CurrEstab", TCP_MIB_CURRESTAB), SNMP_MIB_ITEM("InSegs", TCP_MIB_INSEGS), SNMP_MIB_ITEM("OutSegs", TCP_MIB_OUTSEGS), SNMP_MIB_ITEM("RetransSegs", TCP_MIB_RETRANSSEGS), SNMP_MIB_ITEM("InErrs", TCP_MIB_INERRS), SNMP_MIB_ITEM("OutRsts", TCP_MIB_OUTRSTS), SNMP_MIB_ITEM("InCsumErrors", TCP_MIB_CSUMERRORS), SNMP_MIB_SENTINEL }; static const struct snmp_mib snmp4_udp_list[] = { SNMP_MIB_ITEM("InDatagrams", UDP_MIB_INDATAGRAMS), SNMP_MIB_ITEM("NoPorts", UDP_MIB_NOPORTS), SNMP_MIB_ITEM("InErrors", UDP_MIB_INERRORS), SNMP_MIB_ITEM("OutDatagrams", UDP_MIB_OUTDATAGRAMS), SNMP_MIB_ITEM("RcvbufErrors", UDP_MIB_RCVBUFERRORS), SNMP_MIB_ITEM("SndbufErrors", UDP_MIB_SNDBUFERRORS), SNMP_MIB_ITEM("InCsumErrors", UDP_MIB_CSUMERRORS), SNMP_MIB_ITEM("IgnoredMulti", UDP_MIB_IGNOREDMULTI), SNMP_MIB_SENTINEL }; static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("SyncookiesSent", LINUX_MIB_SYNCOOKIESSENT), SNMP_MIB_ITEM("SyncookiesRecv", LINUX_MIB_SYNCOOKIESRECV), SNMP_MIB_ITEM("SyncookiesFailed", LINUX_MIB_SYNCOOKIESFAILED), SNMP_MIB_ITEM("EmbryonicRsts", LINUX_MIB_EMBRYONICRSTS), SNMP_MIB_ITEM("PruneCalled", LINUX_MIB_PRUNECALLED), SNMP_MIB_ITEM("RcvPruned", LINUX_MIB_RCVPRUNED), SNMP_MIB_ITEM("OfoPruned", LINUX_MIB_OFOPRUNED), SNMP_MIB_ITEM("OutOfWindowIcmps", LINUX_MIB_OUTOFWINDOWICMPS), 
SNMP_MIB_ITEM("LockDroppedIcmps", LINUX_MIB_LOCKDROPPEDICMPS), SNMP_MIB_ITEM("ArpFilter", LINUX_MIB_ARPFILTER), SNMP_MIB_ITEM("TW", LINUX_MIB_TIMEWAITED), SNMP_MIB_ITEM("TWRecycled", LINUX_MIB_TIMEWAITRECYCLED), SNMP_MIB_ITEM("TWKilled", LINUX_MIB_TIMEWAITKILLED), SNMP_MIB_ITEM("PAWSPassive", LINUX_MIB_PAWSPASSIVEREJECTED), SNMP_MIB_ITEM("PAWSActive", LINUX_MIB_PAWSACTIVEREJECTED), SNMP_MIB_ITEM("PAWSEstab", LINUX_MIB_PAWSESTABREJECTED), SNMP_MIB_ITEM("DelayedACKs", LINUX_MIB_DELAYEDACKS), SNMP_MIB_ITEM("DelayedACKLocked", LINUX_MIB_DELAYEDACKLOCKED), SNMP_MIB_ITEM("DelayedACKLost", LINUX_MIB_DELAYEDACKLOST), SNMP_MIB_ITEM("ListenOverflows", LINUX_MIB_LISTENOVERFLOWS), SNMP_MIB_ITEM("ListenDrops", LINUX_MIB_LISTENDROPS), SNMP_MIB_ITEM("TCPPrequeued", LINUX_MIB_TCPPREQUEUED), SNMP_MIB_ITEM("TCPDirectCopyFromBacklog", LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG), SNMP_MIB_ITEM("TCPDirectCopyFromPrequeue", LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE), SNMP_MIB_ITEM("TCPPrequeueDropped", LINUX_MIB_TCPPREQUEUEDROPPED), SNMP_MIB_ITEM("TCPHPHits", LINUX_MIB_TCPHPHITS), SNMP_MIB_ITEM("TCPHPHitsToUser", LINUX_MIB_TCPHPHITSTOUSER), SNMP_MIB_ITEM("TCPPureAcks", LINUX_MIB_TCPPUREACKS), SNMP_MIB_ITEM("TCPHPAcks", LINUX_MIB_TCPHPACKS), SNMP_MIB_ITEM("TCPRenoRecovery", LINUX_MIB_TCPRENORECOVERY), SNMP_MIB_ITEM("TCPSackRecovery", LINUX_MIB_TCPSACKRECOVERY), SNMP_MIB_ITEM("TCPSACKReneging", LINUX_MIB_TCPSACKRENEGING), SNMP_MIB_ITEM("TCPFACKReorder", LINUX_MIB_TCPFACKREORDER), SNMP_MIB_ITEM("TCPSACKReorder", LINUX_MIB_TCPSACKREORDER), SNMP_MIB_ITEM("TCPRenoReorder", LINUX_MIB_TCPRENOREORDER), SNMP_MIB_ITEM("TCPTSReorder", LINUX_MIB_TCPTSREORDER), SNMP_MIB_ITEM("TCPFullUndo", LINUX_MIB_TCPFULLUNDO), SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO), SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO), SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO), SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT), SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES), 
SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES), SNMP_MIB_ITEM("TCPLossFailures", LINUX_MIB_TCPLOSSFAILURES), SNMP_MIB_ITEM("TCPFastRetrans", LINUX_MIB_TCPFASTRETRANS), SNMP_MIB_ITEM("TCPForwardRetrans", LINUX_MIB_TCPFORWARDRETRANS), SNMP_MIB_ITEM("TCPSlowStartRetrans", LINUX_MIB_TCPSLOWSTARTRETRANS), SNMP_MIB_ITEM("TCPTimeouts", LINUX_MIB_TCPTIMEOUTS), SNMP_MIB_ITEM("TCPLossProbes", LINUX_MIB_TCPLOSSPROBES), SNMP_MIB_ITEM("TCPLossProbeRecovery", LINUX_MIB_TCPLOSSPROBERECOVERY), SNMP_MIB_ITEM("TCPRenoRecoveryFail", LINUX_MIB_TCPRENORECOVERYFAIL), SNMP_MIB_ITEM("TCPSackRecoveryFail", LINUX_MIB_TCPSACKRECOVERYFAIL), SNMP_MIB_ITEM("TCPSchedulerFailed", LINUX_MIB_TCPSCHEDULERFAILED), SNMP_MIB_ITEM("TCPRcvCollapsed", LINUX_MIB_TCPRCVCOLLAPSED), SNMP_MIB_ITEM("TCPDSACKOldSent", LINUX_MIB_TCPDSACKOLDSENT), SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT), SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV), SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV), SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA), SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE), SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY), SNMP_MIB_ITEM("TCPAbortOnTimeout", LINUX_MIB_TCPABORTONTIMEOUT), SNMP_MIB_ITEM("TCPAbortOnLinger", LINUX_MIB_TCPABORTONLINGER), SNMP_MIB_ITEM("TCPAbortFailed", LINUX_MIB_TCPABORTFAILED), SNMP_MIB_ITEM("TCPMemoryPressures", LINUX_MIB_TCPMEMORYPRESSURES), SNMP_MIB_ITEM("TCPSACKDiscard", LINUX_MIB_TCPSACKDISCARD), SNMP_MIB_ITEM("TCPDSACKIgnoredOld", LINUX_MIB_TCPDSACKIGNOREDOLD), SNMP_MIB_ITEM("TCPDSACKIgnoredNoUndo", LINUX_MIB_TCPDSACKIGNOREDNOUNDO), SNMP_MIB_ITEM("TCPSpuriousRTOs", LINUX_MIB_TCPSPURIOUSRTOS), SNMP_MIB_ITEM("TCPMD5NotFound", LINUX_MIB_TCPMD5NOTFOUND), SNMP_MIB_ITEM("TCPMD5Unexpected", LINUX_MIB_TCPMD5UNEXPECTED), SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED), SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED), SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), 
SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES), SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP), SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL), SNMP_MIB_ITEM("TCPRcvCoalesce", LINUX_MIB_TCPRCVCOALESCE), SNMP_MIB_ITEM("TCPOFOQueue", LINUX_MIB_TCPOFOQUEUE), SNMP_MIB_ITEM("TCPOFODrop", LINUX_MIB_TCPOFODROP), SNMP_MIB_ITEM("TCPOFOMerge", LINUX_MIB_TCPOFOMERGE), SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK), SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE), SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE), SNMP_MIB_ITEM("TCPFastOpenActiveFail", LINUX_MIB_TCPFASTOPENACTIVEFAIL), SNMP_MIB_ITEM("TCPFastOpenPassive", LINUX_MIB_TCPFASTOPENPASSIVE), SNMP_MIB_ITEM("TCPFastOpenPassiveFail", LINUX_MIB_TCPFASTOPENPASSIVEFAIL), SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS), SNMP_MIB_ITEM("TCPAutoCorking", LINUX_MIB_TCPAUTOCORKING), SNMP_MIB_ITEM("TCPFromZeroWindowAdv", LINUX_MIB_TCPFROMZEROWINDOWADV), SNMP_MIB_ITEM("TCPToZeroWindowAdv", LINUX_MIB_TCPTOZEROWINDOWADV), SNMP_MIB_ITEM("TCPWantZeroWindowAdv", LINUX_MIB_TCPWANTZEROWINDOWADV), SNMP_MIB_ITEM("TCPSynRetrans", LINUX_MIB_TCPSYNRETRANS), SNMP_MIB_ITEM("TCPOrigDataSent", LINUX_MIB_TCPORIGDATASENT), SNMP_MIB_ITEM("TCPHystartTrainDetect", LINUX_MIB_TCPHYSTARTTRAINDETECT), SNMP_MIB_ITEM("TCPHystartTrainCwnd", LINUX_MIB_TCPHYSTARTTRAINCWND), SNMP_MIB_ITEM("TCPHystartDelayDetect", 
LINUX_MIB_TCPHYSTARTDELAYDETECT), SNMP_MIB_ITEM("TCPHystartDelayCwnd", LINUX_MIB_TCPHYSTARTDELAYCWND), SNMP_MIB_ITEM("TCPACKSkippedSynRecv", LINUX_MIB_TCPACKSKIPPEDSYNRECV), SNMP_MIB_ITEM("TCPACKSkippedPAWS", LINUX_MIB_TCPACKSKIPPEDPAWS), SNMP_MIB_ITEM("TCPACKSkippedSeq", LINUX_MIB_TCPACKSKIPPEDSEQ), SNMP_MIB_ITEM("TCPACKSkippedFinWait2", LINUX_MIB_TCPACKSKIPPEDFINWAIT2), SNMP_MIB_ITEM("TCPACKSkippedTimeWait", LINUX_MIB_TCPACKSKIPPEDTIMEWAIT), SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE), SNMP_MIB_SENTINEL }; static void icmpmsg_put_line(struct seq_file *seq, unsigned long *vals, unsigned short *type, int count) { int j; if (count) { seq_puts(seq, "\nIcmpMsg:"); for (j = 0; j < count; ++j) seq_printf(seq, " %sType%u", type[j] & 0x100 ? "Out" : "In", type[j] & 0xff); seq_puts(seq, "\nIcmpMsg:"); for (j = 0; j < count; ++j) seq_printf(seq, " %lu", vals[j]); } } static void icmpmsg_put(struct seq_file *seq) { #define PERLINE 16 int i, count; unsigned short type[PERLINE]; unsigned long vals[PERLINE], val; struct net *net = seq->private; count = 0; for (i = 0; i < ICMPMSG_MIB_MAX; i++) { val = atomic_long_read(&net->mib.icmpmsg_statistics->mibs[i]); if (val) { type[count] = i; vals[count++] = val; } if (count == PERLINE) { icmpmsg_put_line(seq, vals, type, count); count = 0; } } icmpmsg_put_line(seq, vals, type, count); #undef PERLINE } static void icmp_put(struct seq_file *seq) { int i; struct net *net = seq->private; atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs; seq_puts(seq, "\nIcmp: InMsgs InErrors InCsumErrors"); for (i = 0; icmpmibmap[i].name != NULL; i++) seq_printf(seq, " In%s", icmpmibmap[i].name); seq_puts(seq, " OutMsgs OutErrors"); for (i = 0; icmpmibmap[i].name != NULL; i++) seq_printf(seq, " Out%s", icmpmibmap[i].name); seq_printf(seq, "\nIcmp: %lu %lu %lu", snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INMSGS), snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INERRORS), 
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS)); for (i = 0; icmpmibmap[i].name != NULL; i++) seq_printf(seq, " %lu", atomic_long_read(ptr + icmpmibmap[i].index)); seq_printf(seq, " %lu %lu", snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTMSGS), snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTERRORS)); for (i = 0; icmpmibmap[i].name != NULL; i++) seq_printf(seq, " %lu", atomic_long_read(ptr + (icmpmibmap[i].index | 0x100))); } /* * Called from the PROCfs module. This outputs /proc/net/snmp. */ static int snmp_seq_show(struct seq_file *seq, void *v) { int i; struct net *net = seq->private; seq_puts(seq, "Ip: Forwarding DefaultTTL"); for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) seq_printf(seq, " %s", snmp4_ipstats_list[i].name); seq_printf(seq, "\nIp: %d %d", IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2, sysctl_ip_default_ttl); BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0); for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) seq_printf(seq, " %llu", snmp_fold_field64(net->mib.ip_statistics, snmp4_ipstats_list[i].entry, offsetof(struct ipstats_mib, syncp))); icmp_put(seq); /* RFC 2011 compatibility */ icmpmsg_put(seq); seq_puts(seq, "\nTcp:"); for (i = 0; snmp4_tcp_list[i].name != NULL; i++) seq_printf(seq, " %s", snmp4_tcp_list[i].name); seq_puts(seq, "\nTcp:"); for (i = 0; snmp4_tcp_list[i].name != NULL; i++) { /* MaxConn field is signed, RFC 2012 */ if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN) seq_printf(seq, " %ld", snmp_fold_field(net->mib.tcp_statistics, snmp4_tcp_list[i].entry)); else seq_printf(seq, " %lu", snmp_fold_field(net->mib.tcp_statistics, snmp4_tcp_list[i].entry)); } seq_puts(seq, "\nUdp:"); for (i = 0; snmp4_udp_list[i].name != NULL; i++) seq_printf(seq, " %s", snmp4_udp_list[i].name); seq_puts(seq, "\nUdp:"); for (i = 0; snmp4_udp_list[i].name != NULL; i++) seq_printf(seq, " %lu", snmp_fold_field(net->mib.udp_statistics, snmp4_udp_list[i].entry)); /* the UDP and UDP-Lite MIBs are the same */ 
seq_puts(seq, "\nUdpLite:"); for (i = 0; snmp4_udp_list[i].name != NULL; i++) seq_printf(seq, " %s", snmp4_udp_list[i].name); seq_puts(seq, "\nUdpLite:"); for (i = 0; snmp4_udp_list[i].name != NULL; i++) seq_printf(seq, " %lu", snmp_fold_field(net->mib.udplite_statistics, snmp4_udp_list[i].entry)); seq_putc(seq, '\n'); return 0; } static int snmp_seq_open(struct inode *inode, struct file *file) { return single_open_net(inode, file, snmp_seq_show); } static const struct file_operations snmp_seq_fops = { .owner = THIS_MODULE, .open = snmp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = single_release_net, }; /* * Output /proc/net/netstat */ static int netstat_seq_show(struct seq_file *seq, void *v) { int i; struct net *net = seq->private; seq_puts(seq, "TcpExt:"); for (i = 0; snmp4_net_list[i].name != NULL; i++) seq_printf(seq, " %s", snmp4_net_list[i].name); seq_puts(seq, "\nTcpExt:"); for (i = 0; snmp4_net_list[i].name != NULL; i++) seq_printf(seq, " %lu", snmp_fold_field(net->mib.net_statistics, snmp4_net_list[i].entry)); seq_puts(seq, "\nIpExt:"); for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++) seq_printf(seq, " %s", snmp4_ipextstats_list[i].name); seq_puts(seq, "\nIpExt:"); for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++) seq_printf(seq, " %llu", snmp_fold_field64(net->mib.ip_statistics, snmp4_ipextstats_list[i].entry, offsetof(struct ipstats_mib, syncp))); seq_putc(seq, '\n'); return 0; } static int netstat_seq_open(struct inode *inode, struct file *file) { return single_open_net(inode, file, netstat_seq_show); } static const struct file_operations netstat_seq_fops = { .owner = THIS_MODULE, .open = netstat_seq_open, .read = seq_read, .llseek = seq_lseek, .release = single_release_net, }; static __net_init int ip_proc_init_net(struct net *net) { if (!proc_create("sockstat", S_IRUGO, net->proc_net, &sockstat_seq_fops)) goto out_sockstat; if (!proc_create("netstat", S_IRUGO, net->proc_net, &netstat_seq_fops)) goto out_netstat; if 
(!proc_create("snmp", S_IRUGO, net->proc_net, &snmp_seq_fops)) goto out_snmp; return 0; out_snmp: remove_proc_entry("netstat", net->proc_net); out_netstat: remove_proc_entry("sockstat", net->proc_net); out_sockstat: return -ENOMEM; } static __net_exit void ip_proc_exit_net(struct net *net) { remove_proc_entry("snmp", net->proc_net); remove_proc_entry("netstat", net->proc_net); remove_proc_entry("sockstat", net->proc_net); } static __net_initdata struct pernet_operations ip_proc_ops = { .init = ip_proc_init_net, .exit = ip_proc_exit_net, }; int __init ip_misc_proc_init(void) { return register_pernet_subsys(&ip_proc_ops); }
gpl-2.0
ender-chen/linux-rt-rpi
drivers/rtc/rtc-88pm860x.c
474
12944
/* * Real Time Clock driver for Marvell 88PM860x PMIC * * Copyright (c) 2010 Marvell International Ltd. * Author: Haojian Zhuang <haojian.zhuang@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/rtc.h> #include <linux/delay.h> #include <linux/mfd/core.h> #include <linux/mfd/88pm860x.h> #define VRTC_CALIBRATION struct pm860x_rtc_info { struct pm860x_chip *chip; struct i2c_client *i2c; struct rtc_device *rtc_dev; struct device *dev; struct delayed_work calib_work; int irq; int vrtc; int (*sync)(unsigned int ticks); }; #define REG_VRTC_MEAS1 0x7D #define REG0_ADDR 0xB0 #define REG1_ADDR 0xB2 #define REG2_ADDR 0xB4 #define REG3_ADDR 0xB6 #define REG0_DATA 0xB1 #define REG1_DATA 0xB3 #define REG2_DATA 0xB5 #define REG3_DATA 0xB7 /* bit definitions of Measurement Enable Register 2 (0x51) */ #define MEAS2_VRTC (1 << 0) /* bit definitions of RTC Register 1 (0xA0) */ #define ALARM_EN (1 << 3) #define ALARM_WAKEUP (1 << 4) #define ALARM (1 << 5) #define RTC1_USE_XO (1 << 7) #define VRTC_CALIB_INTERVAL (HZ * 60 * 10) /* 10 minutes */ static irqreturn_t rtc_update_handler(int irq, void *data) { struct pm860x_rtc_info *info = (struct pm860x_rtc_info *)data; int mask; mask = ALARM | ALARM_WAKEUP; pm860x_set_bits(info->i2c, PM8607_RTC1, mask | ALARM_EN, mask); rtc_update_irq(info->rtc_dev, 1, RTC_AF); return IRQ_HANDLED; } static int pm860x_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct pm860x_rtc_info *info = dev_get_drvdata(dev); if (enabled) pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, ALARM_EN); else pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, 0); return 0; } /* * Calculate the next alarm time given the requested alarm 
time mask * and the current time. */ static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc_time *alrm) { unsigned long next_time; unsigned long now_time; next->tm_year = now->tm_year; next->tm_mon = now->tm_mon; next->tm_mday = now->tm_mday; next->tm_hour = alrm->tm_hour; next->tm_min = alrm->tm_min; next->tm_sec = alrm->tm_sec; rtc_tm_to_time(now, &now_time); rtc_tm_to_time(next, &next_time); if (next_time < now_time) { /* Advance one day */ next_time += 60 * 60 * 24; rtc_time_to_tm(next_time, next); } } static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct pm860x_rtc_info *info = dev_get_drvdata(dev); unsigned char buf[8]; unsigned long ticks, base, data; pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf); dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7]; /* load 32-bit read-only counter */ pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf); data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; ticks = base + data; dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); rtc_time_to_tm(ticks, tm); return 0; } static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct pm860x_rtc_info *info = dev_get_drvdata(dev); unsigned char buf[4]; unsigned long ticks, base, data; if ((tm->tm_year < 70) || (tm->tm_year > 138)) { dev_dbg(info->dev, "Set time %d out of range. 
" "Please set time between 1970 to 2038.\n", 1900 + tm->tm_year); return -EINVAL; } rtc_tm_to_time(tm, &ticks); /* load 32-bit read-only counter */ pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf); data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; base = ticks - data; dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); pm860x_page_reg_write(info->i2c, REG0_DATA, (base >> 24) & 0xFF); pm860x_page_reg_write(info->i2c, REG1_DATA, (base >> 16) & 0xFF); pm860x_page_reg_write(info->i2c, REG2_DATA, (base >> 8) & 0xFF); pm860x_page_reg_write(info->i2c, REG3_DATA, base & 0xFF); if (info->sync) info->sync(ticks); return 0; } static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pm860x_rtc_info *info = dev_get_drvdata(dev); unsigned char buf[8]; unsigned long ticks, base, data; int ret; pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf); dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7]; pm860x_bulk_read(info->i2c, PM8607_RTC_EXPIRE1, 4, buf); data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; ticks = base + data; dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); rtc_time_to_tm(ticks, &alrm->time); ret = pm860x_reg_read(info->i2c, PM8607_RTC1); alrm->enabled = (ret & ALARM_EN) ? 1 : 0; alrm->pending = (ret & (ALARM | ALARM_WAKEUP)) ? 
1 : 0; return 0; } static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pm860x_rtc_info *info = dev_get_drvdata(dev); struct rtc_time now_tm, alarm_tm; unsigned long ticks, base, data; unsigned char buf[8]; int mask; pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, 0); pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf); dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7]; /* load 32-bit read-only counter */ pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf); data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; ticks = base + data; dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); rtc_time_to_tm(ticks, &now_tm); rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time); /* get new ticks for alarm in 24 hours */ rtc_tm_to_time(&alarm_tm, &ticks); data = ticks - base; buf[0] = data & 0xff; buf[1] = (data >> 8) & 0xff; buf[2] = (data >> 16) & 0xff; buf[3] = (data >> 24) & 0xff; pm860x_bulk_write(info->i2c, PM8607_RTC_EXPIRE1, 4, buf); if (alrm->enabled) { mask = ALARM | ALARM_WAKEUP | ALARM_EN; pm860x_set_bits(info->i2c, PM8607_RTC1, mask, mask); } else { mask = ALARM | ALARM_WAKEUP | ALARM_EN; pm860x_set_bits(info->i2c, PM8607_RTC1, mask, ALARM | ALARM_WAKEUP); } return 0; } static const struct rtc_class_ops pm860x_rtc_ops = { .read_time = pm860x_rtc_read_time, .set_time = pm860x_rtc_set_time, .read_alarm = pm860x_rtc_read_alarm, .set_alarm = pm860x_rtc_set_alarm, .alarm_irq_enable = pm860x_rtc_alarm_irq_enable, }; #ifdef VRTC_CALIBRATION static void calibrate_vrtc_work(struct work_struct *work) { struct pm860x_rtc_info *info = container_of(work, struct pm860x_rtc_info, calib_work.work); unsigned char buf[2]; unsigned int sum, data, mean, vrtc_set; int i; for (i = 0, sum = 0; i < 16; i++) { msleep(100); pm860x_bulk_read(info->i2c, REG_VRTC_MEAS1, 2, buf); data 
= (buf[0] << 4) | buf[1]; data = (data * 5400) >> 12; /* convert to mv */ sum += data; } mean = sum >> 4; vrtc_set = 2700 + (info->vrtc & 0x3) * 200; dev_dbg(info->dev, "mean:%d, vrtc_set:%d\n", mean, vrtc_set); sum = pm860x_reg_read(info->i2c, PM8607_RTC_MISC1); data = sum & 0x3; if ((mean + 200) < vrtc_set) { /* try higher voltage */ if (++data == 4) goto out; data = (sum & 0xf8) | (data & 0x3); pm860x_reg_write(info->i2c, PM8607_RTC_MISC1, data); } else if ((mean - 200) > vrtc_set) { /* try lower voltage */ if (data-- == 0) goto out; data = (sum & 0xf8) | (data & 0x3); pm860x_reg_write(info->i2c, PM8607_RTC_MISC1, data); } else goto out; dev_dbg(info->dev, "set 0x%x to RTC_MISC1\n", data); /* trigger next calibration since VRTC is updated */ schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL); return; out: /* disable measurement */ pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0); dev_dbg(info->dev, "finish VRTC calibration\n"); return; } #endif #ifdef CONFIG_OF static int pm860x_rtc_dt_init(struct platform_device *pdev, struct pm860x_rtc_info *info) { struct device_node *np = pdev->dev.parent->of_node; int ret; if (!np) return -ENODEV; np = of_get_child_by_name(np, "rtc"); if (!np) { dev_err(&pdev->dev, "failed to find rtc node\n"); return -ENODEV; } ret = of_property_read_u32(np, "marvell,88pm860x-vrtc", &info->vrtc); if (ret) info->vrtc = 0; of_node_put(np); return 0; } #else #define pm860x_rtc_dt_init(x, y) (-1) #endif static int pm860x_rtc_probe(struct platform_device *pdev) { struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); struct pm860x_rtc_pdata *pdata = NULL; struct pm860x_rtc_info *info; struct rtc_time tm; unsigned long ticks = 0; int ret; pdata = dev_get_platdata(&pdev->dev); info = devm_kzalloc(&pdev->dev, sizeof(struct pm860x_rtc_info), GFP_KERNEL); if (!info) return -ENOMEM; info->irq = platform_get_irq(pdev, 0); if (info->irq < 0) { dev_err(&pdev->dev, "No IRQ resource!\n"); return info->irq; } info->chip = chip; 
info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion; info->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, info); ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, rtc_update_handler, IRQF_ONESHOT, "rtc", info); if (ret < 0) { dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n", info->irq, ret); return ret; } /* set addresses of 32-bit base value for RTC time */ pm860x_page_reg_write(info->i2c, REG0_ADDR, REG0_DATA); pm860x_page_reg_write(info->i2c, REG1_ADDR, REG1_DATA); pm860x_page_reg_write(info->i2c, REG2_ADDR, REG2_DATA); pm860x_page_reg_write(info->i2c, REG3_ADDR, REG3_DATA); ret = pm860x_rtc_read_time(&pdev->dev, &tm); if (ret < 0) { dev_err(&pdev->dev, "Failed to read initial time.\n"); return ret; } if ((tm.tm_year < 70) || (tm.tm_year > 138)) { tm.tm_year = 70; tm.tm_mon = 0; tm.tm_mday = 1; tm.tm_hour = 0; tm.tm_min = 0; tm.tm_sec = 0; ret = pm860x_rtc_set_time(&pdev->dev, &tm); if (ret < 0) { dev_err(&pdev->dev, "Failed to set initial time.\n"); return ret; } } rtc_tm_to_time(&tm, &ticks); if (pm860x_rtc_dt_init(pdev, info)) { if (pdata && pdata->sync) { pdata->sync(ticks); info->sync = pdata->sync; } } info->rtc_dev = devm_rtc_device_register(&pdev->dev, "88pm860x-rtc", &pm860x_rtc_ops, THIS_MODULE); ret = PTR_ERR(info->rtc_dev); if (IS_ERR(info->rtc_dev)) { dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); return ret; } /* * enable internal XO instead of internal 3.25MHz clock since it can * free running in PMIC power-down state. 
*/ pm860x_set_bits(info->i2c, PM8607_RTC1, RTC1_USE_XO, RTC1_USE_XO); #ifdef VRTC_CALIBRATION /* <00> -- 2.7V, <01> -- 2.9V, <10> -- 3.1V, <11> -- 3.3V */ if (pm860x_rtc_dt_init(pdev, info)) { if (pdata && pdata->vrtc) info->vrtc = pdata->vrtc & 0x3; else info->vrtc = 1; } pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, MEAS2_VRTC); /* calibrate VRTC */ INIT_DELAYED_WORK(&info->calib_work, calibrate_vrtc_work); schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL); #endif /* VRTC_CALIBRATION */ device_init_wakeup(&pdev->dev, 1); return 0; } static int pm860x_rtc_remove(struct platform_device *pdev) { struct pm860x_rtc_info *info = platform_get_drvdata(pdev); #ifdef VRTC_CALIBRATION flush_scheduled_work(); /* disable measurement */ pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0); #endif /* VRTC_CALIBRATION */ return 0; } #ifdef CONFIG_PM_SLEEP static int pm860x_rtc_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); if (device_may_wakeup(dev)) chip->wakeup_flag |= 1 << PM8607_IRQ_RTC; return 0; } static int pm860x_rtc_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); if (device_may_wakeup(dev)) chip->wakeup_flag &= ~(1 << PM8607_IRQ_RTC); return 0; } #endif static SIMPLE_DEV_PM_OPS(pm860x_rtc_pm_ops, pm860x_rtc_suspend, pm860x_rtc_resume); static struct platform_driver pm860x_rtc_driver = { .driver = { .name = "88pm860x-rtc", .owner = THIS_MODULE, .pm = &pm860x_rtc_pm_ops, }, .probe = pm860x_rtc_probe, .remove = pm860x_rtc_remove, }; module_platform_driver(pm860x_rtc_driver); MODULE_DESCRIPTION("Marvell 88PM860x RTC driver"); MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>"); MODULE_LICENSE("GPL");
gpl-2.0
mturquette/linux-omap-android
arch/mips/alchemy/devboards/prom.c
730
2390
/* * Common code used by all Alchemy develboards. * * Extracted from files which had this to say: * * Copyright 2000, 2008 MontaVista Software Inc. * Author: MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/init.h> #include <linux/kernel.h> #include <asm/bootinfo.h> #include <asm/mach-au1x00/au1000.h> #include <prom.h> #if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_DB1000) || \ defined(CONFIG_MIPS_PB1100) || defined(CONFIG_MIPS_DB1100) || \ defined(CONFIG_MIPS_PB1500) || defined(CONFIG_MIPS_DB1500) || \ defined(CONFIG_MIPS_BOSPORUS) || defined(CONFIG_MIPS_MIRAGE) #define ALCHEMY_BOARD_DEFAULT_MEMSIZE 0x04000000 #else /* Au1550/Au1200-based develboards */ #define ALCHEMY_BOARD_DEFAULT_MEMSIZE 0x08000000 #endif void __init prom_init(void) { unsigned char *memsize_str; unsigned long memsize; prom_argc = (int)fw_arg0; prom_argv = (char **)fw_arg1; prom_envp = (char **)fw_arg2; prom_init_cmdline(); memsize_str = prom_getenv("memsize"); if (!memsize_str) memsize = ALCHEMY_BOARD_DEFAULT_MEMSIZE; else strict_strtoul(memsize_str, 0, &memsize); add_memory_region(0, memsize, BOOT_MEM_RAM); }
gpl-2.0
Kernel-Saram/ef30s-ics-kernel
drivers/s390/crypto/zcrypt_pcicc.c
986
18895
/* * linux/drivers/s390/crypto/zcrypt_pcicc.c * * zcrypt 2.1.0 * * Copyright (C) 2001, 2006 IBM Corporation * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/gfp.h> #include <linux/err.h> #include <asm/atomic.h> #include <asm/uaccess.h> #include "ap_bus.h" #include "zcrypt_api.h" #include "zcrypt_error.h" #include "zcrypt_pcicc.h" #include "zcrypt_cca_key.h" #define PCICC_MIN_MOD_SIZE 64 /* 512 bits */ #define PCICC_MAX_MOD_SIZE_OLD 128 /* 1024 bits */ #define PCICC_MAX_MOD_SIZE 256 /* 2048 bits */ /* * PCICC cards need a speed rating of 0. This keeps them at the end of * the zcrypt device list (see zcrypt_api.c). PCICC cards are only * used if no other cards are present because they are slow and can only * cope with PKCS12 padded requests. The logic is queer. PKCS11 padded * requests are rejected. The modexpo function encrypts PKCS12 padded data * and decrypts any non-PKCS12 padded data (except PKCS11) in the assumption * that it's encrypted PKCS12 data. 
The modexpo_crt function always decrypts * the data in the assumption that its PKCS12 encrypted data. */ #define PCICC_SPEED_RATING 0 #define PCICC_MAX_MESSAGE_SIZE 0x710 /* max size type6 v1 crt message */ #define PCICC_MAX_RESPONSE_SIZE 0x710 /* max size type86 v1 reply */ #define PCICC_CLEANUP_TIME (15*HZ) static struct ap_device_id zcrypt_pcicc_ids[] = { { AP_DEVICE(AP_DEVICE_TYPE_PCICC) }, { /* end of list */ }, }; #ifndef CONFIG_ZCRYPT_MONOLITHIC MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids); MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, " "Copyright 2001, 2006 IBM Corporation"); MODULE_LICENSE("GPL"); #endif static int zcrypt_pcicc_probe(struct ap_device *ap_dev); static void zcrypt_pcicc_remove(struct ap_device *ap_dev); static void zcrypt_pcicc_receive(struct ap_device *, struct ap_message *, struct ap_message *); static struct ap_driver zcrypt_pcicc_driver = { .probe = zcrypt_pcicc_probe, .remove = zcrypt_pcicc_remove, .receive = zcrypt_pcicc_receive, .ids = zcrypt_pcicc_ids, .request_timeout = PCICC_CLEANUP_TIME, }; /** * The following is used to initialize the CPRB passed to the PCICC card * in a type6 message. The 3 fields that must be filled in at execution * time are req_parml, rpl_parml and usage_domain. Note that all three * fields are *little*-endian. Actually, everything about this interface * is ascii/little-endian, since the device has 'Intel inside'. * * The CPRB is followed immediately by the parm block. * The parm block contains: * - function code ('PD' 0x5044 or 'PK' 0x504B) * - rule block (0x0A00 'PKCS-1.2' or 0x0A00 'ZERO-PAD') * - VUD block */ static struct CPRB static_cprb = { .cprb_len = __constant_cpu_to_le16(0x0070), .cprb_ver_id = 0x41, .func_id = {0x54,0x32}, .checkpoint_flag= 0x01, .svr_namel = __constant_cpu_to_le16(0x0008), .svr_name = {'I','C','S','F',' ',' ',' ',' '} }; /** * Check the message for PKCS11 padding. 
*/ static inline int is_PKCS11_padded(unsigned char *buffer, int length) { int i; if ((buffer[0] != 0x00) || (buffer[1] != 0x01)) return 0; for (i = 2; i < length; i++) if (buffer[i] != 0xFF) break; if (i < 10 || i == length) return 0; if (buffer[i] != 0x00) return 0; return 1; } /** * Check the message for PKCS12 padding. */ static inline int is_PKCS12_padded(unsigned char *buffer, int length) { int i; if ((buffer[0] != 0x00) || (buffer[1] != 0x02)) return 0; for (i = 2; i < length; i++) if (buffer[i] == 0x00) break; if ((i < 10) || (i == length)) return 0; if (buffer[i] != 0x00) return 0; return 1; } /** * Convert a ICAMEX message to a type6 MEX message. * * @zdev: crypto device pointer * @zreq: crypto request pointer * @mex: pointer to user input data * * Returns 0 on success or -EFAULT. */ static int ICAMEX_msg_to_type6MEX_msg(struct zcrypt_device *zdev, struct ap_message *ap_msg, struct ica_rsa_modexpo *mex) { static struct type6_hdr static_type6_hdr = { .type = 0x06, .offset1 = 0x00000058, .agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50, 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01}, .function_code = {'P','K'}, }; static struct function_and_rules_block static_pke_function_and_rules ={ .function_code = {'P','K'}, .ulen = __constant_cpu_to_le16(10), .only_rule = {'P','K','C','S','-','1','.','2'} }; struct { struct type6_hdr hdr; struct CPRB cprb; struct function_and_rules_block fr; unsigned short length; char text[0]; } __attribute__((packed)) *msg = ap_msg->message; int vud_len, pad_len, size; /* VUD.ciphertext */ if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength)) return -EFAULT; if (is_PKCS11_padded(msg->text, mex->inputdatalength)) return -EINVAL; /* static message header and f&r */ msg->hdr = static_type6_hdr; msg->fr = static_pke_function_and_rules; if (is_PKCS12_padded(msg->text, mex->inputdatalength)) { /* strip the padding and adjust the data length */ pad_len = strnlen(msg->text + 2, mex->inputdatalength - 2) + 3; if (pad_len <= 9 || 
pad_len >= mex->inputdatalength) return -ENODEV; vud_len = mex->inputdatalength - pad_len; memmove(msg->text, msg->text + pad_len, vud_len); msg->length = cpu_to_le16(vud_len + 2); /* Set up key after the variable length text. */ size = zcrypt_type6_mex_key_en(mex, msg->text + vud_len, 0); if (size < 0) return size; size += sizeof(*msg) + vud_len; /* total size of msg */ } else { vud_len = mex->inputdatalength; msg->length = cpu_to_le16(2 + vud_len); msg->hdr.function_code[1] = 'D'; msg->fr.function_code[1] = 'D'; /* Set up key after the variable length text. */ size = zcrypt_type6_mex_key_de(mex, msg->text + vud_len, 0); if (size < 0) return size; size += sizeof(*msg) + vud_len; /* total size of msg */ } /* message header, cprb and f&r */ msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4; msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr); msg->cprb = static_cprb; msg->cprb.usage_domain[0]= AP_QID_QUEUE(zdev->ap_dev->qid); msg->cprb.req_parml = cpu_to_le16(size - sizeof(msg->hdr) - sizeof(msg->cprb)); msg->cprb.rpl_parml = cpu_to_le16(msg->hdr.FromCardLen1); ap_msg->length = (size + 3) & -4; return 0; } /** * Convert a ICACRT message to a type6 CRT message. * * @zdev: crypto device pointer * @zreq: crypto request pointer * @crt: pointer to user input data * * Returns 0 on success or -EFAULT. 
*/ static int ICACRT_msg_to_type6CRT_msg(struct zcrypt_device *zdev, struct ap_message *ap_msg, struct ica_rsa_modexpo_crt *crt) { static struct type6_hdr static_type6_hdr = { .type = 0x06, .offset1 = 0x00000058, .agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50, 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01}, .function_code = {'P','D'}, }; static struct function_and_rules_block static_pkd_function_and_rules ={ .function_code = {'P','D'}, .ulen = __constant_cpu_to_le16(10), .only_rule = {'P','K','C','S','-','1','.','2'} }; struct { struct type6_hdr hdr; struct CPRB cprb; struct function_and_rules_block fr; unsigned short length; char text[0]; } __attribute__((packed)) *msg = ap_msg->message; int size; /* VUD.ciphertext */ msg->length = cpu_to_le16(2 + crt->inputdatalength); if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength)) return -EFAULT; if (is_PKCS11_padded(msg->text, crt->inputdatalength)) return -EINVAL; /* Set up key after the variable length text. */ size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 0); if (size < 0) return size; size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */ /* message header, cprb and f&r */ msg->hdr = static_type6_hdr; msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4; msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr); msg->cprb = static_cprb; msg->cprb.usage_domain[0] = AP_QID_QUEUE(zdev->ap_dev->qid); msg->cprb.req_parml = msg->cprb.rpl_parml = cpu_to_le16(size - sizeof(msg->hdr) - sizeof(msg->cprb)); msg->fr = static_pkd_function_and_rules; ap_msg->length = (size + 3) & -4; return 0; } /** * Copy results from a type 86 reply message back to user space. * * @zdev: crypto device pointer * @reply: reply AP message. * @data: pointer to user output data * @length: size of user output data * * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 
*/ struct type86_reply { struct type86_hdr hdr; struct type86_fmt2_ext fmt2; struct CPRB cprb; unsigned char pad[4]; /* 4 byte function code/rules block ? */ unsigned short length; char text[0]; } __attribute__((packed)); static int convert_type86(struct zcrypt_device *zdev, struct ap_message *reply, char __user *outputdata, unsigned int outputdatalength) { static unsigned char static_pad[] = { 0x00,0x02, 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD, 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57, 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B, 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39, 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5, 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D, 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB, 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F, 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9, 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45, 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9, 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F, 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD, 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D, 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD, 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9, 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B, 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B, 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B, 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD, 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7, 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1, 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3, 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23, 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55, 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43, 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F, 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F, 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5, 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD, 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41, 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09 }; struct type86_reply *msg = reply->message; unsigned short service_rc, service_rs; unsigned int reply_len, pad_len; char *data; service_rc = le16_to_cpu(msg->cprb.ccp_rtcode); if (unlikely(service_rc != 0)) { service_rs = le16_to_cpu(msg->cprb.ccp_rscode); if (service_rc == 8 
&& service_rs == 66) return -EINVAL; if (service_rc == 8 && service_rs == 65) return -EINVAL; if (service_rc == 8 && service_rs == 770) { zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; return -EAGAIN; } if (service_rc == 8 && service_rs == 783) { zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; return -EAGAIN; } if (service_rc == 8 && service_rs == 72) return -EINVAL; zdev->online = 0; return -EAGAIN; /* repeat the request on a different device. */ } data = msg->text; reply_len = le16_to_cpu(msg->length) - 2; if (reply_len > outputdatalength) return -EINVAL; /* * For all encipher requests, the length of the ciphertext (reply_len) * will always equal the modulus length. For MEX decipher requests * the output needs to get padded. Minimum pad size is 10. * * Currently, the cases where padding will be added is for: * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support * ZERO-PAD and CRT is only supported for PKD requests) * - PCICC, always */ pad_len = outputdatalength - reply_len; if (pad_len > 0) { if (pad_len < 10) return -EINVAL; /* 'restore' padding left in the PCICC/PCIXCC card. */ if (copy_to_user(outputdata, static_pad, pad_len - 1)) return -EFAULT; if (put_user(0, outputdata + pad_len - 1)) return -EFAULT; } /* Copy the crypto response to user space. */ if (copy_to_user(outputdata + pad_len, data, reply_len)) return -EFAULT; return 0; } static int convert_response(struct zcrypt_device *zdev, struct ap_message *reply, char __user *outputdata, unsigned int outputdatalength) { struct type86_reply *msg = reply->message; /* Response type byte is the second byte in the response. 
*/ switch (msg->hdr.type) { case TYPE82_RSP_CODE: case TYPE88_RSP_CODE: return convert_error(zdev, reply); case TYPE86_RSP_CODE: if (msg->hdr.reply_code) return convert_error(zdev, reply); if (msg->cprb.cprb_ver_id == 0x01) return convert_type86(zdev, reply, outputdata, outputdatalength); /* no break, incorrect cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ zdev->online = 0; return -EAGAIN; /* repeat the request on a different device. */ } } /** * This function is called from the AP bus code after a crypto request * "msg" has finished with the reply message "reply". * It is called from tasklet context. * @ap_dev: pointer to the AP device * @msg: pointer to the AP message * @reply: pointer to the AP reply message */ static void zcrypt_pcicc_receive(struct ap_device *ap_dev, struct ap_message *msg, struct ap_message *reply) { static struct error_hdr error_reply = { .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; struct type86_reply *t86r; int length; /* Copy the reply message to the request message buffer. */ if (IS_ERR(reply)) { memcpy(msg->message, &error_reply, sizeof(error_reply)); goto out; } t86r = reply->message; if (t86r->hdr.type == TYPE86_RSP_CODE && t86r->cprb.cprb_ver_id == 0x01) { length = sizeof(struct type86_reply) + t86r->length - 2; length = min(PCICC_MAX_RESPONSE_SIZE, length); memcpy(msg->message, reply->message, length); } else memcpy(msg->message, reply->message, sizeof error_reply); out: complete((struct completion *) msg->private); } static atomic_t zcrypt_step = ATOMIC_INIT(0); /** * The request distributor calls this function if it picked the PCICC * device to handle a modexpo request. 
* @zdev: pointer to zcrypt_device structure that identifies the * PCICC device to the request distributor * @mex: pointer to the modexpo request buffer */ static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev, struct ica_rsa_modexpo *mex) { struct ap_message ap_msg; struct completion work; int rc; ap_init_message(&ap_msg); ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); if (!ap_msg.message) return -ENOMEM; ap_msg.length = PAGE_SIZE; ap_msg.psmid = (((unsigned long long) current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg.private = &work; rc = ICAMEX_msg_to_type6MEX_msg(zdev, &ap_msg, mex); if (rc) goto out_free; init_completion(&work); ap_queue_message(zdev->ap_dev, &ap_msg); rc = wait_for_completion_interruptible(&work); if (rc == 0) rc = convert_response(zdev, &ap_msg, mex->outputdata, mex->outputdatalength); else /* Signal pending. */ ap_cancel_message(zdev->ap_dev, &ap_msg); out_free: free_page((unsigned long) ap_msg.message); return rc; } /** * The request distributor calls this function if it picked the PCICC * device to handle a modexpo_crt request. * @zdev: pointer to zcrypt_device structure that identifies the * PCICC device to the request distributor * @crt: pointer to the modexpoc_crt request buffer */ static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev, struct ica_rsa_modexpo_crt *crt) { struct ap_message ap_msg; struct completion work; int rc; ap_init_message(&ap_msg); ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); if (!ap_msg.message) return -ENOMEM; ap_msg.length = PAGE_SIZE; ap_msg.psmid = (((unsigned long long) current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg.private = &work; rc = ICACRT_msg_to_type6CRT_msg(zdev, &ap_msg, crt); if (rc) goto out_free; init_completion(&work); ap_queue_message(zdev->ap_dev, &ap_msg); rc = wait_for_completion_interruptible(&work); if (rc == 0) rc = convert_response(zdev, &ap_msg, crt->outputdata, crt->outputdatalength); else /* Signal pending. 
*/ ap_cancel_message(zdev->ap_dev, &ap_msg); out_free: free_page((unsigned long) ap_msg.message); return rc; } /** * The crypto operations for a PCICC card. */ static struct zcrypt_ops zcrypt_pcicc_ops = { .rsa_modexpo = zcrypt_pcicc_modexpo, .rsa_modexpo_crt = zcrypt_pcicc_modexpo_crt, }; /** * Probe function for PCICC cards. It always accepts the AP device * since the bus_match already checked the hardware type. * @ap_dev: pointer to the AP device. */ static int zcrypt_pcicc_probe(struct ap_device *ap_dev) { struct zcrypt_device *zdev; int rc; zdev = zcrypt_device_alloc(PCICC_MAX_RESPONSE_SIZE); if (!zdev) return -ENOMEM; zdev->ap_dev = ap_dev; zdev->ops = &zcrypt_pcicc_ops; zdev->online = 1; zdev->user_space_type = ZCRYPT_PCICC; zdev->type_string = "PCICC"; zdev->min_mod_size = PCICC_MIN_MOD_SIZE; zdev->max_mod_size = PCICC_MAX_MOD_SIZE; zdev->speed_rating = PCICC_SPEED_RATING; ap_dev->reply = &zdev->reply; ap_dev->private = zdev; rc = zcrypt_device_register(zdev); if (rc) goto out_free; return 0; out_free: ap_dev->private = NULL; zcrypt_device_free(zdev); return rc; } /** * This is called to remove the extended PCICC driver information * if an AP device is removed. */ static void zcrypt_pcicc_remove(struct ap_device *ap_dev) { struct zcrypt_device *zdev = ap_dev->private; zcrypt_device_unregister(zdev); } int __init zcrypt_pcicc_init(void) { return ap_driver_register(&zcrypt_pcicc_driver, THIS_MODULE, "pcicc"); } void zcrypt_pcicc_exit(void) { ap_driver_unregister(&zcrypt_pcicc_driver); } #ifndef CONFIG_ZCRYPT_MONOLITHIC module_init(zcrypt_pcicc_init); module_exit(zcrypt_pcicc_exit); #endif
gpl-2.0
Radium-Devices/Radium_jflte
drivers/hwmon/sht15.c
1498
29785
/* * sht15.c - support for the SHT15 Temperature and Humidity Sensor * * Portions Copyright (c) 2010-2011 Savoir-faire Linux Inc. * Jerome Oufella <jerome.oufella@savoirfairelinux.com> * Vivien Didelot <vivien.didelot@savoirfairelinux.com> * * Copyright (c) 2009 Jonathan Cameron * * Copyright (c) 2007 Wouter Horre * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * For further information, see the Documentation/hwmon/sht15 file. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/module.h> #include <linux/init.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/err.h> #include <linux/sht15.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/atomic.h> /* Commands */ #define SHT15_MEASURE_TEMP 0x03 #define SHT15_MEASURE_RH 0x05 #define SHT15_WRITE_STATUS 0x06 #define SHT15_READ_STATUS 0x07 #define SHT15_SOFT_RESET 0x1E /* Min timings */ #define SHT15_TSCKL 100 /* (nsecs) clock low */ #define SHT15_TSCKH 100 /* (nsecs) clock high */ #define SHT15_TSU 150 /* (nsecs) data setup time */ #define SHT15_TSRST 11 /* (msecs) soft reset time */ /* Status Register Bits */ #define SHT15_STATUS_LOW_RESOLUTION 0x01 #define SHT15_STATUS_NO_OTP_RELOAD 0x02 #define SHT15_STATUS_HEATER 0x04 #define SHT15_STATUS_LOW_BATTERY 0x40 /* Actions the driver may be doing */ enum sht15_state { SHT15_READING_NOTHING, SHT15_READING_TEMP, SHT15_READING_HUMID }; /** * struct sht15_temppair - elements of voltage dependent temp calc * @vdd: supply voltage in microvolts * @d1: see data sheet */ struct sht15_temppair { int vdd; /* microvolts */ int d1; }; /* Table 9 from datasheet - relates temperature calculation to supply voltage */ 
static const struct sht15_temppair temppoints[] = { { 2500000, -39400 }, { 3000000, -39600 }, { 3500000, -39700 }, { 4000000, -39800 }, { 5000000, -40100 }, }; /* Table from CRC datasheet, section 2.4 */ static const u8 sht15_crc8_table[] = { 0, 49, 98, 83, 196, 245, 166, 151, 185, 136, 219, 234, 125, 76, 31, 46, 67, 114, 33, 16, 135, 182, 229, 212, 250, 203, 152, 169, 62, 15, 92, 109, 134, 183, 228, 213, 66, 115, 32, 17, 63, 14, 93, 108, 251, 202, 153, 168, 197, 244, 167, 150, 1, 48, 99, 82, 124, 77, 30, 47, 184, 137, 218, 235, 61, 12, 95, 110, 249, 200, 155, 170, 132, 181, 230, 215, 64, 113, 34, 19, 126, 79, 28, 45, 186, 139, 216, 233, 199, 246, 165, 148, 3, 50, 97, 80, 187, 138, 217, 232, 127, 78, 29, 44, 2, 51, 96, 81, 198, 247, 164, 149, 248, 201, 154, 171, 60, 13, 94, 111, 65, 112, 35, 18, 133, 180, 231, 214, 122, 75, 24, 41, 190, 143, 220, 237, 195, 242, 161, 144, 7, 54, 101, 84, 57, 8, 91, 106, 253, 204, 159, 174, 128, 177, 226, 211, 68, 117, 38, 23, 252, 205, 158, 175, 56, 9, 90, 107, 69, 116, 39, 22, 129, 176, 227, 210, 191, 142, 221, 236, 123, 74, 25, 40, 6, 55, 100, 85, 194, 243, 160, 145, 71, 118, 37, 20, 131, 178, 225, 208, 254, 207, 156, 173, 58, 11, 88, 105, 4, 53, 102, 87, 192, 241, 162, 147, 189, 140, 223, 238, 121, 72, 27, 42, 193, 240, 163, 146, 5, 52, 103, 86, 120, 73, 26, 43, 188, 141, 222, 239, 130, 179, 224, 209, 70, 119, 36, 21, 59, 10, 89, 104, 255, 206, 157, 172 }; /** * struct sht15_data - device instance specific data * @pdata: platform data (gpio's etc). * @read_work: bh of interrupt handler. * @wait_queue: wait queue for getting values from device. * @val_temp: last temperature value read from device. * @val_humid: last humidity value read from device. * @val_status: last status register value read from device. * @checksum_ok: last value read from the device passed CRC validation. * @checksumming: flag used to enable the data validation with CRC. * @state: state identifying the action the driver is doing. 
* @measurements_valid: are the current stored measures valid (start condition). * @status_valid: is the current stored status valid (start condition). * @last_measurement: time of last measure. * @last_status: time of last status reading. * @read_lock: mutex to ensure only one read in progress at a time. * @dev: associate device structure. * @hwmon_dev: device associated with hwmon subsystem. * @reg: associated regulator (if specified). * @nb: notifier block to handle notifications of voltage * changes. * @supply_uV: local copy of supply voltage used to allow use of * regulator consumer if available. * @supply_uV_valid: indicates that an updated value has not yet been * obtained from the regulator and so any calculations * based upon it will be invalid. * @update_supply_work: work struct that is used to update the supply_uV. * @interrupt_handled: flag used to indicate a handler has been scheduled. */ struct sht15_data { struct sht15_platform_data *pdata; struct work_struct read_work; wait_queue_head_t wait_queue; uint16_t val_temp; uint16_t val_humid; u8 val_status; bool checksum_ok; bool checksumming; enum sht15_state state; bool measurements_valid; bool status_valid; unsigned long last_measurement; unsigned long last_status; struct mutex read_lock; struct device *dev; struct device *hwmon_dev; struct regulator *reg; struct notifier_block nb; int supply_uV; bool supply_uV_valid; struct work_struct update_supply_work; atomic_t interrupt_handled; }; /** * sht15_reverse() - reverse a byte * @byte: byte to reverse. */ static u8 sht15_reverse(u8 byte) { u8 i, c; for (c = 0, i = 0; i < 8; i++) c |= (!!(byte & (1 << i))) << (7 - i); return c; } /** * sht15_crc8() - compute crc8 * @data: sht15 specific data. * @value: sht15 retrieved data. * * This implements section 2 of the CRC datasheet. 
*/ static u8 sht15_crc8(struct sht15_data *data, const u8 *value, int len) { u8 crc = sht15_reverse(data->val_status & 0x0F); while (len--) { crc = sht15_crc8_table[*value ^ crc]; value++; } return crc; } /** * sht15_connection_reset() - reset the comms interface * @data: sht15 specific data * * This implements section 3.4 of the data sheet */ static void sht15_connection_reset(struct sht15_data *data) { int i; gpio_direction_output(data->pdata->gpio_data, 1); ndelay(SHT15_TSCKL); gpio_set_value(data->pdata->gpio_sck, 0); ndelay(SHT15_TSCKL); for (i = 0; i < 9; ++i) { gpio_set_value(data->pdata->gpio_sck, 1); ndelay(SHT15_TSCKH); gpio_set_value(data->pdata->gpio_sck, 0); ndelay(SHT15_TSCKL); } } /** * sht15_send_bit() - send an individual bit to the device * @data: device state data * @val: value of bit to be sent */ static inline void sht15_send_bit(struct sht15_data *data, int val) { gpio_set_value(data->pdata->gpio_data, val); ndelay(SHT15_TSU); gpio_set_value(data->pdata->gpio_sck, 1); ndelay(SHT15_TSCKH); gpio_set_value(data->pdata->gpio_sck, 0); ndelay(SHT15_TSCKL); /* clock low time */ } /** * sht15_transmission_start() - specific sequence for new transmission * @data: device state data * * Timings for this are not documented on the data sheet, so very * conservative ones used in implementation. This implements * figure 12 on the data sheet. 
*/ static void sht15_transmission_start(struct sht15_data *data) { /* ensure data is high and output */ gpio_direction_output(data->pdata->gpio_data, 1); ndelay(SHT15_TSU); gpio_set_value(data->pdata->gpio_sck, 0); ndelay(SHT15_TSCKL); gpio_set_value(data->pdata->gpio_sck, 1); ndelay(SHT15_TSCKH); gpio_set_value(data->pdata->gpio_data, 0); ndelay(SHT15_TSU); gpio_set_value(data->pdata->gpio_sck, 0); ndelay(SHT15_TSCKL); gpio_set_value(data->pdata->gpio_sck, 1); ndelay(SHT15_TSCKH); gpio_set_value(data->pdata->gpio_data, 1); ndelay(SHT15_TSU); gpio_set_value(data->pdata->gpio_sck, 0); ndelay(SHT15_TSCKL); } /** * sht15_send_byte() - send a single byte to the device * @data: device state * @byte: value to be sent */ static void sht15_send_byte(struct sht15_data *data, u8 byte) { int i; for (i = 0; i < 8; i++) { sht15_send_bit(data, !!(byte & 0x80)); byte <<= 1; } } /** * sht15_wait_for_response() - checks for ack from device * @data: device state */ static int sht15_wait_for_response(struct sht15_data *data) { gpio_direction_input(data->pdata->gpio_data); gpio_set_value(data->pdata->gpio_sck, 1); ndelay(SHT15_TSCKH); if (gpio_get_value(data->pdata->gpio_data)) { gpio_set_value(data->pdata->gpio_sck, 0); dev_err(data->dev, "Command not acknowledged\n"); sht15_connection_reset(data); return -EIO; } gpio_set_value(data->pdata->gpio_sck, 0); ndelay(SHT15_TSCKL); return 0; } /** * sht15_send_cmd() - Sends a command to the device. * @data: device state * @cmd: command byte to be sent * * On entry, sck is output low, data is output pull high * and the interrupt disabled. */ static int sht15_send_cmd(struct sht15_data *data, u8 cmd) { int ret = 0; sht15_transmission_start(data); sht15_send_byte(data, cmd); ret = sht15_wait_for_response(data); return ret; } /** * sht15_soft_reset() - send a soft reset command * @data: sht15 specific data. * * As described in section 3.2 of the datasheet. 
*/ static int sht15_soft_reset(struct sht15_data *data) { int ret; ret = sht15_send_cmd(data, SHT15_SOFT_RESET); if (ret) return ret; msleep(SHT15_TSRST); /* device resets default hardware status register value */ data->val_status = 0; return ret; } /** * sht15_ack() - send a ack * @data: sht15 specific data. * * Each byte of data is acknowledged by pulling the data line * low for one clock pulse. */ static void sht15_ack(struct sht15_data *data) { gpio_direction_output(data->pdata->gpio_data, 0); ndelay(SHT15_TSU); gpio_set_value(data->pdata->gpio_sck, 1); ndelay(SHT15_TSU); gpio_set_value(data->pdata->gpio_sck, 0); ndelay(SHT15_TSU); gpio_set_value(data->pdata->gpio_data, 1); gpio_direction_input(data->pdata->gpio_data); } /** * sht15_end_transmission() - notify device of end of transmission * @data: device state. * * This is basically a NAK (single clock pulse, data high). */ static void sht15_end_transmission(struct sht15_data *data) { gpio_direction_output(data->pdata->gpio_data, 1); ndelay(SHT15_TSU); gpio_set_value(data->pdata->gpio_sck, 1); ndelay(SHT15_TSCKH); gpio_set_value(data->pdata->gpio_sck, 0); ndelay(SHT15_TSCKL); } /** * sht15_read_byte() - Read a byte back from the device * @data: device state. */ static u8 sht15_read_byte(struct sht15_data *data) { int i; u8 byte = 0; for (i = 0; i < 8; ++i) { byte <<= 1; gpio_set_value(data->pdata->gpio_sck, 1); ndelay(SHT15_TSCKH); byte |= !!gpio_get_value(data->pdata->gpio_data); gpio_set_value(data->pdata->gpio_sck, 0); ndelay(SHT15_TSCKL); } return byte; } /** * sht15_send_status() - write the status register byte * @data: sht15 specific data. * @status: the byte to set the status register with. * * As described in figure 14 and table 5 of the datasheet. 
*/ static int sht15_send_status(struct sht15_data *data, u8 status) { int ret; ret = sht15_send_cmd(data, SHT15_WRITE_STATUS); if (ret) return ret; gpio_direction_output(data->pdata->gpio_data, 1); ndelay(SHT15_TSU); sht15_send_byte(data, status); ret = sht15_wait_for_response(data); if (ret) return ret; data->val_status = status; return 0; } /** * sht15_update_status() - get updated status register from device if too old * @data: device instance specific data. * * As described in figure 15 and table 5 of the datasheet. */ static int sht15_update_status(struct sht15_data *data) { int ret = 0; u8 status; u8 previous_config; u8 dev_checksum = 0; u8 checksum_vals[2]; int timeout = HZ; mutex_lock(&data->read_lock); if (time_after(jiffies, data->last_status + timeout) || !data->status_valid) { ret = sht15_send_cmd(data, SHT15_READ_STATUS); if (ret) goto error_ret; status = sht15_read_byte(data); if (data->checksumming) { sht15_ack(data); dev_checksum = sht15_reverse(sht15_read_byte(data)); checksum_vals[0] = SHT15_READ_STATUS; checksum_vals[1] = status; data->checksum_ok = (sht15_crc8(data, checksum_vals, 2) == dev_checksum); } sht15_end_transmission(data); /* * Perform checksum validation on the received data. * Specification mentions that in case a checksum verification * fails, a soft reset command must be sent to the device. 
*/ if (data->checksumming && !data->checksum_ok) { previous_config = data->val_status & 0x07; ret = sht15_soft_reset(data); if (ret) goto error_ret; if (previous_config) { ret = sht15_send_status(data, previous_config); if (ret) { dev_err(data->dev, "CRC validation failed, unable " "to restore device settings\n"); goto error_ret; } } ret = -EAGAIN; goto error_ret; } data->val_status = status; data->status_valid = true; data->last_status = jiffies; } error_ret: mutex_unlock(&data->read_lock); return ret; } /** * sht15_measurement() - get a new value from device * @data: device instance specific data * @command: command sent to request value * @timeout_msecs: timeout after which comms are assumed * to have failed are reset. */ static int sht15_measurement(struct sht15_data *data, int command, int timeout_msecs) { int ret; u8 previous_config; ret = sht15_send_cmd(data, command); if (ret) return ret; gpio_direction_input(data->pdata->gpio_data); atomic_set(&data->interrupt_handled, 0); enable_irq(gpio_to_irq(data->pdata->gpio_data)); if (gpio_get_value(data->pdata->gpio_data) == 0) { disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); /* Only relevant if the interrupt hasn't occurred. */ if (!atomic_read(&data->interrupt_handled)) schedule_work(&data->read_work); } ret = wait_event_timeout(data->wait_queue, (data->state == SHT15_READING_NOTHING), msecs_to_jiffies(timeout_msecs)); if (ret == 0) {/* timeout occurred */ disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); sht15_connection_reset(data); return -ETIME; } /* * Perform checksum validation on the received data. * Specification mentions that in case a checksum verification fails, * a soft reset command must be sent to the device. 
*/ if (data->checksumming && !data->checksum_ok) { previous_config = data->val_status & 0x07; ret = sht15_soft_reset(data); if (ret) return ret; if (previous_config) { ret = sht15_send_status(data, previous_config); if (ret) { dev_err(data->dev, "CRC validation failed, unable " "to restore device settings\n"); return ret; } } return -EAGAIN; } return 0; } /** * sht15_update_measurements() - get updated measures from device if too old * @data: device state */ static int sht15_update_measurements(struct sht15_data *data) { int ret = 0; int timeout = HZ; mutex_lock(&data->read_lock); if (time_after(jiffies, data->last_measurement + timeout) || !data->measurements_valid) { data->state = SHT15_READING_HUMID; ret = sht15_measurement(data, SHT15_MEASURE_RH, 160); if (ret) goto error_ret; data->state = SHT15_READING_TEMP; ret = sht15_measurement(data, SHT15_MEASURE_TEMP, 400); if (ret) goto error_ret; data->measurements_valid = true; data->last_measurement = jiffies; } error_ret: mutex_unlock(&data->read_lock); return ret; } /** * sht15_calc_temp() - convert the raw reading to a temperature * @data: device state * * As per section 4.3 of the data sheet. */ static inline int sht15_calc_temp(struct sht15_data *data) { int d1 = temppoints[0].d1; int d2 = (data->val_status & SHT15_STATUS_LOW_RESOLUTION) ? 40 : 10; int i; for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--) /* Find pointer to interpolate */ if (data->supply_uV > temppoints[i - 1].vdd) { d1 = (data->supply_uV - temppoints[i - 1].vdd) * (temppoints[i].d1 - temppoints[i - 1].d1) / (temppoints[i].vdd - temppoints[i - 1].vdd) + temppoints[i - 1].d1; break; } return data->val_temp * d2 + d1; } /** * sht15_calc_humid() - using last temperature convert raw to humid * @data: device state * * This is the temperature compensated version as per section 4.2 of * the data sheet. * * The sensor is assumed to be V3, which is compatible with V4. * Humidity conversion coefficients are shown in table 7 of the datasheet. 
*/ static inline int sht15_calc_humid(struct sht15_data *data) { int rh_linear; /* milli percent */ int temp = sht15_calc_temp(data); int c2, c3; int t2; const int c1 = -4; if (data->val_status & SHT15_STATUS_LOW_RESOLUTION) { c2 = 648000; /* x 10 ^ -6 */ c3 = -7200; /* x 10 ^ -7 */ t2 = 1280; } else { c2 = 40500; /* x 10 ^ -6 */ c3 = -28; /* x 10 ^ -7 */ t2 = 80; } rh_linear = c1 * 1000 + c2 * data->val_humid / 1000 + (data->val_humid * data->val_humid * c3) / 10000; return (temp - 25000) * (10000 + t2 * data->val_humid) / 1000000 + rh_linear; } /** * sht15_show_status() - show status information in sysfs * @dev: device. * @attr: device attribute. * @buf: sysfs buffer where information is written to. * * Will be called on read access to temp1_fault, humidity1_fault * and heater_enable sysfs attributes. * Returns number of bytes written into buffer, negative errno on error. */ static ssize_t sht15_show_status(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct sht15_data *data = dev_get_drvdata(dev); u8 bit = to_sensor_dev_attr(attr)->index; ret = sht15_update_status(data); return ret ? ret : sprintf(buf, "%d\n", !!(data->val_status & bit)); } /** * sht15_store_heater() - change heater state via sysfs * @dev: device. * @attr: device attribute. * @buf: sysfs buffer to read the new heater state from. * @count: length of the data. * * Will be called on write access to heater_enable sysfs attribute. * Returns number of bytes actually decoded, negative errno on error. */ static ssize_t sht15_store_heater(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; struct sht15_data *data = dev_get_drvdata(dev); long value; u8 status; if (kstrtol(buf, 10, &value)) return -EINVAL; mutex_lock(&data->read_lock); status = data->val_status & 0x07; if (!!value) status |= SHT15_STATUS_HEATER; else status &= ~SHT15_STATUS_HEATER; ret = sht15_send_status(data, status); mutex_unlock(&data->read_lock); return ret ? 
ret : count; } /** * sht15_show_temp() - show temperature measurement value in sysfs * @dev: device. * @attr: device attribute. * @buf: sysfs buffer where measurement values are written to. * * Will be called on read access to temp1_input sysfs attribute. * Returns number of bytes written into buffer, negative errno on error. */ static ssize_t sht15_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct sht15_data *data = dev_get_drvdata(dev); /* Technically no need to read humidity as well */ ret = sht15_update_measurements(data); return ret ? ret : sprintf(buf, "%d\n", sht15_calc_temp(data)); } /** * sht15_show_humidity() - show humidity measurement value in sysfs * @dev: device. * @attr: device attribute. * @buf: sysfs buffer where measurement values are written to. * * Will be called on read access to humidity1_input sysfs attribute. * Returns number of bytes written into buffer, negative errno on error. */ static ssize_t sht15_show_humidity(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct sht15_data *data = dev_get_drvdata(dev); ret = sht15_update_measurements(data); return ret ? 
ret : sprintf(buf, "%d\n", sht15_calc_humid(data)); } static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); return sprintf(buf, "%s\n", pdev->name); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, sht15_show_temp, NULL, 0); static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, sht15_show_humidity, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, sht15_show_status, NULL, SHT15_STATUS_LOW_BATTERY); static SENSOR_DEVICE_ATTR(humidity1_fault, S_IRUGO, sht15_show_status, NULL, SHT15_STATUS_LOW_BATTERY); static SENSOR_DEVICE_ATTR(heater_enable, S_IRUGO | S_IWUSR, sht15_show_status, sht15_store_heater, SHT15_STATUS_HEATER); static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static struct attribute *sht15_attrs[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_humidity1_input.dev_attr.attr, &sensor_dev_attr_temp1_fault.dev_attr.attr, &sensor_dev_attr_humidity1_fault.dev_attr.attr, &sensor_dev_attr_heater_enable.dev_attr.attr, &dev_attr_name.attr, NULL, }; static const struct attribute_group sht15_attr_group = { .attrs = sht15_attrs, }; static irqreturn_t sht15_interrupt_fired(int irq, void *d) { struct sht15_data *data = d; /* First disable the interrupt */ disable_irq_nosync(irq); atomic_inc(&data->interrupt_handled); /* Then schedule a reading work struct */ if (data->state != SHT15_READING_NOTHING) schedule_work(&data->read_work); return IRQ_HANDLED; } static void sht15_bh_read_data(struct work_struct *work_s) { uint16_t val = 0; u8 dev_checksum = 0; u8 checksum_vals[3]; struct sht15_data *data = container_of(work_s, struct sht15_data, read_work); /* Firstly, verify the line is low */ if (gpio_get_value(data->pdata->gpio_data)) { /* * If not, then start the interrupt again - care here as could * have gone low in meantime so verify it hasn't! 
*/ atomic_set(&data->interrupt_handled, 0); enable_irq(gpio_to_irq(data->pdata->gpio_data)); /* If still not occurred or another handler was scheduled */ if (gpio_get_value(data->pdata->gpio_data) || atomic_read(&data->interrupt_handled)) return; } /* Read the data back from the device */ val = sht15_read_byte(data); val <<= 8; sht15_ack(data); val |= sht15_read_byte(data); if (data->checksumming) { /* * Ask the device for a checksum and read it back. * Note: the device sends the checksum byte reversed. */ sht15_ack(data); dev_checksum = sht15_reverse(sht15_read_byte(data)); checksum_vals[0] = (data->state == SHT15_READING_TEMP) ? SHT15_MEASURE_TEMP : SHT15_MEASURE_RH; checksum_vals[1] = (u8) (val >> 8); checksum_vals[2] = (u8) val; data->checksum_ok = (sht15_crc8(data, checksum_vals, 3) == dev_checksum); } /* Tell the device we are done */ sht15_end_transmission(data); switch (data->state) { case SHT15_READING_TEMP: data->val_temp = val; break; case SHT15_READING_HUMID: data->val_humid = val; break; default: break; } data->state = SHT15_READING_NOTHING; wake_up(&data->wait_queue); } static void sht15_update_voltage(struct work_struct *work_s) { struct sht15_data *data = container_of(work_s, struct sht15_data, update_supply_work); data->supply_uV = regulator_get_voltage(data->reg); } /** * sht15_invalidate_voltage() - mark supply voltage invalid when notified by reg * @nb: associated notification structure * @event: voltage regulator state change event code * @ignored: function parameter - ignored here * * Note that as the notification code holds the regulator lock, we have * to schedule an update of the supply voltage rather than getting it directly. 
*/ static int sht15_invalidate_voltage(struct notifier_block *nb, unsigned long event, void *ignored) { struct sht15_data *data = container_of(nb, struct sht15_data, nb); if (event == REGULATOR_EVENT_VOLTAGE_CHANGE) data->supply_uV_valid = false; schedule_work(&data->update_supply_work); return NOTIFY_OK; } static int __devinit sht15_probe(struct platform_device *pdev) { int ret; struct sht15_data *data = kzalloc(sizeof(*data), GFP_KERNEL); u8 status = 0; if (!data) { ret = -ENOMEM; dev_err(&pdev->dev, "kzalloc failed\n"); goto error_ret; } INIT_WORK(&data->read_work, sht15_bh_read_data); INIT_WORK(&data->update_supply_work, sht15_update_voltage); platform_set_drvdata(pdev, data); mutex_init(&data->read_lock); data->dev = &pdev->dev; init_waitqueue_head(&data->wait_queue); if (pdev->dev.platform_data == NULL) { ret = -EINVAL; dev_err(&pdev->dev, "no platform data supplied\n"); goto err_free_data; } data->pdata = pdev->dev.platform_data; data->supply_uV = data->pdata->supply_mv * 1000; if (data->pdata->checksum) data->checksumming = true; if (data->pdata->no_otp_reload) status |= SHT15_STATUS_NO_OTP_RELOAD; if (data->pdata->low_resolution) status |= SHT15_STATUS_LOW_RESOLUTION; /* * If a regulator is available, * query what the supply voltage actually is! 
*/ data->reg = regulator_get(data->dev, "vcc"); if (!IS_ERR(data->reg)) { int voltage; voltage = regulator_get_voltage(data->reg); if (voltage) data->supply_uV = voltage; ret = regulator_enable(data->reg); if (ret != 0) { dev_err(&pdev->dev, "failed to enable regulator: %d\n", ret); goto err_free_data; } /* * Setup a notifier block to update this if another device * causes the voltage to change */ data->nb.notifier_call = &sht15_invalidate_voltage; ret = regulator_register_notifier(data->reg, &data->nb); if (ret) { dev_err(&pdev->dev, "regulator notifier request failed\n"); regulator_disable(data->reg); regulator_put(data->reg); goto err_free_data; } } /* Try requesting the GPIOs */ ret = gpio_request(data->pdata->gpio_sck, "SHT15 sck"); if (ret) { dev_err(&pdev->dev, "gpio request failed\n"); goto err_release_reg; } gpio_direction_output(data->pdata->gpio_sck, 0); ret = gpio_request(data->pdata->gpio_data, "SHT15 data"); if (ret) { dev_err(&pdev->dev, "gpio request failed\n"); goto err_release_gpio_sck; } ret = request_irq(gpio_to_irq(data->pdata->gpio_data), sht15_interrupt_fired, IRQF_TRIGGER_FALLING, "sht15 data", data); if (ret) { dev_err(&pdev->dev, "failed to get irq for data line\n"); goto err_release_gpio_data; } disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); sht15_connection_reset(data); ret = sht15_soft_reset(data); if (ret) goto err_release_irq; /* write status with platform data options */ if (status) { ret = sht15_send_status(data, status); if (ret) goto err_release_irq; } ret = sysfs_create_group(&pdev->dev.kobj, &sht15_attr_group); if (ret) { dev_err(&pdev->dev, "sysfs create failed\n"); goto err_release_irq; } data->hwmon_dev = hwmon_device_register(data->dev); if (IS_ERR(data->hwmon_dev)) { ret = PTR_ERR(data->hwmon_dev); goto err_release_sysfs_group; } return 0; err_release_sysfs_group: sysfs_remove_group(&pdev->dev.kobj, &sht15_attr_group); err_release_irq: free_irq(gpio_to_irq(data->pdata->gpio_data), data); err_release_gpio_data: 
gpio_free(data->pdata->gpio_data); err_release_gpio_sck: gpio_free(data->pdata->gpio_sck); err_release_reg: if (!IS_ERR(data->reg)) { regulator_unregister_notifier(data->reg, &data->nb); regulator_disable(data->reg); regulator_put(data->reg); } err_free_data: kfree(data); error_ret: return ret; } static int __devexit sht15_remove(struct platform_device *pdev) { struct sht15_data *data = platform_get_drvdata(pdev); /* * Make sure any reads from the device are done and * prevent new ones beginning */ mutex_lock(&data->read_lock); if (sht15_soft_reset(data)) { mutex_unlock(&data->read_lock); return -EFAULT; } hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &sht15_attr_group); if (!IS_ERR(data->reg)) { regulator_unregister_notifier(data->reg, &data->nb); regulator_disable(data->reg); regulator_put(data->reg); } free_irq(gpio_to_irq(data->pdata->gpio_data), data); gpio_free(data->pdata->gpio_data); gpio_free(data->pdata->gpio_sck); mutex_unlock(&data->read_lock); kfree(data); return 0; } /* * sht_drivers simultaneously refers to __devinit and __devexit function * which causes spurious section mismatch warning. So use __refdata to * get rid from this. 
/*
 * One platform driver per supported part number; all variants share the
 * same probe/remove implementation.
 */
static struct platform_driver __refdata sht_drivers[] = {
	{
		.driver = {
			.name = "sht10",
			.owner = THIS_MODULE,
		},
		.probe = sht15_probe,
		.remove = __devexit_p(sht15_remove),
	}, {
		.driver = {
			.name = "sht11",
			.owner = THIS_MODULE,
		},
		.probe = sht15_probe,
		.remove = __devexit_p(sht15_remove),
	}, {
		.driver = {
			.name = "sht15",
			.owner = THIS_MODULE,
		},
		.probe = sht15_probe,
		.remove = __devexit_p(sht15_remove),
	}, {
		.driver = {
			.name = "sht71",
			.owner = THIS_MODULE,
		},
		.probe = sht15_probe,
		.remove = __devexit_p(sht15_remove),
	}, {
		.driver = {
			.name = "sht75",
			.owner = THIS_MODULE,
		},
		.probe = sht15_probe,
		.remove = __devexit_p(sht15_remove),
	},
};

/*
 * Register every driver in sht_drivers[]; on failure, unwind the ones
 * already registered before propagating the error.
 */
static int __init sht15_init(void)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(sht_drivers); i++) {
		ret = platform_driver_register(&sht_drivers[i]);
		if (ret)
			goto error_unreg;
	}

	return 0;

error_unreg:
	/* i is the index that failed; unregister everything before it. */
	while (--i >= 0)
		platform_driver_unregister(&sht_drivers[i]);

	return ret;
}
module_init(sht15_init);

/* Unregister all drivers in reverse order of registration. */
static void __exit sht15_exit(void)
{
	int i;
	for (i = ARRAY_SIZE(sht_drivers) - 1; i >= 0; i--)
		platform_driver_unregister(&sht_drivers[i]);
}
module_exit(sht15_exit);

MODULE_LICENSE("GPL");
gpl-2.0
7420dev/android_kernel_samsung_zeroflte
drivers/firmware/dmi_scan.c
1754
19877
#include <linux/types.h> #include <linux/string.h> #include <linux/init.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/dmi.h> #include <linux/efi.h> #include <linux/bootmem.h> #include <linux/random.h> #include <asm/dmi.h> /* * DMI stands for "Desktop Management Interface". It is part * of and an antecedent to, SMBIOS, which stands for System * Management BIOS. See further: http://www.dmtf.org/standards */ static char dmi_empty_string[] = " "; static u16 __initdata dmi_ver; /* * Catch too early calls to dmi_check_system(): */ static int dmi_initialized; /* DMI system identification string used during boot */ static char dmi_ids_string[128] __initdata; static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s) { const u8 *bp = ((u8 *) dm) + dm->length; if (s) { s--; while (s > 0 && *bp) { bp += strlen(bp) + 1; s--; } if (*bp != 0) { size_t len = strlen(bp)+1; size_t cmp_len = len > 8 ? 8 : len; if (!memcmp(bp, dmi_empty_string, cmp_len)) return dmi_empty_string; return bp; } } return ""; } static char * __init dmi_string(const struct dmi_header *dm, u8 s) { const char *bp = dmi_string_nosave(dm, s); char *str; size_t len; if (bp == dmi_empty_string) return dmi_empty_string; len = strlen(bp) + 1; str = dmi_alloc(len); if (str != NULL) strcpy(str, bp); else printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len); return str; } /* * We have to be cautious here. 
We have seen BIOSes with DMI pointers * pointing to completely the wrong place for example */ static void dmi_table(u8 *buf, int len, int num, void (*decode)(const struct dmi_header *, void *), void *private_data) { u8 *data = buf; int i = 0; /* * Stop when we see all the items the table claimed to have * OR we run off the end of the table (also happens) */ while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) { const struct dmi_header *dm = (const struct dmi_header *)data; /* * We want to know the total length (formatted area and * strings) before decoding to make sure we won't run off the * table in dmi_decode or dmi_string */ data += dm->length; while ((data - buf < len - 1) && (data[0] || data[1])) data++; if (data - buf < len - 1) decode(dm, private_data); data += 2; i++; } } static u32 dmi_base; static u16 dmi_len; static u16 dmi_num; static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, void *)) { u8 *buf; buf = dmi_ioremap(dmi_base, dmi_len); if (buf == NULL) return -1; dmi_table(buf, dmi_len, dmi_num, decode, NULL); add_device_randomness(buf, dmi_len); dmi_iounmap(buf, dmi_len); return 0; } static int __init dmi_checksum(const u8 *buf, u8 len) { u8 sum = 0; int a; for (a = 0; a < len; a++) sum += buf[a]; return sum == 0; } static char *dmi_ident[DMI_STRING_MAX]; static LIST_HEAD(dmi_devices); int dmi_available; /* * Save a DMI string */ static void __init dmi_save_ident(const struct dmi_header *dm, int slot, int string) { const char *d = (const char*) dm; char *p; if (dmi_ident[slot]) return; p = dmi_string(dm, d[string]); if (p == NULL) return; dmi_ident[slot] = p; } static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int index) { const u8 *d = (u8*) dm + index; char *s; int is_ff = 1, is_00 = 1, i; if (dmi_ident[slot]) return; for (i = 0; i < 16 && (is_ff || is_00); i++) { if (d[i] != 0x00) is_00 = 0; if (d[i] != 0xFF) is_ff = 0; } if (is_ff || is_00) return; s = dmi_alloc(16*2+4+1); if (!s) return; 
/* * As of version 2.6 of the SMBIOS specification, the first 3 fields of * the UUID are supposed to be little-endian encoded. The specification * says that this is the defacto standard. */ if (dmi_ver >= 0x0206) sprintf(s, "%pUL", d); else sprintf(s, "%pUB", d); dmi_ident[slot] = s; } static void __init dmi_save_type(const struct dmi_header *dm, int slot, int index) { const u8 *d = (u8*) dm + index; char *s; if (dmi_ident[slot]) return; s = dmi_alloc(4); if (!s) return; sprintf(s, "%u", *d & 0x7F); dmi_ident[slot] = s; } static void __init dmi_save_one_device(int type, const char *name) { struct dmi_device *dev; /* No duplicate device */ if (dmi_find_device(type, name, NULL)) return; dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1); if (!dev) { printk(KERN_ERR "dmi_save_one_device: out of memory.\n"); return; } dev->type = type; strcpy((char *)(dev + 1), name); dev->name = (char *)(dev + 1); dev->device_data = NULL; list_add(&dev->list, &dmi_devices); } static void __init dmi_save_devices(const struct dmi_header *dm) { int i, count = (dm->length - sizeof(struct dmi_header)) / 2; for (i = 0; i < count; i++) { const char *d = (char *)(dm + 1) + (i * 2); /* Skip disabled device */ if ((*d & 0x80) == 0) continue; dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d + 1))); } } static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) { int i, count = *(u8 *)(dm + 1); struct dmi_device *dev; for (i = 1; i <= count; i++) { char *devname = dmi_string(dm, i); if (devname == dmi_empty_string) continue; dev = dmi_alloc(sizeof(*dev)); if (!dev) { printk(KERN_ERR "dmi_save_oem_strings_devices: out of memory.\n"); break; } dev->type = DMI_DEV_TYPE_OEM_STRING; dev->name = devname; dev->device_data = NULL; list_add(&dev->list, &dmi_devices); } } static void __init dmi_save_ipmi_device(const struct dmi_header *dm) { struct dmi_device *dev; void * data; data = dmi_alloc(dm->length); if (data == NULL) { printk(KERN_ERR "dmi_save_ipmi_device: out of 
memory.\n"); return; } memcpy(data, dm, dm->length); dev = dmi_alloc(sizeof(*dev)); if (!dev) { printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n"); return; } dev->type = DMI_DEV_TYPE_IPMI; dev->name = "IPMI controller"; dev->device_data = data; list_add_tail(&dev->list, &dmi_devices); } static void __init dmi_save_dev_onboard(int instance, int segment, int bus, int devfn, const char *name) { struct dmi_dev_onboard *onboard_dev; onboard_dev = dmi_alloc(sizeof(*onboard_dev) + strlen(name) + 1); if (!onboard_dev) { printk(KERN_ERR "dmi_save_dev_onboard: out of memory.\n"); return; } onboard_dev->instance = instance; onboard_dev->segment = segment; onboard_dev->bus = bus; onboard_dev->devfn = devfn; strcpy((char *)&onboard_dev[1], name); onboard_dev->dev.type = DMI_DEV_TYPE_DEV_ONBOARD; onboard_dev->dev.name = (char *)&onboard_dev[1]; onboard_dev->dev.device_data = onboard_dev; list_add(&onboard_dev->dev.list, &dmi_devices); } static void __init dmi_save_extended_devices(const struct dmi_header *dm) { const u8 *d = (u8*) dm + 5; /* Skip disabled device */ if ((*d & 0x80) == 0) return; dmi_save_dev_onboard(*(d+1), *(u16 *)(d+2), *(d+4), *(d+5), dmi_string_nosave(dm, *(d-1))); dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d - 1))); } /* * Process a DMI table entry. Right now all we care about are the BIOS * and machine entries. For 2.5 we should pull the smbus controller info * out of here. 
*/ static void __init dmi_decode(const struct dmi_header *dm, void *dummy) { switch(dm->type) { case 0: /* BIOS Information */ dmi_save_ident(dm, DMI_BIOS_VENDOR, 4); dmi_save_ident(dm, DMI_BIOS_VERSION, 5); dmi_save_ident(dm, DMI_BIOS_DATE, 8); break; case 1: /* System Information */ dmi_save_ident(dm, DMI_SYS_VENDOR, 4); dmi_save_ident(dm, DMI_PRODUCT_NAME, 5); dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7); dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8); break; case 2: /* Base Board Information */ dmi_save_ident(dm, DMI_BOARD_VENDOR, 4); dmi_save_ident(dm, DMI_BOARD_NAME, 5); dmi_save_ident(dm, DMI_BOARD_VERSION, 6); dmi_save_ident(dm, DMI_BOARD_SERIAL, 7); dmi_save_ident(dm, DMI_BOARD_ASSET_TAG, 8); break; case 3: /* Chassis Information */ dmi_save_ident(dm, DMI_CHASSIS_VENDOR, 4); dmi_save_type(dm, DMI_CHASSIS_TYPE, 5); dmi_save_ident(dm, DMI_CHASSIS_VERSION, 6); dmi_save_ident(dm, DMI_CHASSIS_SERIAL, 7); dmi_save_ident(dm, DMI_CHASSIS_ASSET_TAG, 8); break; case 10: /* Onboard Devices Information */ dmi_save_devices(dm); break; case 11: /* OEM Strings */ dmi_save_oem_strings_devices(dm); break; case 38: /* IPMI Device Information */ dmi_save_ipmi_device(dm); break; case 41: /* Onboard Devices Extended Information */ dmi_save_extended_devices(dm); } } static int __init print_filtered(char *buf, size_t len, const char *info) { int c = 0; const char *p; if (!info) return c; for (p = info; *p; p++) if (isprint(*p)) c += scnprintf(buf + c, len - c, "%c", *p); else c += scnprintf(buf + c, len - c, "\\x%02x", *p & 0xff); return c; } static void __init dmi_format_ids(char *buf, size_t len) { int c = 0; const char *board; /* Board Name is optional */ c += print_filtered(buf + c, len - c, dmi_get_system_info(DMI_SYS_VENDOR)); c += scnprintf(buf + c, len - c, " "); c += print_filtered(buf + c, len - c, dmi_get_system_info(DMI_PRODUCT_NAME)); board = dmi_get_system_info(DMI_BOARD_NAME); if (board) { c += scnprintf(buf + c, len - c, "/"); c 
+= print_filtered(buf + c, len - c, board); } c += scnprintf(buf + c, len - c, ", BIOS "); c += print_filtered(buf + c, len - c, dmi_get_system_info(DMI_BIOS_VERSION)); c += scnprintf(buf + c, len - c, " "); c += print_filtered(buf + c, len - c, dmi_get_system_info(DMI_BIOS_DATE)); } static int __init dmi_present(const u8 *buf) { int smbios_ver; if (memcmp(buf, "_SM_", 4) == 0 && buf[5] < 32 && dmi_checksum(buf, buf[5])) { smbios_ver = (buf[6] << 8) + buf[7]; /* Some BIOS report weird SMBIOS version, fix that up */ switch (smbios_ver) { case 0x021F: case 0x0221: pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", smbios_ver & 0xFF, 3); smbios_ver = 0x0203; break; case 0x0233: pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6); smbios_ver = 0x0206; break; } } else { smbios_ver = 0; } buf += 16; if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) { dmi_num = (buf[13] << 8) | buf[12]; dmi_len = (buf[7] << 8) | buf[6]; dmi_base = (buf[11] << 24) | (buf[10] << 16) | (buf[9] << 8) | buf[8]; if (dmi_walk_early(dmi_decode) == 0) { if (smbios_ver) { dmi_ver = smbios_ver; pr_info("SMBIOS %d.%d present.\n", dmi_ver >> 8, dmi_ver & 0xFF); } else { dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F); pr_info("Legacy DMI %d.%d present.\n", dmi_ver >> 8, dmi_ver & 0xFF); } dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string)); printk(KERN_DEBUG "DMI: %s\n", dmi_ids_string); return 0; } } return 1; } void __init dmi_scan_machine(void) { char __iomem *p, *q; char buf[32]; if (efi_enabled(EFI_CONFIG_TABLES)) { if (efi.smbios == EFI_INVALID_TABLE_ADDR) goto error; /* This is called as a core_initcall() because it isn't * needed during early boot. This also means we can * iounmap the space when we're done with it. 
*/ p = dmi_ioremap(efi.smbios, 32); if (p == NULL) goto error; memcpy_fromio(buf, p, 32); dmi_iounmap(p, 32); if (!dmi_present(buf)) { dmi_available = 1; goto out; } } else { /* * no iounmap() for that ioremap(); it would be a no-op, but * it's so early in setup that sucker gets confused into doing * what it shouldn't if we actually call it. */ p = dmi_ioremap(0xF0000, 0x10000); if (p == NULL) goto error; memset(buf, 0, 16); for (q = p; q < p + 0x10000; q += 16) { memcpy_fromio(buf + 16, q, 16); if (!dmi_present(buf)) { dmi_available = 1; dmi_iounmap(p, 0x10000); goto out; } memcpy(buf, buf + 16, 16); } dmi_iounmap(p, 0x10000); } error: printk(KERN_INFO "DMI not present or invalid.\n"); out: dmi_initialized = 1; } /** * dmi_set_dump_stack_arch_desc - set arch description for dump_stack() * * Invoke dump_stack_set_arch_desc() with DMI system information so that * DMI identifiers are printed out on task dumps. Arch boot code should * call this function after dmi_scan_machine() if it wants to print out DMI * identifiers on task dumps. 
*/ void __init dmi_set_dump_stack_arch_desc(void) { dump_stack_set_arch_desc("%s", dmi_ids_string); } /** * dmi_matches - check if dmi_system_id structure matches system DMI data * @dmi: pointer to the dmi_system_id structure to check */ static bool dmi_matches(const struct dmi_system_id *dmi) { int i; WARN(!dmi_initialized, KERN_ERR "dmi check: not initialized yet.\n"); for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) { int s = dmi->matches[i].slot; if (s == DMI_NONE) break; if (dmi_ident[s]) { if (!dmi->matches[i].exact_match && strstr(dmi_ident[s], dmi->matches[i].substr)) continue; else if (dmi->matches[i].exact_match && !strcmp(dmi_ident[s], dmi->matches[i].substr)) continue; } /* No match */ return false; } return true; } /** * dmi_is_end_of_table - check for end-of-table marker * @dmi: pointer to the dmi_system_id structure to check */ static bool dmi_is_end_of_table(const struct dmi_system_id *dmi) { return dmi->matches[0].slot == DMI_NONE; } /** * dmi_check_system - check system DMI data * @list: array of dmi_system_id structures to match against * All non-null elements of the list must match * their slot's (field index's) data (i.e., each * list string must be a substring of the specified * DMI slot's string data) to be considered a * successful match. * * Walk the blacklist table running matching functions until someone * returns non zero or we hit the end. Callback function is called for * each successful match. Returns the number of matches. 
*/ int dmi_check_system(const struct dmi_system_id *list) { int count = 0; const struct dmi_system_id *d; for (d = list; !dmi_is_end_of_table(d); d++) if (dmi_matches(d)) { count++; if (d->callback && d->callback(d)) break; } return count; } EXPORT_SYMBOL(dmi_check_system); /** * dmi_first_match - find dmi_system_id structure matching system DMI data * @list: array of dmi_system_id structures to match against * All non-null elements of the list must match * their slot's (field index's) data (i.e., each * list string must be a substring of the specified * DMI slot's string data) to be considered a * successful match. * * Walk the blacklist table until the first match is found. Return the * pointer to the matching entry or NULL if there's no match. */ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list) { const struct dmi_system_id *d; for (d = list; !dmi_is_end_of_table(d); d++) if (dmi_matches(d)) return d; return NULL; } EXPORT_SYMBOL(dmi_first_match); /** * dmi_get_system_info - return DMI data value * @field: data index (see enum dmi_field) * * Returns one DMI data value, can be used to perform * complex DMI data checks. 
*/ const char *dmi_get_system_info(int field) { return dmi_ident[field]; } EXPORT_SYMBOL(dmi_get_system_info); /** * dmi_name_in_serial - Check if string is in the DMI product serial information * @str: string to check for */ int dmi_name_in_serial(const char *str) { int f = DMI_PRODUCT_SERIAL; if (dmi_ident[f] && strstr(dmi_ident[f], str)) return 1; return 0; } /** * dmi_name_in_vendors - Check if string is in the DMI system or board vendor name * @str: Case sensitive Name */ int dmi_name_in_vendors(const char *str) { static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE }; int i; for (i = 0; fields[i] != DMI_NONE; i++) { int f = fields[i]; if (dmi_ident[f] && strstr(dmi_ident[f], str)) return 1; } return 0; } EXPORT_SYMBOL(dmi_name_in_vendors); /** * dmi_find_device - find onboard device by type/name * @type: device type or %DMI_DEV_TYPE_ANY to match all device types * @name: device name string or %NULL to match all * @from: previous device found in search, or %NULL for new search. * * Iterates through the list of known onboard devices. If a device is * found with a matching @vendor and @device, a pointer to its device * structure is returned. Otherwise, %NULL is returned. * A new search is initiated by passing %NULL as the @from argument. * If @from is not %NULL, searches continue from next device. */ const struct dmi_device * dmi_find_device(int type, const char *name, const struct dmi_device *from) { const struct list_head *head = from ? 
&from->list : &dmi_devices; struct list_head *d; for(d = head->next; d != &dmi_devices; d = d->next) { const struct dmi_device *dev = list_entry(d, struct dmi_device, list); if (((type == DMI_DEV_TYPE_ANY) || (dev->type == type)) && ((name == NULL) || (strcmp(dev->name, name) == 0))) return dev; } return NULL; } EXPORT_SYMBOL(dmi_find_device); /** * dmi_get_date - parse a DMI date * @field: data index (see enum dmi_field) * @yearp: optional out parameter for the year * @monthp: optional out parameter for the month * @dayp: optional out parameter for the day * * The date field is assumed to be in the form resembling * [mm[/dd]]/yy[yy] and the result is stored in the out * parameters any or all of which can be omitted. * * If the field doesn't exist, all out parameters are set to zero * and false is returned. Otherwise, true is returned with any * invalid part of date set to zero. * * On return, year, month and day are guaranteed to be in the * range of [0,9999], [0,12] and [0,31] respectively. */ bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) { int year = 0, month = 0, day = 0; bool exists; const char *s, *y; char *e; s = dmi_get_system_info(field); exists = s; if (!exists) goto out; /* * Determine year first. We assume the date string resembles * mm/dd/yy[yy] but the original code extracted only the year * from the end. Keep the behavior in the spirit of no * surprises. 
*/ y = strrchr(s, '/'); if (!y) goto out; y++; year = simple_strtoul(y, &e, 10); if (y != e && year < 100) { /* 2-digit year */ year += 1900; if (year < 1996) /* no dates < spec 1.0 */ year += 100; } if (year > 9999) /* year should fit in %04d */ year = 0; /* parse the mm and dd */ month = simple_strtoul(s, &e, 10); if (s == e || *e != '/' || !month || month > 12) { month = 0; goto out; } s = e + 1; day = simple_strtoul(s, &e, 10); if (s == y || s == e || *e != '/' || day > 31) day = 0; out: if (yearp) *yearp = year; if (monthp) *monthp = month; if (dayp) *dayp = day; return exists; } EXPORT_SYMBOL(dmi_get_date); /** * dmi_walk - Walk the DMI table and get called back for every record * @decode: Callback function * @private_data: Private data to be passed to the callback function * * Returns -1 when the DMI table can't be reached, 0 on success. */ int dmi_walk(void (*decode)(const struct dmi_header *, void *), void *private_data) { u8 *buf; if (!dmi_available) return -1; buf = ioremap(dmi_base, dmi_len); if (buf == NULL) return -1; dmi_table(buf, dmi_len, dmi_num, decode, private_data); iounmap(buf); return 0; } EXPORT_SYMBOL_GPL(dmi_walk); /** * dmi_match - compare a string to the dmi field (if exists) * @f: DMI field identifier * @str: string to compare the DMI field to * * Returns true if the requested field equals to the str (including NULL). */ bool dmi_match(enum dmi_field f, const char *str) { const char *info = dmi_get_system_info(f); if (info == NULL || str == NULL) return info == str; return !strcmp(info, str); } EXPORT_SYMBOL_GPL(dmi_match);
gpl-2.0
Art-Chen/android_kernel_samsung_galaxys2plus-common
fs/notify/vfsmount_mark.c
2522
5188
/* * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <asm/atomic.h> #include <linux/fsnotify_backend.h> #include "fsnotify.h" void fsnotify_clear_marks_by_mount(struct vfsmount *mnt) { struct fsnotify_mark *mark, *lmark; struct hlist_node *pos, *n; LIST_HEAD(free_list); spin_lock(&mnt->mnt_root->d_lock); hlist_for_each_entry_safe(mark, pos, n, &mnt->mnt_fsnotify_marks, m.m_list) { list_add(&mark->m.free_m_list, &free_list); hlist_del_init_rcu(&mark->m.m_list); fsnotify_get_mark(mark); } spin_unlock(&mnt->mnt_root->d_lock); list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) { fsnotify_destroy_mark(mark); fsnotify_put_mark(mark); } } void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group) { fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_VFSMOUNT); } /* * Recalculate the mask of events relevant to a given vfsmount locked. 
 */
/*
 * Recompute the aggregate event mask for @mnt from the masks of all marks
 * currently attached to it.  Caller must hold mnt->mnt_root->d_lock, which
 * protects the mnt_fsnotify_marks hlist.
 */
static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	struct hlist_node *pos;
	__u32 new_mask = 0;

	assert_spin_locked(&mnt->mnt_root->d_lock);

	/* OR together the event mask of every mark hanging off this mount */
	hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list)
		new_mask |= mark->mask;
	mnt->mnt_fsnotify_mask = new_mask;
}

/*
 * Recalculate the mnt->mnt_fsnotify_mask, or the mask of all FS_* event types
 * any notifier is interested in hearing for this mount point
 */
void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt)
{
	/* take d_lock and delegate to the _locked variant */
	spin_lock(&mnt->mnt_root->d_lock);
	fsnotify_recalc_vfsmount_mask_locked(mnt);
	spin_unlock(&mnt->mnt_root->d_lock);
}

/*
 * Detach @mark from its vfsmount and recompute the mount's event mask.
 * Caller must already hold both mark->lock and mark->group->mark_lock
 * (asserted below); the mount's d_lock is taken here.
 */
void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
{
	struct vfsmount *mnt = mark->m.mnt;

	assert_spin_locked(&mark->lock);
	assert_spin_locked(&mark->group->mark_lock);

	spin_lock(&mnt->mnt_root->d_lock);

	/* unhook from the mount's list; RCU readers may still see the mark */
	hlist_del_init_rcu(&mark->m.m_list);
	mark->m.mnt = NULL;

	fsnotify_recalc_vfsmount_mask_locked(mnt);

	spin_unlock(&mnt->mnt_root->d_lock);
}

/*
 * Find the mark on @mnt belonging to @group, taking a reference on it.
 * Returns NULL if the group has no mark on this mount.  Caller must hold
 * mnt->mnt_root->d_lock.
 */
static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_group *group,
								struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	struct hlist_node *pos;

	assert_spin_locked(&mnt->mnt_root->d_lock);

	hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list) {
		if (mark->group == group) {
			/* reference is handed to the caller */
			fsnotify_get_mark(mark);
			return mark;
		}
	}
	return NULL;
}

/*
 * given a group and vfsmount, find the mark associated with that combination.
 * if found take a reference to that mark and return it, else return NULL
 */
struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group,
						  struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;

	spin_lock(&mnt->mnt_root->d_lock);
	mark = fsnotify_find_vfsmount_mark_locked(group, mnt);
	spin_unlock(&mnt->mnt_root->d_lock);

	return mark;
}

/*
 * Attach an initialized mark to a given group and vfsmount.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which groups.
 */
/*
 * Insert @mark into @mnt's mark list, keeping the list sorted by descending
 * group priority (ties broken by group pointer value).  Returns -EEXIST if
 * the group already has a mark on this mount and @allow_dups is false.
 * Caller must hold mark->lock and group->mark_lock (asserted below); the
 * mount's d_lock is taken here to protect the list.
 */
int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
			       struct fsnotify_group *group, struct vfsmount *mnt,
			       int allow_dups)
{
	struct fsnotify_mark *lmark;
	struct hlist_node *node, *last = NULL;
	int ret = 0;

	mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;

	assert_spin_locked(&mark->lock);
	assert_spin_locked(&mark->group->mark_lock);

	spin_lock(&mnt->mnt_root->d_lock);

	mark->m.mnt = mnt;

	/* is mark the first mark? */
	if (hlist_empty(&mnt->mnt_fsnotify_marks)) {
		hlist_add_head_rcu(&mark->m.m_list, &mnt->mnt_fsnotify_marks);
		goto out;
	}

	/* should mark be in the middle of the current list? */
	hlist_for_each_entry(lmark, node, &mnt->mnt_fsnotify_marks, m.m_list) {
		last = node;

		/* duplicate mark from the same group is only OK with allow_dups */
		if ((lmark->group == group) && !allow_dups) {
			ret = -EEXIST;
			goto out;
		}

		/* keep scanning while existing entries outrank the new mark */
		if (mark->group->priority < lmark->group->priority)
			continue;

		if ((mark->group->priority == lmark->group->priority) &&
		    (mark->group < lmark->group))
			continue;

		/* found the first lower-ranked entry; insert in front of it */
		hlist_add_before_rcu(&mark->m.m_list, &lmark->m.m_list);
		goto out;
	}

	BUG_ON(last == NULL);
	/* mark should be the last entry.  last is the current last entry */
	hlist_add_after_rcu(last, &mark->m.m_list);
out:
	/* list changed, so the mount's aggregate event mask must be redone */
	fsnotify_recalc_vfsmount_mask_locked(mnt);
	spin_unlock(&mnt->mnt_root->d_lock);

	return ret;
}
gpl-2.0
kprkpr/kernel-e400
drivers/acpi/power.c
3290
16427
/* * acpi_power.c - ACPI Bus Power Management ($Revision: 39 $) * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* * ACPI power-managed devices may be controlled in two ways: * 1. via "Device Specific (D-State) Control" * 2. via "Power Resource Control". * This module is used to manage devices relying on Power Resource Control. * * An ACPI "power resource object" describes a software controllable power * plane, clock plane, or other resource used by a power managed device. * A device may rely on multiple power resources, and a power resource * may be shared by multiple devices. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include "sleep.h" #define PREFIX "ACPI: " #define _COMPONENT ACPI_POWER_COMPONENT ACPI_MODULE_NAME("power"); #define ACPI_POWER_CLASS "power_resource" #define ACPI_POWER_DEVICE_NAME "Power Resource" #define ACPI_POWER_FILE_INFO "info" #define ACPI_POWER_FILE_STATUS "state" #define ACPI_POWER_RESOURCE_STATE_OFF 0x00 #define ACPI_POWER_RESOURCE_STATE_ON 0x01 #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF static int acpi_power_add(struct acpi_device *device); static int acpi_power_remove(struct acpi_device *device, int type); static int acpi_power_resume(struct acpi_device *device); static const struct acpi_device_id power_device_ids[] = { {ACPI_POWER_HID, 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, power_device_ids); static struct acpi_driver acpi_power_driver = { .name = "power", .class = ACPI_POWER_CLASS, .ids = power_device_ids, .ops = { .add = acpi_power_add, .remove = acpi_power_remove, .resume = acpi_power_resume, }, }; struct acpi_power_resource { struct acpi_device * device; acpi_bus_id name; u32 system_level; u32 order; unsigned int ref_count; struct mutex resource_lock; }; static struct list_head acpi_power_resource_list; /* -------------------------------------------------------------------------- Power Resource Management -------------------------------------------------------------------------- */ static int acpi_power_get_context(acpi_handle handle, struct acpi_power_resource **resource) { int result = 0; struct acpi_device *device = NULL; if (!resource) return -ENODEV; result = acpi_bus_get_device(handle, &device); if (result) { printk(KERN_WARNING PREFIX "Getting context [%p]\n", handle); return result; } *resource = acpi_driver_data(device); if (!*resource) return -ENODEV; return 0; } static int acpi_power_get_state(acpi_handle handle, int *state) { acpi_status status 
= AE_OK; unsigned long long sta = 0; char node_name[5]; struct acpi_buffer buffer = { sizeof(node_name), node_name }; if (!handle || !state) return -EINVAL; status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); if (ACPI_FAILURE(status)) return -ENODEV; *state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON: ACPI_POWER_RESOURCE_STATE_OFF; acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n", node_name, *state ? "on" : "off")); return 0; } static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state) { int cur_state; int i = 0; if (!list || !state) return -EINVAL; /* The state of the list is 'on' IFF all resources are 'on'. */ for (i = 0; i < list->count; i++) { struct acpi_power_resource *resource; acpi_handle handle = list->handles[i]; int result; result = acpi_power_get_context(handle, &resource); if (result) return result; mutex_lock(&resource->resource_lock); result = acpi_power_get_state(handle, &cur_state); mutex_unlock(&resource->resource_lock); if (result) return result; if (cur_state != ACPI_POWER_RESOURCE_STATE_ON) break; } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n", cur_state ? 
"on" : "off")); *state = cur_state; return 0; } static int __acpi_power_on(struct acpi_power_resource *resource) { acpi_status status = AE_OK; status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL); if (ACPI_FAILURE(status)) return -ENODEV; /* Update the power resource's _device_ power state */ resource->device->power.state = ACPI_STATE_D0; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n", resource->name)); return 0; } static int acpi_power_on(acpi_handle handle) { int result = 0; struct acpi_power_resource *resource = NULL; result = acpi_power_get_context(handle, &resource); if (result) return result; mutex_lock(&resource->resource_lock); if (resource->ref_count++) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] already on", resource->name)); } else { result = __acpi_power_on(resource); if (result) resource->ref_count--; } mutex_unlock(&resource->resource_lock); return result; } static int acpi_power_off(acpi_handle handle) { int result = 0; acpi_status status = AE_OK; struct acpi_power_resource *resource = NULL; result = acpi_power_get_context(handle, &resource); if (result) return result; mutex_lock(&resource->resource_lock); if (!resource->ref_count) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] already off", resource->name)); goto unlock; } if (--resource->ref_count) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] still in use\n", resource->name)); goto unlock; } status = acpi_evaluate_object(resource->device->handle, "_OFF", NULL, NULL); if (ACPI_FAILURE(status)) { result = -ENODEV; } else { /* Update the power resource's _device_ power state */ resource->device->power.state = ACPI_STATE_D3; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned off\n", resource->name)); } unlock: mutex_unlock(&resource->resource_lock); return result; } static void __acpi_power_off_list(struct acpi_handle_list *list, int num_res) { int i; for (i = num_res - 1; i >= 0 ; i--) acpi_power_off(list->handles[i]); } 
static void acpi_power_off_list(struct acpi_handle_list *list) { __acpi_power_off_list(list, list->count); } static int acpi_power_on_list(struct acpi_handle_list *list) { int result = 0; int i; for (i = 0; i < list->count; i++) { result = acpi_power_on(list->handles[i]); if (result) { __acpi_power_off_list(list, i); break; } } return result; } /** * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in * ACPI 3.0) _PSW (Power State Wake) * @dev: Device to handle. * @enable: 0 - disable, 1 - enable the wake capabilities of the device. * @sleep_state: Target sleep state of the system. * @dev_state: Target power state of the device. * * Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power * State Wake) for the device, if present. On failure reset the device's * wakeup.flags.valid flag. * * RETURN VALUE: * 0 if either _DSW or _PSW has been successfully executed * 0 if neither _DSW nor _PSW has been found * -ENODEV if the execution of either _DSW or _PSW has failed */ int acpi_device_sleep_wake(struct acpi_device *dev, int enable, int sleep_state, int dev_state) { union acpi_object in_arg[3]; struct acpi_object_list arg_list = { 3, in_arg }; acpi_status status = AE_OK; /* * Try to execute _DSW first. * * Three agruments are needed for the _DSW object: * Argument 0: enable/disable the wake capabilities * Argument 1: target system state * Argument 2: target device state * When _DSW object is called to disable the wake capabilities, maybe * the first argument is filled. The values of the other two agruments * are meaningless. 
*/ in_arg[0].type = ACPI_TYPE_INTEGER; in_arg[0].integer.value = enable; in_arg[1].type = ACPI_TYPE_INTEGER; in_arg[1].integer.value = sleep_state; in_arg[2].type = ACPI_TYPE_INTEGER; in_arg[2].integer.value = dev_state; status = acpi_evaluate_object(dev->handle, "_DSW", &arg_list, NULL); if (ACPI_SUCCESS(status)) { return 0; } else if (status != AE_NOT_FOUND) { printk(KERN_ERR PREFIX "_DSW execution failed\n"); dev->wakeup.flags.valid = 0; return -ENODEV; } /* Execute _PSW */ arg_list.count = 1; in_arg[0].integer.value = enable; status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL); if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { printk(KERN_ERR PREFIX "_PSW execution failed\n"); dev->wakeup.flags.valid = 0; return -ENODEV; } return 0; } /* * Prepare a wakeup device, two steps (Ref ACPI 2.0:P229): * 1. Power on the power resources required for the wakeup device * 2. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power * State Wake) for the device, if present */ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state) { int i, err = 0; if (!dev || !dev->wakeup.flags.valid) return -EINVAL; mutex_lock(&acpi_device_lock); if (dev->wakeup.prepare_count++) goto out; /* Open power resource */ for (i = 0; i < dev->wakeup.resources.count; i++) { int ret = acpi_power_on(dev->wakeup.resources.handles[i]); if (ret) { printk(KERN_ERR PREFIX "Transition power state\n"); dev->wakeup.flags.valid = 0; err = -ENODEV; goto err_out; } } /* * Passing 3 as the third argument below means the device may be placed * in arbitrary power state afterwards. */ err = acpi_device_sleep_wake(dev, 1, sleep_state, 3); err_out: if (err) dev->wakeup.prepare_count = 0; out: mutex_unlock(&acpi_device_lock); return err; } /* * Shutdown a wakeup device, counterpart of above method * 1. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power * State Wake) for the device, if present * 2. 
Shutdown down the power resources */ int acpi_disable_wakeup_device_power(struct acpi_device *dev) { int i, err = 0; if (!dev || !dev->wakeup.flags.valid) return -EINVAL; mutex_lock(&acpi_device_lock); if (--dev->wakeup.prepare_count > 0) goto out; /* * Executing the code below even if prepare_count is already zero when * the function is called may be useful, for example for initialisation. */ if (dev->wakeup.prepare_count < 0) dev->wakeup.prepare_count = 0; err = acpi_device_sleep_wake(dev, 0, 0, 0); if (err) goto out; /* Close power resource */ for (i = 0; i < dev->wakeup.resources.count; i++) { int ret = acpi_power_off(dev->wakeup.resources.handles[i]); if (ret) { printk(KERN_ERR PREFIX "Transition power state\n"); dev->wakeup.flags.valid = 0; err = -ENODEV; goto out; } } out: mutex_unlock(&acpi_device_lock); return err; } /* -------------------------------------------------------------------------- Device Power Management -------------------------------------------------------------------------- */ int acpi_power_get_inferred_state(struct acpi_device *device, int *state) { int result = 0; struct acpi_handle_list *list = NULL; int list_state = 0; int i = 0; if (!device || !state) return -EINVAL; /* * We know a device's inferred power state when all the resources * required for a given D-state are 'on'. 
*/ for (i = ACPI_STATE_D0; i < ACPI_STATE_D3; i++) { list = &device->power.states[i].resources; if (list->count < 1) continue; result = acpi_power_get_list_state(list, &list_state); if (result) return result; if (list_state == ACPI_POWER_RESOURCE_STATE_ON) { *state = i; return 0; } } *state = ACPI_STATE_D3; return 0; } int acpi_power_on_resources(struct acpi_device *device, int state) { if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3) return -EINVAL; return acpi_power_on_list(&device->power.states[state].resources); } int acpi_power_transition(struct acpi_device *device, int state) { int result; if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) return -EINVAL; if (device->power.state == state) return 0; if ((device->power.state < ACPI_STATE_D0) || (device->power.state > ACPI_STATE_D3)) return -ENODEV; /* TBD: Resources must be ordered. */ /* * First we reference all power resources required in the target list * (e.g. so the device doesn't lose power while transitioning). Then, * we dereference all power resources used in the current list. */ result = acpi_power_on_list(&device->power.states[state].resources); if (!result) acpi_power_off_list( &device->power.states[device->power.state].resources); /* We shouldn't change the state unless the above operations succeed. */ device->power.state = result ? 
ACPI_STATE_UNKNOWN : state; return result; } /* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ static int acpi_power_add(struct acpi_device *device) { int result = 0, state; acpi_status status = AE_OK; struct acpi_power_resource *resource = NULL; union acpi_object acpi_object; struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object }; if (!device) return -EINVAL; resource = kzalloc(sizeof(struct acpi_power_resource), GFP_KERNEL); if (!resource) return -ENOMEM; resource->device = device; mutex_init(&resource->resource_lock); strcpy(resource->name, device->pnp.bus_id); strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_POWER_CLASS); device->driver_data = resource; /* Evalute the object to get the system level and resource order. */ status = acpi_evaluate_object(device->handle, NULL, NULL, &buffer); if (ACPI_FAILURE(status)) { result = -ENODEV; goto end; } resource->system_level = acpi_object.power_resource.system_level; resource->order = acpi_object.power_resource.resource_order; result = acpi_power_get_state(device->handle, &state); if (result) goto end; switch (state) { case ACPI_POWER_RESOURCE_STATE_ON: device->power.state = ACPI_STATE_D0; break; case ACPI_POWER_RESOURCE_STATE_OFF: device->power.state = ACPI_STATE_D3; break; default: device->power.state = ACPI_STATE_UNKNOWN; break; } printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device), acpi_device_bid(device), state ? 
"on" : "off"); end: if (result) kfree(resource); return result; } static int acpi_power_remove(struct acpi_device *device, int type) { struct acpi_power_resource *resource; if (!device) return -EINVAL; resource = acpi_driver_data(device); if (!resource) return -EINVAL; kfree(resource); return 0; } static int acpi_power_resume(struct acpi_device *device) { int result = 0, state; struct acpi_power_resource *resource; if (!device) return -EINVAL; resource = acpi_driver_data(device); if (!resource) return -EINVAL; mutex_lock(&resource->resource_lock); result = acpi_power_get_state(device->handle, &state); if (result) goto unlock; if (state == ACPI_POWER_RESOURCE_STATE_OFF && resource->ref_count) result = __acpi_power_on(resource); unlock: mutex_unlock(&resource->resource_lock); return result; } int __init acpi_power_init(void) { INIT_LIST_HEAD(&acpi_power_resource_list); return acpi_bus_register_driver(&acpi_power_driver); }
gpl-2.0
SOKP/kernel_mediatek_sprout
security/tomoyo/tomoyo.c
3290
14937
/* * security/tomoyo/tomoyo.c * * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include <linux/security.h> #include "common.h" /** * tomoyo_cred_alloc_blank - Target for security_cred_alloc_blank(). * * @new: Pointer to "struct cred". * @gfp: Memory allocation flags. * * Returns 0. */ static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp) { new->security = NULL; return 0; } /** * tomoyo_cred_prepare - Target for security_prepare_creds(). * * @new: Pointer to "struct cred". * @old: Pointer to "struct cred". * @gfp: Memory allocation flags. * * Returns 0. */ static int tomoyo_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp) { struct tomoyo_domain_info *domain = old->security; new->security = domain; if (domain) atomic_inc(&domain->users); return 0; } /** * tomoyo_cred_transfer - Target for security_transfer_creds(). * * @new: Pointer to "struct cred". * @old: Pointer to "struct cred". */ static void tomoyo_cred_transfer(struct cred *new, const struct cred *old) { tomoyo_cred_prepare(new, old, 0); } /** * tomoyo_cred_free - Target for security_cred_free(). * * @cred: Pointer to "struct cred". */ static void tomoyo_cred_free(struct cred *cred) { struct tomoyo_domain_info *domain = cred->security; if (domain) atomic_dec(&domain->users); } /** * tomoyo_bprm_set_creds - Target for security_bprm_set_creds(). * * @bprm: Pointer to "struct linux_binprm". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_bprm_set_creds(struct linux_binprm *bprm) { int rc; rc = cap_bprm_set_creds(bprm); if (rc) return rc; /* * Do only if this function is called for the first time of an execve * operation. */ if (bprm->cred_prepared) return 0; #ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER /* * Load policy if /sbin/tomoyo-init exists and /sbin/init is requested * for the first time. 
*/ if (!tomoyo_policy_loaded) tomoyo_load_policy(bprm->filename); #endif /* * Release reference to "struct tomoyo_domain_info" stored inside * "bprm->cred->security". New reference to "struct tomoyo_domain_info" * stored inside "bprm->cred->security" will be acquired later inside * tomoyo_find_next_domain(). */ atomic_dec(&((struct tomoyo_domain_info *) bprm->cred->security)->users); /* * Tell tomoyo_bprm_check_security() is called for the first time of an * execve operation. */ bprm->cred->security = NULL; return 0; } /** * tomoyo_bprm_check_security - Target for security_bprm_check(). * * @bprm: Pointer to "struct linux_binprm". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_bprm_check_security(struct linux_binprm *bprm) { struct tomoyo_domain_info *domain = bprm->cred->security; /* * Execute permission is checked against pathname passed to do_execve() * using current domain. */ if (!domain) { const int idx = tomoyo_read_lock(); const int err = tomoyo_find_next_domain(bprm); tomoyo_read_unlock(idx); return err; } /* * Read permission is checked against interpreters using next domain. */ return tomoyo_check_open_permission(domain, &bprm->file->f_path, O_RDONLY); } /** * tomoyo_inode_getattr - Target for security_inode_getattr(). * * @mnt: Pointer to "struct vfsmount". * @dentry: Pointer to "struct dentry". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) { struct path path = { mnt, dentry }; return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, &path, NULL); } /** * tomoyo_path_truncate - Target for security_path_truncate(). * * @path: Pointer to "struct path". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_truncate(struct path *path) { return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path, NULL); } /** * tomoyo_path_unlink - Target for security_path_unlink(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". 
* * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry) { struct path path = { parent->mnt, dentry }; return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL); } /** * tomoyo_path_mkdir - Target for security_path_mkdir(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". * @mode: DAC permission mode. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry, umode_t mode) { struct path path = { parent->mnt, dentry }; return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path, mode & S_IALLUGO); } /** * tomoyo_path_rmdir - Target for security_path_rmdir(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry) { struct path path = { parent->mnt, dentry }; return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL); } /** * tomoyo_path_symlink - Target for security_path_symlink(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". * @old_name: Symlink's content. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry, const char *old_name) { struct path path = { parent->mnt, dentry }; return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name); } /** * tomoyo_path_mknod - Target for security_path_mknod(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". * @mode: DAC permission mode. * @dev: Device attributes. * * Returns 0 on success, negative value otherwise. 
*/ static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry, umode_t mode, unsigned int dev) { struct path path = { parent->mnt, dentry }; int type = TOMOYO_TYPE_CREATE; const unsigned int perm = mode & S_IALLUGO; switch (mode & S_IFMT) { case S_IFCHR: type = TOMOYO_TYPE_MKCHAR; break; case S_IFBLK: type = TOMOYO_TYPE_MKBLOCK; break; default: goto no_dev; } return tomoyo_mkdev_perm(type, &path, perm, dev); no_dev: switch (mode & S_IFMT) { case S_IFIFO: type = TOMOYO_TYPE_MKFIFO; break; case S_IFSOCK: type = TOMOYO_TYPE_MKSOCK; break; } return tomoyo_path_number_perm(type, &path, perm); } /** * tomoyo_path_link - Target for security_path_link(). * * @old_dentry: Pointer to "struct dentry". * @new_dir: Pointer to "struct path". * @new_dentry: Pointer to "struct dentry". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry) { struct path path1 = { new_dir->mnt, old_dentry }; struct path path2 = { new_dir->mnt, new_dentry }; return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2); } /** * tomoyo_path_rename - Target for security_path_rename(). * * @old_parent: Pointer to "struct path". * @old_dentry: Pointer to "struct dentry". * @new_parent: Pointer to "struct path". * @new_dentry: Pointer to "struct dentry". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_rename(struct path *old_parent, struct dentry *old_dentry, struct path *new_parent, struct dentry *new_dentry) { struct path path1 = { old_parent->mnt, old_dentry }; struct path path2 = { new_parent->mnt, new_dentry }; return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2); } /** * tomoyo_file_fcntl - Target for security_file_fcntl(). * * @file: Pointer to "struct file". * @cmd: Command for fcntl(). * @arg: Argument for @cmd. * * Returns 0 on success, negative value otherwise. 
*/ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { if (!(cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND))) return 0; return tomoyo_check_open_permission(tomoyo_domain(), &file->f_path, O_WRONLY | (arg & O_APPEND)); } /** * tomoyo_file_open - Target for security_file_open(). * * @f: Pointer to "struct file". * @cred: Pointer to "struct cred". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_file_open(struct file *f, const struct cred *cred) { int flags = f->f_flags; /* Don't check read permission here if called from do_execve(). */ if (current->in_execve) return 0; return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags); } /** * tomoyo_file_ioctl - Target for security_file_ioctl(). * * @file: Pointer to "struct file". * @cmd: Command for ioctl(). * @arg: Argument for @cmd. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return tomoyo_path_number_perm(TOMOYO_TYPE_IOCTL, &file->f_path, cmd); } /** * tomoyo_path_chmod - Target for security_path_chmod(). * * @path: Pointer to "struct path". * @mode: DAC permission mode. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_chmod(struct path *path, umode_t mode) { return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, path, mode & S_IALLUGO); } /** * tomoyo_path_chown - Target for security_path_chown(). * * @path: Pointer to "struct path". * @uid: Owner ID. * @gid: Group ID. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_chown(struct path *path, kuid_t uid, kgid_t gid) { int error = 0; if (uid_valid(uid)) error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path, from_kuid(&init_user_ns, uid)); if (!error && gid_valid(gid)) error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path, from_kgid(&init_user_ns, gid)); return error; } /** * tomoyo_path_chroot - Target for security_path_chroot(). 
* * @path: Pointer to "struct path". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_chroot(struct path *path) { return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path, NULL); } /** * tomoyo_sb_mount - Target for security_sb_mount(). * * @dev_name: Name of device file. Maybe NULL. * @path: Pointer to "struct path". * @type: Name of filesystem type. Maybe NULL. * @flags: Mount options. * @data: Optional data. Maybe NULL. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_sb_mount(const char *dev_name, struct path *path, const char *type, unsigned long flags, void *data) { return tomoyo_mount_permission(dev_name, path, type, flags, data); } /** * tomoyo_sb_umount - Target for security_sb_umount(). * * @mnt: Pointer to "struct vfsmount". * @flags: Unmount options. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_sb_umount(struct vfsmount *mnt, int flags) { struct path path = { mnt, mnt->mnt_root }; return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL); } /** * tomoyo_sb_pivotroot - Target for security_sb_pivotroot(). * * @old_path: Pointer to "struct path". * @new_path: Pointer to "struct path". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path) { return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path); } /** * tomoyo_socket_listen - Check permission for listen(). * * @sock: Pointer to "struct socket". * @backlog: Backlog parameter. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_socket_listen(struct socket *sock, int backlog) { return tomoyo_socket_listen_permission(sock); } /** * tomoyo_socket_connect - Check permission for connect(). * * @sock: Pointer to "struct socket". * @addr: Pointer to "struct sockaddr". * @addr_len: Size of @addr. * * Returns 0 on success, negative value otherwise. 
*/ static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr, int addr_len) { return tomoyo_socket_connect_permission(sock, addr, addr_len); } /** * tomoyo_socket_bind - Check permission for bind(). * * @sock: Pointer to "struct socket". * @addr: Pointer to "struct sockaddr". * @addr_len: Size of @addr. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { return tomoyo_socket_bind_permission(sock, addr, addr_len); } /** * tomoyo_socket_sendmsg - Check permission for sendmsg(). * * @sock: Pointer to "struct socket". * @msg: Pointer to "struct msghdr". * @size: Size of message. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size) { return tomoyo_socket_sendmsg_permission(sock, msg, size); } /* * tomoyo_security_ops is a "struct security_operations" which is used for * registering TOMOYO. */ static struct security_operations tomoyo_security_ops = { .name = "tomoyo", .cred_alloc_blank = tomoyo_cred_alloc_blank, .cred_prepare = tomoyo_cred_prepare, .cred_transfer = tomoyo_cred_transfer, .cred_free = tomoyo_cred_free, .bprm_set_creds = tomoyo_bprm_set_creds, .bprm_check_security = tomoyo_bprm_check_security, .file_fcntl = tomoyo_file_fcntl, .file_open = tomoyo_file_open, .path_truncate = tomoyo_path_truncate, .path_unlink = tomoyo_path_unlink, .path_mkdir = tomoyo_path_mkdir, .path_rmdir = tomoyo_path_rmdir, .path_symlink = tomoyo_path_symlink, .path_mknod = tomoyo_path_mknod, .path_link = tomoyo_path_link, .path_rename = tomoyo_path_rename, .inode_getattr = tomoyo_inode_getattr, .file_ioctl = tomoyo_file_ioctl, .path_chmod = tomoyo_path_chmod, .path_chown = tomoyo_path_chown, .path_chroot = tomoyo_path_chroot, .sb_mount = tomoyo_sb_mount, .sb_umount = tomoyo_sb_umount, .sb_pivotroot = tomoyo_sb_pivotroot, .socket_bind = tomoyo_socket_bind, .socket_connect = 
tomoyo_socket_connect, .socket_listen = tomoyo_socket_listen, .socket_sendmsg = tomoyo_socket_sendmsg, }; /* Lock for GC. */ DEFINE_SRCU(tomoyo_ss); /** * tomoyo_init - Register TOMOYO Linux as a LSM module. * * Returns 0. */ static int __init tomoyo_init(void) { struct cred *cred = (struct cred *) current_cred(); if (!security_module_enable(&tomoyo_security_ops)) return 0; /* register ourselves with the security framework */ if (register_security(&tomoyo_security_ops)) panic("Failure registering TOMOYO Linux"); printk(KERN_INFO "TOMOYO Linux initialized\n"); cred->security = &tomoyo_kernel_domain; tomoyo_mm_init(); return 0; } security_initcall(tomoyo_init);
gpl-2.0
Kra1o5/android_kernel_bq_curie2qc
drivers/hwmon/via686a.c
3290
31485
/* via686a.c - Part of lm_sensors, Linux kernel modules for hardware monitoring Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki <kmalkki@cc.hut.fi>, Mark Studebaker <mdsxyz123@yahoo.com>, and Bob Dougherty <bobd@stanford.edu> (Some conversion-factor data were contributed by Jonathan Teh Soon Yew <j.teh@iname.com> and Alex van Kaam <darkside@chello.nl>.) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Supports the Via VT82C686A, VT82C686B south bridges. Reports all as a 686A. Warning - only supports a single device. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/sysfs.h> #include <linux/acpi.h> #include <linux/io.h> /* If force_addr is set to anything different from 0, we forcibly enable the device at the given address. */ static unsigned short force_addr; module_param(force_addr, ushort, 0); MODULE_PARM_DESC(force_addr, "Initialize the base address of the sensors"); static struct platform_device *pdev; /* The Via 686a southbridge has a LM78-like chip integrated on the same IC. 
This driver is a customized copy of lm78.c */ /* Many VIA686A constants specified below */ /* Length of ISA address segment */ #define VIA686A_EXTENT 0x80 #define VIA686A_BASE_REG 0x70 #define VIA686A_ENABLE_REG 0x74 /* The VIA686A registers */ /* ins numbered 0-4 */ #define VIA686A_REG_IN_MAX(nr) (0x2b + ((nr) * 2)) #define VIA686A_REG_IN_MIN(nr) (0x2c + ((nr) * 2)) #define VIA686A_REG_IN(nr) (0x22 + (nr)) /* fans numbered 1-2 */ #define VIA686A_REG_FAN_MIN(nr) (0x3a + (nr)) #define VIA686A_REG_FAN(nr) (0x28 + (nr)) /* temps numbered 1-3 */ static const u8 VIA686A_REG_TEMP[] = { 0x20, 0x21, 0x1f }; static const u8 VIA686A_REG_TEMP_OVER[] = { 0x39, 0x3d, 0x1d }; static const u8 VIA686A_REG_TEMP_HYST[] = { 0x3a, 0x3e, 0x1e }; /* bits 7-6 */ #define VIA686A_REG_TEMP_LOW1 0x4b /* 2 = bits 5-4, 3 = bits 7-6 */ #define VIA686A_REG_TEMP_LOW23 0x49 #define VIA686A_REG_ALARM1 0x41 #define VIA686A_REG_ALARM2 0x42 #define VIA686A_REG_FANDIV 0x47 #define VIA686A_REG_CONFIG 0x40 /* The following register sets temp interrupt mode (bits 1-0 for temp1, 3-2 for temp2, 5-4 for temp3). Modes are: 00 interrupt stays as long as value is out-of-range 01 interrupt is cleared once register is read (default) 10 comparator mode- like 00, but ignores hysteresis 11 same as 00 */ #define VIA686A_REG_TEMP_MODE 0x4b /* We'll just assume that you want to set all 3 simultaneously: */ #define VIA686A_TEMP_MODE_MASK 0x3F #define VIA686A_TEMP_MODE_CONTINUOUS 0x00 /* Conversions. Limit checking is only done on the TO_REG variants. 
********* VOLTAGE CONVERSIONS (Bob Dougherty) ******** From HWMon.cpp (Copyright 1998-2000 Jonathan Teh Soon Yew): voltagefactor[0]=1.25/2628; (2628/1.25=2102.4) // Vccp voltagefactor[1]=1.25/2628; (2628/1.25=2102.4) // +2.5V voltagefactor[2]=1.67/2628; (2628/1.67=1573.7) // +3.3V voltagefactor[3]=2.6/2628; (2628/2.60=1010.8) // +5V voltagefactor[4]=6.3/2628; (2628/6.30=417.14) // +12V in[i]=(data[i+2]*25.0+133)*voltagefactor[i]; That is: volts = (25*regVal+133)*factor regVal = (volts/factor-133)/25 (These conversions were contributed by Jonathan Teh Soon Yew <j.teh@iname.com>) */ static inline u8 IN_TO_REG(long val, int inNum) { /* To avoid floating point, we multiply constants by 10 (100 for +12V). Rounding is done (120500 is actually 133000 - 12500). Remember that val is expressed in 0.001V/bit, which is why we divide by an additional 10000 (100000 for +12V): 1000 for val and 10 (100) for the constants. */ if (inNum <= 1) return (u8) SENSORS_LIMIT((val * 21024 - 1205000) / 250000, 0, 255); else if (inNum == 2) return (u8) SENSORS_LIMIT((val * 15737 - 1205000) / 250000, 0, 255); else if (inNum == 3) return (u8) SENSORS_LIMIT((val * 10108 - 1205000) / 250000, 0, 255); else return (u8) SENSORS_LIMIT((val * 41714 - 12050000) / 2500000, 0, 255); } static inline long IN_FROM_REG(u8 val, int inNum) { /* To avoid floating point, we multiply constants by 10 (100 for +12V). We also multiply them by 1000 because we want 0.001V/bit for the output value. Rounding is done. */ if (inNum <= 1) return (long) ((250000 * val + 1330000 + 21024 / 2) / 21024); else if (inNum == 2) return (long) ((250000 * val + 1330000 + 15737 / 2) / 15737); else if (inNum == 3) return (long) ((250000 * val + 1330000 + 10108 / 2) / 10108); else return (long) ((2500000 * val + 13300000 + 41714 / 2) / 41714); } /********* FAN RPM CONVERSIONS ********/ /* Higher register values = slower fans (the fan's strobe gates a counter). But this chip saturates back at 0, not at 255 like all the other chips. 
So, 0 means 0 RPM */ static inline u8 FAN_TO_REG(long rpm, int div) { if (rpm == 0) return 0; rpm = SENSORS_LIMIT(rpm, 1, 1000000); return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 255); } #define FAN_FROM_REG(val,div) ((val)==0?0:(val)==255?0:1350000/((val)*(div))) /******** TEMP CONVERSIONS (Bob Dougherty) *********/ /* linear fits from HWMon.cpp (Copyright 1998-2000 Jonathan Teh Soon Yew) if(temp<169) return double(temp)*0.427-32.08; else if(temp>=169 && temp<=202) return double(temp)*0.582-58.16; else return double(temp)*0.924-127.33; A fifth-order polynomial fits the unofficial data (provided by Alex van Kaam <darkside@chello.nl>) a bit better. It also give more reasonable numbers on my machine (ie. they agree with what my BIOS tells me). Here's the fifth-order fit to the 8-bit data: temp = 1.625093e-10*val^5 - 1.001632e-07*val^4 + 2.457653e-05*val^3 - 2.967619e-03*val^2 + 2.175144e-01*val - 7.090067e+0. (2000-10-25- RFD: thanks to Uwe Andersen <uandersen@mayah.com> for finding my typos in this formula!) Alas, none of the elegant function-fit solutions will work because we aren't allowed to use floating point in the kernel and doing it with integers doesn't provide enough precision. So we'll do boring old look-up table stuff. The unofficial data (see below) have effectively 7-bit resolution (they are rounded to the nearest degree). I'm assuming that the transfer function of the device is monotonic and smooth, so a smooth function fit to the data will allow us to get better precision. I used the 5th-order poly fit described above and solved for VIA register values 0-255. I *10 before rounding, so we get tenth-degree precision. (I could have done all 1024 values for our 10-bit readings, but the function is very linear in the useful range (0-80 deg C), so we'll just use linear interpolation for 10-bit readings.) 
So, tempLUT is the temp at via register values 0-255: */ static const s16 tempLUT[] = { -709, -688, -667, -646, -627, -607, -589, -570, -553, -536, -519, -503, -487, -471, -456, -442, -428, -414, -400, -387, -375, -362, -350, -339, -327, -316, -305, -295, -285, -275, -265, -255, -246, -237, -229, -220, -212, -204, -196, -188, -180, -173, -166, -159, -152, -145, -139, -132, -126, -120, -114, -108, -102, -96, -91, -85, -80, -74, -69, -64, -59, -54, -49, -44, -39, -34, -29, -25, -20, -15, -11, -6, -2, 3, 7, 12, 16, 20, 25, 29, 33, 37, 42, 46, 50, 54, 59, 63, 67, 71, 75, 79, 84, 88, 92, 96, 100, 104, 109, 113, 117, 121, 125, 130, 134, 138, 142, 146, 151, 155, 159, 163, 168, 172, 176, 181, 185, 189, 193, 198, 202, 206, 211, 215, 219, 224, 228, 232, 237, 241, 245, 250, 254, 259, 263, 267, 272, 276, 281, 285, 290, 294, 299, 303, 307, 312, 316, 321, 325, 330, 334, 339, 344, 348, 353, 357, 362, 366, 371, 376, 380, 385, 390, 395, 399, 404, 409, 414, 419, 423, 428, 433, 438, 443, 449, 454, 459, 464, 469, 475, 480, 486, 491, 497, 502, 508, 514, 520, 526, 532, 538, 544, 551, 557, 564, 571, 578, 584, 592, 599, 606, 614, 621, 629, 637, 645, 654, 662, 671, 680, 689, 698, 708, 718, 728, 738, 749, 759, 770, 782, 793, 805, 818, 830, 843, 856, 870, 883, 898, 912, 927, 943, 958, 975, 991, 1008, 1026, 1044, 1062, 1081, 1101, 1121, 1141, 1162, 1184, 1206, 1229, 1252, 1276, 1301, 1326, 1352, 1378, 1406, 1434, 1462 }; /* the original LUT values from Alex van Kaam <darkside@chello.nl> (for via register values 12-240): {-50,-49,-47,-45,-43,-41,-39,-38,-37,-35,-34,-33,-32,-31, -30,-29,-28,-27,-26,-25,-24,-24,-23,-22,-21,-20,-20,-19,-18,-17,-17,-16,-15, -15,-14,-14,-13,-12,-12,-11,-11,-10,-9,-9,-8,-8,-7,-7,-6,-6,-5,-5,-4,-4,-3, -3,-2,-2,-1,-1,0,0,1,1,1,3,3,3,4,4,4,5,5,5,6,6,7,7,8,8,9,9,9,10,10,11,11,12, 12,12,13,13,13,14,14,15,15,16,16,16,17,17,18,18,19,19,20,20,21,21,21,22,22, 22,23,23,24,24,25,25,26,26,26,27,27,27,28,28,29,29,30,30,30,31,31,32,32,33, 
33,34,34,35,35,35,36,36,37,37,38,38,39,39,40,40,41,41,42,42,43,43,44,44,45, 45,46,46,47,48,48,49,49,50,51,51,52,52,53,53,54,55,55,56,57,57,58,59,59,60, 61,62,62,63,64,65,66,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,83,84, 85,86,88,89,91,92,94,96,97,99,101,103,105,107,109,110}; Here's the reverse LUT. I got it by doing a 6-th order poly fit (needed an extra term for a good fit to these inverse data!) and then solving for each temp value from -50 to 110 (the useable range for this chip). Here's the fit: viaRegVal = -1.160370e-10*val^6 +3.193693e-08*val^5 - 1.464447e-06*val^4 - 2.525453e-04*val^3 + 1.424593e-02*val^2 + 2.148941e+00*val +7.275808e+01) Note that n=161: */ static const u8 viaLUT[] = { 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18, 19, 20, 20, 21, 22, 23, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 35, 36, 37, 39, 40, 41, 43, 45, 46, 48, 49, 51, 53, 55, 57, 59, 60, 62, 64, 66, 69, 71, 73, 75, 77, 79, 82, 84, 86, 88, 91, 93, 95, 98, 100, 103, 105, 107, 110, 112, 115, 117, 119, 122, 124, 126, 129, 131, 134, 136, 138, 140, 143, 145, 147, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 183, 185, 187, 188, 190, 192, 193, 195, 196, 198, 199, 200, 202, 203, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 222, 223, 224, 225, 226, 226, 227, 228, 228, 229, 230, 230, 231, 232, 232, 233, 233, 234, 235, 235, 236, 236, 237, 237, 238, 238, 239, 239, 240 }; /* Converting temps to (8-bit) hyst and over registers No interpolation here. The +50 is because the temps start at -50 */ static inline u8 TEMP_TO_REG(long val) { return viaLUT[val <= -50000 ? 0 : val >= 110000 ? 160 : (val < 0 ? 
val - 500 : val + 500) / 1000 + 50]; } /* for 8-bit temperature hyst and over registers */ #define TEMP_FROM_REG(val) ((long)tempLUT[val] * 100) /* for 10-bit temperature readings */ static inline long TEMP_FROM_REG10(u16 val) { u16 eightBits = val >> 2; u16 twoBits = val & 3; /* no interpolation for these */ if (twoBits == 0 || eightBits == 255) return TEMP_FROM_REG(eightBits); /* do some linear interpolation */ return (tempLUT[eightBits] * (4 - twoBits) + tempLUT[eightBits + 1] * twoBits) * 25; } #define DIV_FROM_REG(val) (1 << (val)) #define DIV_TO_REG(val) ((val)==8?3:(val)==4?2:(val)==1?0:1) /* For each registered chip, we need to keep some data in memory. The structure is dynamically allocated. */ struct via686a_data { unsigned short addr; const char *name; struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 in[5]; /* Register value */ u8 in_max[5]; /* Register value */ u8 in_min[5]; /* Register value */ u8 fan[2]; /* Register value */ u8 fan_min[2]; /* Register value */ u16 temp[3]; /* Register value 10 bit */ u8 temp_over[3]; /* Register value */ u8 temp_hyst[3]; /* Register value */ u8 fan_div[2]; /* Register encoding, shifted right */ u16 alarms; /* Register encoding, combined */ }; static struct pci_dev *s_bridge; /* pointer to the (only) via686a */ static int via686a_probe(struct platform_device *pdev); static int __devexit via686a_remove(struct platform_device *pdev); static inline int via686a_read_value(struct via686a_data *data, u8 reg) { return inb_p(data->addr + reg); } static inline void via686a_write_value(struct via686a_data *data, u8 reg, u8 value) { outb_p(value, data->addr + reg); } static struct via686a_data *via686a_update_device(struct device *dev); static void via686a_init_device(struct via686a_data *data); /* following are the sysfs callback functions */ /* 7 voltage sensors */ static ssize_t show_in(struct device *dev, struct 
device_attribute *da, char *buf) { struct via686a_data *data = via686a_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; return sprintf(buf, "%ld\n", IN_FROM_REG(data->in[nr], nr)); } static ssize_t show_in_min(struct device *dev, struct device_attribute *da, char *buf) { struct via686a_data *data = via686a_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; return sprintf(buf, "%ld\n", IN_FROM_REG(data->in_min[nr], nr)); } static ssize_t show_in_max(struct device *dev, struct device_attribute *da, char *buf) { struct via686a_data *data = via686a_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; return sprintf(buf, "%ld\n", IN_FROM_REG(data->in_max[nr], nr)); } static ssize_t set_in_min(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { struct via686a_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; unsigned long val = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); data->in_min[nr] = IN_TO_REG(val, nr); via686a_write_value(data, VIA686A_REG_IN_MIN(nr), data->in_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_in_max(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { struct via686a_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; unsigned long val = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); data->in_max[nr] = IN_TO_REG(val, nr); via686a_write_value(data, VIA686A_REG_IN_MAX(nr), data->in_max[nr]); mutex_unlock(&data->update_lock); return count; } #define show_in_offset(offset) \ static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \ show_in, NULL, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \ show_in_min, 
set_in_min, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \ show_in_max, set_in_max, offset); show_in_offset(0); show_in_offset(1); show_in_offset(2); show_in_offset(3); show_in_offset(4); /* 3 temperatures */ static ssize_t show_temp(struct device *dev, struct device_attribute *da, char *buf) { struct via686a_data *data = via686a_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; return sprintf(buf, "%ld\n", TEMP_FROM_REG10(data->temp[nr])); } static ssize_t show_temp_over(struct device *dev, struct device_attribute *da, char *buf) { struct via686a_data *data = via686a_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp_over[nr])); } static ssize_t show_temp_hyst(struct device *dev, struct device_attribute *da, char *buf) { struct via686a_data *data = via686a_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp_hyst[nr])); } static ssize_t set_temp_over(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { struct via686a_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->temp_over[nr] = TEMP_TO_REG(val); via686a_write_value(data, VIA686A_REG_TEMP_OVER[nr], data->temp_over[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { struct via686a_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->temp_hyst[nr] = TEMP_TO_REG(val); via686a_write_value(data, 
VIA686A_REG_TEMP_HYST[nr], data->temp_hyst[nr]); mutex_unlock(&data->update_lock); return count; } #define show_temp_offset(offset) \ static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \ show_temp, NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \ show_temp_over, set_temp_over, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \ show_temp_hyst, set_temp_hyst, offset - 1); show_temp_offset(1); show_temp_offset(2); show_temp_offset(3); /* 2 Fans */ static ssize_t show_fan(struct device *dev, struct device_attribute *da, char *buf) { struct via686a_data *data = via686a_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr], DIV_FROM_REG(data->fan_div[nr])) ); } static ssize_t show_fan_min(struct device *dev, struct device_attribute *da, char *buf) { struct via686a_data *data = via686a_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])) ); } static ssize_t show_fan_div(struct device *dev, struct device_attribute *da, char *buf) { struct via686a_data *data = via686a_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]) ); } static ssize_t set_fan_min(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { struct via686a_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr])); via686a_write_value(data, VIA686A_REG_FAN_MIN(nr+1), data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t 
set_fan_div(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { struct via686a_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int nr = attr->index; int val = simple_strtol(buf, NULL, 10); int old; mutex_lock(&data->update_lock); old = via686a_read_value(data, VIA686A_REG_FANDIV); data->fan_div[nr] = DIV_TO_REG(val); old = (old & 0x0f) | (data->fan_div[1] << 6) | (data->fan_div[0] << 4); via686a_write_value(data, VIA686A_REG_FANDIV, old); mutex_unlock(&data->update_lock); return count; } #define show_fan_offset(offset) \ static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \ show_fan, NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \ show_fan_min, set_fan_min, offset - 1); \ static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \ show_fan_div, set_fan_div, offset - 1); show_fan_offset(1); show_fan_offset(2); /* Alarms */ static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf) { struct via686a_data *data = via686a_update_device(dev); return sprintf(buf, "%u\n", data->alarms); } static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct via686a_data *data = via686a_update_device(dev); return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1); } static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 11); static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, 
show_alarm, NULL, 15); static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7); static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { struct via686a_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static struct attribute *via686a_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, &sensor_dev_attr_temp2_max_hyst.dev_attr.attr, &sensor_dev_attr_temp3_max_hyst.dev_attr.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, &sensor_dev_attr_temp3_alarm.dev_attr.attr, &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, 
&sensor_dev_attr_fan1_div.dev_attr.attr, &sensor_dev_attr_fan2_div.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, &sensor_dev_attr_fan2_alarm.dev_attr.attr, &dev_attr_alarms.attr, &dev_attr_name.attr, NULL }; static const struct attribute_group via686a_group = { .attrs = via686a_attributes, }; static struct platform_driver via686a_driver = { .driver = { .owner = THIS_MODULE, .name = "via686a", }, .probe = via686a_probe, .remove = __devexit_p(via686a_remove), }; /* This is called when the module is loaded */ static int __devinit via686a_probe(struct platform_device *pdev) { struct via686a_data *data; struct resource *res; int err; /* Reserve the ISA region */ res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!request_region(res->start, VIA686A_EXTENT, via686a_driver.driver.name)) { dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n", (unsigned long)res->start, (unsigned long)res->end); return -ENODEV; } if (!(data = kzalloc(sizeof(struct via686a_data), GFP_KERNEL))) { err = -ENOMEM; goto exit_release; } platform_set_drvdata(pdev, data); data->addr = res->start; data->name = "via686a"; mutex_init(&data->update_lock); /* Initialize the VIA686A chip */ via686a_init_device(data); /* Register sysfs hooks */ if ((err = sysfs_create_group(&pdev->dev.kobj, &via686a_group))) goto exit_free; data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove_files; } return 0; exit_remove_files: sysfs_remove_group(&pdev->dev.kobj, &via686a_group); exit_free: kfree(data); exit_release: release_region(res->start, VIA686A_EXTENT); return err; } static int __devexit via686a_remove(struct platform_device *pdev) { struct via686a_data *data = platform_get_drvdata(pdev); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &via686a_group); release_region(data->addr, VIA686A_EXTENT); platform_set_drvdata(pdev, NULL); kfree(data); return 0; } static void 
via686a_update_fan_div(struct via686a_data *data) { int reg = via686a_read_value(data, VIA686A_REG_FANDIV); data->fan_div[0] = (reg >> 4) & 0x03; data->fan_div[1] = reg >> 6; } static void __devinit via686a_init_device(struct via686a_data *data) { u8 reg; /* Start monitoring */ reg = via686a_read_value(data, VIA686A_REG_CONFIG); via686a_write_value(data, VIA686A_REG_CONFIG, (reg | 0x01) & 0x7F); /* Configure temp interrupt mode for continuous-interrupt operation */ reg = via686a_read_value(data, VIA686A_REG_TEMP_MODE); via686a_write_value(data, VIA686A_REG_TEMP_MODE, (reg & ~VIA686A_TEMP_MODE_MASK) | VIA686A_TEMP_MODE_CONTINUOUS); /* Pre-read fan clock divisor values */ via686a_update_fan_div(data); } static struct via686a_data *via686a_update_device(struct device *dev) { struct via686a_data *data = dev_get_drvdata(dev); int i; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { for (i = 0; i <= 4; i++) { data->in[i] = via686a_read_value(data, VIA686A_REG_IN(i)); data->in_min[i] = via686a_read_value(data, VIA686A_REG_IN_MIN (i)); data->in_max[i] = via686a_read_value(data, VIA686A_REG_IN_MAX(i)); } for (i = 1; i <= 2; i++) { data->fan[i - 1] = via686a_read_value(data, VIA686A_REG_FAN(i)); data->fan_min[i - 1] = via686a_read_value(data, VIA686A_REG_FAN_MIN(i)); } for (i = 0; i <= 2; i++) { data->temp[i] = via686a_read_value(data, VIA686A_REG_TEMP[i]) << 2; data->temp_over[i] = via686a_read_value(data, VIA686A_REG_TEMP_OVER[i]); data->temp_hyst[i] = via686a_read_value(data, VIA686A_REG_TEMP_HYST[i]); } /* add in lower 2 bits temp1 uses bits 7-6 of VIA686A_REG_TEMP_LOW1 temp2 uses bits 5-4 of VIA686A_REG_TEMP_LOW23 temp3 uses bits 7-6 of VIA686A_REG_TEMP_LOW23 */ data->temp[0] |= (via686a_read_value(data, VIA686A_REG_TEMP_LOW1) & 0xc0) >> 6; data->temp[1] |= (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) & 0x30) >> 4; data->temp[2] |= (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) & 0xc0) >> 6; 
via686a_update_fan_div(data); data->alarms = via686a_read_value(data, VIA686A_REG_ALARM1) | (via686a_read_value(data, VIA686A_REG_ALARM2) << 8); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static const struct pci_device_id via686a_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) }, { 0, } }; MODULE_DEVICE_TABLE(pci, via686a_pci_ids); static int __devinit via686a_device_add(unsigned short address) { struct resource res = { .start = address, .end = address + VIA686A_EXTENT - 1, .name = "via686a", .flags = IORESOURCE_IO, }; int err; err = acpi_check_resource_conflict(&res); if (err) goto exit; pdev = platform_device_alloc("via686a", address); if (!pdev) { err = -ENOMEM; pr_err("Device allocation failed\n"); goto exit; } err = platform_device_add_resources(pdev, &res, 1); if (err) { pr_err("Device resource addition failed (%d)\n", err); goto exit_device_put; } err = platform_device_add(pdev); if (err) { pr_err("Device addition failed (%d)\n", err); goto exit_device_put; } return 0; exit_device_put: platform_device_put(pdev); exit: return err; } static int __devinit via686a_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { u16 address, val; if (force_addr) { address = force_addr & ~(VIA686A_EXTENT - 1); dev_warn(&dev->dev, "Forcing ISA address 0x%x\n", address); if (PCIBIOS_SUCCESSFUL != pci_write_config_word(dev, VIA686A_BASE_REG, address | 1)) return -ENODEV; } if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VIA686A_BASE_REG, &val)) return -ENODEV; address = val & ~(VIA686A_EXTENT - 1); if (address == 0) { dev_err(&dev->dev, "base address not set - upgrade BIOS " "or use force_addr=0xaddr\n"); return -ENODEV; } if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VIA686A_ENABLE_REG, &val)) return -ENODEV; if (!(val & 0x0001)) { if (!force_addr) { dev_warn(&dev->dev, "Sensors disabled, enable " "with force_addr=0x%x\n", address); return -ENODEV; } dev_warn(&dev->dev, 
"Enabling sensors\n"); if (PCIBIOS_SUCCESSFUL != pci_write_config_word(dev, VIA686A_ENABLE_REG, val | 0x0001)) return -ENODEV; } if (platform_driver_register(&via686a_driver)) goto exit; /* Sets global pdev as a side effect */ if (via686a_device_add(address)) goto exit_unregister; /* Always return failure here. This is to allow other drivers to bind * to this pci device. We don't really want to have control over the * pci device, we only wanted to read as few register values from it. */ s_bridge = pci_dev_get(dev); return -ENODEV; exit_unregister: platform_driver_unregister(&via686a_driver); exit: return -ENODEV; } static struct pci_driver via686a_pci_driver = { .name = "via686a", .id_table = via686a_pci_ids, .probe = via686a_pci_probe, }; static int __init sm_via686a_init(void) { return pci_register_driver(&via686a_pci_driver); } static void __exit sm_via686a_exit(void) { pci_unregister_driver(&via686a_pci_driver); if (s_bridge != NULL) { platform_device_unregister(pdev); platform_driver_unregister(&via686a_driver); pci_dev_put(s_bridge); s_bridge = NULL; } } MODULE_AUTHOR("Kyösti Mälkki <kmalkki@cc.hut.fi>, " "Mark Studebaker <mdsxyz123@yahoo.com> " "and Bob Dougherty <bobd@stanford.edu>"); MODULE_DESCRIPTION("VIA 686A Sensor device"); MODULE_LICENSE("GPL"); module_init(sm_via686a_init); module_exit(sm_via686a_exit);
gpl-2.0
thornbirdblue/MI3_kernel_code
arch/arm/mach-iop32x/iq31244.c
4826
7695
/*
 * arch/arm/mach-iop32x/iq31244.c
 *
 * Board support code for the Intel EP80219 and IQ31244 platforms.
 *
 * Author: Rory Bolt <rorybolt@pacbell.net>
 * Copyright (C) 2002 Rory Bolt
 * Copyright 2003 (c) MontaVista, Software, Inc.
 * Copyright (C) 2004 Intel Corp.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/string.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include <asm/mach/time.h>
#include <asm/mach-types.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <mach/time.h>

/*
 * Until March of 2007 iq31244 platforms and ep80219 platforms shared the
 * same machine id, and the processor type was used to select board type.
 * However this assumption breaks for an iq80219 board which is an iop219
 * processor on an iq31244 board.  The force_ep80219 flag has been added
 * for old boot loaders using the iq31244 machine id for an ep80219 platform.
 */
static int force_ep80219;

/* True when running on an 80219 CPU, identified by its CPU ID
 * (read_cpuid_id() with the revision bits masked off). */
static int is_80219(void)
{
	return !!((read_cpuid_id() & 0xffffffe0) == 0x69052e20);
}

/* True when EP80219 operation was selected, either via the machine id
 * passed by the boot loader or the "force_ep80219" command-line flag. */
static int is_ep80219(void)
{
	if (machine_is_ep80219() || force_ep80219)
		return 1;
	else
		return 0;
}


/*
 * EP80219/IQ31244 timer tick configuration.
 * The two boards use different crystals, hence different tick rates.
 */
static void __init iq31244_timer_init(void)
{
	if (is_ep80219()) {
		/* 33.333 MHz crystal.
		 */
		iop_init_time(200000000);
	} else {
		/* 33.000 MHz crystal.
		 */
		iop_init_time(198000000);
	}
}

static struct sys_timer iq31244_timer = {
	.init		= iq31244_timer_init,
};


/*
 * IQ31244 I/O.
 */
static struct map_desc iq31244_io_desc[] __initdata = {
	{	/* on-board devices */
		.virtual	= IQ31244_UART,
		.pfn		= __phys_to_pfn(IQ31244_UART),
		.length		= 0x00100000,
		.type		= MT_DEVICE,
	},
};

void __init iq31244_map_io(void)
{
	iop3xx_map_io();

	iotable_init(iq31244_io_desc, ARRAY_SIZE(iq31244_io_desc));
}


/*
 * EP80219/IQ31244 PCI.
 * Static slot -> XINT line routing; the two boards wire the
 * slots differently, hence two map_irq implementations.
 */
static int __init
ep80219_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	int irq;

	if (slot == 0) {
		/* CFlash */
		irq = IRQ_IOP32X_XINT1;
	} else if (slot == 1) {
		/* 82551 Pro 100 */
		irq = IRQ_IOP32X_XINT0;
	} else if (slot == 2) {
		/* PCI-X Slot */
		irq = IRQ_IOP32X_XINT3;
	} else if (slot == 3) {
		/* SATA */
		irq = IRQ_IOP32X_XINT2;
	} else {
		printk(KERN_ERR "ep80219_pci_map_irq() called for unknown "
			"device PCI:%d:%d:%d\n", dev->bus->number,
			PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
		irq = -1;
	}

	return irq;
}

static struct hw_pci ep80219_pci __initdata = {
	.swizzle	= pci_std_swizzle,
	.nr_controllers = 1,
	.setup		= iop3xx_pci_setup,
	.preinit	= iop3xx_pci_preinit,
	.scan		= iop3xx_pci_scan_bus,
	.map_irq	= ep80219_pci_map_irq,
};

static int __init
iq31244_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	int irq;

	if (slot == 0) {
		/* CFlash */
		irq = IRQ_IOP32X_XINT1;
	} else if (slot == 1) {
		/* SATA */
		irq = IRQ_IOP32X_XINT2;
	} else if (slot == 2) {
		/* PCI-X Slot */
		irq = IRQ_IOP32X_XINT3;
	} else if (slot == 3) {
		/* 82546 GigE */
		irq = IRQ_IOP32X_XINT0;
	} else {
		printk(KERN_ERR "iq31244_pci_map_irq called for unknown "
			"device PCI:%d:%d:%d\n", dev->bus->number,
			PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
		irq = -1;
	}

	return irq;
}

static struct hw_pci iq31244_pci __initdata = {
	.swizzle	= pci_std_swizzle,
	.nr_controllers = 1,
	.setup		= iop3xx_pci_setup,
	.preinit	= iop3xx_pci_preinit,
	.scan		= iop3xx_pci_scan_bus,
	.map_irq	= iq31244_pci_map_irq,
};

static int __init iq31244_pci_init(void)
{
	if (is_ep80219())
		pci_common_init(&ep80219_pci);
	else if (machine_is_iq31244()) {
		if (is_80219()) {
			printk("note: iq31244 board type has been selected\n");
			printk("note: to select ep80219 operation:\n");
			printk("\t1/ specify \"force_ep80219\" on the kernel"
				" command line\n");
			printk("\t2/ update boot loader to pass"
				" the ep80219 id: %d\n", MACH_TYPE_EP80219);
		}
		pci_common_init(&iq31244_pci);
	}

	return 0;
}
subsys_initcall(iq31244_pci_init);


/*
 * IQ31244 machine initialisation.
 */
static struct physmap_flash_data iq31244_flash_data = {
	.width		= 2,
};

static struct resource iq31244_flash_resource = {
	.start		= 0xf0000000,
	.end		= 0xf07fffff,
	.flags		= IORESOURCE_MEM,
};

static struct platform_device iq31244_flash_device = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= {
		.platform_data	= &iq31244_flash_data,
	},
	.num_resources	= 1,
	.resource	= &iq31244_flash_resource,
};

/* On-board 8250-compatible UART; terminated by the empty entry. */
static struct plat_serial8250_port iq31244_serial_port[] = {
	{
		.mapbase	= IQ31244_UART,
		.membase	= (char *)IQ31244_UART,
		.irq		= IRQ_IOP32X_XINT1,
		.flags		= UPF_SKIP_TEST,
		.iotype		= UPIO_MEM,
		.regshift	= 0,
		.uartclk	= 1843200,
	},
	{ },
};

static struct resource iq31244_uart_resource = {
	.start		= IQ31244_UART,
	.end		= IQ31244_UART + 7,
	.flags		= IORESOURCE_MEM,
};

static struct platform_device iq31244_serial_device = {
	.name		= "serial8250",
	.id		= PLAT8250_DEV_PLATFORM,
	.dev		= {
		.platform_data		= iq31244_serial_port,
	},
	.num_resources	= 1,
	.resource	= &iq31244_uart_resource,
};

/*
 * This function will send a SHUTDOWN_COMPLETE message to the PIC
 * controller over I2C.  We are not using the i2c subsystem since
 * we are going to power off and it may be removed
 */
void ep80219_power_off(void)
{
	/* Bit-bangs the IOP3xx I2C unit 1 registers directly (IDBR1 = data
	 * byte, ICR1 = control: start/stop/transfer-byte bits).  The mdelay()
	 * between writes gives the transfer time to complete - we cannot
	 * poll for completion this late in shutdown. */
	/*
	 * Send the Address byte w/ the start condition
	 */
	*IOP3XX_IDBR1 = 0x60;
	*IOP3XX_ICR1 = 0xE9;
	mdelay(1);

	/*
	 * Send the START_MSG byte w/ no start or stop condition
	 */
	*IOP3XX_IDBR1 = 0x0F;
	*IOP3XX_ICR1 = 0xE8;
	mdelay(1);

	/*
	 * Send the SHUTDOWN_COMPLETE Message ID byte w/ no start or
	 * stop condition
	 */
	*IOP3XX_IDBR1 = 0x03;
	*IOP3XX_ICR1 = 0xE8;
	mdelay(1);

	/*
	 * Send an ignored byte w/ stop condition
	 */
	*IOP3XX_IDBR1 = 0x00;
	*IOP3XX_ICR1 = 0xEA;

	/* The PIC cuts power in response; spin until it does. */
	while (1)
		;
}

static void __init iq31244_init_machine(void)
{
	platform_device_register(&iop3xx_i2c0_device);
	platform_device_register(&iop3xx_i2c1_device);
	platform_device_register(&iq31244_flash_device);
	platform_device_register(&iq31244_serial_device);
	platform_device_register(&iop3xx_dma_0_channel);
	platform_device_register(&iop3xx_dma_1_channel);

	if (is_ep80219())
		pm_power_off = ep80219_power_off;

	/* The AAU (application accelerator unit) is not present on 80219. */
	if (!is_80219())
		platform_device_register(&iop3xx_aau_channel);
}

static int __init force_ep80219_setup(char *str)
{
	force_ep80219 = 1;
	return 1;
}

__setup("force_ep80219", force_ep80219_setup);

MACHINE_START(IQ31244, "Intel IQ31244")
	/* Maintainer: Intel Corp. */
	.atag_offset	= 0x100,
	.map_io		= iq31244_map_io,
	.init_irq	= iop32x_init_irq,
	.timer		= &iq31244_timer,
	.init_machine	= iq31244_init_machine,
	.restart	= iop3xx_restart,
MACHINE_END

/* There should have been an ep80219 machine identifier from the beginning.
 * Boot roms older than March 2007 do not know the ep80219 machine id.  Pass
 * "force_ep80219" on the kernel command line, otherwise iq31244 operation
 * will be selected.
 */
MACHINE_START(EP80219, "Intel EP80219")
	/* Maintainer: Intel Corp. */
	.atag_offset	= 0x100,
	.map_io		= iq31244_map_io,
	.init_irq	= iop32x_init_irq,
	.timer		= &iq31244_timer,
	.init_machine	= iq31244_init_machine,
	.restart	= iop3xx_restart,
MACHINE_END
gpl-2.0
garwedgess/LuPuS_honami_stock
arch/arm/mach-s5p64x0/irq-pm.c
7898
2352
/* linux/arch/arm/mach-s5p64x0/irq-pm.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * S5P64X0 - Interrupt handling Power Management
 *
 * Based on arch/arm/mach-s3c64xx/irq-pm.c by Ben Dooks
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/syscore_ops.h>
#include <linux/serial_core.h>
#include <linux/io.h>

#include <plat/regs-serial.h>
#include <plat/pm.h>

#include <mach/regs-gpio.h>

/* External-interrupt group 0 registers saved/restored through the
 * generic s3c_pm_do_save()/s3c_pm_do_restore() helpers. */
static struct sleep_save irq_save[] = {
	SAVE_ITEM(S5P64X0_EINT0CON0),
	SAVE_ITEM(S5P64X0_EINT0FLTCON0),
	SAVE_ITEM(S5P64X0_EINT0FLTCON1),
	SAVE_ITEM(S5P64X0_EINT0MASK),
};

/* Shadow copies of the con/fltcon/mask registers for the four EINT
 * groups starting at EINT12 (each group's registers are 4 bytes apart,
 * hence the (i * 4) offsets below). */
static struct irq_grp_save {
	u32 con;
	u32 fltcon;
	u32 mask;
} eint_grp_save[4];

/* Saved per-port UART interrupt mask (UINTM) registers. */
static u32 irq_uart_mask[CONFIG_SERIAL_SAMSUNG_UARTS];

/* Capture all IRQ-related register state before entering sleep.
 * Returns 0 (syscore suspend callbacks report errors via non-zero). */
static int s5p64x0_irq_pm_suspend(void)
{
	struct irq_grp_save *grp = eint_grp_save;
	int i;

	S3C_PMDBG("%s: suspending IRQs\n", __func__);

	s3c_pm_do_save(irq_save, ARRAY_SIZE(irq_save));

	for (i = 0; i < CONFIG_SERIAL_SAMSUNG_UARTS; i++)
		irq_uart_mask[i] = __raw_readl(S3C_VA_UARTx(i) + S3C64XX_UINTM);

	for (i = 0; i < ARRAY_SIZE(eint_grp_save); i++, grp++) {
		grp->con = __raw_readl(S5P64X0_EINT12CON + (i * 4));
		grp->mask = __raw_readl(S5P64X0_EINT12MASK + (i * 4));
		grp->fltcon = __raw_readl(S5P64X0_EINT12FLTCON + (i * 4));
	}

	return 0;
}

/* Mirror of the suspend path: write back every register captured above. */
static void s5p64x0_irq_pm_resume(void)
{
	struct irq_grp_save *grp = eint_grp_save;
	int i;

	S3C_PMDBG("%s: resuming IRQs\n", __func__);

	s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save));

	for (i = 0; i < CONFIG_SERIAL_SAMSUNG_UARTS; i++)
		__raw_writel(irq_uart_mask[i], S3C_VA_UARTx(i) + S3C64XX_UINTM);

	for (i = 0; i < ARRAY_SIZE(eint_grp_save); i++, grp++) {
		__raw_writel(grp->con, S5P64X0_EINT12CON + (i * 4));
		__raw_writel(grp->mask, S5P64X0_EINT12MASK + (i * 4));
		__raw_writel(grp->fltcon, S5P64X0_EINT12FLTCON + (i * 4));
	}

	S3C_PMDBG("%s: IRQ configuration restored\n", __func__);
}

static struct syscore_ops s5p64x0_irq_syscore_ops = {
	.suspend = s5p64x0_irq_pm_suspend,
	.resume = s5p64x0_irq_pm_resume,
};

/* Hook the save/restore callbacks into the syscore suspend path. */
static int __init s5p64x0_syscore_init(void)
{
	register_syscore_ops(&s5p64x0_irq_syscore_ops);

	return 0;
}
core_initcall(s5p64x0_syscore_init);
gpl-2.0
thh/ubuntu-trusty
net/rds/iw_send.c
8410
29857
/* * Copyright (c) 2006 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/kernel.h> #include <linux/in.h> #include <linux/device.h> #include <linux/dmapool.h> #include <linux/ratelimit.h> #include "rds.h" #include "iw.h" static void rds_iw_send_rdma_complete(struct rds_message *rm, int wc_status) { int notify_status; switch (wc_status) { case IB_WC_WR_FLUSH_ERR: return; case IB_WC_SUCCESS: notify_status = RDS_RDMA_SUCCESS; break; case IB_WC_REM_ACCESS_ERR: notify_status = RDS_RDMA_REMOTE_ERROR; break; default: notify_status = RDS_RDMA_OTHER_ERROR; break; } rds_rdma_send_complete(rm, notify_status); } static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic, struct rm_rdma_op *op) { if (op->op_mapped) { ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, op->op_nents, op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); op->op_mapped = 0; } } static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic, struct rds_iw_send_work *send, int wc_status) { struct rds_message *rm = send->s_rm; rdsdebug("ic %p send %p rm %p\n", ic, send, rm); ib_dma_unmap_sg(ic->i_cm_id->device, rm->data.op_sg, rm->data.op_nents, DMA_TO_DEVICE); if (rm->rdma.op_active) { rds_iw_send_unmap_rdma(ic, &rm->rdma); /* If the user asked for a completion notification on this * message, we can implement three different semantics: * 1. Notify when we received the ACK on the RDS message * that was queued with the RDMA. This provides reliable * notification of RDMA status at the expense of a one-way * packet delay. * 2. Notify when the IB stack gives us the completion event for * the RDMA operation. * 3. Notify when the IB stack gives us the completion event for * the accompanying RDS messages. * Here, we implement approach #3. To implement approach #2, * call rds_rdma_send_complete from the cq_handler. To implement #1, * don't call rds_rdma_send_complete at all, and fall back to the notify * handling in the ACK processing code. 
* * Note: There's no need to explicitly sync any RDMA buffers using * ib_dma_sync_sg_for_cpu - the completion for the RDMA * operation itself unmapped the RDMA buffers, which takes care * of synching. */ rds_iw_send_rdma_complete(rm, wc_status); if (rm->rdma.op_write) rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes); else rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes); } /* If anyone waited for this message to get flushed out, wake * them up now */ rds_message_unmapped(rm); rds_message_put(rm); send->s_rm = NULL; } void rds_iw_send_init_ring(struct rds_iw_connection *ic) { struct rds_iw_send_work *send; u32 i; for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) { struct ib_sge *sge; send->s_rm = NULL; send->s_op = NULL; send->s_mapping = NULL; send->s_wr.next = NULL; send->s_wr.wr_id = i; send->s_wr.sg_list = send->s_sge; send->s_wr.num_sge = 1; send->s_wr.opcode = IB_WR_SEND; send->s_wr.send_flags = 0; send->s_wr.ex.imm_data = 0; sge = rds_iw_data_sge(ic, send->s_sge); sge->lkey = 0; sge = rds_iw_header_sge(ic, send->s_sge); sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header)); sge->length = sizeof(struct rds_header); sge->lkey = 0; send->s_mr = ib_alloc_fast_reg_mr(ic->i_pd, fastreg_message_size); if (IS_ERR(send->s_mr)) { printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed\n"); break; } send->s_page_list = ib_alloc_fast_reg_page_list( ic->i_cm_id->device, fastreg_message_size); if (IS_ERR(send->s_page_list)) { printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed\n"); break; } } } void rds_iw_send_clear_ring(struct rds_iw_connection *ic) { struct rds_iw_send_work *send; u32 i; for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) { BUG_ON(!send->s_mr); ib_dereg_mr(send->s_mr); BUG_ON(!send->s_page_list); ib_free_fast_reg_page_list(send->s_page_list); if (send->s_wr.opcode == 0xdead) continue; if (send->s_rm) rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR); if (send->s_op) 
rds_iw_send_unmap_rdma(ic, send->s_op); } } /* * The _oldest/_free ring operations here race cleanly with the alloc/unalloc * operations performed in the send path. As the sender allocs and potentially * unallocs the next free entry in the ring it doesn't alter which is * the next to be freed, which is what this is concerned with. */ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context) { struct rds_connection *conn = context; struct rds_iw_connection *ic = conn->c_transport_data; struct ib_wc wc; struct rds_iw_send_work *send; u32 completed; u32 oldest; u32 i; int ret; rdsdebug("cq %p conn %p\n", cq, conn); rds_iw_stats_inc(s_iw_tx_cq_call); ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); if (ret) rdsdebug("ib_req_notify_cq send failed: %d\n", ret); while (ib_poll_cq(cq, 1, &wc) > 0) { rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", (unsigned long long)wc.wr_id, wc.status, wc.byte_len, be32_to_cpu(wc.ex.imm_data)); rds_iw_stats_inc(s_iw_tx_cq_event); if (wc.status != IB_WC_SUCCESS) { printk(KERN_ERR "WC Error: status = %d opcode = %d\n", wc.status, wc.opcode); break; } if (wc.opcode == IB_WC_LOCAL_INV && wc.wr_id == RDS_IW_LOCAL_INV_WR_ID) { ic->i_fastreg_posted = 0; continue; } if (wc.opcode == IB_WC_FAST_REG_MR && wc.wr_id == RDS_IW_FAST_REG_WR_ID) { ic->i_fastreg_posted = 1; continue; } if (wc.wr_id == RDS_IW_ACK_WR_ID) { if (ic->i_ack_queued + HZ/2 < jiffies) rds_iw_stats_inc(s_iw_tx_stalled); rds_iw_ack_send_complete(ic); continue; } oldest = rds_iw_ring_oldest(&ic->i_send_ring); completed = rds_iw_ring_completed(&ic->i_send_ring, wc.wr_id, oldest); for (i = 0; i < completed; i++) { send = &ic->i_sends[oldest]; /* In the error case, wc.opcode sometimes contains garbage */ switch (send->s_wr.opcode) { case IB_WR_SEND: if (send->s_rm) rds_iw_send_unmap_rm(ic, send, wc.status); break; case IB_WR_FAST_REG_MR: case IB_WR_RDMA_WRITE: case IB_WR_RDMA_READ: case IB_WR_RDMA_READ_WITH_INV: /* Nothing to be done - the SG list will be unmapped * 
when the SEND completes. */ break; default: printk_ratelimited(KERN_NOTICE "RDS/IW: %s: unexpected opcode 0x%x in WR!\n", __func__, send->s_wr.opcode); break; } send->s_wr.opcode = 0xdead; send->s_wr.num_sge = 1; if (send->s_queued + HZ/2 < jiffies) rds_iw_stats_inc(s_iw_tx_stalled); /* If a RDMA operation produced an error, signal this right * away. If we don't, the subsequent SEND that goes with this * RDMA will be canceled with ERR_WFLUSH, and the application * never learn that the RDMA failed. */ if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) { struct rds_message *rm; rm = rds_send_get_message(conn, send->s_op); if (rm) rds_iw_send_rdma_complete(rm, wc.status); } oldest = (oldest + 1) % ic->i_send_ring.w_nr; } rds_iw_ring_free(&ic->i_send_ring, completed); if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) || test_bit(0, &conn->c_map_queued)) queue_delayed_work(rds_wq, &conn->c_send_w, 0); /* We expect errors as the qp is drained during shutdown */ if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) { rds_iw_conn_error(conn, "send completion on %pI4 " "had status %u, disconnecting and reconnecting\n", &conn->c_faddr, wc.status); } } } /* * This is the main function for allocating credits when sending * messages. * * Conceptually, we have two counters: * - send credits: this tells us how many WRs we're allowed * to submit without overruning the receiver's queue. For * each SEND WR we post, we decrement this by one. * * - posted credits: this tells us how many WRs we recently * posted to the receive queue. This value is transferred * to the peer as a "credit update" in a RDS header field. * Every time we transmit credits to the peer, we subtract * the amount of transferred credits from this counter. * * It is essential that we avoid situations where both sides have * exhausted their send credits, and are unable to send new credits * to the peer. 
We achieve this by requiring that we send at least * one credit update to the peer before exhausting our credits. * When new credits arrive, we subtract one credit that is withheld * until we've posted new buffers and are ready to transmit these * credits (see rds_iw_send_add_credits below). * * The RDS send code is essentially single-threaded; rds_send_xmit * grabs c_send_lock to ensure exclusive access to the send ring. * However, the ACK sending code is independent and can race with * message SENDs. * * In the send path, we need to update the counters for send credits * and the counter of posted buffers atomically - when we use the * last available credit, we cannot allow another thread to race us * and grab the posted credits counter. Hence, we have to use a * spinlock to protect the credit counter, or use atomics. * * Spinlocks shared between the send and the receive path are bad, * because they create unnecessary delays. An early implementation * using a spinlock showed a 5% degradation in throughput at some * loads. * * This implementation avoids spinlocks completely, putting both * counters into a single atomic, and updating that atomic using * atomic_add (in the receive path, when receiving fresh credits), * and using atomic_cmpxchg when updating the two counters. */ int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted, u32 *adv_credits, int need_posted, int max_posted) { unsigned int avail, posted, got = 0, advertise; long oldval, newval; *adv_credits = 0; if (!ic->i_flowctl) return wanted; try_again: advertise = 0; oldval = newval = atomic_read(&ic->i_credits); posted = IB_GET_POST_CREDITS(oldval); avail = IB_GET_SEND_CREDITS(oldval); rdsdebug("rds_iw_send_grab_credits(%u): credits=%u posted=%u\n", wanted, avail, posted); /* The last credit must be used to send a credit update. */ if (avail && !posted) avail--; if (avail < wanted) { struct rds_connection *conn = ic->i_cm_id->context; /* Oops, there aren't that many credits left! 
*/ set_bit(RDS_LL_SEND_FULL, &conn->c_flags); got = avail; } else { /* Sometimes you get what you want, lalala. */ got = wanted; } newval -= IB_SET_SEND_CREDITS(got); /* * If need_posted is non-zero, then the caller wants * the posted regardless of whether any send credits are * available. */ if (posted && (got || need_posted)) { advertise = min_t(unsigned int, posted, max_posted); newval -= IB_SET_POST_CREDITS(advertise); } /* Finally bill everything */ if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval) goto try_again; *adv_credits = advertise; return got; } void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits) { struct rds_iw_connection *ic = conn->c_transport_data; if (credits == 0) return; rdsdebug("rds_iw_send_add_credits(%u): current=%u%s\n", credits, IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)), test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : ""); atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits); if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags)) queue_delayed_work(rds_wq, &conn->c_send_w, 0); WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384); rds_iw_stats_inc(s_iw_rx_credit_updates); } void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted) { struct rds_iw_connection *ic = conn->c_transport_data; if (posted == 0) return; atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits); /* Decide whether to send an update to the peer now. * If we would send a credit update for every single buffer we * post, we would end up with an ACK storm (ACK arrives, * consumes buffer, we refill the ring, send ACK to remote * advertising the newly posted buffer... ad inf) * * Performance pretty much depends on how often we send * credit updates - too frequent updates mean lots of ACKs. * Too infrequent updates, and the peer will run out of * credits and has to throttle. * For the time being, 16 seems to be a good compromise. 
*/ if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16) set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); } static inline void rds_iw_xmit_populate_wr(struct rds_iw_connection *ic, struct rds_iw_send_work *send, unsigned int pos, unsigned long buffer, unsigned int length, int send_flags) { struct ib_sge *sge; WARN_ON(pos != send - ic->i_sends); send->s_wr.send_flags = send_flags; send->s_wr.opcode = IB_WR_SEND; send->s_wr.num_sge = 2; send->s_wr.next = NULL; send->s_queued = jiffies; send->s_op = NULL; if (length != 0) { sge = rds_iw_data_sge(ic, send->s_sge); sge->addr = buffer; sge->length = length; sge->lkey = rds_iw_local_dma_lkey(ic); sge = rds_iw_header_sge(ic, send->s_sge); } else { /* We're sending a packet with no payload. There is only * one SGE */ send->s_wr.num_sge = 1; sge = &send->s_sge[0]; } sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header)); sge->length = sizeof(struct rds_header); sge->lkey = rds_iw_local_dma_lkey(ic); } /* * This can be called multiple times for a given message. The first time * we see a message we map its scatterlist into the IB device so that * we can provide that mapped address to the IB scatter gather entries * in the IB work requests. We translate the scatterlist into a series * of work requests that fragment the message. These work requests complete * in order so we pass ownership of the message to the completion handler * once we send the final fragment. * * The RDS core uses the c_send_lock to only enter this function once * per connection. This makes sure that the tx ring alloc/unalloc pairs * don't get out of sync and confuse the ring. 
*/ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm, unsigned int hdr_off, unsigned int sg, unsigned int off) { struct rds_iw_connection *ic = conn->c_transport_data; struct ib_device *dev = ic->i_cm_id->device; struct rds_iw_send_work *send = NULL; struct rds_iw_send_work *first; struct rds_iw_send_work *prev; struct ib_send_wr *failed_wr; struct scatterlist *scat; u32 pos; u32 i; u32 work_alloc; u32 credit_alloc; u32 posted; u32 adv_credits = 0; int send_flags = 0; int sent; int ret; int flow_controlled = 0; BUG_ON(off % RDS_FRAG_SIZE); BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); /* Fastreg support */ if (rds_rdma_cookie_key(rm->m_rdma_cookie) && !ic->i_fastreg_posted) { ret = -EAGAIN; goto out; } /* FIXME we may overallocate here */ if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) i = 1; else i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE); work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos); if (work_alloc == 0) { set_bit(RDS_LL_SEND_FULL, &conn->c_flags); rds_iw_stats_inc(s_iw_tx_ring_full); ret = -ENOMEM; goto out; } credit_alloc = work_alloc; if (ic->i_flowctl) { credit_alloc = rds_iw_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT); adv_credits += posted; if (credit_alloc < work_alloc) { rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc); work_alloc = credit_alloc; flow_controlled++; } if (work_alloc == 0) { set_bit(RDS_LL_SEND_FULL, &conn->c_flags); rds_iw_stats_inc(s_iw_tx_throttle); ret = -ENOMEM; goto out; } } /* map the message the first time we see it */ if (!ic->i_rm) { /* printk(KERN_NOTICE "rds_iw_xmit prep msg dport=%u flags=0x%x len=%d\n", be16_to_cpu(rm->m_inc.i_hdr.h_dport), rm->m_inc.i_hdr.h_flags, be32_to_cpu(rm->m_inc.i_hdr.h_len)); */ if (rm->data.op_nents) { rm->data.op_count = ib_dma_map_sg(dev, rm->data.op_sg, rm->data.op_nents, DMA_TO_DEVICE); rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count); if (rm->data.op_count == 0) { 
rds_iw_stats_inc(s_iw_tx_sg_mapping_failure); rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc); ret = -ENOMEM; /* XXX ? */ goto out; } } else { rm->data.op_count = 0; } ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs; ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes; rds_message_addref(rm); ic->i_rm = rm; /* Finalize the header */ if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags)) rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED; if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED; /* If it has a RDMA op, tell the peer we did it. This is * used by the peer to release use-once RDMA MRs. */ if (rm->rdma.op_active) { struct rds_ext_header_rdma ext_hdr; ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey); rds_message_add_extension(&rm->m_inc.i_hdr, RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr)); } if (rm->m_rdma_cookie) { rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr, rds_rdma_cookie_key(rm->m_rdma_cookie), rds_rdma_cookie_offset(rm->m_rdma_cookie)); } /* Note - rds_iw_piggyb_ack clears the ACK_REQUIRED bit, so * we should not do this unless we have a chance of at least * sticking the header into the send ring. Which is why we * should call rds_iw_ring_alloc first. */ rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_iw_piggyb_ack(ic)); rds_message_make_checksum(&rm->m_inc.i_hdr); /* * Update adv_credits since we reset the ACK_REQUIRED bit. */ rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); adv_credits += posted; BUG_ON(adv_credits > 255); } send = &ic->i_sends[pos]; first = send; prev = NULL; scat = &rm->data.op_sg[sg]; sent = 0; i = 0; /* Sometimes you want to put a fence between an RDMA * READ and the following SEND. * We could either do this all the time * or when requested by the user. Right now, we let * the application choose. */ if (rm->rdma.op_active && rm->rdma.op_fence) send_flags = IB_SEND_FENCE; /* * We could be copying the header into the unused tail of the page. 
* That would need to be changed in the future when those pages might * be mapped userspace pages or page cache pages. So instead we always * use a second sge and our long-lived ring of mapped headers. We send * the header after the data so that the data payload can be aligned on * the receiver. */ /* handle a 0-len message */ if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) { rds_iw_xmit_populate_wr(ic, send, pos, 0, 0, send_flags); goto add_header; } /* if there's data reference it with a chain of work reqs */ for (; i < work_alloc && scat != &rm->data.op_sg[rm->data.op_count]; i++) { unsigned int len; send = &ic->i_sends[pos]; len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off); rds_iw_xmit_populate_wr(ic, send, pos, ib_sg_dma_address(dev, scat) + off, len, send_flags); /* * We want to delay signaling completions just enough to get * the batching benefits but not so much that we create dead time * on the wire. */ if (ic->i_unsignaled_wrs-- == 0) { ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs; send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; } ic->i_unsignaled_bytes -= len; if (ic->i_unsignaled_bytes <= 0) { ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes; send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; } /* * Always signal the last one if we're stopping due to flow control. */ if (flow_controlled && i == (work_alloc-1)) send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; rdsdebug("send %p wr %p num_sge %u next %p\n", send, &send->s_wr, send->s_wr.num_sge, send->s_wr.next); sent += len; off += len; if (off == ib_sg_dma_len(dev, scat)) { scat++; off = 0; } add_header: /* Tack on the header after the data. The header SGE should already * have been set up to point to the right header buffer. 
*/ memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header)); if (0) { struct rds_header *hdr = &ic->i_send_hdrs[pos]; printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n", be16_to_cpu(hdr->h_dport), hdr->h_flags, be32_to_cpu(hdr->h_len)); } if (adv_credits) { struct rds_header *hdr = &ic->i_send_hdrs[pos]; /* add credit and redo the header checksum */ hdr->h_credit = adv_credits; rds_message_make_checksum(hdr); adv_credits = 0; rds_iw_stats_inc(s_iw_tx_credit_updates); } if (prev) prev->s_wr.next = &send->s_wr; prev = send; pos = (pos + 1) % ic->i_send_ring.w_nr; } /* Account the RDS header in the number of bytes we sent, but just once. * The caller has no concept of fragmentation. */ if (hdr_off == 0) sent += sizeof(struct rds_header); /* if we finished the message then send completion owns it */ if (scat == &rm->data.op_sg[rm->data.op_count]) { prev->s_rm = ic->i_rm; prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; ic->i_rm = NULL; } if (i < work_alloc) { rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i); work_alloc = i; } if (ic->i_flowctl && i < credit_alloc) rds_iw_send_add_credits(conn, credit_alloc - i); /* XXX need to worry about failed_wr and partial sends. */ failed_wr = &first->s_wr; ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, first, &first->s_wr, ret, failed_wr); BUG_ON(failed_wr != &first->s_wr); if (ret) { printk(KERN_WARNING "RDS/IW: ib_post_send to %pI4 " "returned %d\n", &conn->c_faddr, ret); rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc); if (prev->s_rm) { ic->i_rm = prev->s_rm; prev->s_rm = NULL; } goto out; } ret = sent; out: BUG_ON(adv_credits); return ret; } static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rds_iw_connection *ic, struct rds_iw_send_work *send, int nent, int len, u64 sg_addr) { BUG_ON(nent > send->s_page_list->max_page_list_len); /* * Perform a WR for the fast_reg_mr. 
Each individual page * in the sg list is added to the fast reg page list and placed * inside the fast_reg_mr WR. */ send->s_wr.opcode = IB_WR_FAST_REG_MR; send->s_wr.wr.fast_reg.length = len; send->s_wr.wr.fast_reg.rkey = send->s_mr->rkey; send->s_wr.wr.fast_reg.page_list = send->s_page_list; send->s_wr.wr.fast_reg.page_list_len = nent; send->s_wr.wr.fast_reg.page_shift = PAGE_SHIFT; send->s_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE; send->s_wr.wr.fast_reg.iova_start = sg_addr; ib_update_fast_reg_key(send->s_mr, send->s_remap_count++); } int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) { struct rds_iw_connection *ic = conn->c_transport_data; struct rds_iw_send_work *send = NULL; struct rds_iw_send_work *first; struct rds_iw_send_work *prev; struct ib_send_wr *failed_wr; struct rds_iw_device *rds_iwdev; struct scatterlist *scat; unsigned long len; u64 remote_addr = op->op_remote_addr; u32 pos, fr_pos; u32 work_alloc; u32 i; u32 j; int sent; int ret; int num_sge; rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client); /* map the message the first time we see it */ if (!op->op_mapped) { op->op_count = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, op->op_nents, (op->op_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count); if (op->op_count == 0) { rds_iw_stats_inc(s_iw_tx_sg_mapping_failure); ret = -ENOMEM; /* XXX ? */ goto out; } op->op_mapped = 1; } if (!op->op_write) { /* Alloc space on the send queue for the fastreg */ work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos); if (work_alloc != 1) { rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc); rds_iw_stats_inc(s_iw_tx_ring_full); ret = -ENOMEM; goto out; } } /* * Instead of knowing how to return a partial rdma read/write we insist that there * be enough work requests to send the entire message. 
*/ i = ceil(op->op_count, rds_iwdev->max_sge); work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos); if (work_alloc != i) { rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc); rds_iw_stats_inc(s_iw_tx_ring_full); ret = -ENOMEM; goto out; } send = &ic->i_sends[pos]; if (!op->op_write) { first = prev = &ic->i_sends[fr_pos]; } else { first = send; prev = NULL; } scat = &op->op_sg[0]; sent = 0; num_sge = op->op_count; for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) { send->s_wr.send_flags = 0; send->s_queued = jiffies; /* * We want to delay signaling completions just enough to get * the batching benefits but not so much that we create dead time on the wire. */ if (ic->i_unsignaled_wrs-- == 0) { ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs; send->s_wr.send_flags = IB_SEND_SIGNALED; } /* To avoid the need to have the plumbing to invalidate the fastreg_mr used * for local access after RDS is finished with it, using * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed. 
*/ if (op->op_write) send->s_wr.opcode = IB_WR_RDMA_WRITE; else send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV; send->s_wr.wr.rdma.remote_addr = remote_addr; send->s_wr.wr.rdma.rkey = op->op_rkey; send->s_op = op; if (num_sge > rds_iwdev->max_sge) { send->s_wr.num_sge = rds_iwdev->max_sge; num_sge -= rds_iwdev->max_sge; } else send->s_wr.num_sge = num_sge; send->s_wr.next = NULL; if (prev) prev->s_wr.next = &send->s_wr; for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) { len = ib_sg_dma_len(ic->i_cm_id->device, scat); if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) send->s_page_list->page_list[j] = ib_sg_dma_address(ic->i_cm_id->device, scat); else { send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat); send->s_sge[j].length = len; send->s_sge[j].lkey = rds_iw_local_dma_lkey(ic); } sent += len; rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr); remote_addr += len; scat++; } if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) { send->s_wr.num_sge = 1; send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr; send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes; send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey; } rdsdebug("send %p wr %p num_sge %u next %p\n", send, &send->s_wr, send->s_wr.num_sge, send->s_wr.next); prev = send; if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) send = ic->i_sends; } /* if we finished the message then send completion owns it */ if (scat == &op->op_sg[op->op_count]) first->s_wr.send_flags = IB_SEND_SIGNALED; if (i < work_alloc) { rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i); work_alloc = i; } /* On iWARP, local memory access by a remote system (ie, RDMA Read) is not * recommended. Putting the lkey on the wire is a security hole, as it can * allow for memory access to all of memory on the remote system. Some * adapters do not allow using the lkey for this at all. 
To bypass this use a * fastreg_mr (or possibly a dma_mr) */ if (!op->op_write) { rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos], op->op_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr); work_alloc++; } failed_wr = &first->s_wr; ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, first, &first->s_wr, ret, failed_wr); BUG_ON(failed_wr != &first->s_wr); if (ret) { printk(KERN_WARNING "RDS/IW: rdma ib_post_send to %pI4 " "returned %d\n", &conn->c_faddr, ret); rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc); goto out; } out: return ret; } void rds_iw_xmit_complete(struct rds_connection *conn) { struct rds_iw_connection *ic = conn->c_transport_data; /* We may have a pending ACK or window update we were unable * to send previously (due to flow control). Try again. */ rds_iw_attempt_ack(ic); }
gpl-2.0
BPI-SINOVOIP/BPI-Mainline-kernel
linux-4.14/arch/s390/kernel/idle.c
219
3398
// SPDX-License-Identifier: GPL-2.0 /* * Idle functions for s390. * * Copyright IBM Corp. 2014 * * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> */ #include <linux/kernel.h> #include <linux/kernel_stat.h> #include <linux/kprobes.h> #include <linux/notifier.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/sched/cputime.h> #include <asm/nmi.h> #include <asm/smp.h> #include "entry.h" static DEFINE_PER_CPU(struct s390_idle_data, s390_idle); void enabled_wait(void) { struct s390_idle_data *idle = this_cpu_ptr(&s390_idle); unsigned long long idle_time; unsigned long psw_mask; trace_hardirqs_on(); /* Wait for external, I/O or machine check interrupt. */ psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; clear_cpu_flag(CIF_NOHZ_DELAY); /* Call the assembler magic in entry.S */ psw_idle(idle, psw_mask); trace_hardirqs_off(); /* Account time spent with enabled wait psw loaded as idle time. */ write_seqcount_begin(&idle->seqcount); idle_time = idle->clock_idle_exit - idle->clock_idle_enter; idle->clock_idle_enter = idle->clock_idle_exit = 0ULL; idle->idle_time += idle_time; idle->idle_count++; account_idle_time(cputime_to_nsecs(idle_time)); write_seqcount_end(&idle->seqcount); } NOKPROBE_SYMBOL(enabled_wait); static ssize_t show_idle_count(struct device *dev, struct device_attribute *attr, char *buf) { struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); unsigned long long idle_count; unsigned int seq; do { seq = read_seqcount_begin(&idle->seqcount); idle_count = READ_ONCE(idle->idle_count); if (READ_ONCE(idle->clock_idle_enter)) idle_count++; } while (read_seqcount_retry(&idle->seqcount, seq)); return sprintf(buf, "%llu\n", idle_count); } DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); static ssize_t show_idle_time(struct device *dev, struct device_attribute *attr, char *buf) { struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); unsigned long long now, idle_time, idle_enter, 
idle_exit; unsigned int seq; do { now = get_tod_clock(); seq = read_seqcount_begin(&idle->seqcount); idle_time = READ_ONCE(idle->idle_time); idle_enter = READ_ONCE(idle->clock_idle_enter); idle_exit = READ_ONCE(idle->clock_idle_exit); } while (read_seqcount_retry(&idle->seqcount, seq)); idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; return sprintf(buf, "%llu\n", idle_time >> 12); } DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); u64 arch_cpu_idle_time(int cpu) { struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); unsigned long long now, idle_enter, idle_exit; unsigned int seq; do { now = get_tod_clock(); seq = read_seqcount_begin(&idle->seqcount); idle_enter = READ_ONCE(idle->clock_idle_enter); idle_exit = READ_ONCE(idle->clock_idle_exit); } while (read_seqcount_retry(&idle->seqcount, seq)); return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0); } void arch_cpu_idle_enter(void) { local_mcck_disable(); } void arch_cpu_idle(void) { if (!test_cpu_flag(CIF_MCCK_PENDING)) /* Halt the cpu and keep track of cpu time accounting. */ enabled_wait(); local_irq_enable(); } void arch_cpu_idle_exit(void) { local_mcck_enable(); if (test_cpu_flag(CIF_MCCK_PENDING)) s390_handle_mcck(); } void arch_cpu_idle_dead(void) { cpu_die(); }
gpl-2.0
sooorajjj/android_kernel_cyanogen_msm8916
drivers/cpufreq/cris-etraxfs-cpufreq.c
1243
3532
#include <linux/init.h> #include <linux/module.h> #include <linux/cpufreq.h> #include <hwregs/reg_map.h> #include <arch/hwregs/reg_rdwr.h> #include <arch/hwregs/config_defs.h> #include <arch/hwregs/bif_core_defs.h> static int cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, void *data); static struct notifier_block cris_sdram_freq_notifier_block = { .notifier_call = cris_sdram_freq_notifier }; static struct cpufreq_frequency_table cris_freq_table[] = { {0x01, 6000}, {0x02, 200000}, {0, CPUFREQ_TABLE_END}, }; static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) { reg_config_rw_clk_ctrl clk_ctrl; clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); return clk_ctrl.pll ? 200000 : 6000; } static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, unsigned int state) { struct cpufreq_freqs freqs; reg_config_rw_clk_ctrl clk_ctrl; clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); freqs.old = cris_freq_get_cpu_frequency(policy->cpu); freqs.new = cris_freq_table[state].frequency; cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); local_irq_disable(); /* Even though we may be SMP they will share the same clock * so all settings are made on CPU0. 
*/ if (cris_freq_table[state].frequency == 200000) clk_ctrl.pll = 1; else clk_ctrl.pll = 0; REG_WR(config, regi_config, rw_clk_ctrl, clk_ctrl); local_irq_enable(); cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); }; static int cris_freq_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); } static int cris_freq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int newstate = 0; if (cpufreq_frequency_table_target (policy, cris_freq_table, target_freq, relation, &newstate)) return -EINVAL; cris_freq_set_cpu_state(policy, newstate); return 0; } static int cris_freq_cpu_init(struct cpufreq_policy *policy) { int result; /* cpuinfo and default policy values */ policy->cpuinfo.transition_latency = 1000000; /* 1ms */ policy->cur = cris_freq_get_cpu_frequency(0); result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); if (result) return (result); cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); return 0; } static int cris_freq_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_put_attr(policy->cpu); return 0; } static struct freq_attr *cris_freq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver cris_freq_driver = { .get = cris_freq_get_cpu_frequency, .verify = cris_freq_verify, .target = cris_freq_target, .init = cris_freq_cpu_init, .exit = cris_freq_cpu_exit, .name = "cris_freq", .attr = cris_freq_attr, }; static int __init cris_freq_init(void) { int ret; ret = cpufreq_register_driver(&cris_freq_driver); cpufreq_register_notifier(&cris_sdram_freq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); return ret; } static int cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, void *data) { int i; struct cpufreq_freqs *freqs = data; if (val == CPUFREQ_PRECHANGE) { reg_bif_core_rw_sdram_timing timing = REG_RD(bif_core, regi_bif_core, rw_sdram_timing); timing.cpd = 
(freqs->new == 200000 ? 0 : 1); if (freqs->new == 200000) for (i = 0; i < 50000; i++) ; REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing); } return 0; } module_init(cris_freq_init);
gpl-2.0
Spondoolies-Tech/kernel
drivers/net/hamradio/6pack.c
2523
25173
/* * 6pack.c This module implements the 6pack protocol for kernel-based * devices like TTY. It interfaces between a raw TTY and the * kernel's AX.25 protocol layers. * * Authors: Andreas Könsgen <ajk@comnets.uni-bremen.de> * Ralf Baechle DL5RB <ralf@linux-mips.org> * * Quite a lot of stuff "stolen" by Joerg Reuter from slip.c, written by * * Laurence Culhane, <loz@holmes.demon.co.uk> * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org> */ #include <linux/module.h> #include <asm/uaccess.h> #include <linux/bitops.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/timer.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/spinlock.h> #include <linux/if_arp.h> #include <linux/init.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/semaphore.h> #include <linux/compat.h> #include <linux/atomic.h> #define SIXPACK_VERSION "Revision: 0.3.0" /* sixpack priority commands */ #define SIXP_SEOF 0x40 /* start and end of a 6pack frame */ #define SIXP_TX_URUN 0x48 /* transmit overrun */ #define SIXP_RX_ORUN 0x50 /* receive overrun */ #define SIXP_RX_BUF_OVL 0x58 /* receive buffer overflow */ #define SIXP_CHKSUM 0xFF /* valid checksum of a 6pack frame */ /* masks to get certain bits out of the status bytes sent by the TNC */ #define SIXP_CMD_MASK 0xC0 #define SIXP_CHN_MASK 0x07 #define SIXP_PRIO_CMD_MASK 0x80 #define SIXP_STD_CMD_MASK 0x40 #define SIXP_PRIO_DATA_MASK 0x38 #define SIXP_TX_MASK 0x20 #define SIXP_RX_MASK 0x10 #define SIXP_RX_DCD_MASK 0x18 #define SIXP_LEDS_ON 0x78 #define SIXP_LEDS_OFF 0x60 #define SIXP_CON 0x08 #define SIXP_STA 0x10 #define SIXP_FOUND_TNC 0xe9 #define SIXP_CON_ON 0x68 #define SIXP_DCD_MASK 0x08 #define SIXP_DAMA_OFF 0 /* default level 2 parameters */ #define SIXP_TXDELAY (HZ/4) /* in 1 s */ #define 
SIXP_PERSIST 50 /* in 256ths */ #define SIXP_SLOTTIME (HZ/10) /* in 1 s */ #define SIXP_INIT_RESYNC_TIMEOUT (3*HZ/2) /* in 1 s */ #define SIXP_RESYNC_TIMEOUT 5*HZ /* in 1 s */ /* 6pack configuration. */ #define SIXP_NRUNIT 31 /* MAX number of 6pack channels */ #define SIXP_MTU 256 /* Default MTU */ enum sixpack_flags { SIXPF_ERROR, /* Parity, etc. error */ }; struct sixpack { /* Various fields. */ struct tty_struct *tty; /* ptr to TTY structure */ struct net_device *dev; /* easy for intr handling */ /* These are pointers to the malloc()ed frame buffers. */ unsigned char *rbuff; /* receiver buffer */ int rcount; /* received chars counter */ unsigned char *xbuff; /* transmitter buffer */ unsigned char *xhead; /* next byte to XMIT */ int xleft; /* bytes left in XMIT queue */ unsigned char raw_buf[4]; unsigned char cooked_buf[400]; unsigned int rx_count; unsigned int rx_count_cooked; int mtu; /* Our mtu (to spot changes!) */ int buffsize; /* Max buffers sizes */ unsigned long flags; /* Flag values/ mode etc */ unsigned char mode; /* 6pack mode */ /* 6pack stuff */ unsigned char tx_delay; unsigned char persistence; unsigned char slottime; unsigned char duplex; unsigned char led_state; unsigned char status; unsigned char status1; unsigned char status2; unsigned char tx_enable; unsigned char tnc_state; struct timer_list tx_t; struct timer_list resync_t; atomic_t refcnt; struct semaphore dead_sem; spinlock_t lock; }; #define AX25_6PACK_HEADER_LEN 0 static void sixpack_decode(struct sixpack *, unsigned char[], int); static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char); /* * Perform the persistence/slottime algorithm for CSMA access. If the * persistence check was successful, write the data to the serial driver. * Note that in case of DAMA operation, the data is not sent here. 
*/ static void sp_xmit_on_air(unsigned long channel) { struct sixpack *sp = (struct sixpack *) channel; int actual, when = sp->slottime; static unsigned char random; random = random * 17 + 41; if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistence)) { sp->led_state = 0x70; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tx_enable = 1; actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2); sp->xleft -= actual; sp->xhead += actual; sp->led_state = 0x60; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->status2 = 0; } else mod_timer(&sp->tx_t, jiffies + ((when + 1) * HZ) / 100); } /* ----> 6pack timer interrupt handler and friends. <---- */ /* Encapsulate one AX.25 frame and stuff into a TTY queue. */ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len) { unsigned char *msg, *p = icp; int actual, count; if (len > sp->mtu) { /* sp->mtu = AX25_MTU = max. PACLEN = 256 */ msg = "oversized transmit packet!"; goto out_drop; } if (len > sp->mtu) { /* sp->mtu = AX25_MTU = max. PACLEN = 256 */ msg = "oversized transmit packet!"; goto out_drop; } if (p[0] > 5) { msg = "invalid KISS command"; goto out_drop; } if ((p[0] != 0) && (len > 2)) { msg = "KISS control packet too long"; goto out_drop; } if ((p[0] == 0) && (len < 15)) { msg = "bad AX.25 packet to transmit"; goto out_drop; } count = encode_sixpack(p, sp->xbuff, len, sp->tx_delay); set_bit(TTY_DO_WRITE_WAKEUP, &sp->tty->flags); switch (p[0]) { case 1: sp->tx_delay = p[1]; return; case 2: sp->persistence = p[1]; return; case 3: sp->slottime = p[1]; return; case 4: /* ignored */ return; case 5: sp->duplex = p[1]; return; } if (p[0] != 0) return; /* * In case of fullduplex or DAMA operation, we don't take care about the * state of the DCD or of any timers, as the determination of the * correct time to send is the job of the AX.25 layer. We send * immediately after data has arrived. 
*/ if (sp->duplex == 1) { sp->led_state = 0x70; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tx_enable = 1; actual = sp->tty->ops->write(sp->tty, sp->xbuff, count); sp->xleft = count - actual; sp->xhead = sp->xbuff + actual; sp->led_state = 0x60; sp->tty->ops->write(sp->tty, &sp->led_state, 1); } else { sp->xleft = count; sp->xhead = sp->xbuff; sp->status2 = count; sp_xmit_on_air((unsigned long)sp); } return; out_drop: sp->dev->stats.tx_dropped++; netif_start_queue(sp->dev); if (net_ratelimit()) printk(KERN_DEBUG "%s: %s - dropped.\n", sp->dev->name, msg); } /* Encapsulate an IP datagram and kick it into a TTY queue. */ static netdev_tx_t sp_xmit(struct sk_buff *skb, struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); spin_lock_bh(&sp->lock); /* We were not busy, so we are now... :-) */ netif_stop_queue(dev); dev->stats.tx_bytes += skb->len; sp_encaps(sp, skb->data, skb->len); spin_unlock_bh(&sp->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } static int sp_open_dev(struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); if (sp->tty == NULL) return -ENODEV; return 0; } /* Close the low-level part of the 6pack channel. */ static int sp_close(struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); spin_lock_bh(&sp->lock); if (sp->tty) { /* TTY discipline is running. 
*/ clear_bit(TTY_DO_WRITE_WAKEUP, &sp->tty->flags); } netif_stop_queue(dev); spin_unlock_bh(&sp->lock); return 0; } /* Return the frame type ID */ static int sp_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { #ifdef CONFIG_INET if (type != ETH_P_AX25) return ax25_hard_header(skb, dev, type, daddr, saddr, len); #endif return 0; } static int sp_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr_ax25 *sa = addr; netif_tx_lock_bh(dev); netif_addr_lock(dev); memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN); netif_addr_unlock(dev); netif_tx_unlock_bh(dev); return 0; } static int sp_rebuild_header(struct sk_buff *skb) { #ifdef CONFIG_INET return ax25_rebuild_header(skb); #else return 0; #endif } static const struct header_ops sp_header_ops = { .create = sp_header, .rebuild = sp_rebuild_header, }; static const struct net_device_ops sp_netdev_ops = { .ndo_open = sp_open_dev, .ndo_stop = sp_close, .ndo_start_xmit = sp_xmit, .ndo_set_mac_address = sp_set_mac_address, }; static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. */ dev->netdev_ops = &sp_netdev_ops; dev->destructor = free_netdev; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &sp_header_ops; dev->addr_len = AX25_ADDR_LEN; dev->type = ARPHRD_AX25; dev->tx_queue_len = 10; /* Only activated in AX.25 mode */ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); dev->flags = 0; } /* Send one completely decapsulated IP datagram to the IP layer. */ /* * This is the routine that sends the received data to the kernel AX.25. * 'cmd' is the KISS command. For AX.25 data, it is zero. 
*/ static void sp_bump(struct sixpack *sp, char cmd) { struct sk_buff *skb; int count; unsigned char *ptr; count = sp->rcount + 1; sp->dev->stats.rx_bytes += count; if ((skb = dev_alloc_skb(count)) == NULL) goto out_mem; ptr = skb_put(skb, count); *ptr++ = cmd; /* KISS command */ memcpy(ptr, sp->cooked_buf + 1, count); skb->protocol = ax25_type_trans(skb, sp->dev); netif_rx(skb); sp->dev->stats.rx_packets++; return; out_mem: sp->dev->stats.rx_dropped++; } /* ----------------------------------------------------------------------- */ /* * We have a potential race on dereferencing tty->disc_data, because the tty * layer provides no locking at all - thus one cpu could be running * sixpack_receive_buf while another calls sixpack_close, which zeroes * tty->disc_data and frees the memory that sixpack_receive_buf is using. The * best way to fix this is to use a rwlock in the tty struct, but for now we * use a single global rwlock for all ttys in ppp line discipline. */ static DEFINE_RWLOCK(disc_data_lock); static struct sixpack *sp_get(struct tty_struct *tty) { struct sixpack *sp; read_lock(&disc_data_lock); sp = tty->disc_data; if (sp) atomic_inc(&sp->refcnt); read_unlock(&disc_data_lock); return sp; } static void sp_put(struct sixpack *sp) { if (atomic_dec_and_test(&sp->refcnt)) up(&sp->dead_sem); } /* * Called by the TTY driver when there's room for more data. If we have * more packets to send, we send them here. 
*/ static void sixpack_write_wakeup(struct tty_struct *tty) { struct sixpack *sp = sp_get(tty); int actual; if (!sp) return; if (sp->xleft <= 0) { /* Now serial buffer is almost free & we can start * transmission of another packet */ sp->dev->stats.tx_packets++; clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); sp->tx_enable = 0; netif_wake_queue(sp->dev); goto out; } if (sp->tx_enable) { actual = tty->ops->write(tty, sp->xhead, sp->xleft); sp->xleft -= actual; sp->xhead += actual; } out: sp_put(sp); } /* ----------------------------------------------------------------------- */ /* * Handle the 'receiver data ready' interrupt. * This function is called by the 'tty_io' module in the kernel when * a block of 6pack data has been received, which can now be decapsulated * and sent on to some IP layer for further processing. */ static void sixpack_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct sixpack *sp; unsigned char buf[512]; int count1; if (!count) return; sp = sp_get(tty); if (!sp) return; memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf)); /* Read the characters out of the buffer */ count1 = count; while (count) { count--; if (fp && *fp++) { if (!test_and_set_bit(SIXPF_ERROR, &sp->flags)) sp->dev->stats.rx_errors++; continue; } } sixpack_decode(sp, buf, count1); sp_put(sp); tty_unthrottle(tty); } /* * Try to resync the TNC. Called by the resync timer defined in * decode_prio_command */ #define TNC_UNINITIALIZED 0 #define TNC_UNSYNC_STARTUP 1 #define TNC_UNSYNCED 2 #define TNC_IN_SYNC 3 static void __tnc_set_sync_state(struct sixpack *sp, int new_tnc_state) { char *msg; switch (new_tnc_state) { default: /* gcc oh piece-o-crap ... 
*/ case TNC_UNSYNC_STARTUP: msg = "Synchronizing with TNC"; break; case TNC_UNSYNCED: msg = "Lost synchronization with TNC\n"; break; case TNC_IN_SYNC: msg = "Found TNC"; break; } sp->tnc_state = new_tnc_state; printk(KERN_INFO "%s: %s\n", sp->dev->name, msg); } static inline void tnc_set_sync_state(struct sixpack *sp, int new_tnc_state) { int old_tnc_state = sp->tnc_state; if (old_tnc_state != new_tnc_state) __tnc_set_sync_state(sp, new_tnc_state); } static void resync_tnc(unsigned long channel) { struct sixpack *sp = (struct sixpack *) channel; static char resync_cmd = 0xe8; /* clear any data that might have been received */ sp->rx_count = 0; sp->rx_count_cooked = 0; /* reset state machine */ sp->status = 1; sp->status1 = 1; sp->status2 = 0; /* resync the TNC */ sp->led_state = 0x60; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tty->ops->write(sp->tty, &resync_cmd, 1); /* Start resync timer again -- the TNC might be still absent */ del_timer(&sp->resync_t); sp->resync_t.data = (unsigned long) sp; sp->resync_t.function = resync_tnc; sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT; add_timer(&sp->resync_t); } static inline int tnc_init(struct sixpack *sp) { unsigned char inbyte = 0xe8; tnc_set_sync_state(sp, TNC_UNSYNC_STARTUP); sp->tty->ops->write(sp->tty, &inbyte, 1); del_timer(&sp->resync_t); sp->resync_t.data = (unsigned long) sp; sp->resync_t.function = resync_tnc; sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT; add_timer(&sp->resync_t); return 0; } /* * Open the high-level part of the 6pack channel. * This function is called by the TTY module when the * 6pack line discipline is called for. Because we are * sure the tty line exists, we only have to link it to * a free 6pcack channel... 
*/ static int sixpack_open(struct tty_struct *tty) { char *rbuff = NULL, *xbuff = NULL; struct net_device *dev; struct sixpack *sp; unsigned long len; int err = 0; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (tty->ops->write == NULL) return -EOPNOTSUPP; dev = alloc_netdev(sizeof(struct sixpack), "sp%d", sp_setup); if (!dev) { err = -ENOMEM; goto out; } sp = netdev_priv(dev); sp->dev = dev; spin_lock_init(&sp->lock); atomic_set(&sp->refcnt, 1); sema_init(&sp->dead_sem, 0); /* !!! length of the buffers. MTU is IP MTU, not PACLEN! */ len = dev->mtu * 2; rbuff = kmalloc(len + 4, GFP_KERNEL); xbuff = kmalloc(len + 4, GFP_KERNEL); if (rbuff == NULL || xbuff == NULL) { err = -ENOBUFS; goto out_free; } spin_lock_bh(&sp->lock); sp->tty = tty; sp->rbuff = rbuff; sp->xbuff = xbuff; sp->mtu = AX25_MTU + 73; sp->buffsize = len; sp->rcount = 0; sp->rx_count = 0; sp->rx_count_cooked = 0; sp->xleft = 0; sp->flags = 0; /* Clear ESCAPE & ERROR flags */ sp->duplex = 0; sp->tx_delay = SIXP_TXDELAY; sp->persistence = SIXP_PERSIST; sp->slottime = SIXP_SLOTTIME; sp->led_state = 0x60; sp->status = 1; sp->status1 = 1; sp->status2 = 0; sp->tx_enable = 0; netif_start_queue(dev); init_timer(&sp->tx_t); sp->tx_t.function = sp_xmit_on_air; sp->tx_t.data = (unsigned long) sp; init_timer(&sp->resync_t); spin_unlock_bh(&sp->lock); /* Done. We have linked the TTY line to a channel. */ tty->disc_data = sp; tty->receive_room = 65536; /* Now we're ready to register. */ if (register_netdev(dev)) goto out_free; tnc_init(sp); return 0; out_free: kfree(xbuff); kfree(rbuff); if (dev) free_netdev(dev); out: return err; } /* * Close down a 6pack channel. * This means flushing out any pending queues, and then restoring the * TTY line discipline to what it was before it got hooked to 6pack * (which usually is TTY again). 
*/ static void sixpack_close(struct tty_struct *tty) { struct sixpack *sp; write_lock_bh(&disc_data_lock); sp = tty->disc_data; tty->disc_data = NULL; write_unlock_bh(&disc_data_lock); if (!sp) return; /* * We have now ensured that nobody can start using ap from now on, but * we have to wait for all existing users to finish. */ if (!atomic_dec_and_test(&sp->refcnt)) down(&sp->dead_sem); unregister_netdev(sp->dev); del_timer(&sp->tx_t); del_timer(&sp->resync_t); /* Free all 6pack frame buffers. */ kfree(sp->rbuff); kfree(sp->xbuff); } /* Perform I/O control on an active 6pack channel. */ static int sixpack_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) { struct sixpack *sp = sp_get(tty); struct net_device *dev; unsigned int tmp, err; if (!sp) return -ENXIO; dev = sp->dev; switch(cmd) { case SIOCGIFNAME: err = copy_to_user((void __user *) arg, dev->name, strlen(dev->name) + 1) ? -EFAULT : 0; break; case SIOCGIFENCAP: err = put_user(0, (int __user *) arg); break; case SIOCSIFENCAP: if (get_user(tmp, (int __user *) arg)) { err = -EFAULT; break; } sp->mode = tmp; dev->addr_len = AX25_ADDR_LEN; dev->hard_header_len = AX25_KISS_HEADER_LEN + AX25_MAX_HEADER_LEN + 3; dev->type = ARPHRD_AX25; err = 0; break; case SIOCSIFHWADDR: { char addr[AX25_ADDR_LEN]; if (copy_from_user(&addr, (void __user *) arg, AX25_ADDR_LEN)) { err = -EFAULT; break; } netif_tx_lock_bh(dev); memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN); netif_tx_unlock_bh(dev); err = 0; break; } default: err = tty_mode_ioctl(tty, file, cmd, arg); } sp_put(sp); return err; } #ifdef CONFIG_COMPAT static long sixpack_compat_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg) { switch (cmd) { case SIOCGIFNAME: case SIOCGIFENCAP: case SIOCSIFENCAP: case SIOCSIFHWADDR: return sixpack_ioctl(tty, file, cmd, (unsigned long)compat_ptr(arg)); } return -ENOIOCTLCMD; } #endif static struct tty_ldisc_ops sp_ldisc = { .owner = THIS_MODULE, .magic = 
TTY_LDISC_MAGIC, .name = "6pack", .open = sixpack_open, .close = sixpack_close, .ioctl = sixpack_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = sixpack_compat_ioctl, #endif .receive_buf = sixpack_receive_buf, .write_wakeup = sixpack_write_wakeup, }; /* Initialize 6pack control device -- register 6pack line discipline */ static const char msg_banner[] __initconst = KERN_INFO \ "AX.25: 6pack driver, " SIXPACK_VERSION "\n"; static const char msg_regfail[] __initconst = KERN_ERR \ "6pack: can't register line discipline (err = %d)\n"; static int __init sixpack_init_driver(void) { int status; printk(msg_banner); /* Register the provided line protocol discipline */ if ((status = tty_register_ldisc(N_6PACK, &sp_ldisc)) != 0) printk(msg_regfail, status); return status; } static const char msg_unregfail[] = KERN_ERR \ "6pack: can't unregister line discipline (err = %d)\n"; static void __exit sixpack_exit_driver(void) { int ret; if ((ret = tty_unregister_ldisc(N_6PACK))) printk(msg_unregfail, ret); } /* encode an AX.25 packet into 6pack */ static int encode_sixpack(unsigned char *tx_buf, unsigned char *tx_buf_raw, int length, unsigned char tx_delay) { int count = 0; unsigned char checksum = 0, buf[400]; int raw_count = 0; tx_buf_raw[raw_count++] = SIXP_PRIO_CMD_MASK | SIXP_TX_MASK; tx_buf_raw[raw_count++] = SIXP_SEOF; buf[0] = tx_delay; for (count = 1; count < length; count++) buf[count] = tx_buf[count]; for (count = 0; count < length; count++) checksum += buf[count]; buf[length] = (unsigned char) 0xff - checksum; for (count = 0; count <= length; count++) { if ((count % 3) == 0) { tx_buf_raw[raw_count++] = (buf[count] & 0x3f); tx_buf_raw[raw_count] = ((buf[count] >> 2) & 0x30); } else if ((count % 3) == 1) { tx_buf_raw[raw_count++] |= (buf[count] & 0x0f); tx_buf_raw[raw_count] = ((buf[count] >> 2) & 0x3c); } else { tx_buf_raw[raw_count++] |= (buf[count] & 0x03); tx_buf_raw[raw_count++] = (buf[count] >> 2); } } if ((length % 3) != 2) raw_count++; tx_buf_raw[raw_count++] = 
SIXP_SEOF; return raw_count; } /* decode 4 sixpack-encoded bytes into 3 data bytes */ static void decode_data(struct sixpack *sp, unsigned char inbyte) { unsigned char *buf; if (sp->rx_count != 3) { sp->raw_buf[sp->rx_count++] = inbyte; return; } buf = sp->raw_buf; sp->cooked_buf[sp->rx_count_cooked++] = buf[0] | ((buf[1] << 2) & 0xc0); sp->cooked_buf[sp->rx_count_cooked++] = (buf[1] & 0x0f) | ((buf[2] << 2) & 0xf0); sp->cooked_buf[sp->rx_count_cooked++] = (buf[2] & 0x03) | (inbyte << 2); sp->rx_count = 0; } /* identify and execute a 6pack priority command byte */ static void decode_prio_command(struct sixpack *sp, unsigned char cmd) { unsigned char channel; int actual; channel = cmd & SIXP_CHN_MASK; if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? */ /* RX and DCD flags can only be set in the same prio command, if the DCD flag has been set without the RX flag in the previous prio command. If DCD has not been set before, something in the transmission has gone wrong. In this case, RX and DCD are cleared in order to prevent the decode_data routine from reading further data that might be corrupt. */ if (((sp->status & SIXP_DCD_MASK) == 0) && ((cmd & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK)) { if (sp->status != 1) printk(KERN_DEBUG "6pack: protocol violation\n"); else sp->status = 0; cmd &= ~SIXP_RX_DCD_MASK; } sp->status = cmd & SIXP_PRIO_DATA_MASK; } else { /* output watchdog char if idle */ if ((sp->status2 != 0) && (sp->duplex == 1)) { sp->led_state = 0x70; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tx_enable = 1; actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2); sp->xleft -= actual; sp->xhead += actual; sp->led_state = 0x60; sp->status2 = 0; } } /* needed to trigger the TNC watchdog */ sp->tty->ops->write(sp->tty, &sp->led_state, 1); /* if the state byte has been received, the TNC is present, so the resync timer can be reset. 
*/ if (sp->tnc_state == TNC_IN_SYNC) { del_timer(&sp->resync_t); sp->resync_t.data = (unsigned long) sp; sp->resync_t.function = resync_tnc; sp->resync_t.expires = jiffies + SIXP_INIT_RESYNC_TIMEOUT; add_timer(&sp->resync_t); } sp->status1 = cmd & SIXP_PRIO_DATA_MASK; } /* identify and execute a standard 6pack command byte */ static void decode_std_command(struct sixpack *sp, unsigned char cmd) { unsigned char checksum = 0, rest = 0, channel; short i; channel = cmd & SIXP_CHN_MASK; switch (cmd & SIXP_CMD_MASK) { /* normal command */ case SIXP_SEOF: if ((sp->rx_count == 0) && (sp->rx_count_cooked == 0)) { if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) { sp->led_state = 0x68; sp->tty->ops->write(sp->tty, &sp->led_state, 1); } } else { sp->led_state = 0x60; /* fill trailing bytes with zeroes */ sp->tty->ops->write(sp->tty, &sp->led_state, 1); rest = sp->rx_count; if (rest != 0) for (i = rest; i <= 3; i++) decode_data(sp, 0); if (rest == 2) sp->rx_count_cooked -= 2; else if (rest == 3) sp->rx_count_cooked -= 1; for (i = 0; i < sp->rx_count_cooked; i++) checksum += sp->cooked_buf[i]; if (checksum != SIXP_CHKSUM) { printk(KERN_DEBUG "6pack: bad checksum %2.2x\n", checksum); } else { sp->rcount = sp->rx_count_cooked-2; sp_bump(sp, 0); } sp->rx_count_cooked = 0; } break; case SIXP_TX_URUN: printk(KERN_DEBUG "6pack: TX underrun\n"); break; case SIXP_RX_ORUN: printk(KERN_DEBUG "6pack: RX overrun\n"); break; case SIXP_RX_BUF_OVL: printk(KERN_DEBUG "6pack: RX buffer overflow\n"); } } /* decode a 6pack packet */ static void sixpack_decode(struct sixpack *sp, unsigned char *pre_rbuff, int count) { unsigned char inbyte; int count1; for (count1 = 0; count1 < count; count1++) { inbyte = pre_rbuff[count1]; if (inbyte == SIXP_FOUND_TNC) { tnc_set_sync_state(sp, TNC_IN_SYNC); del_timer(&sp->resync_t); } if ((inbyte & SIXP_PRIO_CMD_MASK) != 0) decode_prio_command(sp, inbyte); else if ((inbyte & SIXP_STD_CMD_MASK) != 0) decode_std_command(sp, inbyte); else if ((sp->status & 
SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) decode_data(sp, inbyte); } } MODULE_AUTHOR("Ralf Baechle DO1GRB <ralf@linux-mips.org>"); MODULE_DESCRIPTION("6pack driver for AX.25"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_6PACK); module_init(sixpack_init_driver); module_exit(sixpack_exit_driver);
gpl-2.0