repo_name: string
path: string
copies: string
size: string
content: string
license: string
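Each record below is one source file described by the six string fields above. As a minimal, purely illustrative sketch (the struct name and comments are assumptions, not part of the dataset), the record layout could be mirrored in C as:

/* Hypothetical in-memory layout for one record of this dump.
 * All six columns are strings per the schema above, so "copies" and
 * "size" are kept as text rather than parsed into integers. */
struct code_record {
    const char *repo_name;  /* repository, e.g. "yoshinorim/mysql-5.6" */
    const char *path;       /* file path within the repository */
    const char *copies;     /* duplicate count, stored as a string */
    const char *size;       /* file size in bytes, stored as a string */
    const char *content;    /* full flattened source text of the file */
    const char *license;    /* license tag, e.g. "gpl-2.0" */
};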

repo_name: yoshinorim/mysql-5.6
path: storage/ndb/test/ndbapi/bank/bankTimer.cpp
copies: 120
size: 1913
content:
/* Copyright (C) 2003-2006 MySQL AB, 2009 Sun Microsystems, Inc. All rights reserved. Use is subject to license terms. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <ndb_global.h> #include <NdbOut.hpp> #include <NdbApi.hpp> #include <NdbMain.h> #include <NDBT.hpp> #include <NdbSleep.h> #include <getarg.h> #include "Bank.hpp" int main(int argc, const char** argv){ ndb_init(); int _help = 0; int _wait = 30; const char * _database="BANK"; struct getargs args[] = { { "wait", 'w', arg_integer, &_wait, "Max time to wait between days", "secs" }, { "database", 'd', arg_string, &_database, "Database name", ""}, { "usage", '?', arg_flag, &_help, "Print help", "" } }; int num_args = sizeof(args) / sizeof(args[0]); int optind = 0; char desc[] = "This program will increase time in the bank\n"; if(getarg(args, num_args, argc, argv, &optind) || _help) { arg_printusage(args, num_args, argv[0], desc); return NDBT_ProgramExit(NDBT_WRONGARGS); } Ndb_cluster_connection con; if(con.connect(12, 5, 1) != 0) { return NDBT_ProgramExit(NDBT_FAILED); } Bank bank(con,_database); if (bank.performIncreaseTime(_wait) != 0) return NDBT_ProgramExit(NDBT_FAILED); return NDBT_ProgramExit(NDBT_OK); }
license: gpl-2.0

repo_name: sky7sea/linux
path: drivers/net/can/bfin_can.c
copies: 376
size: 19605
content:
/* * Blackfin On-Chip CAN Driver * * Copyright 2004-2009 Analog Devices Inc. * * Enter bugs at http://blackfin.uclinux.org/ * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/platform_device.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <asm/portmux.h> #define DRV_NAME "bfin_can" #define BFIN_CAN_TIMEOUT 100 #define TX_ECHO_SKB_MAX 1 /* transmit and receive channels */ #define TRANSMIT_CHL 24 #define RECEIVE_STD_CHL 0 #define RECEIVE_EXT_CHL 4 #define RECEIVE_RTR_CHL 8 #define RECEIVE_EXT_RTR_CHL 12 #define MAX_CHL_NUMBER 32 /* All Blackfin system MMRs are padded to 32bits even if the register * itself is only 16bits. So use a helper macro to streamline this */ #define __BFP(m) u16 m; u16 __pad_##m /* bfin can registers layout */ struct bfin_can_mask_regs { __BFP(aml); __BFP(amh); }; struct bfin_can_channel_regs { /* data[0,2,4,6] -> data{0,1,2,3} while data[1,3,5,7] is padding */ u16 data[8]; __BFP(dlc); __BFP(tsv); __BFP(id0); __BFP(id1); }; struct bfin_can_regs { /* global control and status registers */ __BFP(mc1); /* offset 0x00 */ __BFP(md1); /* offset 0x04 */ __BFP(trs1); /* offset 0x08 */ __BFP(trr1); /* offset 0x0c */ __BFP(ta1); /* offset 0x10 */ __BFP(aa1); /* offset 0x14 */ __BFP(rmp1); /* offset 0x18 */ __BFP(rml1); /* offset 0x1c */ __BFP(mbtif1); /* offset 0x20 */ __BFP(mbrif1); /* offset 0x24 */ __BFP(mbim1); /* offset 0x28 */ __BFP(rfh1); /* offset 0x2c */ __BFP(opss1); /* offset 0x30 */ u32 __pad1[3]; __BFP(mc2); /* offset 0x40 */ __BFP(md2); /* offset 0x44 */ __BFP(trs2); /* offset 0x48 */ __BFP(trr2); /* offset 0x4c */ __BFP(ta2); /* offset 0x50 */ __BFP(aa2); /* offset 0x54 */ __BFP(rmp2); /* offset 0x58 */ __BFP(rml2); /* offset 0x5c */ __BFP(mbtif2); /* offset 0x60 */ __BFP(mbrif2); /* offset 0x64 */ __BFP(mbim2); /* offset 0x68 */ __BFP(rfh2); /* offset 0x6c */ __BFP(opss2); /* offset 0x70 */ u32 __pad2[3]; __BFP(clock); /* offset 0x80 */ __BFP(timing); /* offset 0x84 */ __BFP(debug); /* offset 0x88 */ __BFP(status); /* offset 0x8c */ __BFP(cec); /* offset 0x90 */ __BFP(gis); /* offset 0x94 */ __BFP(gim); /* offset 0x98 */ __BFP(gif); /* offset 0x9c */ __BFP(control); /* offset 0xa0 */ __BFP(intr); /* offset 0xa4 */ __BFP(version); /* offset 0xa8 */ __BFP(mbtd); /* offset 0xac */ __BFP(ewr); /* offset 0xb0 */ __BFP(esr); /* offset 0xb4 */ u32 __pad3[2]; __BFP(ucreg); /* offset 0xc0 */ __BFP(uccnt); /* offset 0xc4 */ __BFP(ucrc); /* offset 0xc8 */ __BFP(uccnf); /* offset 0xcc */ u32 __pad4[1]; __BFP(version2); /* offset 0xd4 */ u32 __pad5[10]; /* channel(mailbox) mask and message registers */ struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */ struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */ }; #undef __BFP #define SRS 0x0001 /* Software Reset */ #define SER 0x0008 /* Stuff Error */ #define BOIM 0x0008 /* Enable Bus Off Interrupt */ #define CCR 0x0080 /* CAN Configuration Mode Request */ #define CCA 0x0080 /* Configuration Mode Acknowledge */ #define SAM 0x0080 /* Sampling */ #define AME 0x8000 /* Acceptance Mask Enable */ #define RMLIM 0x0080 /* Enable RX Message Lost Interrupt */ #define RMLIS 0x0080 /* RX Message Lost IRQ Status */ #define RTR 0x4000 /* Remote Frame Transmission Request */ #define BOIS 0x0008 /* Bus Off IRQ Status */ #define IDE 0x2000 /* Identifier Extension */ #define EPIS 0x0004 /* Error-Passive 
Mode IRQ Status */ #define EPIM 0x0004 /* Enable Error-Passive Mode Interrupt */ #define EWTIS 0x0001 /* TX Error Count IRQ Status */ #define EWRIS 0x0002 /* RX Error Count IRQ Status */ #define BEF 0x0040 /* Bit Error Flag */ #define FER 0x0080 /* Form Error Flag */ #define SMR 0x0020 /* Sleep Mode Request */ #define SMACK 0x0008 /* Sleep Mode Acknowledge */ /* * bfin can private data */ struct bfin_can_priv { struct can_priv can; /* must be the first member */ struct net_device *dev; void __iomem *membase; int rx_irq; int tx_irq; int err_irq; unsigned short *pin_list; }; /* * bfin can timing parameters */ static const struct can_bittiming_const bfin_can_bittiming_const = { .name = DRV_NAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, /* * Although the BRP field can be set to any value, it is recommended * that the value be greater than or equal to 4, as restrictions * apply to the bit timing configuration when BRP is less than 4. */ .brp_min = 4, .brp_max = 1024, .brp_inc = 1, }; static int bfin_can_set_bittiming(struct net_device *dev) { struct bfin_can_priv *priv = netdev_priv(dev); struct bfin_can_regs __iomem *reg = priv->membase; struct can_bittiming *bt = &priv->can.bittiming; u16 clk, timing; clk = bt->brp - 1; timing = ((bt->sjw - 1) << 8) | (bt->prop_seg + bt->phase_seg1 - 1) | ((bt->phase_seg2 - 1) << 4); /* * If the SAM bit is set, the input signal is oversampled three times * at the SCLK rate. */ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) timing |= SAM; writew(clk, &reg->clock); writew(timing, &reg->timing); netdev_info(dev, "setting CLOCK=0x%04x TIMING=0x%04x\n", clk, timing); return 0; } static void bfin_can_set_reset_mode(struct net_device *dev) { struct bfin_can_priv *priv = netdev_priv(dev); struct bfin_can_regs __iomem *reg = priv->membase; int timeout = BFIN_CAN_TIMEOUT; int i; /* disable interrupts */ writew(0, &reg->mbim1); writew(0, &reg->mbim2); writew(0, &reg->gim); /* reset can and enter configuration mode */ writew(SRS | CCR, &reg->control); writew(CCR, &reg->control); while (!(readw(&reg->control) & CCA)) { udelay(10); if (--timeout == 0) { netdev_err(dev, "fail to enter configuration mode\n"); BUG(); } } /* * All mailbox configurations are marked as inactive * by writing to CAN Mailbox Configuration Registers 1 and 2 * For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled */ writew(0, &reg->mc1); writew(0, &reg->mc2); /* Set Mailbox Direction */ writew(0xFFFF, &reg->md1); /* mailbox 1-16 are RX */ writew(0, &reg->md2); /* mailbox 17-32 are TX */ /* RECEIVE_STD_CHL */ for (i = 0; i < 2; i++) { writew(0, &reg->chl[RECEIVE_STD_CHL + i].id0); writew(AME, &reg->chl[RECEIVE_STD_CHL + i].id1); writew(0, &reg->chl[RECEIVE_STD_CHL + i].dlc); writew(0x1FFF, &reg->msk[RECEIVE_STD_CHL + i].amh); writew(0xFFFF, &reg->msk[RECEIVE_STD_CHL + i].aml); } /* RECEIVE_EXT_CHL */ for (i = 0; i < 2; i++) { writew(0, &reg->chl[RECEIVE_EXT_CHL + i].id0); writew(AME | IDE, &reg->chl[RECEIVE_EXT_CHL + i].id1); writew(0, &reg->chl[RECEIVE_EXT_CHL + i].dlc); writew(0x1FFF, &reg->msk[RECEIVE_EXT_CHL + i].amh); writew(0xFFFF, &reg->msk[RECEIVE_EXT_CHL + i].aml); } writew(BIT(TRANSMIT_CHL - 16), &reg->mc2); writew(BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL), &reg->mc1); priv->can.state = CAN_STATE_STOPPED; } static void bfin_can_set_normal_mode(struct net_device *dev) { struct bfin_can_priv *priv = netdev_priv(dev); struct bfin_can_regs __iomem *reg = priv->membase; int timeout = BFIN_CAN_TIMEOUT; /* * leave configuration mode */ 
writew(readw(&reg->control) & ~CCR, &reg->control); while (readw(&reg->status) & CCA) { udelay(10); if (--timeout == 0) { netdev_err(dev, "fail to leave configuration mode\n"); BUG(); } } /* * clear _All_ tx and rx interrupts */ writew(0xFFFF, &reg->mbtif1); writew(0xFFFF, &reg->mbtif2); writew(0xFFFF, &reg->mbrif1); writew(0xFFFF, &reg->mbrif2); /* * clear global interrupt status register */ writew(0x7FF, &reg->gis); /* overwrites with '1' */ /* * Initialize Interrupts * - set bits in the mailbox interrupt mask register * - global interrupt mask */ writew(BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL), &reg->mbim1); writew(BIT(TRANSMIT_CHL - 16), &reg->mbim2); writew(EPIM | BOIM | RMLIM, &reg->gim); } static void bfin_can_start(struct net_device *dev) { struct bfin_can_priv *priv = netdev_priv(dev); /* enter reset mode */ if (priv->can.state != CAN_STATE_STOPPED) bfin_can_set_reset_mode(dev); /* leave reset mode */ bfin_can_set_normal_mode(dev); } static int bfin_can_set_mode(struct net_device *dev, enum can_mode mode) { switch (mode) { case CAN_MODE_START: bfin_can_start(dev); if (netif_queue_stopped(dev)) netif_wake_queue(dev); break; default: return -EOPNOTSUPP; } return 0; } static int bfin_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct bfin_can_priv *priv = netdev_priv(dev); struct bfin_can_regs __iomem *reg = priv->membase; u16 cec = readw(&reg->cec); bec->txerr = cec >> 8; bec->rxerr = cec; return 0; } static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bfin_can_priv *priv = netdev_priv(dev); struct bfin_can_regs __iomem *reg = priv->membase; struct can_frame *cf = (struct can_frame *)skb->data; u8 dlc = cf->can_dlc; canid_t id = cf->can_id; u8 *data = cf->data; u16 val; int i; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); /* fill id */ if (id & CAN_EFF_FLAG) { writew(id, &reg->chl[TRANSMIT_CHL].id0); val = ((id & 0x1FFF0000) >> 16) | IDE; } else val = (id << 2); if (id & CAN_RTR_FLAG) val |= RTR; writew(val | AME, &reg->chl[TRANSMIT_CHL].id1); /* fill payload */ for (i = 0; i < 8; i += 2) { val = ((7 - i) < dlc ? (data[7 - i]) : 0) + ((6 - i) < dlc ? (data[6 - i] << 8) : 0); writew(val, &reg->chl[TRANSMIT_CHL].data[i]); } /* fill data length code */ writew(dlc, &reg->chl[TRANSMIT_CHL].dlc); can_put_echo_skb(skb, dev, 0); /* set transmit request */ writew(BIT(TRANSMIT_CHL - 16), &reg->trs2); return 0; } static void bfin_can_rx(struct net_device *dev, u16 isrc) { struct bfin_can_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct bfin_can_regs __iomem *reg = priv->membase; struct can_frame *cf; struct sk_buff *skb; int obj; int i; u16 val; skb = alloc_can_skb(dev, &cf); if (skb == NULL) return; /* get id */ if (isrc & BIT(RECEIVE_EXT_CHL)) { /* extended frame format (EFF) */ cf->can_id = ((readw(&reg->chl[RECEIVE_EXT_CHL].id1) & 0x1FFF) << 16) + readw(&reg->chl[RECEIVE_EXT_CHL].id0); cf->can_id |= CAN_EFF_FLAG; obj = RECEIVE_EXT_CHL; } else { /* standard frame format (SFF) */ cf->can_id = (readw(&reg->chl[RECEIVE_STD_CHL].id1) & 0x1ffc) >> 2; obj = RECEIVE_STD_CHL; } if (readw(&reg->chl[obj].id1) & RTR) cf->can_id |= CAN_RTR_FLAG; /* get data length code */ cf->can_dlc = get_can_dlc(readw(&reg->chl[obj].dlc) & 0xF); /* get payload */ for (i = 0; i < 8; i += 2) { val = readw(&reg->chl[obj].data[i]); cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0; cf->data[6 - i] = (6 - i) < cf->can_dlc ? 
(val >> 8) : 0; } stats->rx_packets++; stats->rx_bytes += cf->can_dlc; netif_rx(skb); } static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) { struct bfin_can_priv *priv = netdev_priv(dev); struct bfin_can_regs __iomem *reg = priv->membase; struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; enum can_state state = priv->can.state; skb = alloc_can_err_skb(dev, &cf); if (skb == NULL) return -ENOMEM; if (isrc & RMLIS) { /* data overrun interrupt */ netdev_dbg(dev, "data overrun interrupt\n"); cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; } if (isrc & BOIS) { netdev_dbg(dev, "bus-off mode interrupt\n"); state = CAN_STATE_BUS_OFF; cf->can_id |= CAN_ERR_BUSOFF; priv->can.can_stats.bus_off++; can_bus_off(dev); } if (isrc & EPIS) { /* error passive interrupt */ netdev_dbg(dev, "error passive interrupt\n"); state = CAN_STATE_ERROR_PASSIVE; } if ((isrc & EWTIS) || (isrc & EWRIS)) { netdev_dbg(dev, "Error Warning Transmit/Receive Interrupt\n"); state = CAN_STATE_ERROR_WARNING; } if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING || state == CAN_STATE_ERROR_PASSIVE)) { u16 cec = readw(&reg->cec); u8 rxerr = cec; u8 txerr = cec >> 8; cf->can_id |= CAN_ERR_CRTL; if (state == CAN_STATE_ERROR_WARNING) { priv->can.can_stats.error_warning++; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; } else { priv->can.can_stats.error_passive++; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } } if (status) { priv->can.can_stats.bus_error++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; if (status & BEF) cf->data[2] |= CAN_ERR_PROT_BIT; else if (status & FER) cf->data[2] |= CAN_ERR_PROT_FORM; else if (status & SER) cf->data[2] |= CAN_ERR_PROT_STUFF; else cf->data[2] |= CAN_ERR_PROT_UNSPEC; } priv->can.state = state; stats->rx_packets++; stats->rx_bytes += cf->can_dlc; netif_rx(skb); return 0; } static irqreturn_t bfin_can_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct bfin_can_priv *priv = netdev_priv(dev); struct bfin_can_regs __iomem *reg = priv->membase; struct net_device_stats *stats = &dev->stats; u16 status, isrc; if ((irq == priv->tx_irq) && readw(&reg->mbtif2)) { /* transmission complete interrupt */ writew(0xFFFF, &reg->mbtif2); stats->tx_packets++; stats->tx_bytes += readw(&reg->chl[TRANSMIT_CHL].dlc); can_get_echo_skb(dev, 0); netif_wake_queue(dev); } else if ((irq == priv->rx_irq) && readw(&reg->mbrif1)) { /* receive interrupt */ isrc = readw(&reg->mbrif1); writew(0xFFFF, &reg->mbrif1); bfin_can_rx(dev, isrc); } else if ((irq == priv->err_irq) && readw(&reg->gis)) { /* error interrupt */ isrc = readw(&reg->gis); status = readw(&reg->esr); writew(0x7FF, &reg->gis); bfin_can_err(dev, isrc, status); } else { return IRQ_NONE; } return IRQ_HANDLED; } static int bfin_can_open(struct net_device *dev) { struct bfin_can_priv *priv = netdev_priv(dev); int err; /* set chip into reset mode */ bfin_can_set_reset_mode(dev); /* common open */ err = open_candev(dev); if (err) goto exit_open; /* register interrupt handler */ err = request_irq(priv->rx_irq, &bfin_can_interrupt, 0, "bfin-can-rx", dev); if (err) goto exit_rx_irq; err = request_irq(priv->tx_irq, &bfin_can_interrupt, 0, "bfin-can-tx", dev); if (err) goto exit_tx_irq; err = request_irq(priv->err_irq, &bfin_can_interrupt, 0, "bfin-can-err", dev); if (err) goto exit_err_irq; bfin_can_start(dev); netif_start_queue(dev); return 0; 
exit_err_irq: free_irq(priv->tx_irq, dev); exit_tx_irq: free_irq(priv->rx_irq, dev); exit_rx_irq: close_candev(dev); exit_open: return err; } static int bfin_can_close(struct net_device *dev) { struct bfin_can_priv *priv = netdev_priv(dev); netif_stop_queue(dev); bfin_can_set_reset_mode(dev); close_candev(dev); free_irq(priv->rx_irq, dev); free_irq(priv->tx_irq, dev); free_irq(priv->err_irq, dev); return 0; } static struct net_device *alloc_bfin_candev(void) { struct net_device *dev; struct bfin_can_priv *priv; dev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX); if (!dev) return NULL; priv = netdev_priv(dev); priv->dev = dev; priv->can.bittiming_const = &bfin_can_bittiming_const; priv->can.do_set_bittiming = bfin_can_set_bittiming; priv->can.do_set_mode = bfin_can_set_mode; priv->can.do_get_berr_counter = bfin_can_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; return dev; } static const struct net_device_ops bfin_can_netdev_ops = { .ndo_open = bfin_can_open, .ndo_stop = bfin_can_close, .ndo_start_xmit = bfin_can_start_xmit, .ndo_change_mtu = can_change_mtu, }; static int bfin_can_probe(struct platform_device *pdev) { int err; struct net_device *dev; struct bfin_can_priv *priv; struct resource *res_mem, *rx_irq, *tx_irq, *err_irq; unsigned short *pdata; pdata = dev_get_platdata(&pdev->dev); if (!pdata) { dev_err(&pdev->dev, "No platform data provided!\n"); err = -EINVAL; goto exit; } res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); rx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); tx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1); err_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 2); if (!res_mem || !rx_irq || !tx_irq || !err_irq) { err = -EINVAL; goto exit; } /* request peripheral pins */ err = peripheral_request_list(pdata, dev_name(&pdev->dev)); if (err) goto exit; dev = alloc_bfin_candev(); if (!dev) { err = -ENOMEM; goto exit_peri_pin_free; } priv = netdev_priv(dev); priv->membase = devm_ioremap_resource(&pdev->dev, res_mem); if (IS_ERR(priv->membase)) { err = PTR_ERR(priv->membase); goto exit_peri_pin_free; } priv->rx_irq = rx_irq->start; priv->tx_irq = tx_irq->start; priv->err_irq = err_irq->start; priv->pin_list = pdata; priv->can.clock.freq = get_sclk(); platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = &bfin_can_netdev_ops; bfin_can_set_reset_mode(dev); err = register_candev(dev); if (err) { dev_err(&pdev->dev, "registering failed (err=%d)\n", err); goto exit_candev_free; } dev_info(&pdev->dev, "%s device registered" "(&reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n", DRV_NAME, priv->membase, priv->rx_irq, priv->tx_irq, priv->err_irq, priv->can.clock.freq); return 0; exit_candev_free: free_candev(dev); exit_peri_pin_free: peripheral_free_list(pdata); exit: return err; } static int bfin_can_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct bfin_can_priv *priv = netdev_priv(dev); bfin_can_set_reset_mode(dev); unregister_candev(dev); peripheral_free_list(priv->pin_list); free_candev(dev); return 0; } #ifdef CONFIG_PM static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg) { struct net_device *dev = platform_get_drvdata(pdev); struct bfin_can_priv *priv = netdev_priv(dev); struct bfin_can_regs __iomem *reg = priv->membase; int timeout = BFIN_CAN_TIMEOUT; if (netif_running(dev)) { /* enter sleep mode */ writew(readw(&reg->control) | SMR, &reg->control); while 
(!(readw(&reg->intr) & SMACK)) { udelay(10); if (--timeout == 0) { netdev_err(dev, "fail to enter sleep mode\n"); BUG(); } } } return 0; } static int bfin_can_resume(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct bfin_can_priv *priv = netdev_priv(dev); struct bfin_can_regs __iomem *reg = priv->membase; if (netif_running(dev)) { /* leave sleep mode */ writew(0, &reg->intr); } return 0; } #else #define bfin_can_suspend NULL #define bfin_can_resume NULL #endif /* CONFIG_PM */ static struct platform_driver bfin_can_driver = { .probe = bfin_can_probe, .remove = bfin_can_remove, .suspend = bfin_can_suspend, .resume = bfin_can_resume, .driver = { .name = DRV_NAME, }, }; module_platform_driver(bfin_can_driver); MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver"); MODULE_ALIAS("platform:" DRV_NAME);
license: gpl-2.0

repo_name: ISTweak/android_kernel_sharp_msm7x30
path: arch/powerpc/platforms/chrp/nvram.c
copies: 888
size: 2226
content:
/* * c 2001 PPC 64 Team, IBM Corp * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * /dev/nvram driver for PPC * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <asm/uaccess.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/rtas.h> #include "chrp.h" static unsigned int nvram_size; static unsigned char nvram_buf[4]; static DEFINE_SPINLOCK(nvram_lock); static unsigned char chrp_nvram_read(int addr) { unsigned int done; unsigned long flags; unsigned char ret; if (addr >= nvram_size) { printk(KERN_DEBUG "%s: read addr %d > nvram_size %u\n", current->comm, addr, nvram_size); return 0xff; } spin_lock_irqsave(&nvram_lock, flags); if ((rtas_call(rtas_token("nvram-fetch"), 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done) ret = 0xff; else ret = nvram_buf[0]; spin_unlock_irqrestore(&nvram_lock, flags); return ret; } static void chrp_nvram_write(int addr, unsigned char val) { unsigned int done; unsigned long flags; if (addr >= nvram_size) { printk(KERN_DEBUG "%s: write addr %d > nvram_size %u\n", current->comm, addr, nvram_size); return; } spin_lock_irqsave(&nvram_lock, flags); nvram_buf[0] = val; if ((rtas_call(rtas_token("nvram-store"), 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done) printk(KERN_DEBUG "rtas IO error storing 0x%02x at %d", val, addr); spin_unlock_irqrestore(&nvram_lock, flags); } void __init chrp_nvram_init(void) { struct device_node *nvram; const unsigned int *nbytes_p; unsigned int proplen; nvram = of_find_node_by_type(NULL, "nvram"); if (nvram == NULL) return; nbytes_p = of_get_property(nvram, "#bytes", &proplen); if (nbytes_p == NULL || proplen != sizeof(unsigned int)) return; nvram_size = *nbytes_p; printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size); of_node_put(nvram); ppc_md.nvram_read_val = chrp_nvram_read; ppc_md.nvram_write_val = chrp_nvram_write; return; }
license: gpl-2.0

repo_name: Supermaster34/3.0-Kernel-Galaxy-Player-US
path: crypto/vmac.c
copies: 2936
size: 18296
content:
/* * Modified to interface to the Linux kernel * Copyright (c) 2009, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. */ /* -------------------------------------------------------------------------- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. * This implementation is herby placed in the public domain. * The authors offers no warranty. Use at your own risk. * Please send bug reports to the authors. * Last modified: 17 APR 08, 1700 PDT * ----------------------------------------------------------------------- */ #include <linux/init.h> #include <linux/types.h> #include <linux/crypto.h> #include <linux/scatterlist.h> #include <asm/byteorder.h> #include <crypto/scatterwalk.h> #include <crypto/vmac.h> #include <crypto/internal/hash.h> /* * Constants and masks */ #define UINT64_C(x) x##ULL const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */ const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */ const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ #define pe64_to_cpup le64_to_cpup /* Prefer little endian */ #ifdef __LITTLE_ENDIAN #define INDEX_HIGH 1 #define INDEX_LOW 0 #else #define INDEX_HIGH 0 #define INDEX_LOW 1 #endif /* * The following routines are used in this implementation. They are * written via macros to simulate zero-overhead call-by-reference. * * MUL64: 64x64->128-bit multiplication * PMUL64: assumes top bits cleared on inputs * ADD128: 128x128->128-bit addition */ #define ADD128(rh, rl, ih, il) \ do { \ u64 _il = (il); \ (rl) += (_il); \ if ((rl) < (_il)) \ (rh)++; \ (rh) += (ih); \ } while (0) #define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2)) #define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \ do { \ u64 _i1 = (i1), _i2 = (i2); \ u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \ rh = MUL32(_i1>>32, _i2>>32); \ rl = MUL32(_i1, _i2); \ ADD128(rh, rl, (m >> 32), (m << 32)); \ } while (0) #define MUL64(rh, rl, i1, i2) \ do { \ u64 _i1 = (i1), _i2 = (i2); \ u64 m1 = MUL32(_i1, _i2>>32); \ u64 m2 = MUL32(_i1>>32, _i2); \ rh = MUL32(_i1>>32, _i2>>32); \ rl = MUL32(_i1, _i2); \ ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \ ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \ } while (0) /* * For highest performance the L1 NH and L2 polynomial hashes should be * carefully implemented to take advantage of one's target architecture. * Here these two hash functions are defined multiple time; once for * 64-bit architectures, once for 32-bit SSE2 architectures, and once * for the rest (32-bit) architectures. * For each, nh_16 *must* be defined (works on multiples of 16 bytes). * Optionally, nh_vmac_nhbytes can be defined (for multiples of * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two * NH computations at once). 
*/ #ifdef CONFIG_64BIT #define nh_16(mp, kp, nw, rh, rl) \ do { \ int i; u64 th, tl; \ rh = rl = 0; \ for (i = 0; i < nw; i += 2) { \ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ ADD128(rh, rl, th, tl); \ } \ } while (0) #define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \ do { \ int i; u64 th, tl; \ rh1 = rl1 = rh = rl = 0; \ for (i = 0; i < nw; i += 2) { \ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ ADD128(rh, rl, th, tl); \ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ ADD128(rh1, rl1, th, tl); \ } \ } while (0) #if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */ #define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ do { \ int i; u64 th, tl; \ rh = rl = 0; \ for (i = 0; i < nw; i += 8) { \ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ ADD128(rh, rl, th, tl); \ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ ADD128(rh, rl, th, tl); \ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ ADD128(rh, rl, th, tl); \ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ ADD128(rh, rl, th, tl); \ } \ } while (0) #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \ do { \ int i; u64 th, tl; \ rh1 = rl1 = rh = rl = 0; \ for (i = 0; i < nw; i += 8) { \ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ ADD128(rh, rl, th, tl); \ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ ADD128(rh1, rl1, th, tl); \ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ ADD128(rh, rl, th, tl); \ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \ pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \ ADD128(rh1, rl1, th, tl); \ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ ADD128(rh, rl, th, tl); \ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \ pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \ ADD128(rh1, rl1, th, tl); \ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ ADD128(rh, rl, th, tl); \ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \ pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \ ADD128(rh1, rl1, th, tl); \ } \ } while (0) #endif #define poly_step(ah, al, kh, kl, mh, ml) \ do { \ u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \ /* compute ab*cd, put bd into result registers */ \ PMUL64(t3h, t3l, al, kh); \ PMUL64(t2h, t2l, ah, kl); \ PMUL64(t1h, t1l, ah, 2*kh); \ PMUL64(ah, al, al, kl); \ /* add 2 * ac to result */ \ ADD128(ah, al, t1h, t1l); \ /* add together ad + bc */ \ ADD128(t2h, t2l, t3h, t3l); \ /* now (ah,al), (t2l,2*t2h) need summing */ \ /* first add the high registers, carrying into t2h */ \ ADD128(t2h, ah, z, t2l); \ /* double t2h and add top bit of ah */ \ t2h = 2 * t2h + (ah >> 63); \ ah &= m63; \ /* now add the low registers */ \ ADD128(ah, al, mh, ml); \ ADD128(ah, al, z, t2h); \ } while (0) #else /* ! 
CONFIG_64BIT */ #ifndef nh_16 #define nh_16(mp, kp, nw, rh, rl) \ do { \ u64 t1, t2, m1, m2, t; \ int i; \ rh = rl = t = 0; \ for (i = 0; i < nw; i += 2) { \ t1 = pe64_to_cpup(mp+i) + kp[i]; \ t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \ m2 = MUL32(t1 >> 32, t2); \ m1 = MUL32(t1, t2 >> 32); \ ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \ MUL32(t1, t2)); \ rh += (u64)(u32)(m1 >> 32) \ + (u32)(m2 >> 32); \ t += (u64)(u32)m1 + (u32)m2; \ } \ ADD128(rh, rl, (t >> 32), (t << 32)); \ } while (0) #endif static void poly_step_func(u64 *ahi, u64 *alo, const u64 *kh, const u64 *kl, const u64 *mh, const u64 *ml) { #define a0 (*(((u32 *)alo)+INDEX_LOW)) #define a1 (*(((u32 *)alo)+INDEX_HIGH)) #define a2 (*(((u32 *)ahi)+INDEX_LOW)) #define a3 (*(((u32 *)ahi)+INDEX_HIGH)) #define k0 (*(((u32 *)kl)+INDEX_LOW)) #define k1 (*(((u32 *)kl)+INDEX_HIGH)) #define k2 (*(((u32 *)kh)+INDEX_LOW)) #define k3 (*(((u32 *)kh)+INDEX_HIGH)) u64 p, q, t; u32 t2; p = MUL32(a3, k3); p += p; p += *(u64 *)mh; p += MUL32(a0, k2); p += MUL32(a1, k1); p += MUL32(a2, k0); t = (u32)(p); p >>= 32; p += MUL32(a0, k3); p += MUL32(a1, k2); p += MUL32(a2, k1); p += MUL32(a3, k0); t |= ((u64)((u32)p & 0x7fffffff)) << 32; p >>= 31; p += (u64)(((u32 *)ml)[INDEX_LOW]); p += MUL32(a0, k0); q = MUL32(a1, k3); q += MUL32(a2, k2); q += MUL32(a3, k1); q += q; p += q; t2 = (u32)(p); p >>= 32; p += (u64)(((u32 *)ml)[INDEX_HIGH]); p += MUL32(a0, k1); p += MUL32(a1, k0); q = MUL32(a2, k3); q += MUL32(a3, k2); q += q; p += q; *(u64 *)(alo) = (p << 32) | t2; p >>= 32; *(u64 *)(ahi) = p + t; #undef a0 #undef a1 #undef a2 #undef a3 #undef k0 #undef k1 #undef k2 #undef k3 } #define poly_step(ah, al, kh, kl, mh, ml) \ poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml)) #endif /* end of specialized NH and poly definitions */ /* At least nh_16 is defined. 
Defined others as needed here */ #ifndef nh_16_2 #define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \ do { \ nh_16(mp, kp, nw, rh, rl); \ nh_16(mp, ((kp)+2), nw, rh2, rl2); \ } while (0) #endif #ifndef nh_vmac_nhbytes #define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ nh_16(mp, kp, nw, rh, rl) #endif #ifndef nh_vmac_nhbytes_2 #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \ do { \ nh_vmac_nhbytes(mp, kp, nw, rh, rl); \ nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \ } while (0) #endif static void vhash_abort(struct vmac_ctx *ctx) { ctx->polytmp[0] = ctx->polykey[0] ; ctx->polytmp[1] = ctx->polykey[1] ; ctx->first_block_processed = 0; } static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) { u64 rh, rl, t, z = 0; /* fully reduce (p1,p2)+(len,0) mod p127 */ t = p1 >> 63; p1 &= m63; ADD128(p1, p2, len, t); /* At this point, (p1,p2) is at most 2^127+(len<<64) */ t = (p1 > m63) + ((p1 == m63) && (p2 == m64)); ADD128(p1, p2, z, t); p1 &= m63; /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */ t = p1 + (p2 >> 32); t += (t >> 32); t += (u32)t > 0xfffffffeu; p1 += (t >> 32); p2 += (p1 << 32); /* compute (p1+k1)%p64 and (p2+k2)%p64 */ p1 += k1; p1 += (0 - (p1 < k1)) & 257; p2 += k2; p2 += (0 - (p2 < k2)) & 257; /* compute (p1+k1)*(p2+k2)%p64 */ MUL64(rh, rl, p1, p2); t = rh >> 56; ADD128(t, rl, z, rh); rh <<= 8; ADD128(t, rl, z, rh); t += t << 8; rl += t; rl += (0 - (rl < t)) & 257; rl += (0 - (rl > p64-1)) & 257; return rl; } static void vhash_update(const unsigned char *m, unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */ struct vmac_ctx *ctx) { u64 rh, rl, *mptr; const u64 *kptr = (u64 *)ctx->nhkey; int i; u64 ch, cl; u64 pkh = ctx->polykey[0]; u64 pkl = ctx->polykey[1]; mptr = (u64 *)m; i = mbytes / VMAC_NHBYTES; /* Must be non-zero */ ch = ctx->polytmp[0]; cl = ctx->polytmp[1]; if (!ctx->first_block_processed) { ctx->first_block_processed = 1; nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); rh &= m62; ADD128(ch, cl, rh, rl); mptr += (VMAC_NHBYTES/sizeof(u64)); i--; } while (i--) { nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); rh &= m62; poly_step(ch, cl, pkh, pkl, rh, rl); mptr += (VMAC_NHBYTES/sizeof(u64)); } ctx->polytmp[0] = ch; ctx->polytmp[1] = cl; } static u64 vhash(unsigned char m[], unsigned int mbytes, u64 *tagl, struct vmac_ctx *ctx) { u64 rh, rl, *mptr; const u64 *kptr = (u64 *)ctx->nhkey; int i, remaining; u64 ch, cl; u64 pkh = ctx->polykey[0]; u64 pkl = ctx->polykey[1]; mptr = (u64 *)m; i = mbytes / VMAC_NHBYTES; remaining = mbytes % VMAC_NHBYTES; if (ctx->first_block_processed) { ch = ctx->polytmp[0]; cl = ctx->polytmp[1]; } else if (i) { nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl); ch &= m62; ADD128(ch, cl, pkh, pkl); mptr += (VMAC_NHBYTES/sizeof(u64)); i--; } else if (remaining) { nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl); ch &= m62; ADD128(ch, cl, pkh, pkl); mptr += (VMAC_NHBYTES/sizeof(u64)); goto do_l3; } else {/* Empty String */ ch = pkh; cl = pkl; goto do_l3; } while (i--) { nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); rh &= m62; poly_step(ch, cl, pkh, pkl, rh, rl); mptr += (VMAC_NHBYTES/sizeof(u64)); } if (remaining) { nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl); rh &= m62; poly_step(ch, cl, pkh, pkl, rh, rl); } do_l3: vhash_abort(ctx); remaining *= 8; return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining); } static u64 vmac(unsigned char m[], unsigned int mbytes, unsigned char n[16], u64 *tagl, struct vmac_ctx_t *ctx) { u64 *in_n, *out_p; u64 p, h; int i; in_n = ctx->__vmac_ctx.cached_nonce; out_p = 
ctx->__vmac_ctx.cached_aes; i = n[15] & 1; if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) { in_n[0] = *(u64 *)(n); in_n[1] = *(u64 *)(n+8); ((unsigned char *)in_n)[15] &= 0xFE; crypto_cipher_encrypt_one(ctx->child, (unsigned char *)out_p, (unsigned char *)in_n); ((unsigned char *)in_n)[15] |= (unsigned char)(1-i); } p = be64_to_cpup(out_p + i); h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); return le64_to_cpu(p + h); } static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) { u64 in[2] = {0}, out[2]; unsigned i; int err = 0; err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN); if (err) return err; /* Fill nh key */ ((unsigned char *)in)[0] = 0x80; for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) { crypto_cipher_encrypt_one(ctx->child, (unsigned char *)out, (unsigned char *)in); ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out); ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1); ((unsigned char *)in)[15] += 1; } /* Fill poly key */ ((unsigned char *)in)[0] = 0xC0; in[1] = 0; for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) { crypto_cipher_encrypt_one(ctx->child, (unsigned char *)out, (unsigned char *)in); ctx->__vmac_ctx.polytmp[i] = ctx->__vmac_ctx.polykey[i] = be64_to_cpup(out) & mpoly; ctx->__vmac_ctx.polytmp[i+1] = ctx->__vmac_ctx.polykey[i+1] = be64_to_cpup(out+1) & mpoly; ((unsigned char *)in)[15] += 1; } /* Fill ip key */ ((unsigned char *)in)[0] = 0xE0; in[1] = 0; for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) { do { crypto_cipher_encrypt_one(ctx->child, (unsigned char *)out, (unsigned char *)in); ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out); ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1); ((unsigned char *)in)[15] += 1; } while (ctx->__vmac_ctx.l3key[i] >= p64 || ctx->__vmac_ctx.l3key[i+1] >= p64); } /* Invalidate nonce/aes cache and reset other elements */ ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */ ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */ ctx->__vmac_ctx.first_block_processed = 0; return err; } static int vmac_setkey(struct crypto_shash *parent, const u8 *key, unsigned int keylen) { struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); if (keylen != VMAC_KEY_LEN) { crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } return vmac_set_key((u8 *)key, ctx); } static int vmac_init(struct shash_desc *pdesc) { return 0; } static int vmac_update(struct shash_desc *pdesc, const u8 *p, unsigned int len) { struct crypto_shash *parent = pdesc->tfm; struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); vhash_update(p, len, &ctx->__vmac_ctx); return 0; } static int vmac_final(struct shash_desc *pdesc, u8 *out) { struct crypto_shash *parent = pdesc->tfm; struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); vmac_t mac; u8 nonce[16] = {}; mac = vmac(NULL, 0, nonce, NULL, ctx); memcpy(out, &mac, sizeof(vmac_t)); memset(&mac, 0, sizeof(vmac_t)); memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); return 0; } static int vmac_init_tfm(struct crypto_tfm *tfm) { struct crypto_cipher *cipher; struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_spawn *spawn = crypto_instance_ctx(inst); struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; return 0; } static void vmac_exit_tfm(struct crypto_tfm *tfm) { struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); crypto_free_cipher(ctx->child); } static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) { 
struct shash_instance *inst; struct crypto_alg *alg; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); if (err) return err; alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return PTR_ERR(alg); inst = shash_alloc_instance("vmac", alg); err = PTR_ERR(inst); if (IS_ERR(inst)) goto out_put_alg; err = crypto_init_spawn(shash_instance_ctx(inst), alg, shash_crypto_instance(inst), CRYPTO_ALG_TYPE_MASK); if (err) goto out_free_inst; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.base.cra_alignmask = alg->cra_alignmask; inst->alg.digestsize = sizeof(vmac_t); inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t); inst->alg.base.cra_init = vmac_init_tfm; inst->alg.base.cra_exit = vmac_exit_tfm; inst->alg.init = vmac_init; inst->alg.update = vmac_update; inst->alg.final = vmac_final; inst->alg.setkey = vmac_setkey; err = shash_register_instance(tmpl, inst); if (err) { out_free_inst: shash_free_instance(shash_crypto_instance(inst)); } out_put_alg: crypto_mod_put(alg); return err; } static struct crypto_template vmac_tmpl = { .name = "vmac", .create = vmac_create, .free = shash_free_instance, .module = THIS_MODULE, }; static int __init vmac_module_init(void) { return crypto_register_template(&vmac_tmpl); } static void __exit vmac_module_exit(void) { crypto_unregister_template(&vmac_tmpl); } module_init(vmac_module_init); module_exit(vmac_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("VMAC hash algorithm");
license: gpl-2.0

repo_name: MoKee/android_kernel_htc_endeavoru
path: drivers/of/of_pci_irq.c
copies: 2936
size: 2810
content:
#include <linux/kernel.h> #include <linux/of_pci.h> #include <linux/of_irq.h> #include <asm/prom.h> /** * of_irq_map_pci - Resolve the interrupt for a PCI device * @pdev: the device whose interrupt is to be resolved * @out_irq: structure of_irq filled by this function * * This function resolves the PCI interrupt for a given PCI device. If a * device-node exists for a given pci_dev, it will use normal OF tree * walking. If not, it will implement standard swizzling and walk up the * PCI tree until an device-node is found, at which point it will finish * resolving using the OF tree walking. */ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) { struct device_node *dn, *ppnode; struct pci_dev *ppdev; u32 lspec; __be32 lspec_be; __be32 laddr[3]; u8 pin; int rc; /* Check if we have a device node, if yes, fallback to standard * device tree parsing */ dn = pci_device_to_OF_node(pdev); if (dn) { rc = of_irq_map_one(dn, 0, out_irq); if (!rc) return rc; } /* Ok, we don't, time to have fun. Let's start by building up an * interrupt spec. we assume #interrupt-cells is 1, which is standard * for PCI. If you do different, then don't use that routine. */ rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); if (rc != 0) return rc; /* No pin, exit */ if (pin == 0) return -ENODEV; /* Now we walk up the PCI tree */ lspec = pin; for (;;) { /* Get the pci_dev of our parent */ ppdev = pdev->bus->self; /* Ouch, it's a host bridge... */ if (ppdev == NULL) { ppnode = pci_bus_to_OF_node(pdev->bus); /* No node for host bridge ? give up */ if (ppnode == NULL) return -EINVAL; } else { /* We found a P2P bridge, check if it has a node */ ppnode = pci_device_to_OF_node(ppdev); } /* Ok, we have found a parent with a device-node, hand over to * the OF parsing code. * We build a unit address from the linux device to be used for * resolution. Note that we use the linux bus number which may * not match your firmware bus numbering. * Fortunately, in most cases, interrupt-map-mask doesn't * include the bus number as part of the matching. * You should still be careful about that though if you intend * to rely on this function (you ship a firmware that doesn't * create device nodes for all PCI devices). */ if (ppnode) break; /* We can only get here if we hit a P2P bridge with no node, * let's do standard swizzling and try again */ lspec = pci_swizzle_interrupt_pin(pdev, lspec); pdev = ppdev; } lspec_be = cpu_to_be32(lspec); laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8)); laddr[1] = laddr[2] = cpu_to_be32(0); return of_irq_map_raw(ppnode, &lspec_be, 1, laddr, out_irq); } EXPORT_SYMBOL_GPL(of_irq_map_pci);
license: gpl-2.0

repo_name: BobZmotion/android_kernel_lge_ms769
path: drivers/video/via/via_i2c.c
copies: 3192
size: 7027
content:
/* * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/module.h> #include <linux/via-core.h> #include <linux/via_i2c.h> /* * There can only be one set of these, so there's no point in having * them be dynamically allocated... */ #define VIAFB_NUM_I2C 5 static struct via_i2c_stuff via_i2c_par[VIAFB_NUM_I2C]; static struct viafb_dev *i2c_vdev; /* Passed in from core */ static void via_i2c_setscl(void *data, int state) { u8 val; struct via_port_cfg *adap_data = data; unsigned long flags; spin_lock_irqsave(&i2c_vdev->reg_lock, flags); val = via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0xF0; if (state) val |= 0x20; else val &= ~0x20; switch (adap_data->type) { case VIA_PORT_I2C: val |= 0x01; break; case VIA_PORT_GPIO: val |= 0x80; break; default: printk(KERN_ERR "viafb_i2c: specify wrong i2c type.\n"); } via_write_reg(adap_data->io_port, adap_data->ioport_index, val); spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags); } static int via_i2c_getscl(void *data) { struct via_port_cfg *adap_data = data; unsigned long flags; int ret = 0; spin_lock_irqsave(&i2c_vdev->reg_lock, flags); if (via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0x08) ret = 1; spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags); return ret; } static int via_i2c_getsda(void *data) { struct via_port_cfg *adap_data = data; unsigned long flags; int ret = 0; spin_lock_irqsave(&i2c_vdev->reg_lock, flags); if (via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0x04) ret = 1; spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags); return ret; } static void via_i2c_setsda(void *data, int state) { u8 val; struct via_port_cfg *adap_data = data; unsigned long flags; spin_lock_irqsave(&i2c_vdev->reg_lock, flags); val = via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0xF0; if (state) val |= 0x10; else val &= ~0x10; switch (adap_data->type) { case VIA_PORT_I2C: val |= 0x01; break; case VIA_PORT_GPIO: val |= 0x40; break; default: printk(KERN_ERR "viafb_i2c: specify wrong i2c type.\n"); } via_write_reg(adap_data->io_port, adap_data->ioport_index, val); spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags); } int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata) { int ret; u8 mm1[] = {0x00}; struct i2c_msg msgs[2]; if (!via_i2c_par[adap].is_active) return -ENODEV; *pdata = 0; msgs[0].flags = 0; msgs[1].flags = I2C_M_RD; msgs[0].addr = msgs[1].addr = slave_addr / 2; mm1[0] = index; msgs[0].len = 1; msgs[1].len = 1; msgs[0].buf = mm1; msgs[1].buf = pdata; ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2); if (ret == 2) ret = 0; else if (ret >= 0) ret = -EIO; return ret; } int viafb_i2c_writebyte(u8 adap, u8 slave_addr, 
u8 index, u8 data) { int ret; u8 msg[2] = { index, data }; struct i2c_msg msgs; if (!via_i2c_par[adap].is_active) return -ENODEV; msgs.flags = 0; msgs.addr = slave_addr / 2; msgs.len = 2; msgs.buf = msg; ret = i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1); if (ret == 1) ret = 0; else if (ret >= 0) ret = -EIO; return ret; } int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len) { int ret; u8 mm1[] = {0x00}; struct i2c_msg msgs[2]; if (!via_i2c_par[adap].is_active) return -ENODEV; msgs[0].flags = 0; msgs[1].flags = I2C_M_RD; msgs[0].addr = msgs[1].addr = slave_addr / 2; mm1[0] = index; msgs[0].len = 1; msgs[1].len = buff_len; msgs[0].buf = mm1; msgs[1].buf = buff; ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2); if (ret == 2) ret = 0; else if (ret >= 0) ret = -EIO; return ret; } /* * Allow other viafb subdevices to look up a specific adapter * by port name. */ struct i2c_adapter *viafb_find_i2c_adapter(enum viafb_i2c_adap which) { struct via_i2c_stuff *stuff = &via_i2c_par[which]; return &stuff->adapter; } EXPORT_SYMBOL_GPL(viafb_find_i2c_adapter); static int create_i2c_bus(struct i2c_adapter *adapter, struct i2c_algo_bit_data *algo, struct via_port_cfg *adap_cfg, struct pci_dev *pdev) { algo->setsda = via_i2c_setsda; algo->setscl = via_i2c_setscl; algo->getsda = via_i2c_getsda; algo->getscl = via_i2c_getscl; algo->udelay = 10; algo->timeout = 2; algo->data = adap_cfg; sprintf(adapter->name, "viafb i2c io_port idx 0x%02x", adap_cfg->ioport_index); adapter->owner = THIS_MODULE; adapter->class = I2C_CLASS_DDC; adapter->algo_data = algo; if (pdev) adapter->dev.parent = &pdev->dev; else adapter->dev.parent = NULL; /* i2c_set_adapdata(adapter, adap_cfg); */ /* Raise SCL and SDA */ via_i2c_setsda(adap_cfg, 1); via_i2c_setscl(adap_cfg, 1); udelay(20); return i2c_bit_add_bus(adapter); } static int viafb_i2c_probe(struct platform_device *platdev) { int i, ret; struct via_port_cfg *configs; i2c_vdev = platdev->dev.platform_data; configs = i2c_vdev->port_cfg; for (i = 0; i < VIAFB_NUM_PORTS; i++) { struct via_port_cfg *adap_cfg = configs++; struct via_i2c_stuff *i2c_stuff = &via_i2c_par[i]; i2c_stuff->is_active = 0; if (adap_cfg->type == 0 || adap_cfg->mode != VIA_MODE_I2C) continue; ret = create_i2c_bus(&i2c_stuff->adapter, &i2c_stuff->algo, adap_cfg, NULL); /* FIXME: PCIDEV */ if (ret < 0) { printk(KERN_ERR "viafb: cannot create i2c bus %u:%d\n", i, ret); continue; /* Still try to make the rest */ } i2c_stuff->is_active = 1; } return 0; } static int viafb_i2c_remove(struct platform_device *platdev) { int i; for (i = 0; i < VIAFB_NUM_PORTS; i++) { struct via_i2c_stuff *i2c_stuff = &via_i2c_par[i]; /* * Only remove those entries in the array that we've * actually used (and thus initialized algo_data) */ if (i2c_stuff->is_active) i2c_del_adapter(&i2c_stuff->adapter); } return 0; } static struct platform_driver via_i2c_driver = { .driver = { .name = "viafb-i2c", }, .probe = viafb_i2c_probe, .remove = viafb_i2c_remove, }; int viafb_i2c_init(void) { return platform_driver_register(&via_i2c_driver); } void viafb_i2c_exit(void) { platform_driver_unregister(&via_i2c_driver); }
license: gpl-2.0

repo_name: UISS-Dev-Team/android_kernel_huawei_msm8x25
path: drivers/staging/panel/panel.c
copies: 4472
size: 61401
content:
/* * Front panel driver for Linux * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * This code drives an LCD module (/dev/lcd), and a keypad (/dev/keypad) * connected to a parallel printer port. * * The LCD module may either be an HD44780-like 8-bit parallel LCD, or a 1-bit * serial module compatible with Samsung's KS0074. The pins may be connected in * any combination, everything is programmable. * * The keypad consists in a matrix of push buttons connecting input pins to * data output pins or to the ground. The combinations have to be hard-coded * in the driver, though several profiles exist and adding new ones is easy. * * Several profiles are provided for commonly found LCD+keypad modules on the * market, such as those found in Nexcom's appliances. * * FIXME: * - the initialization/deinitialization process is very dirty and should * be rewritten. It may even be buggy. * * TODO: * - document 24 keys keyboard (3 rows of 8 cols, 32 diodes + 2 inputs) * - make the LCD a part of a virtual screen of Vx*Vy * - make the inputs list smp-safe * - change the keyboard to a double mapping : signals -> key_id -> values * so that applications can change values without knowing signals * */ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/ctype.h> #include <linux/parport.h> #include <linux/list.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <generated/utsrelease.h> #include <linux/io.h> #include <linux/uaccess.h> #define LCD_MINOR 156 #define KEYPAD_MINOR 185 #define PANEL_VERSION "0.9.5" #define LCD_MAXBYTES 256 /* max burst write */ #define KEYPAD_BUFFER 64 /* poll the keyboard this every second */ #define INPUT_POLL_TIME (HZ/50) /* a key starts to repeat after this times INPUT_POLL_TIME */ #define KEYPAD_REP_START (10) /* a key repeats this times INPUT_POLL_TIME */ #define KEYPAD_REP_DELAY (2) /* keep the light on this times INPUT_POLL_TIME for each flash */ #define FLASH_LIGHT_TEMPO (200) /* converts an r_str() input to an active high, bits string : 000BAOSE */ #define PNL_PINPUT(a) ((((unsigned char)(a)) ^ 0x7F) >> 3) #define PNL_PBUSY 0x80 /* inverted input, active low */ #define PNL_PACK 0x40 /* direct input, active low */ #define PNL_POUTPA 0x20 /* direct input, active high */ #define PNL_PSELECD 0x10 /* direct input, active high */ #define PNL_PERRORP 0x08 /* direct input, active low */ #define PNL_PBIDIR 0x20 /* bi-directional ports */ /* high to read data in or-ed with data out */ #define PNL_PINTEN 0x10 #define PNL_PSELECP 0x08 /* inverted output, active low */ #define PNL_PINITP 0x04 /* direct output, active low */ #define PNL_PAUTOLF 0x02 /* inverted output, active low */ #define PNL_PSTROBE 0x01 /* inverted output */ #define PNL_PD0 0x01 #define PNL_PD1 0x02 #define PNL_PD2 0x04 #define PNL_PD3 0x08 #define PNL_PD4 0x10 #define PNL_PD5 0x20 #define PNL_PD6 0x40 #define PNL_PD7 0x80 #define PIN_NONE 0 #define PIN_STROBE 1 #define PIN_D0 2 #define PIN_D1 3 #define PIN_D2 4 #define PIN_D3 5 
#define PIN_D4 6 #define PIN_D5 7 #define PIN_D6 8 #define PIN_D7 9 #define PIN_AUTOLF 14 #define PIN_INITP 16 #define PIN_SELECP 17 #define PIN_NOT_SET 127 #define LCD_FLAG_S 0x0001 #define LCD_FLAG_ID 0x0002 #define LCD_FLAG_B 0x0004 /* blink on */ #define LCD_FLAG_C 0x0008 /* cursor on */ #define LCD_FLAG_D 0x0010 /* display on */ #define LCD_FLAG_F 0x0020 /* large font mode */ #define LCD_FLAG_N 0x0040 /* 2-rows mode */ #define LCD_FLAG_L 0x0080 /* backlight enabled */ #define LCD_ESCAPE_LEN 24 /* max chars for LCD escape command */ #define LCD_ESCAPE_CHAR 27 /* use char 27 for escape command */ /* macros to simplify use of the parallel port */ #define r_ctr(x) (parport_read_control((x)->port)) #define r_dtr(x) (parport_read_data((x)->port)) #define r_str(x) (parport_read_status((x)->port)) #define w_ctr(x, y) do { parport_write_control((x)->port, (y)); } while (0) #define w_dtr(x, y) do { parport_write_data((x)->port, (y)); } while (0) /* this defines which bits are to be used and which ones to be ignored */ /* logical or of the output bits involved in the scan matrix */ static __u8 scan_mask_o; /* logical or of the input bits involved in the scan matrix */ static __u8 scan_mask_i; typedef __u64 pmask_t; enum input_type { INPUT_TYPE_STD, INPUT_TYPE_KBD, }; enum input_state { INPUT_ST_LOW, INPUT_ST_RISING, INPUT_ST_HIGH, INPUT_ST_FALLING, }; struct logical_input { struct list_head list; pmask_t mask; pmask_t value; enum input_type type; enum input_state state; __u8 rise_time, fall_time; __u8 rise_timer, fall_timer, high_timer; union { struct { /* valid when type == INPUT_TYPE_STD */ void (*press_fct) (int); void (*release_fct) (int); int press_data; int release_data; } std; struct { /* valid when type == INPUT_TYPE_KBD */ /* strings can be non null-terminated */ char press_str[sizeof(void *) + sizeof(int)]; char repeat_str[sizeof(void *) + sizeof(int)]; char release_str[sizeof(void *) + sizeof(int)]; } kbd; } u; }; LIST_HEAD(logical_inputs); /* list of all defined logical inputs */ /* physical contacts history * Physical contacts are a 45 bits string of 9 groups of 5 bits each. * The 8 lower groups correspond to output bits 0 to 7, and the 9th group * corresponds to the ground. * Within each group, bits are stored in the same order as read on the port : * BAPSE (busy=4, ack=3, paper empty=2, select=1, error=0). * So, each __u64 (or pmask_t) is represented like this : * 0000000000000000000BAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSE * <-----unused------><gnd><d07><d06><d05><d04><d03><d02><d01><d00> */ /* what has just been read from the I/O ports */ static pmask_t phys_read; /* previous phys_read */ static pmask_t phys_read_prev; /* stabilized phys_read (phys_read|phys_read_prev) */ static pmask_t phys_curr; /* previous phys_curr */ static pmask_t phys_prev; /* 0 means that at least one logical signal needs be computed */ static char inputs_stable; /* these variables are specific to the keypad */ static char keypad_buffer[KEYPAD_BUFFER]; static int keypad_buflen; static int keypad_start; static char keypressed; static wait_queue_head_t keypad_read_wait; /* lcd-specific variables */ /* contains the LCD config state */ static unsigned long int lcd_flags; /* contains the LCD X offset */ static unsigned long int lcd_addr_x; /* contains the LCD Y offset */ static unsigned long int lcd_addr_y; /* current escape sequence, 0 terminated */ static char lcd_escape[LCD_ESCAPE_LEN + 1]; /* not in escape state. 
>=0 = escape cmd len */ static int lcd_escape_len = -1; /* * Bit masks to convert LCD signals to parallel port outputs. * _d_ are values for data port, _c_ are for control port. * [0] = signal OFF, [1] = signal ON, [2] = mask */ #define BIT_CLR 0 #define BIT_SET 1 #define BIT_MSK 2 #define BIT_STATES 3 /* * one entry for each bit on the LCD */ #define LCD_BIT_E 0 #define LCD_BIT_RS 1 #define LCD_BIT_RW 2 #define LCD_BIT_BL 3 #define LCD_BIT_CL 4 #define LCD_BIT_DA 5 #define LCD_BITS 6 /* * each bit can be either connected to a DATA or CTRL port */ #define LCD_PORT_C 0 #define LCD_PORT_D 1 #define LCD_PORTS 2 static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES]; /* * LCD protocols */ #define LCD_PROTO_PARALLEL 0 #define LCD_PROTO_SERIAL 1 #define LCD_PROTO_TI_DA8XX_LCD 2 /* * LCD character sets */ #define LCD_CHARSET_NORMAL 0 #define LCD_CHARSET_KS0074 1 /* * LCD types */ #define LCD_TYPE_NONE 0 #define LCD_TYPE_OLD 1 #define LCD_TYPE_KS0074 2 #define LCD_TYPE_HANTRONIX 3 #define LCD_TYPE_NEXCOM 4 #define LCD_TYPE_CUSTOM 5 /* * keypad types */ #define KEYPAD_TYPE_NONE 0 #define KEYPAD_TYPE_OLD 1 #define KEYPAD_TYPE_NEW 2 #define KEYPAD_TYPE_NEXCOM 3 /* * panel profiles */ #define PANEL_PROFILE_CUSTOM 0 #define PANEL_PROFILE_OLD 1 #define PANEL_PROFILE_NEW 2 #define PANEL_PROFILE_HANTRONIX 3 #define PANEL_PROFILE_NEXCOM 4 #define PANEL_PROFILE_LARGE 5 /* * Construct custom config from the kernel's configuration */ #define DEFAULT_PROFILE PANEL_PROFILE_LARGE #define DEFAULT_PARPORT 0 #define DEFAULT_LCD LCD_TYPE_OLD #define DEFAULT_KEYPAD KEYPAD_TYPE_OLD #define DEFAULT_LCD_WIDTH 40 #define DEFAULT_LCD_BWIDTH 40 #define DEFAULT_LCD_HWIDTH 64 #define DEFAULT_LCD_HEIGHT 2 #define DEFAULT_LCD_PROTO LCD_PROTO_PARALLEL #define DEFAULT_LCD_PIN_E PIN_AUTOLF #define DEFAULT_LCD_PIN_RS PIN_SELECP #define DEFAULT_LCD_PIN_RW PIN_INITP #define DEFAULT_LCD_PIN_SCL PIN_STROBE #define DEFAULT_LCD_PIN_SDA PIN_D0 #define DEFAULT_LCD_PIN_BL PIN_NOT_SET #define DEFAULT_LCD_CHARSET LCD_CHARSET_NORMAL #ifdef CONFIG_PANEL_PROFILE #undef DEFAULT_PROFILE #define DEFAULT_PROFILE CONFIG_PANEL_PROFILE #endif #ifdef CONFIG_PANEL_PARPORT #undef DEFAULT_PARPORT #define DEFAULT_PARPORT CONFIG_PANEL_PARPORT #endif #if DEFAULT_PROFILE == 0 /* custom */ #ifdef CONFIG_PANEL_KEYPAD #undef DEFAULT_KEYPAD #define DEFAULT_KEYPAD CONFIG_PANEL_KEYPAD #endif #ifdef CONFIG_PANEL_LCD #undef DEFAULT_LCD #define DEFAULT_LCD CONFIG_PANEL_LCD #endif #ifdef CONFIG_PANEL_LCD_WIDTH #undef DEFAULT_LCD_WIDTH #define DEFAULT_LCD_WIDTH CONFIG_PANEL_LCD_WIDTH #endif #ifdef CONFIG_PANEL_LCD_BWIDTH #undef DEFAULT_LCD_BWIDTH #define DEFAULT_LCD_BWIDTH CONFIG_PANEL_LCD_BWIDTH #endif #ifdef CONFIG_PANEL_LCD_HWIDTH #undef DEFAULT_LCD_HWIDTH #define DEFAULT_LCD_HWIDTH CONFIG_PANEL_LCD_HWIDTH #endif #ifdef CONFIG_PANEL_LCD_HEIGHT #undef DEFAULT_LCD_HEIGHT #define DEFAULT_LCD_HEIGHT CONFIG_PANEL_LCD_HEIGHT #endif #ifdef CONFIG_PANEL_LCD_PROTO #undef DEFAULT_LCD_PROTO #define DEFAULT_LCD_PROTO CONFIG_PANEL_LCD_PROTO #endif #ifdef CONFIG_PANEL_LCD_PIN_E #undef DEFAULT_LCD_PIN_E #define DEFAULT_LCD_PIN_E CONFIG_PANEL_LCD_PIN_E #endif #ifdef CONFIG_PANEL_LCD_PIN_RS #undef DEFAULT_LCD_PIN_RS #define DEFAULT_LCD_PIN_RS CONFIG_PANEL_LCD_PIN_RS #endif #ifdef CONFIG_PANEL_LCD_PIN_RW #undef DEFAULT_LCD_PIN_RW #define DEFAULT_LCD_PIN_RW CONFIG_PANEL_LCD_PIN_RW #endif #ifdef CONFIG_PANEL_LCD_PIN_SCL #undef DEFAULT_LCD_PIN_SCL #define DEFAULT_LCD_PIN_SCL CONFIG_PANEL_LCD_PIN_SCL #endif #ifdef CONFIG_PANEL_LCD_PIN_SDA #undef DEFAULT_LCD_PIN_SDA #define 
DEFAULT_LCD_PIN_SDA CONFIG_PANEL_LCD_PIN_SDA #endif #ifdef CONFIG_PANEL_LCD_PIN_BL #undef DEFAULT_LCD_PIN_BL #define DEFAULT_LCD_PIN_BL CONFIG_PANEL_LCD_PIN_BL #endif #ifdef CONFIG_PANEL_LCD_CHARSET #undef DEFAULT_LCD_CHARSET #define DEFAULT_LCD_CHARSET CONFIG_PANEL_LCD_CHARSET #endif #endif /* DEFAULT_PROFILE == 0 */ /* global variables */ static int keypad_open_cnt; /* #times opened */ static int lcd_open_cnt; /* #times opened */ static struct pardevice *pprt; static int lcd_initialized; static int keypad_initialized; static int light_tempo; static char lcd_must_clear; static char lcd_left_shift; static char init_in_progress; static void (*lcd_write_cmd) (int); static void (*lcd_write_data) (int); static void (*lcd_clear_fast) (void); static DEFINE_SPINLOCK(pprt_lock); static struct timer_list scan_timer; MODULE_DESCRIPTION("Generic parallel port LCD/Keypad driver"); static int parport = -1; module_param(parport, int, 0000); MODULE_PARM_DESC(parport, "Parallel port index (0=lpt1, 1=lpt2, ...)"); static int lcd_height = -1; module_param(lcd_height, int, 0000); MODULE_PARM_DESC(lcd_height, "Number of lines on the LCD"); static int lcd_width = -1; module_param(lcd_width, int, 0000); MODULE_PARM_DESC(lcd_width, "Number of columns on the LCD"); static int lcd_bwidth = -1; /* internal buffer width (usually 40) */ module_param(lcd_bwidth, int, 0000); MODULE_PARM_DESC(lcd_bwidth, "Internal LCD line width (40)"); static int lcd_hwidth = -1; /* hardware buffer width (usually 64) */ module_param(lcd_hwidth, int, 0000); MODULE_PARM_DESC(lcd_hwidth, "LCD line hardware address (64)"); static int lcd_enabled = -1; module_param(lcd_enabled, int, 0000); MODULE_PARM_DESC(lcd_enabled, "Deprecated option, use lcd_type instead"); static int keypad_enabled = -1; module_param(keypad_enabled, int, 0000); MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead"); static int lcd_type = -1; module_param(lcd_type, int, 0000); MODULE_PARM_DESC(lcd_type, "LCD type: 0=none, 1=old //, 2=serial ks0074, " "3=hantronix //, 4=nexcom //, 5=compiled-in"); static int lcd_proto = -1; module_param(lcd_proto, int, 0000); MODULE_PARM_DESC(lcd_proto, "LCD communication: 0=parallel (//), 1=serial," "2=TI LCD Interface"); static int lcd_charset = -1; module_param(lcd_charset, int, 0000); MODULE_PARM_DESC(lcd_charset, "LCD character set: 0=standard, 1=KS0074"); static int keypad_type = -1; module_param(keypad_type, int, 0000); MODULE_PARM_DESC(keypad_type, "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, " "3=nexcom 4 keys"); static int profile = DEFAULT_PROFILE; module_param(profile, int, 0000); MODULE_PARM_DESC(profile, "1=16x2 old kp; 2=serial 16x2, new kp; 3=16x2 hantronix; " "4=16x2 nexcom; default=40x2, old kp"); /* * These are the parallel port pins the LCD control signals are connected to. * Set this to 0 if the signal is not used. Set it to its opposite value * (negative) if the signal is negated. -MAXINT is used to indicate that the * pin has not been explicitly specified. * * WARNING! no check will be performed about collisions with keypad ! 
*/ static int lcd_e_pin = PIN_NOT_SET; module_param(lcd_e_pin, int, 0000); MODULE_PARM_DESC(lcd_e_pin, "# of the // port pin connected to LCD 'E' signal, " "with polarity (-17..17)"); static int lcd_rs_pin = PIN_NOT_SET; module_param(lcd_rs_pin, int, 0000); MODULE_PARM_DESC(lcd_rs_pin, "# of the // port pin connected to LCD 'RS' signal, " "with polarity (-17..17)"); static int lcd_rw_pin = PIN_NOT_SET; module_param(lcd_rw_pin, int, 0000); MODULE_PARM_DESC(lcd_rw_pin, "# of the // port pin connected to LCD 'RW' signal, " "with polarity (-17..17)"); static int lcd_bl_pin = PIN_NOT_SET; module_param(lcd_bl_pin, int, 0000); MODULE_PARM_DESC(lcd_bl_pin, "# of the // port pin connected to LCD backlight, " "with polarity (-17..17)"); static int lcd_da_pin = PIN_NOT_SET; module_param(lcd_da_pin, int, 0000); MODULE_PARM_DESC(lcd_da_pin, "# of the // port pin connected to serial LCD 'SDA' " "signal, with polarity (-17..17)"); static int lcd_cl_pin = PIN_NOT_SET; module_param(lcd_cl_pin, int, 0000); MODULE_PARM_DESC(lcd_cl_pin, "# of the // port pin connected to serial LCD 'SCL' " "signal, with polarity (-17..17)"); static unsigned char *lcd_char_conv; /* for some LCD drivers (ks0074) we need a charset conversion table. */ static unsigned char lcd_char_conv_ks0074[256] = { /* 0|8 1|9 2|A 3|B 4|C 5|D 6|E 7|F */ /* 0x00 */ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x08 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x10 */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x18 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x20 */ 0x20, 0x21, 0x22, 0x23, 0xa2, 0x25, 0x26, 0x27, /* 0x28 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x30 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x38 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x40 */ 0xa0, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x48 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x50 */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x58 */ 0x58, 0x59, 0x5a, 0xfa, 0xfb, 0xfc, 0x1d, 0xc4, /* 0x60 */ 0x96, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x68 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x70 */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x78 */ 0x78, 0x79, 0x7a, 0xfd, 0xfe, 0xff, 0xce, 0x20, /* 0x80 */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x88 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x90 */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x98 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0xA0 */ 0x20, 0x40, 0xb1, 0xa1, 0x24, 0xa3, 0xfe, 0x5f, /* 0xA8 */ 0x22, 0xc8, 0x61, 0x14, 0x97, 0x2d, 0xad, 0x96, /* 0xB0 */ 0x80, 0x8c, 0x82, 0x83, 0x27, 0x8f, 0x86, 0xdd, /* 0xB8 */ 0x2c, 0x81, 0x6f, 0x15, 0x8b, 0x8a, 0x84, 0x60, /* 0xC0 */ 0xe2, 0xe2, 0xe2, 0x5b, 0x5b, 0xae, 0xbc, 0xa9, /* 0xC8 */ 0xc5, 0xbf, 0xc6, 0xf1, 0xe3, 0xe3, 0xe3, 0xe3, /* 0xD0 */ 0x44, 0x5d, 0xa8, 0xe4, 0xec, 0xec, 0x5c, 0x78, /* 0xD8 */ 0xab, 0xa6, 0xe5, 0x5e, 0x5e, 0xe6, 0xaa, 0xbe, /* 0xE0 */ 0x7f, 0xe7, 0xaf, 0x7b, 0x7b, 0xaf, 0xbd, 0xc8, /* 0xE8 */ 0xa4, 0xa5, 0xc7, 0xf6, 0xa7, 0xe8, 0x69, 0x69, /* 0xF0 */ 0xed, 0x7d, 0xa8, 0xe4, 0xec, 0x5c, 0x5c, 0x25, /* 0xF8 */ 0xac, 0xa6, 0xea, 0xef, 0x7e, 0xeb, 0xb2, 0x79, }; char old_keypad_profile[][4][9] = { {"S0", "Left\n", "Left\n", ""}, {"S1", "Down\n", "Down\n", ""}, {"S2", "Up\n", "Up\n", ""}, {"S3", "Right\n", "Right\n", ""}, {"S4", "Esc\n", "Esc\n", ""}, {"S5", "Ret\n", "Ret\n", ""}, {"", "", "", ""} }; /* signals, press, repeat, release */ char new_keypad_profile[][4][9] = { {"S0", "Left\n", "Left\n", 
""}, {"S1", "Down\n", "Down\n", ""}, {"S2", "Up\n", "Up\n", ""}, {"S3", "Right\n", "Right\n", ""}, {"S4s5", "", "Esc\n", "Esc\n"}, {"s4S5", "", "Ret\n", "Ret\n"}, {"S4S5", "Help\n", "", ""}, /* add new signals above this line */ {"", "", "", ""} }; /* signals, press, repeat, release */ char nexcom_keypad_profile[][4][9] = { {"a-p-e-", "Down\n", "Down\n", ""}, {"a-p-E-", "Ret\n", "Ret\n", ""}, {"a-P-E-", "Esc\n", "Esc\n", ""}, {"a-P-e-", "Up\n", "Up\n", ""}, /* add new signals above this line */ {"", "", "", ""} }; static char (*keypad_profile)[4][9] = old_keypad_profile; /* FIXME: this should be converted to a bit array containing signals states */ static struct { unsigned char e; /* parallel LCD E (data latch on falling edge) */ unsigned char rs; /* parallel LCD RS (0 = cmd, 1 = data) */ unsigned char rw; /* parallel LCD R/W (0 = W, 1 = R) */ unsigned char bl; /* parallel LCD backlight (0 = off, 1 = on) */ unsigned char cl; /* serial LCD clock (latch on rising edge) */ unsigned char da; /* serial LCD data */ } bits; static void init_scan_timer(void); /* sets data port bits according to current signals values */ static int set_data_bits(void) { int val, bit; val = r_dtr(pprt); for (bit = 0; bit < LCD_BITS; bit++) val &= lcd_bits[LCD_PORT_D][bit][BIT_MSK]; val |= lcd_bits[LCD_PORT_D][LCD_BIT_E][bits.e] | lcd_bits[LCD_PORT_D][LCD_BIT_RS][bits.rs] | lcd_bits[LCD_PORT_D][LCD_BIT_RW][bits.rw] | lcd_bits[LCD_PORT_D][LCD_BIT_BL][bits.bl] | lcd_bits[LCD_PORT_D][LCD_BIT_CL][bits.cl] | lcd_bits[LCD_PORT_D][LCD_BIT_DA][bits.da]; w_dtr(pprt, val); return val; } /* sets ctrl port bits according to current signals values */ static int set_ctrl_bits(void) { int val, bit; val = r_ctr(pprt); for (bit = 0; bit < LCD_BITS; bit++) val &= lcd_bits[LCD_PORT_C][bit][BIT_MSK]; val |= lcd_bits[LCD_PORT_C][LCD_BIT_E][bits.e] | lcd_bits[LCD_PORT_C][LCD_BIT_RS][bits.rs] | lcd_bits[LCD_PORT_C][LCD_BIT_RW][bits.rw] | lcd_bits[LCD_PORT_C][LCD_BIT_BL][bits.bl] | lcd_bits[LCD_PORT_C][LCD_BIT_CL][bits.cl] | lcd_bits[LCD_PORT_C][LCD_BIT_DA][bits.da]; w_ctr(pprt, val); return val; } /* sets ctrl & data port bits according to current signals values */ static void panel_set_bits(void) { set_data_bits(); set_ctrl_bits(); } /* * Converts a parallel port pin (from -25 to 25) to data and control ports * masks, and data and control port bits. The signal will be considered * unconnected if it's on pin 0 or an invalid pin (<-25 or >25). 
* * Result will be used this way : * out(dport, in(dport) & d_val[2] | d_val[signal_state]) * out(cport, in(cport) & c_val[2] | c_val[signal_state]) */ void pin_to_bits(int pin, unsigned char *d_val, unsigned char *c_val) { int d_bit, c_bit, inv; d_val[0] = c_val[0] = d_val[1] = c_val[1] = 0; d_val[2] = c_val[2] = 0xFF; if (pin == 0) return; inv = (pin < 0); if (inv) pin = -pin; d_bit = c_bit = 0; switch (pin) { case PIN_STROBE: /* strobe, inverted */ c_bit = PNL_PSTROBE; inv = !inv; break; case PIN_D0...PIN_D7: /* D0 - D7 = 2 - 9 */ d_bit = 1 << (pin - 2); break; case PIN_AUTOLF: /* autofeed, inverted */ c_bit = PNL_PAUTOLF; inv = !inv; break; case PIN_INITP: /* init, direct */ c_bit = PNL_PINITP; break; case PIN_SELECP: /* select_in, inverted */ c_bit = PNL_PSELECP; inv = !inv; break; default: /* unknown pin, ignore */ break; } if (c_bit) { c_val[2] &= ~c_bit; c_val[!inv] = c_bit; } else if (d_bit) { d_val[2] &= ~d_bit; d_val[!inv] = d_bit; } } /* sleeps that many milliseconds with a reschedule */ static void long_sleep(int ms) { if (in_interrupt()) mdelay(ms); else { current->state = TASK_INTERRUPTIBLE; schedule_timeout((ms * HZ + 999) / 1000); } } /* send a serial byte to the LCD panel. The caller is responsible for locking if needed. */ static void lcd_send_serial(int byte) { int bit; /* the data bit is set on D0, and the clock on STROBE. * LCD reads D0 on STROBE's rising edge. */ for (bit = 0; bit < 8; bit++) { bits.cl = BIT_CLR; /* CLK low */ panel_set_bits(); bits.da = byte & 1; panel_set_bits(); udelay(2); /* maintain the data during 2 us before CLK up */ bits.cl = BIT_SET; /* CLK high */ panel_set_bits(); udelay(1); /* maintain the strobe during 1 us */ byte >>= 1; } } /* turn the backlight on or off */ static void lcd_backlight(int on) { if (lcd_bl_pin == PIN_NONE) return; /* The backlight is activated by seting the AUTOFEED line to +5V */ spin_lock(&pprt_lock); bits.bl = on; panel_set_bits(); spin_unlock(&pprt_lock); } /* send a command to the LCD panel in serial mode */ static void lcd_write_cmd_s(int cmd) { spin_lock(&pprt_lock); lcd_send_serial(0x1F); /* R/W=W, RS=0 */ lcd_send_serial(cmd & 0x0F); lcd_send_serial((cmd >> 4) & 0x0F); udelay(40); /* the shortest command takes at least 40 us */ spin_unlock(&pprt_lock); } /* send data to the LCD panel in serial mode */ static void lcd_write_data_s(int data) { spin_lock(&pprt_lock); lcd_send_serial(0x5F); /* R/W=W, RS=1 */ lcd_send_serial(data & 0x0F); lcd_send_serial((data >> 4) & 0x0F); udelay(40); /* the shortest data takes at least 40 us */ spin_unlock(&pprt_lock); } /* send a command to the LCD panel in 8 bits parallel mode */ static void lcd_write_cmd_p8(int cmd) { spin_lock(&pprt_lock); /* present the data to the data port */ w_dtr(pprt, cmd); udelay(20); /* maintain the data during 20 us before the strobe */ bits.e = BIT_SET; bits.rs = BIT_CLR; bits.rw = BIT_CLR; set_ctrl_bits(); udelay(40); /* maintain the strobe during 40 us */ bits.e = BIT_CLR; set_ctrl_bits(); udelay(120); /* the shortest command takes at least 120 us */ spin_unlock(&pprt_lock); } /* send data to the LCD panel in 8 bits parallel mode */ static void lcd_write_data_p8(int data) { spin_lock(&pprt_lock); /* present the data to the data port */ w_dtr(pprt, data); udelay(20); /* maintain the data during 20 us before the strobe */ bits.e = BIT_SET; bits.rs = BIT_SET; bits.rw = BIT_CLR; set_ctrl_bits(); udelay(40); /* maintain the strobe during 40 us */ bits.e = BIT_CLR; set_ctrl_bits(); udelay(45); /* the shortest data takes at least 45 us */ 
spin_unlock(&pprt_lock); } /* send a command to the TI LCD panel */ static void lcd_write_cmd_tilcd(int cmd) { spin_lock(&pprt_lock); /* present the data to the control port */ w_ctr(pprt, cmd); udelay(60); spin_unlock(&pprt_lock); } /* send data to the TI LCD panel */ static void lcd_write_data_tilcd(int data) { spin_lock(&pprt_lock); /* present the data to the data port */ w_dtr(pprt, data); udelay(60); spin_unlock(&pprt_lock); } static void lcd_gotoxy(void) { lcd_write_cmd(0x80 /* set DDRAM address */ | (lcd_addr_y ? lcd_hwidth : 0) /* we force the cursor to stay at the end of the line if it wants to go farther */ | ((lcd_addr_x < lcd_bwidth) ? lcd_addr_x & (lcd_hwidth - 1) : lcd_bwidth - 1)); } static void lcd_print(char c) { if (lcd_addr_x < lcd_bwidth) { if (lcd_char_conv != NULL) c = lcd_char_conv[(unsigned char)c]; lcd_write_data(c); lcd_addr_x++; } /* prevents the cursor from wrapping onto the next line */ if (lcd_addr_x == lcd_bwidth) lcd_gotoxy(); } /* fills the display with spaces and resets X/Y */ static void lcd_clear_fast_s(void) { int pos; lcd_addr_x = lcd_addr_y = 0; lcd_gotoxy(); spin_lock(&pprt_lock); for (pos = 0; pos < lcd_height * lcd_hwidth; pos++) { lcd_send_serial(0x5F); /* R/W=W, RS=1 */ lcd_send_serial(' ' & 0x0F); lcd_send_serial((' ' >> 4) & 0x0F); udelay(40); /* the shortest data takes at least 40 us */ } spin_unlock(&pprt_lock); lcd_addr_x = lcd_addr_y = 0; lcd_gotoxy(); } /* fills the display with spaces and resets X/Y */ static void lcd_clear_fast_p8(void) { int pos; lcd_addr_x = lcd_addr_y = 0; lcd_gotoxy(); spin_lock(&pprt_lock); for (pos = 0; pos < lcd_height * lcd_hwidth; pos++) { /* present the data to the data port */ w_dtr(pprt, ' '); /* maintain the data during 20 us before the strobe */ udelay(20); bits.e = BIT_SET; bits.rs = BIT_SET; bits.rw = BIT_CLR; set_ctrl_bits(); /* maintain the strobe during 40 us */ udelay(40); bits.e = BIT_CLR; set_ctrl_bits(); /* the shortest data takes at least 45 us */ udelay(45); } spin_unlock(&pprt_lock); lcd_addr_x = lcd_addr_y = 0; lcd_gotoxy(); } /* fills the display with spaces and resets X/Y */ static void lcd_clear_fast_tilcd(void) { int pos; lcd_addr_x = lcd_addr_y = 0; lcd_gotoxy(); spin_lock(&pprt_lock); for (pos = 0; pos < lcd_height * lcd_hwidth; pos++) { /* present the data to the data port */ w_dtr(pprt, ' '); udelay(60); } spin_unlock(&pprt_lock); lcd_addr_x = lcd_addr_y = 0; lcd_gotoxy(); } /* clears the display and resets X/Y */ static void lcd_clear_display(void) { lcd_write_cmd(0x01); /* clear display */ lcd_addr_x = lcd_addr_y = 0; /* we must wait a few milliseconds (15) */ long_sleep(15); } static void lcd_init_display(void) { lcd_flags = ((lcd_height > 1) ? LCD_FLAG_N : 0) | LCD_FLAG_D | LCD_FLAG_C | LCD_FLAG_B; long_sleep(20); /* wait 20 ms after power-up for the paranoid */ lcd_write_cmd(0x30); /* 8bits, 1 line, small fonts */ long_sleep(10); lcd_write_cmd(0x30); /* 8bits, 1 line, small fonts */ long_sleep(10); lcd_write_cmd(0x30); /* 8bits, 1 line, small fonts */ long_sleep(10); lcd_write_cmd(0x30 /* set font height and lines number */ | ((lcd_flags & LCD_FLAG_F) ? 4 : 0) | ((lcd_flags & LCD_FLAG_N) ? 8 : 0) ); long_sleep(10); lcd_write_cmd(0x08); /* display off, cursor off, blink off */ long_sleep(10); lcd_write_cmd(0x08 /* set display mode */ | ((lcd_flags & LCD_FLAG_D) ? 4 : 0) | ((lcd_flags & LCD_FLAG_C) ? 2 : 0) | ((lcd_flags & LCD_FLAG_B) ? 1 : 0) ); lcd_backlight((lcd_flags & LCD_FLAG_L) ? 
1 : 0); long_sleep(10); /* entry mode set : increment, cursor shifting */ lcd_write_cmd(0x06); lcd_clear_display(); } /* * These are the file operation function for user access to /dev/lcd * This function can also be called from inside the kernel, by * setting file and ppos to NULL. * */ static inline int handle_lcd_special_code(void) { /* LCD special codes */ int processed = 0; char *esc = lcd_escape + 2; int oldflags = lcd_flags; /* check for display mode flags */ switch (*esc) { case 'D': /* Display ON */ lcd_flags |= LCD_FLAG_D; processed = 1; break; case 'd': /* Display OFF */ lcd_flags &= ~LCD_FLAG_D; processed = 1; break; case 'C': /* Cursor ON */ lcd_flags |= LCD_FLAG_C; processed = 1; break; case 'c': /* Cursor OFF */ lcd_flags &= ~LCD_FLAG_C; processed = 1; break; case 'B': /* Blink ON */ lcd_flags |= LCD_FLAG_B; processed = 1; break; case 'b': /* Blink OFF */ lcd_flags &= ~LCD_FLAG_B; processed = 1; break; case '+': /* Back light ON */ lcd_flags |= LCD_FLAG_L; processed = 1; break; case '-': /* Back light OFF */ lcd_flags &= ~LCD_FLAG_L; processed = 1; break; case '*': /* flash back light using the keypad timer */ if (scan_timer.function != NULL) { if (light_tempo == 0 && ((lcd_flags & LCD_FLAG_L) == 0)) lcd_backlight(1); light_tempo = FLASH_LIGHT_TEMPO; } processed = 1; break; case 'f': /* Small Font */ lcd_flags &= ~LCD_FLAG_F; processed = 1; break; case 'F': /* Large Font */ lcd_flags |= LCD_FLAG_F; processed = 1; break; case 'n': /* One Line */ lcd_flags &= ~LCD_FLAG_N; processed = 1; break; case 'N': /* Two Lines */ lcd_flags |= LCD_FLAG_N; break; case 'l': /* Shift Cursor Left */ if (lcd_addr_x > 0) { /* back one char if not at end of line */ if (lcd_addr_x < lcd_bwidth) lcd_write_cmd(0x10); lcd_addr_x--; } processed = 1; break; case 'r': /* shift cursor right */ if (lcd_addr_x < lcd_width) { /* allow the cursor to pass the end of the line */ if (lcd_addr_x < (lcd_bwidth - 1)) lcd_write_cmd(0x14); lcd_addr_x++; } processed = 1; break; case 'L': /* shift display left */ lcd_left_shift++; lcd_write_cmd(0x18); processed = 1; break; case 'R': /* shift display right */ lcd_left_shift--; lcd_write_cmd(0x1C); processed = 1; break; case 'k': { /* kill end of line */ int x; for (x = lcd_addr_x; x < lcd_bwidth; x++) lcd_write_data(' '); /* restore cursor position */ lcd_gotoxy(); processed = 1; break; } case 'I': /* reinitialize display */ lcd_init_display(); lcd_left_shift = 0; processed = 1; break; case 'G': { /* Generator : LGcxxxxx...xx; must have <c> between '0' * and '7', representing the numerical ASCII code of the * redefined character, and <xx...xx> a sequence of 16 * hex digits representing 8 bytes for each character. * Most LCDs will only use 5 lower bits of the 7 first * bytes. 
*/ unsigned char cgbytes[8]; unsigned char cgaddr; int cgoffset; int shift; char value; int addr; if (strchr(esc, ';') == NULL) break; esc++; cgaddr = *(esc++) - '0'; if (cgaddr > 7) { processed = 1; break; } cgoffset = 0; shift = 0; value = 0; while (*esc && cgoffset < 8) { shift ^= 4; if (*esc >= '0' && *esc <= '9') value |= (*esc - '0') << shift; else if (*esc >= 'A' && *esc <= 'Z') value |= (*esc - 'A' + 10) << shift; else if (*esc >= 'a' && *esc <= 'z') value |= (*esc - 'a' + 10) << shift; else { esc++; continue; } if (shift == 0) { cgbytes[cgoffset++] = value; value = 0; } esc++; } lcd_write_cmd(0x40 | (cgaddr * 8)); for (addr = 0; addr < cgoffset; addr++) lcd_write_data(cgbytes[addr]); /* ensures that we stop writing to CGRAM */ lcd_gotoxy(); processed = 1; break; } case 'x': /* gotoxy : LxXXX[yYYY]; */ case 'y': /* gotoxy : LyYYY[xXXX]; */ if (strchr(esc, ';') == NULL) break; while (*esc) { if (*esc == 'x') { esc++; if (kstrtoul(esc, 10, &lcd_addr_x) < 0) break; } else if (*esc == 'y') { esc++; if (kstrtoul(esc, 10, &lcd_addr_y) < 0) break; } else break; } lcd_gotoxy(); processed = 1; break; } /* Check wether one flag was changed */ if (oldflags != lcd_flags) { /* check whether one of B,C,D flags were changed */ if ((oldflags ^ lcd_flags) & (LCD_FLAG_B | LCD_FLAG_C | LCD_FLAG_D)) /* set display mode */ lcd_write_cmd(0x08 | ((lcd_flags & LCD_FLAG_D) ? 4 : 0) | ((lcd_flags & LCD_FLAG_C) ? 2 : 0) | ((lcd_flags & LCD_FLAG_B) ? 1 : 0)); /* check whether one of F,N flags was changed */ else if ((oldflags ^ lcd_flags) & (LCD_FLAG_F | LCD_FLAG_N)) lcd_write_cmd(0x30 | ((lcd_flags & LCD_FLAG_F) ? 4 : 0) | ((lcd_flags & LCD_FLAG_N) ? 8 : 0)); /* check wether L flag was changed */ else if ((oldflags ^ lcd_flags) & (LCD_FLAG_L)) { if (lcd_flags & (LCD_FLAG_L)) lcd_backlight(1); else if (light_tempo == 0) /* switch off the light only when the tempo lighting is gone */ lcd_backlight(0); } } return processed; } static ssize_t lcd_write(struct file *file, const char *buf, size_t count, loff_t *ppos) { const char *tmp = buf; char c; for (; count-- > 0; (ppos ? (*ppos)++ : 0), ++tmp) { if (!in_interrupt() && (((count + 1) & 0x1f) == 0)) /* let's be a little nice with other processes that need some CPU */ schedule(); if (ppos == NULL && file == NULL) /* let's not use get_user() from the kernel ! 
*/ c = *tmp; else if (get_user(c, tmp)) return -EFAULT; /* first, we'll test if we're in escape mode */ if ((c != '\n') && lcd_escape_len >= 0) { /* yes, let's add this char to the buffer */ lcd_escape[lcd_escape_len++] = c; lcd_escape[lcd_escape_len] = 0; } else { /* aborts any previous escape sequence */ lcd_escape_len = -1; switch (c) { case LCD_ESCAPE_CHAR: /* start of an escape sequence */ lcd_escape_len = 0; lcd_escape[lcd_escape_len] = 0; break; case '\b': /* go back one char and clear it */ if (lcd_addr_x > 0) { /* check if we're not at the end of the line */ if (lcd_addr_x < lcd_bwidth) /* back one char */ lcd_write_cmd(0x10); lcd_addr_x--; } /* replace with a space */ lcd_write_data(' '); /* back one char again */ lcd_write_cmd(0x10); break; case '\014': /* quickly clear the display */ lcd_clear_fast(); break; case '\n': /* flush the remainder of the current line and go to the beginning of the next line */ for (; lcd_addr_x < lcd_bwidth; lcd_addr_x++) lcd_write_data(' '); lcd_addr_x = 0; lcd_addr_y = (lcd_addr_y + 1) % lcd_height; lcd_gotoxy(); break; case '\r': /* go to the beginning of the same line */ lcd_addr_x = 0; lcd_gotoxy(); break; case '\t': /* print a space instead of the tab */ lcd_print(' '); break; default: /* simply print this char */ lcd_print(c); break; } } /* now we'll see if we're in an escape mode and if the current escape sequence can be understood. */ if (lcd_escape_len >= 2) { int processed = 0; if (!strcmp(lcd_escape, "[2J")) { /* clear the display */ lcd_clear_fast(); processed = 1; } else if (!strcmp(lcd_escape, "[H")) { /* cursor to home */ lcd_addr_x = lcd_addr_y = 0; lcd_gotoxy(); processed = 1; } /* codes starting with ^[[L */ else if ((lcd_escape_len >= 3) && (lcd_escape[0] == '[') && (lcd_escape[1] == 'L')) { processed = handle_lcd_special_code(); } /* LCD special escape codes */ /* flush the escape sequence if it's been processed or if it is getting too long. 
*/ if (processed || (lcd_escape_len >= LCD_ESCAPE_LEN)) lcd_escape_len = -1; } /* escape codes */ } return tmp - buf; } static int lcd_open(struct inode *inode, struct file *file) { if (lcd_open_cnt) return -EBUSY; /* open only once at a time */ if (file->f_mode & FMODE_READ) /* device is write-only */ return -EPERM; if (lcd_must_clear) { lcd_clear_display(); lcd_must_clear = 0; } lcd_open_cnt++; return nonseekable_open(inode, file); } static int lcd_release(struct inode *inode, struct file *file) { lcd_open_cnt--; return 0; } static const struct file_operations lcd_fops = { .write = lcd_write, .open = lcd_open, .release = lcd_release, .llseek = no_llseek, }; static struct miscdevice lcd_dev = { LCD_MINOR, "lcd", &lcd_fops }; /* public function usable from the kernel for any purpose */ void panel_lcd_print(char *s) { if (lcd_enabled && lcd_initialized) lcd_write(NULL, s, strlen(s), NULL); } /* initialize the LCD driver */ void lcd_init(void) { switch (lcd_type) { case LCD_TYPE_OLD: /* parallel mode, 8 bits */ if (lcd_proto < 0) lcd_proto = LCD_PROTO_PARALLEL; if (lcd_charset < 0) lcd_charset = LCD_CHARSET_NORMAL; if (lcd_e_pin == PIN_NOT_SET) lcd_e_pin = PIN_STROBE; if (lcd_rs_pin == PIN_NOT_SET) lcd_rs_pin = PIN_AUTOLF; if (lcd_width < 0) lcd_width = 40; if (lcd_bwidth < 0) lcd_bwidth = 40; if (lcd_hwidth < 0) lcd_hwidth = 64; if (lcd_height < 0) lcd_height = 2; break; case LCD_TYPE_KS0074: /* serial mode, ks0074 */ if (lcd_proto < 0) lcd_proto = LCD_PROTO_SERIAL; if (lcd_charset < 0) lcd_charset = LCD_CHARSET_KS0074; if (lcd_bl_pin == PIN_NOT_SET) lcd_bl_pin = PIN_AUTOLF; if (lcd_cl_pin == PIN_NOT_SET) lcd_cl_pin = PIN_STROBE; if (lcd_da_pin == PIN_NOT_SET) lcd_da_pin = PIN_D0; if (lcd_width < 0) lcd_width = 16; if (lcd_bwidth < 0) lcd_bwidth = 40; if (lcd_hwidth < 0) lcd_hwidth = 16; if (lcd_height < 0) lcd_height = 2; break; case LCD_TYPE_NEXCOM: /* parallel mode, 8 bits, generic */ if (lcd_proto < 0) lcd_proto = LCD_PROTO_PARALLEL; if (lcd_charset < 0) lcd_charset = LCD_CHARSET_NORMAL; if (lcd_e_pin == PIN_NOT_SET) lcd_e_pin = PIN_AUTOLF; if (lcd_rs_pin == PIN_NOT_SET) lcd_rs_pin = PIN_SELECP; if (lcd_rw_pin == PIN_NOT_SET) lcd_rw_pin = PIN_INITP; if (lcd_width < 0) lcd_width = 16; if (lcd_bwidth < 0) lcd_bwidth = 40; if (lcd_hwidth < 0) lcd_hwidth = 64; if (lcd_height < 0) lcd_height = 2; break; case LCD_TYPE_CUSTOM: /* customer-defined */ if (lcd_proto < 0) lcd_proto = DEFAULT_LCD_PROTO; if (lcd_charset < 0) lcd_charset = DEFAULT_LCD_CHARSET; /* default geometry will be set later */ break; case LCD_TYPE_HANTRONIX: /* parallel mode, 8 bits, hantronix-like */ default: if (lcd_proto < 0) lcd_proto = LCD_PROTO_PARALLEL; if (lcd_charset < 0) lcd_charset = LCD_CHARSET_NORMAL; if (lcd_e_pin == PIN_NOT_SET) lcd_e_pin = PIN_STROBE; if (lcd_rs_pin == PIN_NOT_SET) lcd_rs_pin = PIN_SELECP; if (lcd_width < 0) lcd_width = 16; if (lcd_bwidth < 0) lcd_bwidth = 40; if (lcd_hwidth < 0) lcd_hwidth = 64; if (lcd_height < 0) lcd_height = 2; break; } /* this is used to catch wrong and default values */ if (lcd_width <= 0) lcd_width = DEFAULT_LCD_WIDTH; if (lcd_bwidth <= 0) lcd_bwidth = DEFAULT_LCD_BWIDTH; if (lcd_hwidth <= 0) lcd_hwidth = DEFAULT_LCD_HWIDTH; if (lcd_height <= 0) lcd_height = DEFAULT_LCD_HEIGHT; if (lcd_proto == LCD_PROTO_SERIAL) { /* SERIAL */ lcd_write_cmd = lcd_write_cmd_s; lcd_write_data = lcd_write_data_s; lcd_clear_fast = lcd_clear_fast_s; if (lcd_cl_pin == PIN_NOT_SET) lcd_cl_pin = DEFAULT_LCD_PIN_SCL; if (lcd_da_pin == PIN_NOT_SET) lcd_da_pin = DEFAULT_LCD_PIN_SDA; } else if 
(lcd_proto == LCD_PROTO_PARALLEL) { /* PARALLEL */ lcd_write_cmd = lcd_write_cmd_p8; lcd_write_data = lcd_write_data_p8; lcd_clear_fast = lcd_clear_fast_p8; if (lcd_e_pin == PIN_NOT_SET) lcd_e_pin = DEFAULT_LCD_PIN_E; if (lcd_rs_pin == PIN_NOT_SET) lcd_rs_pin = DEFAULT_LCD_PIN_RS; if (lcd_rw_pin == PIN_NOT_SET) lcd_rw_pin = DEFAULT_LCD_PIN_RW; } else { lcd_write_cmd = lcd_write_cmd_tilcd; lcd_write_data = lcd_write_data_tilcd; lcd_clear_fast = lcd_clear_fast_tilcd; } if (lcd_bl_pin == PIN_NOT_SET) lcd_bl_pin = DEFAULT_LCD_PIN_BL; if (lcd_e_pin == PIN_NOT_SET) lcd_e_pin = PIN_NONE; if (lcd_rs_pin == PIN_NOT_SET) lcd_rs_pin = PIN_NONE; if (lcd_rw_pin == PIN_NOT_SET) lcd_rw_pin = PIN_NONE; if (lcd_bl_pin == PIN_NOT_SET) lcd_bl_pin = PIN_NONE; if (lcd_cl_pin == PIN_NOT_SET) lcd_cl_pin = PIN_NONE; if (lcd_da_pin == PIN_NOT_SET) lcd_da_pin = PIN_NONE; if (lcd_charset < 0) lcd_charset = DEFAULT_LCD_CHARSET; if (lcd_charset == LCD_CHARSET_KS0074) lcd_char_conv = lcd_char_conv_ks0074; else lcd_char_conv = NULL; if (lcd_bl_pin != PIN_NONE) init_scan_timer(); pin_to_bits(lcd_e_pin, lcd_bits[LCD_PORT_D][LCD_BIT_E], lcd_bits[LCD_PORT_C][LCD_BIT_E]); pin_to_bits(lcd_rs_pin, lcd_bits[LCD_PORT_D][LCD_BIT_RS], lcd_bits[LCD_PORT_C][LCD_BIT_RS]); pin_to_bits(lcd_rw_pin, lcd_bits[LCD_PORT_D][LCD_BIT_RW], lcd_bits[LCD_PORT_C][LCD_BIT_RW]); pin_to_bits(lcd_bl_pin, lcd_bits[LCD_PORT_D][LCD_BIT_BL], lcd_bits[LCD_PORT_C][LCD_BIT_BL]); pin_to_bits(lcd_cl_pin, lcd_bits[LCD_PORT_D][LCD_BIT_CL], lcd_bits[LCD_PORT_C][LCD_BIT_CL]); pin_to_bits(lcd_da_pin, lcd_bits[LCD_PORT_D][LCD_BIT_DA], lcd_bits[LCD_PORT_C][LCD_BIT_DA]); /* before this line, we must NOT send anything to the display. * Since lcd_init_display() needs to write data, we have to * enable mark the LCD initialized just before. 
*/ lcd_initialized = 1; lcd_init_display(); /* display a short message */ #ifdef CONFIG_PANEL_CHANGE_MESSAGE #ifdef CONFIG_PANEL_BOOT_MESSAGE panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE); #endif #else panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\nPanel-" PANEL_VERSION); #endif lcd_addr_x = lcd_addr_y = 0; /* clear the display on the next device opening */ lcd_must_clear = 1; lcd_gotoxy(); } /* * These are the file operation function for user access to /dev/keypad */ static ssize_t keypad_read(struct file *file, char *buf, size_t count, loff_t *ppos) { unsigned i = *ppos; char *tmp = buf; if (keypad_buflen == 0) { if (file->f_flags & O_NONBLOCK) return -EAGAIN; interruptible_sleep_on(&keypad_read_wait); if (signal_pending(current)) return -EINTR; } for (; count-- > 0 && (keypad_buflen > 0); ++i, ++tmp, --keypad_buflen) { put_user(keypad_buffer[keypad_start], tmp); keypad_start = (keypad_start + 1) % KEYPAD_BUFFER; } *ppos = i; return tmp - buf; } static int keypad_open(struct inode *inode, struct file *file) { if (keypad_open_cnt) return -EBUSY; /* open only once at a time */ if (file->f_mode & FMODE_WRITE) /* device is read-only */ return -EPERM; keypad_buflen = 0; /* flush the buffer on opening */ keypad_open_cnt++; return 0; } static int keypad_release(struct inode *inode, struct file *file) { keypad_open_cnt--; return 0; } static const struct file_operations keypad_fops = { .read = keypad_read, /* read */ .open = keypad_open, /* open */ .release = keypad_release, /* close */ .llseek = default_llseek, }; static struct miscdevice keypad_dev = { KEYPAD_MINOR, "keypad", &keypad_fops }; static void keypad_send_key(char *string, int max_len) { if (init_in_progress) return; /* send the key to the device only if a process is attached to it. */ if (keypad_open_cnt > 0) { while (max_len-- && keypad_buflen < KEYPAD_BUFFER && *string) { keypad_buffer[(keypad_start + keypad_buflen++) % KEYPAD_BUFFER] = *string++; } wake_up_interruptible(&keypad_read_wait); } } /* this function scans all the bits involving at least one logical signal, * and puts the results in the bitfield "phys_read" (one bit per established * contact), and sets "phys_read_prev" to "phys_read". * * Note: to debounce input signals, we will only consider as switched a signal * which is stable across 2 measures. Signals which are different between two * reads will be kept as they previously were in their logical form (phys_prev). * A signal which has just switched will have a 1 in * (phys_read ^ phys_read_prev). */ static void phys_scan_contacts(void) { int bit, bitval; char oldval; char bitmask; char gndmask; phys_prev = phys_curr; phys_read_prev = phys_read; phys_read = 0; /* flush all signals */ /* keep track of old value, with all outputs disabled */ oldval = r_dtr(pprt) | scan_mask_o; /* activate all keyboard outputs (active low) */ w_dtr(pprt, oldval & ~scan_mask_o); /* will have a 1 for each bit set to gnd */ bitmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i; /* disable all matrix signals */ w_dtr(pprt, oldval); /* now that all outputs are cleared, the only active input bits are * directly connected to the ground */ /* 1 for each grounded input */ gndmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i; /* grounded inputs are signals 40-44 */ phys_read |= (pmask_t) gndmask << 40; if (bitmask != gndmask) { /* since clearing the outputs changed some inputs, we know * that some input signals are currently tied to some outputs. * So we'll scan them. 
*/ for (bit = 0; bit < 8; bit++) { bitval = 1 << bit; if (!(scan_mask_o & bitval)) /* skip unused bits */ continue; w_dtr(pprt, oldval & ~bitval); /* enable this output */ bitmask = PNL_PINPUT(r_str(pprt)) & ~gndmask; phys_read |= (pmask_t) bitmask << (5 * bit); } w_dtr(pprt, oldval); /* disable all outputs */ } /* this is easy: use old bits when they are flapping, * use new ones when stable */ phys_curr = (phys_prev & (phys_read ^ phys_read_prev)) | (phys_read & ~(phys_read ^ phys_read_prev)); } static inline int input_state_high(struct logical_input *input) { #if 0 /* FIXME: * this is an invalid test. It tries to catch * transitions from single-key to multiple-key, but * doesn't take into account the contacts polarity. * The only solution to the problem is to parse keys * from the most complex to the simplest combinations, * and mark them as 'caught' once a combination * matches, then unmatch it for all other ones. */ /* try to catch dangerous transitions cases : * someone adds a bit, so this signal was a false * positive resulting from a transition. We should * invalidate the signal immediately and not call the * release function. * eg: 0 -(press A)-> A -(press B)-> AB : don't match A's release. */ if (((phys_prev & input->mask) == input->value) && ((phys_curr & input->mask) > input->value)) { input->state = INPUT_ST_LOW; /* invalidate */ return 1; } #endif if ((phys_curr & input->mask) == input->value) { if ((input->type == INPUT_TYPE_STD) && (input->high_timer == 0)) { input->high_timer++; if (input->u.std.press_fct != NULL) input->u.std.press_fct(input->u.std.press_data); } else if (input->type == INPUT_TYPE_KBD) { /* will turn on the light */ keypressed = 1; if (input->high_timer == 0) { char *press_str = input->u.kbd.press_str; if (press_str[0]) keypad_send_key(press_str, sizeof(press_str)); } if (input->u.kbd.repeat_str[0]) { char *repeat_str = input->u.kbd.repeat_str; if (input->high_timer >= KEYPAD_REP_START) { input->high_timer -= KEYPAD_REP_DELAY; keypad_send_key(repeat_str, sizeof(repeat_str)); } /* we will need to come back here soon */ inputs_stable = 0; } if (input->high_timer < 255) input->high_timer++; } return 1; } else { /* else signal falling down. Let's fall through. */ input->state = INPUT_ST_FALLING; input->fall_timer = 0; } return 0; } static inline void input_state_falling(struct logical_input *input) { #if 0 /* FIXME !!! 
same comment as in input_state_high */ if (((phys_prev & input->mask) == input->value) && ((phys_curr & input->mask) > input->value)) { input->state = INPUT_ST_LOW; /* invalidate */ return; } #endif if ((phys_curr & input->mask) == input->value) { if (input->type == INPUT_TYPE_KBD) { /* will turn on the light */ keypressed = 1; if (input->u.kbd.repeat_str[0]) { char *repeat_str = input->u.kbd.repeat_str; if (input->high_timer >= KEYPAD_REP_START) input->high_timer -= KEYPAD_REP_DELAY; keypad_send_key(repeat_str, sizeof(repeat_str)); /* we will need to come back here soon */ inputs_stable = 0; } if (input->high_timer < 255) input->high_timer++; } input->state = INPUT_ST_HIGH; } else if (input->fall_timer >= input->fall_time) { /* call release event */ if (input->type == INPUT_TYPE_STD) { void (*release_fct)(int) = input->u.std.release_fct; if (release_fct != NULL) release_fct(input->u.std.release_data); } else if (input->type == INPUT_TYPE_KBD) { char *release_str = input->u.kbd.release_str; if (release_str[0]) keypad_send_key(release_str, sizeof(release_str)); } input->state = INPUT_ST_LOW; } else { input->fall_timer++; inputs_stable = 0; } } static void panel_process_inputs(void) { struct list_head *item; struct logical_input *input; #if 0 printk(KERN_DEBUG "entering panel_process_inputs with pp=%016Lx & pc=%016Lx\n", phys_prev, phys_curr); #endif keypressed = 0; inputs_stable = 1; list_for_each(item, &logical_inputs) { input = list_entry(item, struct logical_input, list); switch (input->state) { case INPUT_ST_LOW: if ((phys_curr & input->mask) != input->value) break; /* if all needed ones were already set previously, * this means that this logical signal has been * activated by the releasing of another combined * signal, so we don't want to match. * eg: AB -(release B)-> A -(release A)-> 0 : * don't match A. */ if ((phys_prev & input->mask) == input->value) break; input->rise_timer = 0; input->state = INPUT_ST_RISING; /* no break here, fall through */ case INPUT_ST_RISING: if ((phys_curr & input->mask) != input->value) { input->state = INPUT_ST_LOW; break; } if (input->rise_timer < input->rise_time) { inputs_stable = 0; input->rise_timer++; break; } input->high_timer = 0; input->state = INPUT_ST_HIGH; /* no break here, fall through */ case INPUT_ST_HIGH: if (input_state_high(input)) break; /* no break here, fall through */ case INPUT_ST_FALLING: input_state_falling(input); } } } static void panel_scan_timer(void) { if (keypad_enabled && keypad_initialized) { if (spin_trylock(&pprt_lock)) { phys_scan_contacts(); /* no need for the parport anymore */ spin_unlock(&pprt_lock); } if (!inputs_stable || phys_curr != phys_prev) panel_process_inputs(); } if (lcd_enabled && lcd_initialized) { if (keypressed) { if (light_tempo == 0 && ((lcd_flags & LCD_FLAG_L) == 0)) lcd_backlight(1); light_tempo = FLASH_LIGHT_TEMPO; } else if (light_tempo > 0) { light_tempo--; if (light_tempo == 0 && ((lcd_flags & LCD_FLAG_L) == 0)) lcd_backlight(0); } } mod_timer(&scan_timer, jiffies + INPUT_POLL_TIME); } static void init_scan_timer(void) { if (scan_timer.function != NULL) return; /* already started */ init_timer(&scan_timer); scan_timer.expires = jiffies + INPUT_POLL_TIME; scan_timer.data = 0; scan_timer.function = (void *)&panel_scan_timer; add_timer(&scan_timer); } /* converts a name of the form "({BbAaPpSsEe}{01234567-})*" to a series of bits. * if <omask> or <imask> are non-null, they will be or'ed with the bits * corresponding to out and in bits respectively. 
* returns 1 if ok, 0 if error (in which case, nothing is written). */ static int input_name2mask(char *name, pmask_t *mask, pmask_t *value, char *imask, char *omask) { static char sigtab[10] = "EeSsPpAaBb"; char im, om; pmask_t m, v; om = im = m = v = 0ULL; while (*name) { int in, out, bit, neg; for (in = 0; (in < sizeof(sigtab)) && (sigtab[in] != *name); in++) ; if (in >= sizeof(sigtab)) return 0; /* input name not found */ neg = (in & 1); /* odd (lower) names are negated */ in >>= 1; im |= (1 << in); name++; if (isdigit(*name)) { out = *name - '0'; om |= (1 << out); } else if (*name == '-') out = 8; else return 0; /* unknown bit name */ bit = (out * 5) + in; m |= 1ULL << bit; if (!neg) v |= 1ULL << bit; name++; } *mask = m; *value = v; if (imask) *imask |= im; if (omask) *omask |= om; return 1; } /* tries to bind a key to the signal name <name>. The key will send the * strings <press>, <repeat>, <release> for these respective events. * Returns the pointer to the new key if ok, NULL if the key could not be bound. */ static struct logical_input *panel_bind_key(char *name, char *press, char *repeat, char *release) { struct logical_input *key; key = kzalloc(sizeof(struct logical_input), GFP_KERNEL); if (!key) { printk(KERN_ERR "panel: not enough memory\n"); return NULL; } if (!input_name2mask(name, &key->mask, &key->value, &scan_mask_i, &scan_mask_o)) { kfree(key); return NULL; } key->type = INPUT_TYPE_KBD; key->state = INPUT_ST_LOW; key->rise_time = 1; key->fall_time = 1; #if 0 printk(KERN_DEBUG "bind: <%s> : m=%016Lx v=%016Lx\n", name, key->mask, key->value); #endif strncpy(key->u.kbd.press_str, press, sizeof(key->u.kbd.press_str)); strncpy(key->u.kbd.repeat_str, repeat, sizeof(key->u.kbd.repeat_str)); strncpy(key->u.kbd.release_str, release, sizeof(key->u.kbd.release_str)); list_add(&key->list, &logical_inputs); return key; } #if 0 /* tries to bind a callback function to the signal name <name>. The function * <press_fct> will be called with the <press_data> arg when the signal is * activated, and so on for <release_fct>/<release_data> * Returns the pointer to the new signal if ok, NULL if the signal could not * be bound. 
*/ static struct logical_input *panel_bind_callback(char *name, void (*press_fct) (int), int press_data, void (*release_fct) (int), int release_data) { struct logical_input *callback; callback = kmalloc(sizeof(struct logical_input), GFP_KERNEL); if (!callback) { printk(KERN_ERR "panel: not enough memory\n"); return NULL; } memset(callback, 0, sizeof(struct logical_input)); if (!input_name2mask(name, &callback->mask, &callback->value, &scan_mask_i, &scan_mask_o)) return NULL; callback->type = INPUT_TYPE_STD; callback->state = INPUT_ST_LOW; callback->rise_time = 1; callback->fall_time = 1; callback->u.std.press_fct = press_fct; callback->u.std.press_data = press_data; callback->u.std.release_fct = release_fct; callback->u.std.release_data = release_data; list_add(&callback->list, &logical_inputs); return callback; } #endif static void keypad_init(void) { int keynum; init_waitqueue_head(&keypad_read_wait); keypad_buflen = 0; /* flushes any eventual noisy keystroke */ /* Let's create all known keys */ for (keynum = 0; keypad_profile[keynum][0][0]; keynum++) { panel_bind_key(keypad_profile[keynum][0], keypad_profile[keynum][1], keypad_profile[keynum][2], keypad_profile[keynum][3]); } init_scan_timer(); keypad_initialized = 1; } /**************************************************/ /* device initialization */ /**************************************************/ static int panel_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (lcd_enabled && lcd_initialized) { switch (code) { case SYS_DOWN: panel_lcd_print ("\x0cReloading\nSystem...\x1b[Lc\x1b[Lb\x1b[L+"); break; case SYS_HALT: panel_lcd_print ("\x0cSystem Halted.\x1b[Lc\x1b[Lb\x1b[L+"); break; case SYS_POWER_OFF: panel_lcd_print("\x0cPower off.\x1b[Lc\x1b[Lb\x1b[L+"); break; default: break; } } return NOTIFY_DONE; } static struct notifier_block panel_notifier = { panel_notify_sys, NULL, 0 }; static void panel_attach(struct parport *port) { if (port->number != parport) return; if (pprt) { printk(KERN_ERR "panel_attach(): port->number=%d parport=%d, " "already registered !\n", port->number, parport); return; } pprt = parport_register_device(port, "panel", NULL, NULL, /* pf, kf */ NULL, /*PARPORT_DEV_EXCL */ 0, (void *)&pprt); if (pprt == NULL) { pr_err("panel_attach(): port->number=%d parport=%d, " "parport_register_device() failed\n", port->number, parport); return; } if (parport_claim(pprt)) { printk(KERN_ERR "Panel: could not claim access to parport%d. 
" "Aborting.\n", parport); goto err_unreg_device; } /* must init LCD first, just in case an IRQ from the keypad is * generated at keypad init */ if (lcd_enabled) { lcd_init(); if (misc_register(&lcd_dev)) goto err_unreg_device; } if (keypad_enabled) { keypad_init(); if (misc_register(&keypad_dev)) goto err_lcd_unreg; } return; err_lcd_unreg: if (lcd_enabled) misc_deregister(&lcd_dev); err_unreg_device: parport_unregister_device(pprt); pprt = NULL; } static void panel_detach(struct parport *port) { if (port->number != parport) return; if (!pprt) { printk(KERN_ERR "panel_detach(): port->number=%d parport=%d, " "nothing to unregister.\n", port->number, parport); return; } if (keypad_enabled && keypad_initialized) { misc_deregister(&keypad_dev); keypad_initialized = 0; } if (lcd_enabled && lcd_initialized) { misc_deregister(&lcd_dev); lcd_initialized = 0; } parport_release(pprt); parport_unregister_device(pprt); pprt = NULL; } static struct parport_driver panel_driver = { .name = "panel", .attach = panel_attach, .detach = panel_detach, }; /* init function */ int panel_init(void) { /* for backwards compatibility */ if (keypad_type < 0) keypad_type = keypad_enabled; if (lcd_type < 0) lcd_type = lcd_enabled; if (parport < 0) parport = DEFAULT_PARPORT; /* take care of an eventual profile */ switch (profile) { case PANEL_PROFILE_CUSTOM: /* custom profile */ if (keypad_type < 0) keypad_type = DEFAULT_KEYPAD; if (lcd_type < 0) lcd_type = DEFAULT_LCD; break; case PANEL_PROFILE_OLD: /* 8 bits, 2*16, old keypad */ if (keypad_type < 0) keypad_type = KEYPAD_TYPE_OLD; if (lcd_type < 0) lcd_type = LCD_TYPE_OLD; if (lcd_width < 0) lcd_width = 16; if (lcd_hwidth < 0) lcd_hwidth = 16; break; case PANEL_PROFILE_NEW: /* serial, 2*16, new keypad */ if (keypad_type < 0) keypad_type = KEYPAD_TYPE_NEW; if (lcd_type < 0) lcd_type = LCD_TYPE_KS0074; break; case PANEL_PROFILE_HANTRONIX: /* 8 bits, 2*16 hantronix-like, no keypad */ if (keypad_type < 0) keypad_type = KEYPAD_TYPE_NONE; if (lcd_type < 0) lcd_type = LCD_TYPE_HANTRONIX; break; case PANEL_PROFILE_NEXCOM: /* generic 8 bits, 2*16, nexcom keypad, eg. Nexcom. */ if (keypad_type < 0) keypad_type = KEYPAD_TYPE_NEXCOM; if (lcd_type < 0) lcd_type = LCD_TYPE_NEXCOM; break; case PANEL_PROFILE_LARGE: /* 8 bits, 2*40, old keypad */ if (keypad_type < 0) keypad_type = KEYPAD_TYPE_OLD; if (lcd_type < 0) lcd_type = LCD_TYPE_OLD; break; } lcd_enabled = (lcd_type > 0); keypad_enabled = (keypad_type > 0); switch (keypad_type) { case KEYPAD_TYPE_OLD: keypad_profile = old_keypad_profile; break; case KEYPAD_TYPE_NEW: keypad_profile = new_keypad_profile; break; case KEYPAD_TYPE_NEXCOM: keypad_profile = nexcom_keypad_profile; break; default: keypad_profile = NULL; break; } /* tells various subsystems about the fact that we are initializing */ init_in_progress = 1; if (parport_register_driver(&panel_driver)) { printk(KERN_ERR "Panel: could not register with parport. 
Aborting.\n"); return -EIO; } if (!lcd_enabled && !keypad_enabled) { /* no device enabled, let's release the parport */ if (pprt) { parport_release(pprt); parport_unregister_device(pprt); pprt = NULL; } parport_unregister_driver(&panel_driver); printk(KERN_ERR "Panel driver version " PANEL_VERSION " disabled.\n"); return -ENODEV; } register_reboot_notifier(&panel_notifier); if (pprt) printk(KERN_INFO "Panel driver version " PANEL_VERSION " registered on parport%d (io=0x%lx).\n", parport, pprt->port->base); else printk(KERN_INFO "Panel driver version " PANEL_VERSION " not yet registered\n"); /* tells various subsystems about the fact that initialization is finished */ init_in_progress = 0; return 0; } static int __init panel_init_module(void) { return panel_init(); } static void __exit panel_cleanup_module(void) { unregister_reboot_notifier(&panel_notifier); if (scan_timer.function != NULL) del_timer(&scan_timer); if (pprt != NULL) { if (keypad_enabled) { misc_deregister(&keypad_dev); keypad_initialized = 0; } if (lcd_enabled) { panel_lcd_print("\x0cLCD driver " PANEL_VERSION "\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-"); misc_deregister(&lcd_dev); lcd_initialized = 0; } /* TODO: free all input signals */ parport_release(pprt); parport_unregister_device(pprt); pprt = NULL; } parport_unregister_driver(&panel_driver); } module_init(panel_init_module); module_exit(panel_cleanup_module); MODULE_AUTHOR("Willy Tarreau"); MODULE_LICENSE("GPL"); /* * Local variables: * c-indent-level: 4 * tab-width: 8 * End: */
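A note on the pin_to_bits() helper in the panel code above: for each LCD signal it builds a three-entry table per parallel port, where entry [BIT_MSK] is an AND mask that clears only that signal's bit and entries [BIT_CLR]/[BIT_SET] are the OR values for the off/on states, so every port update reduces to out = (in & tab[BIT_MSK]) | tab[state]. The fragment below is a stand-alone, user-space sketch of that scheme, written only for illustration; the example pin assignment (LCD 'E' on data bit D3) and the demo port value are invented and are not taken from the driver.

/*
 * Illustration only, not kernel code: shows how the three-entry bit
 * tables produced by pin_to_bits() are applied to a port value.
 */
#include <stdio.h>

enum { BIT_CLR, BIT_SET, BIT_MSK, BIT_STATES };

int main(void)
{
	/* pretend the LCD 'E' signal sits on data bit D3, not inverted */
	unsigned char e_tab[BIT_STATES] = {
		[BIT_CLR] = 0x00,	/* OR value when the signal is off */
		[BIT_SET] = 0x08,	/* OR value when the signal is on  */
		[BIT_MSK] = 0xF7,	/* AND mask clearing only bit D3   */
	};
	unsigned char port = 0xA5;	/* whatever is currently latched   */

	unsigned char on  = (port & e_tab[BIT_MSK]) | e_tab[BIT_SET];
	unsigned char off = (port & e_tab[BIT_MSK]) | e_tab[BIT_CLR];

	printf("port=%#04x  E on -> %#04x  E off -> %#04x\n", port, on, off);
	return 0;
}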
gpl-2.0
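Still on the panel driver: input_name2mask() turns a keypad signal name such as "S4S5" or "a-p-E-" into a 45-bit contact mask, five status bits (E, S, P, A, B) per output group, with bit = out * 5 + in and group 8 reserved for contacts tied to ground; lower-case letters mean the contact must stay low, so they contribute to the mask but not to the value. The stand-alone sketch below re-derives that encoding in plain user-space C purely for illustration; demo_name2mask() and the sample strings are invented for the demo and are not part of the driver.

/*
 * Illustration only, not kernel code: the signal-name encoding used by
 * the panel keypad profiles.
 */
#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int demo_name2mask(const char *name, uint64_t *mask, uint64_t *value)
{
	static const char sigtab[] = "EeSsPpAaBb";	/* same order as the driver */
	uint64_t m = 0, v = 0;

	while (*name) {
		const char *p = strchr(sigtab, *name);
		int in, out, neg;

		if (!p || !*name)
			return 0;		/* unknown input letter */
		neg = (int)(p - sigtab) & 1;	/* odd index -> negated (lower case) */
		in = (int)(p - sigtab) >> 1;	/* E=0 S=1 P=2 A=3 B=4 */
		name++;
		if (isdigit((unsigned char)*name))
			out = *name - '0';	/* tied to output D0..D7 */
		else if (*name == '-')
			out = 8;		/* tied to the ground group */
		else
			return 0;
		name++;
		m |= 1ULL << (out * 5 + in);
		if (!neg)
			v |= 1ULL << (out * 5 + in);
	}
	*mask = m;
	*value = v;
	return 1;
}

int main(void)
{
	static const char *samples[] = { "S4S5", "a-p-E-" };
	uint64_t mask, value;
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		if (demo_name2mask(samples[i], &mask, &value))
			printf("%-8s mask=%#014llx value=%#014llx\n",
			       samples[i],
			       (unsigned long long)mask,
			       (unsigned long long)value);
	return 0;
}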
chneukirchen/linux-jetson-tk1
arch/sh/boards/board-apsh4ad0a.c
4728
3405
/* * ALPHAPROJECT AP-SH4AD-0A Support. * * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd. * Copyright (C) 2010 Matt Fleming * Copyright (C) 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <linux/irq.h> #include <linux/clk.h> #include <asm/machvec.h> #include <asm/sizes.h> /* Dummy supplies, where voltage doesn't matter */ static struct regulator_consumer_supply dummy_supplies[] = { REGULATOR_SUPPLY("vddvario", "smsc911x"), REGULATOR_SUPPLY("vdd33a", "smsc911x"), }; static struct resource smsc911x_resources[] = { [0] = { .name = "smsc911x-memory", .start = 0xA4000000, .end = 0xA4000000 + SZ_256 - 1, .flags = IORESOURCE_MEM, }, [1] = { .name = "smsc911x-irq", .start = evt2irq(0x200), .end = evt2irq(0x200), .flags = IORESOURCE_IRQ, }, }; static struct smsc911x_platform_config smsc911x_config = { .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, .flags = SMSC911X_USE_16BIT, .phy_interface = PHY_INTERFACE_MODE_MII, }; static struct platform_device smsc911x_device = { .name = "smsc911x", .id = -1, .num_resources = ARRAY_SIZE(smsc911x_resources), .resource = smsc911x_resources, .dev = { .platform_data = &smsc911x_config, }, }; static struct platform_device *apsh4ad0a_devices[] __initdata = { &smsc911x_device, }; static int __init apsh4ad0a_devices_setup(void) { regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); return platform_add_devices(apsh4ad0a_devices, ARRAY_SIZE(apsh4ad0a_devices)); } device_initcall(apsh4ad0a_devices_setup); static int apsh4ad0a_mode_pins(void) { int value = 0; /* These are the factory default settings of SW1 and SW2. * If you change these dip switches then you will need to * adjust the values below as well. */ value |= MODE_PIN0; /* Clock Mode 3 */ value |= MODE_PIN1; value &= ~MODE_PIN2; value &= ~MODE_PIN3; value &= ~MODE_PIN4; /* 16-bit Area0 bus width */ value |= MODE_PIN5; value |= MODE_PIN6; value |= MODE_PIN7; /* Normal mode */ value |= MODE_PIN8; /* Little Endian */ value |= MODE_PIN9; /* Crystal resonator */ value &= ~MODE_PIN10; /* 29-bit address mode */ value &= ~MODE_PIN11; /* PCI-E Root port */ value &= ~MODE_PIN12; /* 4 lane + 1 lane */ value |= MODE_PIN13; /* AUD Enable */ value &= ~MODE_PIN14; /* Normal Operation */ return value; } static int apsh4ad0a_clk_init(void) { struct clk *clk; int ret; clk = clk_get(NULL, "extal"); if (IS_ERR(clk)) return PTR_ERR(clk); ret = clk_set_rate(clk, 33333000); clk_put(clk); return ret; } /* Initialize the board */ static void __init apsh4ad0a_setup(char **cmdline_p) { pr_info("Alpha Project AP-SH4AD-0A support:\n"); } static void __init apsh4ad0a_init_irq(void) { plat_irq_setup_pins(IRQ_MODE_IRQ3210); } /* * The Machine Vector */ static struct sh_machine_vector mv_apsh4ad0a __initmv = { .mv_name = "AP-SH4AD-0A", .mv_setup = apsh4ad0a_setup, .mv_mode_pins = apsh4ad0a_mode_pins, .mv_clk_init = apsh4ad0a_clk_init, .mv_init_irq = apsh4ad0a_init_irq, };
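apsh4ad0a_mode_pins() above packs the factory dip-switch settings into one bit per MODE_PINn. The stand-alone sketch below rebuilds the same word and reports which pins end up set; it assumes MODE_PINn expands to (1 << n) as on SH, runs in user space, and is for illustration only.

/*
 * Illustration only, not kernel code: the factory-default mode-pin
 * word composed the same way as apsh4ad0a_mode_pins().
 */
#include <stdio.h>

#define MODE_PIN(n)	(1u << (n))

int main(void)
{
	/* same pins the board code sets for the factory default switches */
	unsigned int value = MODE_PIN(0) | MODE_PIN(1) |	/* clock mode 3      */
			     MODE_PIN(5) | MODE_PIN(6) |
			     MODE_PIN(7) |			/* normal mode       */
			     MODE_PIN(8) |			/* little endian     */
			     MODE_PIN(9) |			/* crystal resonator */
			     MODE_PIN(13);			/* AUD enable        */
	int n;

	printf("mode pins = 0x%04x\n", value);
	for (n = 0; n <= 14; n++)
		printf("MODE_PIN%-2d : %s\n", n,
		       (value & MODE_PIN(n)) ? "set" : "clear");
	return 0;
}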
gpl-2.0
NoelMacwan/SXDHuashan
drivers/isdn/hisax/hfc_sx.c
4984
44319
/* $Id: hfc_sx.c,v 1.12.2.5 2004/02/11 13:21:33 keil Exp $ * * level driver for Cologne Chip Designs hfc-s+/sp based cards * * Author Werner Cornelius * based on existing driver for CCD HFC PCI cards * Copyright by Werner Cornelius <werner@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include "hisax.h" #include "hfc_sx.h" #include "isdnl1.h" #include <linux/interrupt.h> #include <linux/isapnp.h> #include <linux/slab.h> static const char *hfcsx_revision = "$Revision: 1.12.2.5 $"; /***************************************/ /* IRQ-table for CCDs demo board */ /* IRQs 6,5,10,11,12,15 are supported */ /***************************************/ /* Teles 16.3c Vendor Id TAG2620, Version 1.0, Vendor version 2.1 * * Thanks to Uwe Wisniewski * * ISA-SLOT Signal PIN * B25 IRQ3 92 IRQ_G * B23 IRQ5 94 IRQ_A * B4 IRQ2/9 95 IRQ_B * D3 IRQ10 96 IRQ_C * D4 IRQ11 97 IRQ_D * D5 IRQ12 98 IRQ_E * D6 IRQ15 99 IRQ_F */ #undef CCD_DEMO_BOARD #ifdef CCD_DEMO_BOARD static u_char ccd_sp_irqtab[16] = { 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 3, 4, 5, 0, 0, 6 }; #else /* Teles 16.3c */ static u_char ccd_sp_irqtab[16] = { 0, 0, 0, 7, 0, 1, 0, 0, 0, 2, 3, 4, 5, 0, 0, 6 }; #endif #define NT_T1_COUNT 20 /* number of 3.125ms interrupts for G2 timeout */ #define byteout(addr, val) outb(val, addr) #define bytein(addr) inb(addr) /******************************/ /* In/Out access to registers */ /******************************/ static inline void Write_hfc(struct IsdnCardState *cs, u_char regnum, u_char val) { byteout(cs->hw.hfcsx.base + 1, regnum); byteout(cs->hw.hfcsx.base, val); } static inline u_char Read_hfc(struct IsdnCardState *cs, u_char regnum) { u_char ret; byteout(cs->hw.hfcsx.base + 1, regnum); ret = bytein(cs->hw.hfcsx.base); return (ret); } /**************************************************/ /* select a fifo and remember which one for reuse */ /**************************************************/ static void fifo_select(struct IsdnCardState *cs, u_char fifo) { if (fifo == cs->hw.hfcsx.last_fifo) return; /* still valid */ byteout(cs->hw.hfcsx.base + 1, HFCSX_FIF_SEL); byteout(cs->hw.hfcsx.base, fifo); while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */ udelay(4); byteout(cs->hw.hfcsx.base, fifo); while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */ } /******************************************/ /* reset the specified fifo to defaults. */ /* If its a send fifo init needed markers */ /******************************************/ static void reset_fifo(struct IsdnCardState *cs, u_char fifo) { fifo_select(cs, fifo); /* first select the fifo */ byteout(cs->hw.hfcsx.base + 1, HFCSX_CIRM); byteout(cs->hw.hfcsx.base, cs->hw.hfcsx.cirm | 0x80); /* reset cmd */ udelay(1); while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */ } /*************************************************************/ /* write_fifo writes the skb contents to the desired fifo */ /* if no space is available or an error occurs 0 is returned */ /* the skb is not released in any way. 
*/ /*************************************************************/ static int write_fifo(struct IsdnCardState *cs, struct sk_buff *skb, u_char fifo, int trans_max) { unsigned short *msp; int fifo_size, count, z1, z2; u_char f_msk, f1, f2, *src; if (skb->len <= 0) return (0); if (fifo & 1) return (0); /* no write fifo */ fifo_select(cs, fifo); if (fifo & 4) { fifo_size = D_FIFO_SIZE; /* D-channel */ f_msk = MAX_D_FRAMES; if (trans_max) return (0); /* only HDLC */ } else { fifo_size = cs->hw.hfcsx.b_fifo_size; /* B-channel */ f_msk = MAX_B_FRAMES; } z1 = Read_hfc(cs, HFCSX_FIF_Z1H); z1 = ((z1 << 8) | Read_hfc(cs, HFCSX_FIF_Z1L)); /* Check for transparent mode */ if (trans_max) { z2 = Read_hfc(cs, HFCSX_FIF_Z2H); z2 = ((z2 << 8) | Read_hfc(cs, HFCSX_FIF_Z2L)); count = z2 - z1; if (count <= 0) count += fifo_size; /* free bytes */ if (count < skb->len + 1) return (0); /* no room */ count = fifo_size - count; /* bytes still not send */ if (count > 2 * trans_max) return (0); /* delay to long */ count = skb->len; src = skb->data; while (count--) Write_hfc(cs, HFCSX_FIF_DWR, *src++); return (1); /* success */ } msp = ((struct hfcsx_extra *)(cs->hw.hfcsx.extra))->marker; msp += (((fifo >> 1) & 3) * (MAX_B_FRAMES + 1)); f1 = Read_hfc(cs, HFCSX_FIF_F1) & f_msk; f2 = Read_hfc(cs, HFCSX_FIF_F2) & f_msk; count = f1 - f2; /* frame count actually buffered */ if (count < 0) count += (f_msk + 1); /* if wrap around */ if (count > f_msk - 1) { if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_write_fifo %d more as %d frames", fifo, f_msk - 1); return (0); } *(msp + f1) = z1; /* remember marker */ if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_write_fifo %d f1(%x) f2(%x) z1(f1)(%x)", fifo, f1, f2, z1); /* now determine free bytes in FIFO buffer */ count = *(msp + f2) - z1; if (count <= 0) count += fifo_size; /* count now contains available bytes */ if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_write_fifo %d count(%u/%d)", fifo, skb->len, count); if (count < skb->len) { if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_write_fifo %d no fifo mem", fifo); return (0); } count = skb->len; /* get frame len */ src = skb->data; /* source pointer */ while (count--) Write_hfc(cs, HFCSX_FIF_DWR, *src++); Read_hfc(cs, HFCSX_FIF_INCF1); /* increment F1 */ udelay(1); while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */ return (1); } /***************************************************************/ /* read_fifo reads data to an skb from the desired fifo */ /* if no data is available or an error occurs NULL is returned */ /* the skb is not released in any way. 
*/ /***************************************************************/ static struct sk_buff * read_fifo(struct IsdnCardState *cs, u_char fifo, int trans_max) { int fifo_size, count, z1, z2; u_char f_msk, f1, f2, *dst; struct sk_buff *skb; if (!(fifo & 1)) return (NULL); /* no read fifo */ fifo_select(cs, fifo); if (fifo & 4) { fifo_size = D_FIFO_SIZE; /* D-channel */ f_msk = MAX_D_FRAMES; if (trans_max) return (NULL); /* only hdlc */ } else { fifo_size = cs->hw.hfcsx.b_fifo_size; /* B-channel */ f_msk = MAX_B_FRAMES; } /* transparent mode */ if (trans_max) { z1 = Read_hfc(cs, HFCSX_FIF_Z1H); z1 = ((z1 << 8) | Read_hfc(cs, HFCSX_FIF_Z1L)); z2 = Read_hfc(cs, HFCSX_FIF_Z2H); z2 = ((z2 << 8) | Read_hfc(cs, HFCSX_FIF_Z2L)); /* now determine bytes in actual FIFO buffer */ count = z1 - z2; if (count <= 0) count += fifo_size; /* count now contains buffered bytes */ count++; if (count > trans_max) count = trans_max; /* limit length */ skb = dev_alloc_skb(count); if (skb) { dst = skb_put(skb, count); while (count--) *dst++ = Read_hfc(cs, HFCSX_FIF_DRD); return skb; } else return NULL; /* no memory */ } do { f1 = Read_hfc(cs, HFCSX_FIF_F1) & f_msk; f2 = Read_hfc(cs, HFCSX_FIF_F2) & f_msk; if (f1 == f2) return (NULL); /* no frame available */ z1 = Read_hfc(cs, HFCSX_FIF_Z1H); z1 = ((z1 << 8) | Read_hfc(cs, HFCSX_FIF_Z1L)); z2 = Read_hfc(cs, HFCSX_FIF_Z2H); z2 = ((z2 << 8) | Read_hfc(cs, HFCSX_FIF_Z2L)); if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_read_fifo %d f1(%x) f2(%x) z1(f2)(%x) z2(f2)(%x)", fifo, f1, f2, z1, z2); /* now determine bytes in actual FIFO buffer */ count = z1 - z2; if (count <= 0) count += fifo_size; /* count now contains buffered bytes */ count++; if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_read_fifo %d count %u)", fifo, count); if ((count > fifo_size) || (count < 4)) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "hfcsx_read_fifo %d paket inv. len %d ", fifo , count); while (count) { count--; /* empty fifo */ Read_hfc(cs, HFCSX_FIF_DRD); } skb = NULL; } else if ((skb = dev_alloc_skb(count - 3))) { count -= 3; dst = skb_put(skb, count); while (count--) *dst++ = Read_hfc(cs, HFCSX_FIF_DRD); Read_hfc(cs, HFCSX_FIF_DRD); /* CRC 1 */ Read_hfc(cs, HFCSX_FIF_DRD); /* CRC 2 */ if (Read_hfc(cs, HFCSX_FIF_DRD)) { dev_kfree_skb_irq(skb); if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_read_fifo %d crc error", fifo); skb = NULL; } } else { printk(KERN_WARNING "HFC-SX: receive out of memory\n"); return (NULL); } Read_hfc(cs, HFCSX_FIF_INCF2); /* increment F2 */ udelay(1); while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */ udelay(1); } while (!skb); /* retry in case of crc error */ return (skb); } /******************************************/ /* free hardware resources used by driver */ /******************************************/ static void release_io_hfcsx(struct IsdnCardState *cs) { cs->hw.hfcsx.int_m2 = 0; /* interrupt output off ! */ Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2); Write_hfc(cs, HFCSX_CIRM, HFCSX_RESET); /* Reset On */ msleep(30); /* Timeout 30ms */ Write_hfc(cs, HFCSX_CIRM, 0); /* Reset Off */ del_timer(&cs->hw.hfcsx.timer); release_region(cs->hw.hfcsx.base, 2); /* release IO-Block */ kfree(cs->hw.hfcsx.extra); cs->hw.hfcsx.extra = NULL; } /**********************************************************/ /* set_fifo_size determines the size of the RAM and FIFOs */ /* returning 0 -> need to reset the chip again. 
*/ /**********************************************************/ static int set_fifo_size(struct IsdnCardState *cs) { if (cs->hw.hfcsx.b_fifo_size) return (1); /* already determined */ if ((cs->hw.hfcsx.chip >> 4) == 9) { cs->hw.hfcsx.b_fifo_size = B_FIFO_SIZE_32K; return (1); } cs->hw.hfcsx.b_fifo_size = B_FIFO_SIZE_8K; cs->hw.hfcsx.cirm |= 0x10; /* only 8K of ram */ return (0); } /********************************************************************************/ /* function called to reset the HFC SX chip. A complete software reset of chip */ /* and fifos is done. */ /********************************************************************************/ static void reset_hfcsx(struct IsdnCardState *cs) { cs->hw.hfcsx.int_m2 = 0; /* interrupt output off ! */ Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2); printk(KERN_INFO "HFC_SX: resetting card\n"); while (1) { Write_hfc(cs, HFCSX_CIRM, HFCSX_RESET | cs->hw.hfcsx.cirm); /* Reset */ mdelay(30); Write_hfc(cs, HFCSX_CIRM, cs->hw.hfcsx.cirm); /* Reset Off */ mdelay(20); if (Read_hfc(cs, HFCSX_STATUS) & 2) printk(KERN_WARNING "HFC-SX init bit busy\n"); cs->hw.hfcsx.last_fifo = 0xff; /* invalidate */ if (!set_fifo_size(cs)) continue; break; } cs->hw.hfcsx.trm = 0 + HFCSX_BTRANS_THRESMASK; /* no echo connect , threshold */ Write_hfc(cs, HFCSX_TRM, cs->hw.hfcsx.trm); Write_hfc(cs, HFCSX_CLKDEL, 0x0e); /* ST-Bit delay for TE-Mode */ cs->hw.hfcsx.sctrl_e = HFCSX_AUTO_AWAKE; Write_hfc(cs, HFCSX_SCTRL_E, cs->hw.hfcsx.sctrl_e); /* S/T Auto awake */ cs->hw.hfcsx.bswapped = 0; /* no exchange */ cs->hw.hfcsx.nt_mode = 0; /* we are in TE mode */ cs->hw.hfcsx.ctmt = HFCSX_TIM3_125 | HFCSX_AUTO_TIMER; Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt); cs->hw.hfcsx.int_m1 = HFCSX_INTS_DTRANS | HFCSX_INTS_DREC | HFCSX_INTS_L1STATE | HFCSX_INTS_TIMER; Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); /* Clear already pending ints */ if (Read_hfc(cs, HFCSX_INT_S1)); Write_hfc(cs, HFCSX_STATES, HFCSX_LOAD_STATE | 2); /* HFC ST 2 */ udelay(10); Write_hfc(cs, HFCSX_STATES, 2); /* HFC ST 2 */ cs->hw.hfcsx.mst_m = HFCSX_MASTER; /* HFC Master Mode */ Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m); cs->hw.hfcsx.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! 
*/ Write_hfc(cs, HFCSX_SCTRL, cs->hw.hfcsx.sctrl); cs->hw.hfcsx.sctrl_r = 0; Write_hfc(cs, HFCSX_SCTRL_R, cs->hw.hfcsx.sctrl_r); /* Init GCI/IOM2 in master mode */ /* Slots 0 and 1 are set for B-chan 1 and 2 */ /* D- and monitor/CI channel are not enabled */ /* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC */ /* STIO2 is used as data input, B1+B2 from IOM->ST */ /* ST B-channel send disabled -> continuous 1s */ /* The IOM slots are always enabled */ cs->hw.hfcsx.conn = 0x36; /* set data flow directions */ Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn); Write_hfc(cs, HFCSX_B1_SSL, 0x80); /* B1-Slot 0 STIO1 out enabled */ Write_hfc(cs, HFCSX_B2_SSL, 0x81); /* B2-Slot 1 STIO1 out enabled */ Write_hfc(cs, HFCSX_B1_RSL, 0x80); /* B1-Slot 0 STIO2 in enabled */ Write_hfc(cs, HFCSX_B2_RSL, 0x81); /* B2-Slot 1 STIO2 in enabled */ /* Finally enable IRQ output */ cs->hw.hfcsx.int_m2 = HFCSX_IRQ_ENABLE; Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2); if (Read_hfc(cs, HFCSX_INT_S2)); } /***************************************************/ /* Timer function called when kernel timer expires */ /***************************************************/ static void hfcsx_Timer(struct IsdnCardState *cs) { cs->hw.hfcsx.timer.expires = jiffies + 75; /* WD RESET */ /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcsx.ctmt | 0x80); add_timer(&cs->hw.hfcsx.timer); */ } /************************************************/ /* select a b-channel entry matching and active */ /************************************************/ static struct BCState * Sel_BCS(struct IsdnCardState *cs, int channel) { if (cs->bcs[0].mode && (cs->bcs[0].channel == channel)) return (&cs->bcs[0]); else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel)) return (&cs->bcs[1]); else return (NULL); } /*******************************/ /* D-channel receive procedure */ /*******************************/ static int receive_dmsg(struct IsdnCardState *cs) { struct sk_buff *skb; int count = 5; if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { debugl1(cs, "rec_dmsg blocked"); return (1); } do { skb = read_fifo(cs, HFCSX_SEL_D_RX, 0); if (skb) { skb_queue_tail(&cs->rq, skb); schedule_event(cs, D_RCVBUFREADY); } } while (--count && skb); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); return (1); } /**********************************/ /* B-channel main receive routine */ /**********************************/ static void main_rec_hfcsx(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; int count = 5; struct sk_buff *skb; Begin: count--; if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { debugl1(cs, "rec_data %d blocked", bcs->channel); return; } skb = read_fifo(cs, ((bcs->channel) && (!cs->hw.hfcsx.bswapped)) ? HFCSX_SEL_B2_RX : HFCSX_SEL_B1_RX, (bcs->mode == L1_MODE_TRANS) ? 
HFCSX_BTRANS_THRESHOLD : 0); if (skb) { skb_queue_tail(&bcs->rqueue, skb); schedule_event(bcs, B_RCVBUFREADY); } test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); if (count && skb) goto Begin; return; } /**************************/ /* D-channel send routine */ /**************************/ static void hfcsx_fill_dfifo(struct IsdnCardState *cs) { if (!cs->tx_skb) return; if (cs->tx_skb->len <= 0) return; if (write_fifo(cs, cs->tx_skb, HFCSX_SEL_D_TX, 0)) { dev_kfree_skb_any(cs->tx_skb); cs->tx_skb = NULL; } return; } /**************************/ /* B-channel send routine */ /**************************/ static void hfcsx_fill_fifo(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; if (!bcs->tx_skb) return; if (bcs->tx_skb->len <= 0) return; if (write_fifo(cs, bcs->tx_skb, ((bcs->channel) && (!cs->hw.hfcsx.bswapped)) ? HFCSX_SEL_B2_TX : HFCSX_SEL_B1_TX, (bcs->mode == L1_MODE_TRANS) ? HFCSX_BTRANS_THRESHOLD : 0)) { bcs->tx_cnt -= bcs->tx_skb->len; if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) && (PACKET_NOACK != bcs->tx_skb->pkt_type)) { u_long flags; spin_lock_irqsave(&bcs->aclock, flags); bcs->ackcnt += bcs->tx_skb->len; spin_unlock_irqrestore(&bcs->aclock, flags); schedule_event(bcs, B_ACKPENDING); } dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); } } /**********************************************/ /* D-channel l1 state call for leased NT-mode */ /**********************************************/ static void dch_nt_l2l1(struct PStack *st, int pr, void *arg) { struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware; switch (pr) { case (PH_DATA | REQUEST): case (PH_PULL | REQUEST): case (PH_PULL | INDICATION): st->l1.l1hw(st, pr, arg); break; case (PH_ACTIVATE | REQUEST): st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL); break; case (PH_TESTLOOP | REQUEST): if (1 & (long) arg) debugl1(cs, "PH_TEST_LOOP B1"); if (2 & (long) arg) debugl1(cs, "PH_TEST_LOOP B2"); if (!(3 & (long) arg)) debugl1(cs, "PH_TEST_LOOP DISABLED"); st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg); break; default: if (cs->debug) debugl1(cs, "dch_nt_l2l1 msg %04X unhandled", pr); break; } } /***********************/ /* set/reset echo mode */ /***********************/ static int hfcsx_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) { unsigned long flags; int i = *(unsigned int *) ic->parm.num; if ((ic->arg == 98) && (!(cs->hw.hfcsx.int_m1 & (HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC + HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC)))) { spin_lock_irqsave(&cs->lock, flags); Write_hfc(cs, HFCSX_STATES, HFCSX_LOAD_STATE | 0); /* HFC ST G0 */ udelay(10); cs->hw.hfcsx.sctrl |= SCTRL_MODE_NT; Write_hfc(cs, HFCSX_SCTRL, cs->hw.hfcsx.sctrl); /* set NT-mode */ udelay(10); Write_hfc(cs, HFCSX_STATES, HFCSX_LOAD_STATE | 1); /* HFC ST G1 */ udelay(10); Write_hfc(cs, HFCSX_STATES, 1 | HFCSX_ACTIVATE | HFCSX_DO_ACTION); cs->dc.hfcsx.ph_state = 1; cs->hw.hfcsx.nt_mode = 1; cs->hw.hfcsx.nt_timer = 0; spin_unlock_irqrestore(&cs->lock, flags); cs->stlist->l2.l2l1 = dch_nt_l2l1; debugl1(cs, "NT mode activated"); return (0); } if ((cs->chanlimit > 1) || (cs->hw.hfcsx.bswapped) || (cs->hw.hfcsx.nt_mode) || (ic->arg != 12)) return (-EINVAL); if (i) { cs->logecho = 1; cs->hw.hfcsx.trm |= 0x20; /* enable echo chan */ cs->hw.hfcsx.int_m1 |= HFCSX_INTS_B2REC; /* reset Channel !!!!! 
*/ } else { cs->logecho = 0; cs->hw.hfcsx.trm &= ~0x20; /* disable echo chan */ cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_B2REC; } cs->hw.hfcsx.sctrl_r &= ~SCTRL_B2_ENA; cs->hw.hfcsx.sctrl &= ~SCTRL_B2_ENA; cs->hw.hfcsx.conn |= 0x10; /* B2-IOM -> B2-ST */ cs->hw.hfcsx.ctmt &= ~2; spin_lock_irqsave(&cs->lock, flags); Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt); Write_hfc(cs, HFCSX_SCTRL_R, cs->hw.hfcsx.sctrl_r); Write_hfc(cs, HFCSX_SCTRL, cs->hw.hfcsx.sctrl); Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn); Write_hfc(cs, HFCSX_TRM, cs->hw.hfcsx.trm); Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); spin_unlock_irqrestore(&cs->lock, flags); return (0); } /* hfcsx_auxcmd */ /*****************************/ /* E-channel receive routine */ /*****************************/ static void receive_emsg(struct IsdnCardState *cs) { int count = 5; u_char *ptr; struct sk_buff *skb; if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { debugl1(cs, "echo_rec_data blocked"); return; } do { skb = read_fifo(cs, HFCSX_SEL_B2_RX, 0); if (skb) { if (cs->debug & DEB_DLOG_HEX) { ptr = cs->dlog; if ((skb->len) < MAX_DLOG_SPACE / 3 - 10) { *ptr++ = 'E'; *ptr++ = 'C'; *ptr++ = 'H'; *ptr++ = 'O'; *ptr++ = ':'; ptr += QuickHex(ptr, skb->data, skb->len); ptr--; *ptr++ = '\n'; *ptr = 0; HiSax_putstatus(cs, NULL, cs->dlog); } else HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len); } dev_kfree_skb_any(skb); } } while (--count && skb); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); return; } /* receive_emsg */ /*********************/ /* Interrupt handler */ /*********************/ static irqreturn_t hfcsx_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char exval; struct BCState *bcs; int count = 15; u_long flags; u_char val, stat; if (!(cs->hw.hfcsx.int_m2 & 0x08)) return IRQ_NONE; /* not initialised */ spin_lock_irqsave(&cs->lock, flags); if (HFCSX_ANYINT & (stat = Read_hfc(cs, HFCSX_STATUS))) { val = Read_hfc(cs, HFCSX_INT_S1); if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFC-SX: stat(%02x) s1(%02x)", stat, val); } else { spin_unlock_irqrestore(&cs->lock, flags); return IRQ_NONE; } if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFC-SX irq %x %s", val, test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ? "locked" : "unlocked"); val &= cs->hw.hfcsx.int_m1; if (val & 0x40) { /* state machine irq */ exval = Read_hfc(cs, HFCSX_STATES) & 0xf; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcsx.ph_state, exval); cs->dc.hfcsx.ph_state = exval; schedule_event(cs, D_L1STATECHANGE); val &= ~0x40; } if (val & 0x80) { /* timer irq */ if (cs->hw.hfcsx.nt_mode) { if ((--cs->hw.hfcsx.nt_timer) < 0) schedule_event(cs, D_L1STATECHANGE); } val &= ~0x80; Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt | HFCSX_CLTIMER); } while (val) { if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { cs->hw.hfcsx.int_s1 |= val; spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } if (cs->hw.hfcsx.int_s1 & 0x18) { exval = val; val = cs->hw.hfcsx.int_s1; cs->hw.hfcsx.int_s1 = exval; } if (val & 0x08) { if (!(bcs = Sel_BCS(cs, cs->hw.hfcsx.bswapped ? 1 : 0))) { if (cs->debug) debugl1(cs, "hfcsx spurious 0x08 IRQ"); } else main_rec_hfcsx(bcs); } if (val & 0x10) { if (cs->logecho) receive_emsg(cs); else if (!(bcs = Sel_BCS(cs, 1))) { if (cs->debug) debugl1(cs, "hfcsx spurious 0x10 IRQ"); } else main_rec_hfcsx(bcs); } if (val & 0x01) { if (!(bcs = Sel_BCS(cs, cs->hw.hfcsx.bswapped ? 
1 : 0))) { if (cs->debug) debugl1(cs, "hfcsx spurious 0x01 IRQ"); } else { if (bcs->tx_skb) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "fill_data %d blocked", bcs->channel); } else { if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "fill_data %d blocked", bcs->channel); } else { schedule_event(bcs, B_XMTBUFREADY); } } } } if (val & 0x02) { if (!(bcs = Sel_BCS(cs, 1))) { if (cs->debug) debugl1(cs, "hfcsx spurious 0x02 IRQ"); } else { if (bcs->tx_skb) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "fill_data %d blocked", bcs->channel); } else { if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "fill_data %d blocked", bcs->channel); } else { schedule_event(bcs, B_XMTBUFREADY); } } } } if (val & 0x20) { /* receive dframe */ receive_dmsg(cs); } if (val & 0x04) { /* dframe transmitted */ if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); if (cs->tx_skb) { if (cs->tx_skb->len) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_dfifo(cs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else { debugl1(cs, "hfcsx_fill_dfifo irq blocked"); } goto afterXPR; } else { dev_kfree_skb_irq(cs->tx_skb); cs->tx_cnt = 0; cs->tx_skb = NULL; } } if ((cs->tx_skb = skb_dequeue(&cs->sq))) { cs->tx_cnt = 0; if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_dfifo(cs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else { debugl1(cs, "hfcsx_fill_dfifo irq blocked"); } } else schedule_event(cs, D_XMTBUFREADY); } afterXPR: if (cs->hw.hfcsx.int_s1 && count--) { val = cs->hw.hfcsx.int_s1; cs->hw.hfcsx.int_s1 = 0; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFC-SX irq %x loop %d", val, 15 - count); } else val = 0; } spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } /********************************************************************/ /* timer callback for D-chan busy resolution. 
Currently no function */ /********************************************************************/ static void hfcsx_dbusy_timer(struct IsdnCardState *cs) { } /*************************************/ /* Layer 1 D-channel hardware access */ /*************************************/ static void HFCSX_l1hw(struct PStack *st, int pr, void *arg) { struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware; struct sk_buff *skb = arg; u_long flags; switch (pr) { case (PH_DATA | REQUEST): if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len); if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0); spin_lock_irqsave(&cs->lock, flags); if (cs->tx_skb) { skb_queue_tail(&cs->sq, skb); #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA Queued", 0); #endif } else { cs->tx_skb = skb; cs->tx_cnt = 0; #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA", 0); #endif if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_dfifo(cs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "hfcsx_fill_dfifo blocked"); } spin_unlock_irqrestore(&cs->lock, flags); break; case (PH_PULL | INDICATION): spin_lock_irqsave(&cs->lock, flags); if (cs->tx_skb) { if (cs->debug & L1_DEB_WARN) debugl1(cs, " l2l1 tx_skb exist this shouldn't happen"); skb_queue_tail(&cs->sq, skb); spin_unlock_irqrestore(&cs->lock, flags); break; } if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len); if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0); cs->tx_skb = skb; cs->tx_cnt = 0; #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA_PULLED", 0); #endif if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_dfifo(cs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "hfcsx_fill_dfifo blocked"); spin_unlock_irqrestore(&cs->lock, flags); break; case (PH_PULL | REQUEST): #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) debugl1(cs, "-> PH_REQUEST_PULL"); #endif if (!cs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case (HW_RESET | REQUEST): spin_lock_irqsave(&cs->lock, flags); Write_hfc(cs, HFCSX_STATES, HFCSX_LOAD_STATE | 3); /* HFC ST 3 */ udelay(6); Write_hfc(cs, HFCSX_STATES, 3); /* HFC ST 2 */ cs->hw.hfcsx.mst_m |= HFCSX_MASTER; Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m); Write_hfc(cs, HFCSX_STATES, HFCSX_ACTIVATE | HFCSX_DO_ACTION); spin_unlock_irqrestore(&cs->lock, flags); l1_msg(cs, HW_POWERUP | CONFIRM, NULL); break; case (HW_ENABLE | REQUEST): spin_lock_irqsave(&cs->lock, flags); Write_hfc(cs, HFCSX_STATES, HFCSX_ACTIVATE | HFCSX_DO_ACTION); spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_DEACTIVATE | REQUEST): spin_lock_irqsave(&cs->lock, flags); cs->hw.hfcsx.mst_m &= ~HFCSX_MASTER; Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m); spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_INFO3 | REQUEST): spin_lock_irqsave(&cs->lock, flags); cs->hw.hfcsx.mst_m |= HFCSX_MASTER; Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m); spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_TESTLOOP | REQUEST): spin_lock_irqsave(&cs->lock, flags); switch ((long) arg) { case (1): Write_hfc(cs, HFCSX_B1_SSL, 0x80); /* tx slot */ Write_hfc(cs, HFCSX_B1_RSL, 0x80); /* rx slot */ cs->hw.hfcsx.conn = (cs->hw.hfcsx.conn & ~7) | 1; Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn); break; case (2): 
Write_hfc(cs, HFCSX_B2_SSL, 0x81); /* tx slot */ Write_hfc(cs, HFCSX_B2_RSL, 0x81); /* rx slot */ cs->hw.hfcsx.conn = (cs->hw.hfcsx.conn & ~0x38) | 0x08; Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn); break; default: spin_unlock_irqrestore(&cs->lock, flags); if (cs->debug & L1_DEB_WARN) debugl1(cs, "hfcsx_l1hw loop invalid %4lx", (unsigned long)arg); return; } cs->hw.hfcsx.trm |= 0x80; /* enable IOM-loop */ Write_hfc(cs, HFCSX_TRM, cs->hw.hfcsx.trm); spin_unlock_irqrestore(&cs->lock, flags); break; default: if (cs->debug & L1_DEB_WARN) debugl1(cs, "hfcsx_l1hw unknown pr %4x", pr); break; } } /***********************************************/ /* called during init setting l1 stack pointer */ /***********************************************/ static void setstack_hfcsx(struct PStack *st, struct IsdnCardState *cs) { st->l1.l1hw = HFCSX_l1hw; } /**************************************/ /* send B-channel data if not blocked */ /**************************************/ static void hfcsx_send_data(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "send_data %d blocked", bcs->channel); } /***************************************************************/ /* activate/deactivate hardware for selected channels and mode */ /***************************************************************/ static void mode_hfcsx(struct BCState *bcs, int mode, int bc) { struct IsdnCardState *cs = bcs->cs; int fifo2; if (cs->debug & L1_DEB_HSCX) debugl1(cs, "HFCSX bchannel mode %d bchan %d/%d", mode, bc, bcs->channel); bcs->mode = mode; bcs->channel = bc; fifo2 = bc; if (cs->chanlimit > 1) { cs->hw.hfcsx.bswapped = 0; /* B1 and B2 normal mode */ cs->hw.hfcsx.sctrl_e &= ~0x80; } else { if (bc) { if (mode != L1_MODE_NULL) { cs->hw.hfcsx.bswapped = 1; /* B1 and B2 exchanged */ cs->hw.hfcsx.sctrl_e |= 0x80; } else { cs->hw.hfcsx.bswapped = 0; /* B1 and B2 normal mode */ cs->hw.hfcsx.sctrl_e &= ~0x80; } fifo2 = 0; } else { cs->hw.hfcsx.bswapped = 0; /* B1 and B2 normal mode */ cs->hw.hfcsx.sctrl_e &= ~0x80; } } switch (mode) { case (L1_MODE_NULL): if (bc) { cs->hw.hfcsx.sctrl &= ~SCTRL_B2_ENA; cs->hw.hfcsx.sctrl_r &= ~SCTRL_B2_ENA; } else { cs->hw.hfcsx.sctrl &= ~SCTRL_B1_ENA; cs->hw.hfcsx.sctrl_r &= ~SCTRL_B1_ENA; } if (fifo2) { cs->hw.hfcsx.int_m1 &= ~(HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC); } else { cs->hw.hfcsx.int_m1 &= ~(HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC); } break; case (L1_MODE_TRANS): if (bc) { cs->hw.hfcsx.sctrl |= SCTRL_B2_ENA; cs->hw.hfcsx.sctrl_r |= SCTRL_B2_ENA; } else { cs->hw.hfcsx.sctrl |= SCTRL_B1_ENA; cs->hw.hfcsx.sctrl_r |= SCTRL_B1_ENA; } if (fifo2) { cs->hw.hfcsx.int_m1 |= (HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC); cs->hw.hfcsx.ctmt |= 2; cs->hw.hfcsx.conn &= ~0x18; } else { cs->hw.hfcsx.int_m1 |= (HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC); cs->hw.hfcsx.ctmt |= 1; cs->hw.hfcsx.conn &= ~0x03; } break; case (L1_MODE_HDLC): if (bc) { cs->hw.hfcsx.sctrl |= SCTRL_B2_ENA; cs->hw.hfcsx.sctrl_r |= SCTRL_B2_ENA; } else { cs->hw.hfcsx.sctrl |= SCTRL_B1_ENA; cs->hw.hfcsx.sctrl_r |= SCTRL_B1_ENA; } if (fifo2) { cs->hw.hfcsx.int_m1 |= (HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC); cs->hw.hfcsx.ctmt &= ~2; cs->hw.hfcsx.conn &= ~0x18; } else { cs->hw.hfcsx.int_m1 |= (HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC); cs->hw.hfcsx.ctmt &= ~1; cs->hw.hfcsx.conn &= ~0x03; } break; case (L1_MODE_EXTRN): if (bc) { cs->hw.hfcsx.conn |= 0x10; cs->hw.hfcsx.sctrl |= SCTRL_B2_ENA; 
cs->hw.hfcsx.sctrl_r |= SCTRL_B2_ENA; cs->hw.hfcsx.int_m1 &= ~(HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC); } else { cs->hw.hfcsx.conn |= 0x02; cs->hw.hfcsx.sctrl |= SCTRL_B1_ENA; cs->hw.hfcsx.sctrl_r |= SCTRL_B1_ENA; cs->hw.hfcsx.int_m1 &= ~(HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC); } break; } Write_hfc(cs, HFCSX_SCTRL_E, cs->hw.hfcsx.sctrl_e); Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); Write_hfc(cs, HFCSX_SCTRL, cs->hw.hfcsx.sctrl); Write_hfc(cs, HFCSX_SCTRL_R, cs->hw.hfcsx.sctrl_r); Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt); Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn); if (mode != L1_MODE_EXTRN) { reset_fifo(cs, fifo2 ? HFCSX_SEL_B2_RX : HFCSX_SEL_B1_RX); reset_fifo(cs, fifo2 ? HFCSX_SEL_B2_TX : HFCSX_SEL_B1_TX); } } /******************************/ /* Layer2 -> Layer 1 Transfer */ /******************************/ static void hfcsx_l2l1(struct PStack *st, int pr, void *arg) { struct BCState *bcs = st->l1.bcs; struct sk_buff *skb = arg; u_long flags; switch (pr) { case (PH_DATA | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->tx_skb) { skb_queue_tail(&bcs->squeue, skb); } else { bcs->tx_skb = skb; // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->cs->BC_Send_Data(bcs); } spin_unlock_irqrestore(&bcs->cs->lock, flags); break; case (PH_PULL | INDICATION): spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->tx_skb) { printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n"); } else { // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->tx_skb = skb; bcs->cs->BC_Send_Data(bcs); } spin_unlock_irqrestore(&bcs->cs->lock, flags); break; case (PH_PULL | REQUEST): if (!bcs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case (PH_ACTIVATE | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag); mode_hfcsx(bcs, st->l1.mode, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | REQUEST): l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | CONFIRM): spin_lock_irqsave(&bcs->cs->lock, flags); test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag); test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); mode_hfcsx(bcs, 0, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL); break; } } /******************************************/ /* deactivate B-channel access and queues */ /******************************************/ static void close_hfcsx(struct BCState *bcs) { mode_hfcsx(bcs, 0, bcs->channel); if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) { skb_queue_purge(&bcs->rqueue); skb_queue_purge(&bcs->squeue); if (bcs->tx_skb) { dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); } } } /*************************************/ /* init B-channel queues and control */ /*************************************/ static int open_hfcsxstate(struct IsdnCardState *cs, struct BCState *bcs) { if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) { skb_queue_head_init(&bcs->rqueue); skb_queue_head_init(&bcs->squeue); } bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); bcs->event = 0; bcs->tx_cnt = 0; return (0); } /*********************************/ /* inits the stack for B-channel */ /*********************************/ static int setstack_2b(struct PStack *st, struct BCState *bcs) { bcs->channel = st->l1.bc; if (open_hfcsxstate(st->l1.hardware, bcs)) return (-1); st->l1.bcs = bcs; st->l2.l2l1 = 
hfcsx_l2l1; setstack_manager(st); bcs->st = st; setstack_l1_B(st); return (0); } /***************************/ /* handle L1 state changes */ /***************************/ static void hfcsx_bh(struct work_struct *work) { struct IsdnCardState *cs = container_of(work, struct IsdnCardState, tqueue); u_long flags; if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) { if (!cs->hw.hfcsx.nt_mode) switch (cs->dc.hfcsx.ph_state) { case (0): l1_msg(cs, HW_RESET | INDICATION, NULL); break; case (3): l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL); break; case (8): l1_msg(cs, HW_RSYNC | INDICATION, NULL); break; case (6): l1_msg(cs, HW_INFO2 | INDICATION, NULL); break; case (7): l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL); break; default: break; } else { switch (cs->dc.hfcsx.ph_state) { case (2): spin_lock_irqsave(&cs->lock, flags); if (cs->hw.hfcsx.nt_timer < 0) { cs->hw.hfcsx.nt_timer = 0; cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_TIMER; Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); /* Clear already pending ints */ if (Read_hfc(cs, HFCSX_INT_S1)); Write_hfc(cs, HFCSX_STATES, 4 | HFCSX_LOAD_STATE); udelay(10); Write_hfc(cs, HFCSX_STATES, 4); cs->dc.hfcsx.ph_state = 4; } else { cs->hw.hfcsx.int_m1 |= HFCSX_INTS_TIMER; Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); cs->hw.hfcsx.ctmt &= ~HFCSX_AUTO_TIMER; cs->hw.hfcsx.ctmt |= HFCSX_TIM3_125; Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt | HFCSX_CLTIMER); Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt | HFCSX_CLTIMER); cs->hw.hfcsx.nt_timer = NT_T1_COUNT; Write_hfc(cs, HFCSX_STATES, 2 | HFCSX_NT_G2_G3); /* allow G2 -> G3 transition */ } spin_unlock_irqrestore(&cs->lock, flags); break; case (1): case (3): case (4): spin_lock_irqsave(&cs->lock, flags); cs->hw.hfcsx.nt_timer = 0; cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_TIMER; Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); spin_unlock_irqrestore(&cs->lock, flags); break; default: break; } } } if (test_and_clear_bit(D_RCVBUFREADY, &cs->event)) DChannel_proc_rcv(cs); if (test_and_clear_bit(D_XMTBUFREADY, &cs->event)) DChannel_proc_xmt(cs); } /********************************/ /* called for card init message */ /********************************/ static void inithfcsx(struct IsdnCardState *cs) { cs->setstack_d = setstack_hfcsx; cs->BC_Send_Data = &hfcsx_send_data; cs->bcs[0].BC_SetStack = setstack_2b; cs->bcs[1].BC_SetStack = setstack_2b; cs->bcs[0].BC_Close = close_hfcsx; cs->bcs[1].BC_Close = close_hfcsx; mode_hfcsx(cs->bcs, 0, 0); mode_hfcsx(cs->bcs + 1, 0, 1); } /*******************************************/ /* handle card messages from control layer */ /*******************************************/ static int hfcsx_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFCSX: card_msg %x", mt); switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_hfcsx(cs); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_RELEASE: release_io_hfcsx(cs); return (0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); inithfcsx(cs); spin_unlock_irqrestore(&cs->lock, flags); msleep(80); /* Timeout 80ms */ /* now switch timer interrupt off */ spin_lock_irqsave(&cs->lock, flags); cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_TIMER; Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); /* reinit mode reg */ Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_TEST: return (0); } return (0); } #ifdef __ISAPNP__ static struct isapnp_device_id hfc_ids[] __devinitdata = { { ISAPNP_VENDOR('T', 'A', 'G'), 
ISAPNP_FUNCTION(0x2620), ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2620), (unsigned long) "Teles 16.3c2" }, { 0, } }; static struct isapnp_device_id *ipid __devinitdata = &hfc_ids[0]; static struct pnp_card *pnp_c __devinitdata = NULL; #endif int __devinit setup_hfcsx(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; char tmp[64]; strcpy(tmp, hfcsx_revision); printk(KERN_INFO "HiSax: HFC-SX driver Rev. %s\n", HiSax_getrev(tmp)); #ifdef __ISAPNP__ if (!card->para[1] && isapnp_present()) { struct pnp_dev *pnp_d; while (ipid->card_vendor) { if ((pnp_c = pnp_find_card(ipid->card_vendor, ipid->card_device, pnp_c))) { pnp_d = NULL; if ((pnp_d = pnp_find_dev(pnp_c, ipid->vendor, ipid->function, pnp_d))) { int err; printk(KERN_INFO "HiSax: %s detected\n", (char *)ipid->driver_data); pnp_disable_dev(pnp_d); err = pnp_activate_dev(pnp_d); if (err < 0) { printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n", __func__, err); return (0); } card->para[1] = pnp_port_start(pnp_d, 0); card->para[0] = pnp_irq(pnp_d, 0); if (!card->para[0] || !card->para[1]) { printk(KERN_ERR "HFC PnP:some resources are missing %ld/%lx\n", card->para[0], card->para[1]); pnp_disable_dev(pnp_d); return (0); } break; } else { printk(KERN_ERR "HFC PnP: PnP error card found, no device\n"); } } ipid++; pnp_c = NULL; } if (!ipid->card_vendor) { printk(KERN_INFO "HFC PnP: no ISAPnP card found\n"); return (0); } } #endif cs->hw.hfcsx.base = card->para[1] & 0xfffe; cs->irq = card->para[0]; cs->hw.hfcsx.int_s1 = 0; cs->dc.hfcsx.ph_state = 0; cs->hw.hfcsx.fifo = 255; if ((cs->typ == ISDN_CTYPE_HFC_SX) || (cs->typ == ISDN_CTYPE_HFC_SP_PCMCIA)) { if ((!cs->hw.hfcsx.base) || !request_region(cs->hw.hfcsx.base, 2, "HFCSX isdn")) { printk(KERN_WARNING "HiSax: HFC-SX io-base %#lx already in use\n", cs->hw.hfcsx.base); return (0); } byteout(cs->hw.hfcsx.base, cs->hw.hfcsx.base & 0xFF); byteout(cs->hw.hfcsx.base + 1, ((cs->hw.hfcsx.base >> 8) & 3) | 0x54); udelay(10); cs->hw.hfcsx.chip = Read_hfc(cs, HFCSX_CHIP_ID); switch (cs->hw.hfcsx.chip >> 4) { case 1: tmp[0] = '+'; break; case 9: tmp[0] = 'P'; break; default: printk(KERN_WARNING "HFC-SX: invalid chip id 0x%x\n", cs->hw.hfcsx.chip >> 4); release_region(cs->hw.hfcsx.base, 2); return (0); } if (!ccd_sp_irqtab[cs->irq & 0xF]) { printk(KERN_WARNING "HFC_SX: invalid irq %d specified\n", cs->irq & 0xF); release_region(cs->hw.hfcsx.base, 2); return (0); } if (!(cs->hw.hfcsx.extra = (void *) kmalloc(sizeof(struct hfcsx_extra), GFP_ATOMIC))) { release_region(cs->hw.hfcsx.base, 2); printk(KERN_WARNING "HFC-SX: unable to allocate memory\n"); return (0); } printk(KERN_INFO "HFC-S%c chip detected at base 0x%x IRQ %d HZ %d\n", tmp[0], (u_int) cs->hw.hfcsx.base, cs->irq, HZ); cs->hw.hfcsx.int_m2 = 0; /* disable alle interrupts */ cs->hw.hfcsx.int_m1 = 0; Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2); } else return (0); /* no valid card type */ cs->dbusytimer.function = (void *) hfcsx_dbusy_timer; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); INIT_WORK(&cs->tqueue, hfcsx_bh); cs->readisac = NULL; cs->writeisac = NULL; cs->readisacfifo = NULL; cs->writeisacfifo = NULL; cs->BC_Read_Reg = NULL; cs->BC_Write_Reg = NULL; cs->irq_func = &hfcsx_interrupt; cs->hw.hfcsx.timer.function = (void *) hfcsx_Timer; cs->hw.hfcsx.timer.data = (long) cs; cs->hw.hfcsx.b_fifo_size = 0; /* fifo size still unknown */ cs->hw.hfcsx.cirm = ccd_sp_irqtab[cs->irq & 0xF]; /* RAM not evaluated */ init_timer(&cs->hw.hfcsx.timer); reset_hfcsx(cs); 
cs->cardmsg = &hfcsx_card_msg; cs->auxcmd = &hfcsx_auxcmd; return (1); }
gpl-2.0
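The write_fifo() and read_fifo() helpers in the HFC-SX driver above both turn the chip's Z1/Z2 FIFO pointers into a byte count with the same wrap-around arithmetic (count = z1 - z2; if the result is not positive, add the FIFO size). The following standalone sketch restates that calculation only for illustration; fifo_used_bytes() and the pointer values are made up and are not part of the HiSax driver.

#include <stdio.h>

/* Wrap-around byte count for a circular FIFO, mirroring the z1/z2
 * handling in read_fifo()/write_fifo() above. All values are examples. */
static int fifo_used_bytes(int z1, int z2, int fifo_size)
{
	int count = z1 - z2;		/* write pointer minus read pointer */

	if (count <= 0)
		count += fifo_size;	/* pointers wrapped around the buffer */
	return count;
}

int main(void)
{
	int fifo_size = 0x2000;		/* e.g. an 8K B-channel FIFO */

	printf("used=%d\n", fifo_used_bytes(0x0100, 0x1F00, fifo_size)); /* wrapped case */
	printf("used=%d\n", fifo_used_bytes(0x1F00, 0x0100, fifo_size)); /* linear case */
	return 0;
}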
ParanoidSaberVoid/android_kernel_htc_msm8960
drivers/misc/spear13xx_pcie_gadget.c
4984
23083
/* * drivers/misc/spear13xx_pcie_gadget.c * * Copyright (C) 2010 ST Microelectronics * Pratyush Anand<pratyush.anand@st.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/clk.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pci_regs.h> #include <linux/configfs.h> #include <mach/pcie.h> #include <mach/misc_regs.h> #define IN0_MEM_SIZE (200 * 1024 * 1024 - 1) /* In current implementation address translation is done using IN0 only. * So IN1 start address and IN0 end address has been kept same */ #define IN1_MEM_SIZE (0 * 1024 * 1024 - 1) #define IN_IO_SIZE (20 * 1024 * 1024 - 1) #define IN_CFG0_SIZE (12 * 1024 * 1024 - 1) #define IN_CFG1_SIZE (12 * 1024 * 1024 - 1) #define IN_MSG_SIZE (12 * 1024 * 1024 - 1) /* Keep default BAR size as 4K*/ /* AORAM would be mapped by default*/ #define INBOUND_ADDR_MASK (SPEAR13XX_SYSRAM1_SIZE - 1) #define INT_TYPE_NO_INT 0 #define INT_TYPE_INTX 1 #define INT_TYPE_MSI 2 struct spear_pcie_gadget_config { void __iomem *base; void __iomem *va_app_base; void __iomem *va_dbi_base; char int_type[10]; ulong requested_msi; ulong configured_msi; ulong bar0_size; ulong bar0_rw_offset; void __iomem *va_bar0_address; }; struct pcie_gadget_target { struct configfs_subsystem subsys; struct spear_pcie_gadget_config config; }; struct pcie_gadget_target_attr { struct configfs_attribute attr; ssize_t (*show)(struct spear_pcie_gadget_config *config, char *buf); ssize_t (*store)(struct spear_pcie_gadget_config *config, const char *buf, size_t count); }; static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg) { /* Enable DBI access */ writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID), &app_reg->slv_armisc); writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID), &app_reg->slv_awmisc); } static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg) { /* disable DBI access */ writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID), &app_reg->slv_armisc); writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID), &app_reg->slv_awmisc); } static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config, int where, int size, u32 *val) { struct pcie_app_reg __iomem *app_reg = config->va_app_base; ulong va_address; /* Enable DBI access */ enable_dbi_access(app_reg); va_address = (ulong)config->va_dbi_base + (where & ~0x3); *val = readl(va_address); if (size == 1) *val = (*val >> (8 * (where & 3))) & 0xff; else if (size == 2) *val = (*val >> (8 * (where & 3))) & 0xffff; /* Disable DBI access */ disable_dbi_access(app_reg); } static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config, int where, int size, u32 val) { struct pcie_app_reg __iomem *app_reg = config->va_app_base; ulong va_address; /* Enable DBI access */ enable_dbi_access(app_reg); va_address = (ulong)config->va_dbi_base + (where & ~0x3); if (size == 4) writel(val, va_address); else if (size == 2) writew(val, va_address + (where & 2)); else if (size == 1) writeb(val, va_address + (where & 3)); /* Disable DBI access */ disable_dbi_access(app_reg); } #define PCI_FIND_CAP_TTL 48 static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config, u32 pos, int cap, int *ttl) { u32 id; while ((*ttl)--) { 
spear_dbi_read_reg(config, pos, 1, &pos); if (pos < 0x40) break; pos &= ~3; spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id); if (id == 0xff) break; if (id == cap) return pos; pos += PCI_CAP_LIST_NEXT; } return 0; } static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config, u32 pos, int cap) { int ttl = PCI_FIND_CAP_TTL; return pci_find_own_next_cap_ttl(config, pos, cap, &ttl); } static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config, u8 hdr_type) { u32 status; spear_dbi_read_reg(config, PCI_STATUS, 2, &status); if (!(status & PCI_STATUS_CAP_LIST)) return 0; switch (hdr_type) { case PCI_HEADER_TYPE_NORMAL: case PCI_HEADER_TYPE_BRIDGE: return PCI_CAPABILITY_LIST; case PCI_HEADER_TYPE_CARDBUS: return PCI_CB_CAPABILITY_LIST; default: return 0; } return 0; } /* * Tell if a device supports a given PCI capability. * Returns the address of the requested capability structure within the * device's PCI configuration space or 0 in case the device does not * support it. Possible values for @cap: * * %PCI_CAP_ID_PM Power Management * %PCI_CAP_ID_AGP Accelerated Graphics Port * %PCI_CAP_ID_VPD Vital Product Data * %PCI_CAP_ID_SLOTID Slot Identification * %PCI_CAP_ID_MSI Message Signalled Interrupts * %PCI_CAP_ID_CHSWP CompactPCI HotSwap * %PCI_CAP_ID_PCIX PCI-X * %PCI_CAP_ID_EXP PCI Express */ static int pci_find_own_capability(struct spear_pcie_gadget_config *config, int cap) { u32 pos; u32 hdr_type; spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type); pos = pci_find_own_cap_start(config, hdr_type); if (pos) pos = pci_find_own_next_cap(config, pos, cap); return pos; } static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id) { return 0; } /* * configfs interfaces show/store functions */ static ssize_t pcie_gadget_show_link( struct spear_pcie_gadget_config *config, char *buf) { struct pcie_app_reg __iomem *app_reg = config->va_app_base; if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID)) return sprintf(buf, "UP"); else return sprintf(buf, "DOWN"); } static ssize_t pcie_gadget_store_link( struct spear_pcie_gadget_config *config, const char *buf, size_t count) { struct pcie_app_reg __iomem *app_reg = config->va_app_base; if (sysfs_streq(buf, "UP")) writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID), &app_reg->app_ctrl_0); else if (sysfs_streq(buf, "DOWN")) writel(readl(&app_reg->app_ctrl_0) & ~(1 << APP_LTSSM_ENABLE_ID), &app_reg->app_ctrl_0); else return -EINVAL; return count; } static ssize_t pcie_gadget_show_int_type( struct spear_pcie_gadget_config *config, char *buf) { return sprintf(buf, "%s", config->int_type); } static ssize_t pcie_gadget_store_int_type( struct spear_pcie_gadget_config *config, const char *buf, size_t count) { u32 cap, vec, flags; ulong vector; if (sysfs_streq(buf, "INTA")) spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1); else if (sysfs_streq(buf, "MSI")) { vector = config->requested_msi; vec = 0; while (vector > 1) { vector /= 2; vec++; } spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0); cap = pci_find_own_capability(config, PCI_CAP_ID_MSI); spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags); flags &= ~PCI_MSI_FLAGS_QMASK; flags |= vec << 1; spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags); } else return -EINVAL; strcpy(config->int_type, buf); return count; } static ssize_t pcie_gadget_show_no_of_msi( struct spear_pcie_gadget_config *config, char *buf) { struct pcie_app_reg __iomem *app_reg = config->va_app_base; u32 cap, vec, flags; ulong vector; if 
((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID)) != (1 << CFG_MSI_EN_ID)) vector = 0; else { cap = pci_find_own_capability(config, PCI_CAP_ID_MSI); spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags); flags &= ~PCI_MSI_FLAGS_QSIZE; vec = flags >> 4; vector = 1; while (vec--) vector *= 2; } config->configured_msi = vector; return sprintf(buf, "%lu", vector); } static ssize_t pcie_gadget_store_no_of_msi( struct spear_pcie_gadget_config *config, const char *buf, size_t count) { if (strict_strtoul(buf, 0, &config->requested_msi)) return -EINVAL; if (config->requested_msi > 32) config->requested_msi = 32; return count; } static ssize_t pcie_gadget_store_inta( struct spear_pcie_gadget_config *config, const char *buf, size_t count) { struct pcie_app_reg __iomem *app_reg = config->va_app_base; ulong en; if (strict_strtoul(buf, 0, &en)) return -EINVAL; if (en) writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID), &app_reg->app_ctrl_0); else writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID), &app_reg->app_ctrl_0); return count; } static ssize_t pcie_gadget_store_send_msi( struct spear_pcie_gadget_config *config, const char *buf, size_t count) { struct pcie_app_reg __iomem *app_reg = config->va_app_base; ulong vector; u32 ven_msi; if (strict_strtoul(buf, 0, &vector)) return -EINVAL; if (!config->configured_msi) return -EINVAL; if (vector >= config->configured_msi) return -EINVAL; ven_msi = readl(&app_reg->ven_msi_1); ven_msi &= ~VEN_MSI_FUN_NUM_MASK; ven_msi |= 0 << VEN_MSI_FUN_NUM_ID; ven_msi &= ~VEN_MSI_TC_MASK; ven_msi |= 0 << VEN_MSI_TC_ID; ven_msi &= ~VEN_MSI_VECTOR_MASK; ven_msi |= vector << VEN_MSI_VECTOR_ID; /* generating interrupt for msi vector */ ven_msi |= VEN_MSI_REQ_EN; writel(ven_msi, &app_reg->ven_msi_1); udelay(1); ven_msi &= ~VEN_MSI_REQ_EN; writel(ven_msi, &app_reg->ven_msi_1); return count; } static ssize_t pcie_gadget_show_vendor_id( struct spear_pcie_gadget_config *config, char *buf) { u32 id; spear_dbi_read_reg(config, PCI_VENDOR_ID, 2, &id); return sprintf(buf, "%x", id); } static ssize_t pcie_gadget_store_vendor_id( struct spear_pcie_gadget_config *config, const char *buf, size_t count) { ulong id; if (strict_strtoul(buf, 0, &id)) return -EINVAL; spear_dbi_write_reg(config, PCI_VENDOR_ID, 2, id); return count; } static ssize_t pcie_gadget_show_device_id( struct spear_pcie_gadget_config *config, char *buf) { u32 id; spear_dbi_read_reg(config, PCI_DEVICE_ID, 2, &id); return sprintf(buf, "%x", id); } static ssize_t pcie_gadget_store_device_id( struct spear_pcie_gadget_config *config, const char *buf, size_t count) { ulong id; if (strict_strtoul(buf, 0, &id)) return -EINVAL; spear_dbi_write_reg(config, PCI_DEVICE_ID, 2, id); return count; } static ssize_t pcie_gadget_show_bar0_size( struct spear_pcie_gadget_config *config, char *buf) { return sprintf(buf, "%lx", config->bar0_size); } static ssize_t pcie_gadget_store_bar0_size( struct spear_pcie_gadget_config *config, const char *buf, size_t count) { ulong size; u32 pos, pos1; u32 no_of_bit = 0; if (strict_strtoul(buf, 0, &size)) return -EINVAL; /* min bar size is 256 */ if (size <= 0x100) size = 0x100; /* max bar size is 1MB*/ else if (size >= 0x100000) size = 0x100000; else { pos = 0; pos1 = 0; while (pos < 21) { pos = find_next_bit((ulong *)&size, 21, pos); if (pos != 21) pos1 = pos + 1; pos++; no_of_bit++; } if (no_of_bit == 2) pos1--; size = 1 << pos1; } config->bar0_size = size; spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1); return count; } static ssize_t pcie_gadget_show_bar0_address( 
struct spear_pcie_gadget_config *config, char *buf) { struct pcie_app_reg __iomem *app_reg = config->va_app_base; u32 address = readl(&app_reg->pim0_mem_addr_start); return sprintf(buf, "%x", address); } static ssize_t pcie_gadget_store_bar0_address( struct spear_pcie_gadget_config *config, const char *buf, size_t count) { struct pcie_app_reg __iomem *app_reg = config->va_app_base; ulong address; if (strict_strtoul(buf, 0, &address)) return -EINVAL; address &= ~(config->bar0_size - 1); if (config->va_bar0_address) iounmap(config->va_bar0_address); config->va_bar0_address = ioremap(address, config->bar0_size); if (!config->va_bar0_address) return -ENOMEM; writel(address, &app_reg->pim0_mem_addr_start); return count; } static ssize_t pcie_gadget_show_bar0_rw_offset( struct spear_pcie_gadget_config *config, char *buf) { return sprintf(buf, "%lx", config->bar0_rw_offset); } static ssize_t pcie_gadget_store_bar0_rw_offset( struct spear_pcie_gadget_config *config, const char *buf, size_t count) { ulong offset; if (strict_strtoul(buf, 0, &offset)) return -EINVAL; if (offset % 4) return -EINVAL; config->bar0_rw_offset = offset; return count; } static ssize_t pcie_gadget_show_bar0_data( struct spear_pcie_gadget_config *config, char *buf) { ulong data; if (!config->va_bar0_address) return -ENOMEM; data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset); return sprintf(buf, "%lx", data); } static ssize_t pcie_gadget_store_bar0_data( struct spear_pcie_gadget_config *config, const char *buf, size_t count) { ulong data; if (strict_strtoul(buf, 0, &data)) return -EINVAL; if (!config->va_bar0_address) return -ENOMEM; writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset); return count; } /* * Attribute definitions. */ #define PCIE_GADGET_TARGET_ATTR_RO(_name) \ static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \ __CONFIGFS_ATTR(_name, S_IRUGO, pcie_gadget_show_##_name, NULL) #define PCIE_GADGET_TARGET_ATTR_WO(_name) \ static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \ __CONFIGFS_ATTR(_name, S_IWUSR, NULL, pcie_gadget_store_##_name) #define PCIE_GADGET_TARGET_ATTR_RW(_name) \ static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \ __CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, pcie_gadget_show_##_name, \ pcie_gadget_store_##_name) PCIE_GADGET_TARGET_ATTR_RW(link); PCIE_GADGET_TARGET_ATTR_RW(int_type); PCIE_GADGET_TARGET_ATTR_RW(no_of_msi); PCIE_GADGET_TARGET_ATTR_WO(inta); PCIE_GADGET_TARGET_ATTR_WO(send_msi); PCIE_GADGET_TARGET_ATTR_RW(vendor_id); PCIE_GADGET_TARGET_ATTR_RW(device_id); PCIE_GADGET_TARGET_ATTR_RW(bar0_size); PCIE_GADGET_TARGET_ATTR_RW(bar0_address); PCIE_GADGET_TARGET_ATTR_RW(bar0_rw_offset); PCIE_GADGET_TARGET_ATTR_RW(bar0_data); static struct configfs_attribute *pcie_gadget_target_attrs[] = { &pcie_gadget_target_link.attr, &pcie_gadget_target_int_type.attr, &pcie_gadget_target_no_of_msi.attr, &pcie_gadget_target_inta.attr, &pcie_gadget_target_send_msi.attr, &pcie_gadget_target_vendor_id.attr, &pcie_gadget_target_device_id.attr, &pcie_gadget_target_bar0_size.attr, &pcie_gadget_target_bar0_address.attr, &pcie_gadget_target_bar0_rw_offset.attr, &pcie_gadget_target_bar0_data.attr, NULL, }; static struct pcie_gadget_target *to_target(struct config_item *item) { return item ? container_of(to_configfs_subsystem(to_config_group(item)), struct pcie_gadget_target, subsys) : NULL; } /* * Item operations and type for pcie_gadget_target. 
*/ static ssize_t pcie_gadget_target_attr_show(struct config_item *item, struct configfs_attribute *attr, char *buf) { ssize_t ret = -EINVAL; struct pcie_gadget_target *target = to_target(item); struct pcie_gadget_target_attr *t_attr = container_of(attr, struct pcie_gadget_target_attr, attr); if (t_attr->show) ret = t_attr->show(&target->config, buf); return ret; } static ssize_t pcie_gadget_target_attr_store(struct config_item *item, struct configfs_attribute *attr, const char *buf, size_t count) { ssize_t ret = -EINVAL; struct pcie_gadget_target *target = to_target(item); struct pcie_gadget_target_attr *t_attr = container_of(attr, struct pcie_gadget_target_attr, attr); if (t_attr->store) ret = t_attr->store(&target->config, buf, count); return ret; } static struct configfs_item_operations pcie_gadget_target_item_ops = { .show_attribute = pcie_gadget_target_attr_show, .store_attribute = pcie_gadget_target_attr_store, }; static struct config_item_type pcie_gadget_target_type = { .ct_attrs = pcie_gadget_target_attrs, .ct_item_ops = &pcie_gadget_target_item_ops, .ct_owner = THIS_MODULE, }; static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config) { struct pcie_app_reg __iomem *app_reg = config->va_app_base; /*setup registers for outbound translation */ writel(config->base, &app_reg->in0_mem_addr_start); writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE, &app_reg->in0_mem_addr_limit); writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start); writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE, &app_reg->in1_mem_addr_limit); writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start); writel(app_reg->in_io_addr_start + IN_IO_SIZE, &app_reg->in_io_addr_limit); writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start); writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE, &app_reg->in_cfg0_addr_limit); writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start); writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE, &app_reg->in_cfg1_addr_limit); writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start); writel(app_reg->in_msg_addr_start + IN_MSG_SIZE, &app_reg->in_msg_addr_limit); writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start); writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start); writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start); /*setup registers for inbound translation */ /* Keep AORAM mapped at BAR0 as default */ config->bar0_size = INBOUND_ADDR_MASK + 1; spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK); spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC); config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE, config->bar0_size); writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start); writel(0, &app_reg->pim1_mem_addr_start); writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit); writel(0x0, &app_reg->pim_io_addr_start); writel(0x0, &app_reg->pim_io_addr_start); writel(0x0, &app_reg->pim_rom_addr_start); writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID) | ((u32)1 << REG_TRANSLATION_ENABLE), &app_reg->app_ctrl_0); /* disable all rx interrupts */ writel(0, &app_reg->int_mask); /* Select INTA as default*/ spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1); } static int __devinit spear_pcie_gadget_probe(struct platform_device *pdev) { struct resource *res0, *res1; unsigned int status = 0; int irq; struct clk *clk; static struct pcie_gadget_target *target; struct spear_pcie_gadget_config *config; struct config_item *cg_item; struct 
configfs_subsystem *subsys; /* get resource for application registers*/ res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res0) { dev_err(&pdev->dev, "no resource defined\n"); return -EBUSY; } if (!request_mem_region(res0->start, resource_size(res0), pdev->name)) { dev_err(&pdev->dev, "pcie gadget region already claimed\n"); return -EBUSY; } /* get resource for dbi registers*/ res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res1) { dev_err(&pdev->dev, "no resource defined\n"); goto err_rel_res0; } if (!request_mem_region(res1->start, resource_size(res1), pdev->name)) { dev_err(&pdev->dev, "pcie gadget region already claimed\n"); goto err_rel_res0; } target = kzalloc(sizeof(*target), GFP_KERNEL); if (!target) { dev_err(&pdev->dev, "out of memory\n"); status = -ENOMEM; goto err_rel_res; } cg_item = &target->subsys.su_group.cg_item; sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id); cg_item->ci_type = &pcie_gadget_target_type; config = &target->config; config->va_app_base = (void __iomem *)ioremap(res0->start, resource_size(res0)); if (!config->va_app_base) { dev_err(&pdev->dev, "ioremap fail\n"); status = -ENOMEM; goto err_kzalloc; } config->base = (void __iomem *)res1->start; config->va_dbi_base = (void __iomem *)ioremap(res1->start, resource_size(res1)); if (!config->va_dbi_base) { dev_err(&pdev->dev, "ioremap fail\n"); status = -ENOMEM; goto err_iounmap_app; } dev_set_drvdata(&pdev->dev, target); irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no update irq?\n"); status = irq; goto err_iounmap; } status = request_irq(irq, spear_pcie_gadget_irq, 0, pdev->name, NULL); if (status) { dev_err(&pdev->dev, "pcie gadget interrupt IRQ%d already claimed\n", irq); goto err_iounmap; } /* Register configfs hooks */ subsys = &target->subsys; config_group_init(&subsys->su_group); mutex_init(&subsys->su_mutex); status = configfs_register_subsystem(subsys); if (status) goto err_irq; /* * init basic pcie application registers * do not enable clock if it is PCIE0.Ideally , all controller should * have been independent from others with respect to clock. But PCIE1 * and 2 depends on PCIE0.So PCIE0 clk is provided during board init. */ if (pdev->id == 1) { /* * Ideally CFG Clock should have been also enabled here. But * it is done currently during board init routne */ clk = clk_get_sys("pcie1", NULL); if (IS_ERR(clk)) { pr_err("%s:couldn't get clk for pcie1\n", __func__); goto err_irq; } if (clk_enable(clk)) { pr_err("%s:couldn't enable clk for pcie1\n", __func__); goto err_irq; } } else if (pdev->id == 2) { /* * Ideally CFG Clock should have been also enabled here. 
But * it is done currently during board init routne */ clk = clk_get_sys("pcie2", NULL); if (IS_ERR(clk)) { pr_err("%s:couldn't get clk for pcie2\n", __func__); goto err_irq; } if (clk_enable(clk)) { pr_err("%s:couldn't enable clk for pcie2\n", __func__); goto err_irq; } } spear13xx_pcie_device_init(config); return 0; err_irq: free_irq(irq, NULL); err_iounmap: iounmap(config->va_dbi_base); err_iounmap_app: iounmap(config->va_app_base); err_kzalloc: kfree(target); err_rel_res: release_mem_region(res1->start, resource_size(res1)); err_rel_res0: release_mem_region(res0->start, resource_size(res0)); return status; } static int __devexit spear_pcie_gadget_remove(struct platform_device *pdev) { struct resource *res0, *res1; static struct pcie_gadget_target *target; struct spear_pcie_gadget_config *config; int irq; res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); irq = platform_get_irq(pdev, 0); target = dev_get_drvdata(&pdev->dev); config = &target->config; free_irq(irq, NULL); iounmap(config->va_dbi_base); iounmap(config->va_app_base); release_mem_region(res1->start, resource_size(res1)); release_mem_region(res0->start, resource_size(res0)); configfs_unregister_subsystem(&target->subsys); kfree(target); return 0; } static void spear_pcie_gadget_shutdown(struct platform_device *pdev) { } static struct platform_driver spear_pcie_gadget_driver = { .probe = spear_pcie_gadget_probe, .remove = spear_pcie_gadget_remove, .shutdown = spear_pcie_gadget_shutdown, .driver = { .name = "pcie-gadget-spear", .bus = &platform_bus_type }, }; module_platform_driver(spear_pcie_gadget_driver); MODULE_ALIAS("platform:pcie-gadget-spear"); MODULE_AUTHOR("Pratyush Anand"); MODULE_LICENSE("GPL");
gpl-2.0
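In pcie_gadget_store_int_type() of the SPEAr PCIe gadget driver above, the requested MSI count is reduced to a power-of-two exponent before being shifted into the multiple-message field of the MSI flags register. The short program below restates that conversion on its own; the helper name and the sample counts are illustrative only, not part of the driver.

#include <stdio.h>

/* Convert an MSI vector count into the exponent the gadget driver shifts
 * into PCI_MSI_FLAGS (1 -> 0, 2 -> 1, 4 -> 2, ... 32 -> 5), mirroring the
 * while (vector > 1) loop in pcie_gadget_store_int_type() above. */
static unsigned int msi_count_to_exponent(unsigned long vector)
{
	unsigned int vec = 0;

	while (vector > 1) {
		vector /= 2;
		vec++;
	}
	return vec;
}

int main(void)
{
	printf("1 -> %u, 8 -> %u, 32 -> %u\n",
	       msi_count_to_exponent(1),
	       msi_count_to_exponent(8),
	       msi_count_to_exponent(32));
	return 0;
}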
cool104/kernel-zenfone-4
arch/arm/common/scoop.c
5240
6980
/* * Support code for the SCOOP interface found on various Sharp PDAs * * Copyright (c) 2004 Richard Purdie * * Based on code written by Sharp/Lineo for 2.4 kernels * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/device.h> #include <linux/gpio.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/export.h> #include <linux/io.h> #include <asm/hardware/scoop.h> /* PCMCIA to Scoop linkage There is no easy way to link multiple scoop devices into one single entity for the pxa2xx_pcmcia device so this structure is used which is setup by the platform code. This file is never modular so this symbol is always accessile to the board support files. */ struct scoop_pcmcia_config *platform_scoop_config; EXPORT_SYMBOL(platform_scoop_config); struct scoop_dev { void __iomem *base; struct gpio_chip gpio; spinlock_t scoop_lock; unsigned short suspend_clr; unsigned short suspend_set; u32 scoop_gpwr; }; void reset_scoop(struct device *dev) { struct scoop_dev *sdev = dev_get_drvdata(dev); iowrite16(0x0100, sdev->base + SCOOP_MCR); /* 00 */ iowrite16(0x0000, sdev->base + SCOOP_CDR); /* 04 */ iowrite16(0x0000, sdev->base + SCOOP_CCR); /* 10 */ iowrite16(0x0000, sdev->base + SCOOP_IMR); /* 18 */ iowrite16(0x00FF, sdev->base + SCOOP_IRM); /* 14 */ iowrite16(0x0000, sdev->base + SCOOP_ISR); /* 1C */ iowrite16(0x0000, sdev->base + SCOOP_IRM); } static void __scoop_gpio_set(struct scoop_dev *sdev, unsigned offset, int value) { unsigned short gpwr; gpwr = ioread16(sdev->base + SCOOP_GPWR); if (value) gpwr |= 1 << (offset + 1); else gpwr &= ~(1 << (offset + 1)); iowrite16(gpwr, sdev->base + SCOOP_GPWR); } static void scoop_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio); unsigned long flags; spin_lock_irqsave(&sdev->scoop_lock, flags); __scoop_gpio_set(sdev, offset, value); spin_unlock_irqrestore(&sdev->scoop_lock, flags); } static int scoop_gpio_get(struct gpio_chip *chip, unsigned offset) { struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio); /* XXX: I'm unsure, but it seems so */ return ioread16(sdev->base + SCOOP_GPRR) & (1 << (offset + 1)); } static int scoop_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio); unsigned long flags; unsigned short gpcr; spin_lock_irqsave(&sdev->scoop_lock, flags); gpcr = ioread16(sdev->base + SCOOP_GPCR); gpcr &= ~(1 << (offset + 1)); iowrite16(gpcr, sdev->base + SCOOP_GPCR); spin_unlock_irqrestore(&sdev->scoop_lock, flags); return 0; } static int scoop_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio); unsigned long flags; unsigned short gpcr; spin_lock_irqsave(&sdev->scoop_lock, flags); __scoop_gpio_set(sdev, offset, value); gpcr = ioread16(sdev->base + SCOOP_GPCR); gpcr |= 1 << (offset + 1); iowrite16(gpcr, sdev->base + SCOOP_GPCR); spin_unlock_irqrestore(&sdev->scoop_lock, flags); return 0; } unsigned short read_scoop_reg(struct device *dev, unsigned short reg) { struct scoop_dev *sdev = dev_get_drvdata(dev); return ioread16(sdev->base + reg); } void write_scoop_reg(struct device *dev, unsigned short reg, unsigned short data) { struct scoop_dev *sdev = dev_get_drvdata(dev); iowrite16(data, sdev->base + 
reg); } EXPORT_SYMBOL(reset_scoop); EXPORT_SYMBOL(read_scoop_reg); EXPORT_SYMBOL(write_scoop_reg); #ifdef CONFIG_PM static void check_scoop_reg(struct scoop_dev *sdev) { unsigned short mcr; mcr = ioread16(sdev->base + SCOOP_MCR); if ((mcr & 0x100) == 0) iowrite16(0x0101, sdev->base + SCOOP_MCR); } static int scoop_suspend(struct platform_device *dev, pm_message_t state) { struct scoop_dev *sdev = platform_get_drvdata(dev); check_scoop_reg(sdev); sdev->scoop_gpwr = ioread16(sdev->base + SCOOP_GPWR); iowrite16((sdev->scoop_gpwr & ~sdev->suspend_clr) | sdev->suspend_set, sdev->base + SCOOP_GPWR); return 0; } static int scoop_resume(struct platform_device *dev) { struct scoop_dev *sdev = platform_get_drvdata(dev); check_scoop_reg(sdev); iowrite16(sdev->scoop_gpwr, sdev->base + SCOOP_GPWR); return 0; } #else #define scoop_suspend NULL #define scoop_resume NULL #endif static int __devinit scoop_probe(struct platform_device *pdev) { struct scoop_dev *devptr; struct scoop_config *inf; struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); int ret; int temp; if (!mem) return -EINVAL; devptr = kzalloc(sizeof(struct scoop_dev), GFP_KERNEL); if (!devptr) return -ENOMEM; spin_lock_init(&devptr->scoop_lock); inf = pdev->dev.platform_data; devptr->base = ioremap(mem->start, resource_size(mem)); if (!devptr->base) { ret = -ENOMEM; goto err_ioremap; } platform_set_drvdata(pdev, devptr); printk("Sharp Scoop Device found at 0x%08x -> 0x%8p\n",(unsigned int)mem->start, devptr->base); iowrite16(0x0140, devptr->base + SCOOP_MCR); reset_scoop(&pdev->dev); iowrite16(0x0000, devptr->base + SCOOP_CPR); iowrite16(inf->io_dir & 0xffff, devptr->base + SCOOP_GPCR); iowrite16(inf->io_out & 0xffff, devptr->base + SCOOP_GPWR); devptr->suspend_clr = inf->suspend_clr; devptr->suspend_set = inf->suspend_set; devptr->gpio.base = -1; if (inf->gpio_base != 0) { devptr->gpio.label = dev_name(&pdev->dev); devptr->gpio.base = inf->gpio_base; devptr->gpio.ngpio = 12; /* PA11 = 0, PA12 = 1, etc. up to PA22 = 11 */ devptr->gpio.set = scoop_gpio_set; devptr->gpio.get = scoop_gpio_get; devptr->gpio.direction_input = scoop_gpio_direction_input; devptr->gpio.direction_output = scoop_gpio_direction_output; ret = gpiochip_add(&devptr->gpio); if (ret) goto err_gpio; } return 0; if (devptr->gpio.base != -1) temp = gpiochip_remove(&devptr->gpio); err_gpio: platform_set_drvdata(pdev, NULL); err_ioremap: iounmap(devptr->base); kfree(devptr); return ret; } static int __devexit scoop_remove(struct platform_device *pdev) { struct scoop_dev *sdev = platform_get_drvdata(pdev); int ret; if (!sdev) return -EINVAL; if (sdev->gpio.base != -1) { ret = gpiochip_remove(&sdev->gpio); if (ret) { dev_err(&pdev->dev, "Can't remove gpio chip: %d\n", ret); return ret; } } platform_set_drvdata(pdev, NULL); iounmap(sdev->base); kfree(sdev); return 0; } static struct platform_driver scoop_driver = { .probe = scoop_probe, .remove = __devexit_p(scoop_remove), .suspend = scoop_suspend, .resume = scoop_resume, .driver = { .name = "sharp-scoop", }, }; static int __init scoop_init(void) { return platform_driver_register(&scoop_driver); } subsys_initcall(scoop_init);
gpl-2.0
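As a brief illustrative aside (not part of the scoop.c file above): once the probe routine has registered its gpio_chip, other kernel code can drive a SCOOP pin through the generic legacy gpiolib calls. This is only a sketch; the GPIO base number and pin offset below are hypothetical placeholders, since real boards pass the base in via their scoop_config.

/*
 * Minimal sketch, assuming a board that set gpio_base for the SCOOP chip.
 * EXAMPLE_SCOOP_GPIO_BASE and the pin offset are made-up values.
 */
#include <linux/gpio.h>

#define EXAMPLE_SCOOP_GPIO_BASE	100	/* placeholder for inf->gpio_base */
#define EXAMPLE_SCOOP_PIN	(EXAMPLE_SCOOP_GPIO_BASE + 3)

static int example_toggle_scoop_pin(void)
{
	int ret;

	ret = gpio_request(EXAMPLE_SCOOP_PIN, "example-scoop-pin");
	if (ret)
		return ret;

	/* Ends up in scoop_gpio_direction_output()/scoop_gpio_set() above. */
	ret = gpio_direction_output(EXAMPLE_SCOOP_PIN, 1);
	if (ret == 0)
		gpio_set_value(EXAMPLE_SCOOP_PIN, 0);

	gpio_free(EXAMPLE_SCOOP_PIN);
	return ret;
}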
somcom3x/android_kernel_nvidia_tegratab
drivers/cpufreq/freq_table.c
6520
6165
/*
 * linux/drivers/cpufreq/freq_table.c
 *
 * Copyright (C) 2002 - 2003 Dominik Brodowski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>

/*********************************************************************
 *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/

int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table)
{
	unsigned int min_freq = ~0;
	unsigned int max_freq = 0;
	unsigned int i;

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID) {
			pr_debug("table entry %u is invalid, skipping\n", i);
			continue;
		}
		pr_debug("table entry %u: %u kHz, %u index\n",
					i, freq, table[i].index);
		if (freq < min_freq)
			min_freq = freq;
		if (freq > max_freq)
			max_freq = freq;
	}

	policy->min = policy->cpuinfo.min_freq = min_freq;
	policy->max = policy->cpuinfo.max_freq = max_freq;

	if (policy->min == ~0)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);


int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table)
{
	unsigned int next_larger = ~0;
	unsigned int i;
	unsigned int count = 0;

	pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
					policy->min, policy->max, policy->cpu);

	if (!cpu_online(policy->cpu))
		return -EINVAL;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if ((freq >= policy->min) && (freq <= policy->max))
			count++;
		else if ((next_larger > freq) && (freq > policy->max))
			next_larger = freq;
	}

	if (!count)
		policy->max = next_larger;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
				policy->min, policy->max, policy->cpu);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);


int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table,
				   unsigned int target_freq,
				   unsigned int relation,
				   unsigned int *index)
{
	struct cpufreq_frequency_table optimal = {
		.index = ~0,
		.frequency = 0,
	};
	struct cpufreq_frequency_table suboptimal = {
		.index = ~0,
		.frequency = 0,
	};
	unsigned int i;

	pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
					target_freq, relation, policy->cpu);

	switch (relation) {
	case CPUFREQ_RELATION_H:
		suboptimal.frequency = ~0;
		break;
	case CPUFREQ_RELATION_L:
		optimal.frequency = ~0;
		break;
	}

	if (!cpu_online(policy->cpu))
		return -EINVAL;

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if ((freq < policy->min) || (freq > policy->max))
			continue;
		switch (relation) {
		case CPUFREQ_RELATION_H:
			if (freq <= target_freq) {
				if (freq >= optimal.frequency) {
					optimal.frequency = freq;
					optimal.index = i;
				}
			} else {
				if (freq <= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.index = i;
				}
			}
			break;
		case CPUFREQ_RELATION_L:
			if (freq >= target_freq) {
				if (freq <= optimal.frequency) {
					optimal.frequency = freq;
					optimal.index = i;
				}
			} else {
				if (freq >= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.index = i;
				}
			}
			break;
		}
	}
	if (optimal.index > i) {
		if (suboptimal.index > i)
			return -EINVAL;
		*index = suboptimal.index;
	} else
		*index = optimal.index;

	pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency,
		table[*index].index);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);

static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);

/**
 * show_available_freqs - show available frequencies for the specified CPU
 */
static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
{
	unsigned int i = 0;
	unsigned int cpu = policy->cpu;
	ssize_t count = 0;
	struct cpufreq_frequency_table *table;

	if (!per_cpu(cpufreq_show_table, cpu))
		return -ENODEV;

	table = per_cpu(cpufreq_show_table, cpu);

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;
		count += sprintf(&buf[count], "%d ", table[i].frequency);
	}
	count += sprintf(&buf[count], "\n");

	return count;
}

struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
	.attr = { .name = "scaling_available_frequencies",
		  .mode = 0444,
		},
	.show = show_available_freqs,
};
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);

/*
 * if you use these, you must assure that the frequency table is valid
 * all the time between get_attr and put_attr!
 */
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
				      unsigned int cpu)
{
	pr_debug("setting show_table for cpu %u to %p\n", cpu, table);
	per_cpu(cpufreq_show_table, cpu) = table;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);

void cpufreq_frequency_table_put_attr(unsigned int cpu)
{
	pr_debug("clearing show_table for cpu %u\n", cpu);
	per_cpu(cpufreq_show_table, cpu) = NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);

struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
	return per_cpu(cpufreq_show_table, cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);

MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq frequency table helpers");
MODULE_LICENSE("GPL");
gpl-2.0
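As an illustrative aside (not part of the freq_table.c file above): a cpufreq backend driver of this era would typically wire the helpers into its init, verify and target callbacks. This is only a sketch under assumptions; the "acme" names, the frequency values, and acme_set_rate() are made up for the example.

/*
 * Minimal sketch of a driver using the frequency-table helpers above.
 * All "acme_*" identifiers and frequencies are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/cpufreq.h>

static struct cpufreq_frequency_table acme_freq_table[] = {
	{ .index = 0, .frequency = 200000 },			/* 200 MHz */
	{ .index = 1, .frequency = 400000 },			/* 400 MHz */
	{ .index = 2, .frequency = CPUFREQ_ENTRY_INVALID },	/* skipped */
	{ .index = 3, .frequency = 800000 },			/* 800 MHz */
	{ .index = 4, .frequency = CPUFREQ_TABLE_END },
};

/* Hypothetical hardware hook, stubbed so the sketch is self-contained. */
static int acme_set_rate(unsigned int freq_khz)
{
	pr_info("acme: switching to %u kHz\n", freq_khz);
	return 0;
}

static int acme_cpufreq_init(struct cpufreq_policy *policy)
{
	/* Derive policy->min/max and cpuinfo limits from the table. */
	return cpufreq_frequency_table_cpuinfo(policy, acme_freq_table);
}

static int acme_cpufreq_verify(struct cpufreq_policy *policy)
{
	/* Clamp the requested policy to frequencies the table can satisfy. */
	return cpufreq_frequency_table_verify(policy, acme_freq_table);
}

static int acme_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq, unsigned int relation)
{
	unsigned int index;
	int ret;

	/* Pick the best matching valid entry for the request. */
	ret = cpufreq_frequency_table_target(policy, acme_freq_table,
					     target_freq, relation, &index);
	if (ret)
		return ret;

	return acme_set_rate(acme_freq_table[index].frequency);
}

These three callbacks would then be plugged into the driver's struct cpufreq_driver; that wiring is omitted here.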
BRNmod/android_kernel_motorola_msm8226
sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
121
37605
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/err.h> #include <linux/module.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/of_device.h> #include <sound/core.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/control.h> #include <asm/dma.h> #include "msm-pcm-q6-v2.h" #include "msm-pcm-routing-v2.h" #include "q6voice.h" #include "audio_ocmem.h" #define SHARED_MEM_BUF 2 #define VOIP_MAX_Q_LEN 10 #define VOIP_MAX_VOC_PKT_SIZE 4096 #define VOIP_MIN_VOC_PKT_SIZE 320 /* Length of the DSP frame info header added to the voc packet. */ #define DSP_FRAME_HDR_LEN 1 #define MODE_IS127 0x2 #define MODE_4GV_NB 0x3 #define MODE_4GV_WB 0x4 #define MODE_AMR 0x5 #define MODE_AMR_WB 0xD #define MODE_PCM 0xC #define MODE_4GV_NW 0xE #define VOIP_MODE_MAX MODE_4GV_NW #define VOIP_RATE_MAX 23850 enum format { FORMAT_S16_LE = 2, FORMAT_SPECIAL = 31, }; enum amr_rate_type { AMR_RATE_4750, /* AMR 4.75 kbps */ AMR_RATE_5150, /* AMR 5.15 kbps */ AMR_RATE_5900, /* AMR 5.90 kbps */ AMR_RATE_6700, /* AMR 6.70 kbps */ AMR_RATE_7400, /* AMR 7.40 kbps */ AMR_RATE_7950, /* AMR 7.95 kbps */ AMR_RATE_10200, /* AMR 10.20 kbps */ AMR_RATE_12200, /* AMR 12.20 kbps */ AMR_RATE_6600, /* AMR-WB 6.60 kbps */ AMR_RATE_8850, /* AMR-WB 8.85 kbps */ AMR_RATE_12650, /* AMR-WB 12.65 kbps */ AMR_RATE_14250, /* AMR-WB 14.25 kbps */ AMR_RATE_15850, /* AMR-WB 15.85 kbps */ AMR_RATE_18250, /* AMR-WB 18.25 kbps */ AMR_RATE_19850, /* AMR-WB 19.85 kbps */ AMR_RATE_23050, /* AMR-WB 23.05 kbps */ AMR_RATE_23850, /* AMR-WB 23.85 kbps */ AMR_RATE_UNDEF }; enum voip_state { VOIP_STOPPED, VOIP_STARTED, }; struct voip_frame_hdr { uint32_t timestamp; union { uint32_t frame_type; uint32_t packet_rate; }; }; struct voip_frame { struct voip_frame_hdr frm_hdr; uint32_t pktlen; uint8_t voc_pkt[VOIP_MAX_VOC_PKT_SIZE]; }; struct voip_buf_node { struct list_head list; struct voip_frame frame; }; struct voip_drv_info { enum voip_state state; struct snd_pcm_substream *playback_substream; struct snd_pcm_substream *capture_substream; struct list_head in_queue; struct list_head free_in_queue; struct list_head out_queue; struct list_head free_out_queue; wait_queue_head_t out_wait; wait_queue_head_t in_wait; struct mutex lock; spinlock_t dsp_lock; spinlock_t dsp_ul_lock; uint32_t mode; uint32_t rate_type; uint32_t rate; uint32_t dtx_mode; uint8_t capture_start; uint8_t playback_start; uint8_t playback_instance; uint8_t capture_instance; unsigned int play_samp_rate; unsigned int cap_samp_rate; unsigned int pcm_size; unsigned int pcm_count; unsigned int pcm_playback_irq_pos; /* IRQ position */ unsigned int pcm_playback_buf_pos; /* position in buffer */ unsigned int pcm_capture_size; unsigned int pcm_capture_count; unsigned int pcm_capture_irq_pos; /* IRQ position */ unsigned int pcm_capture_buf_pos; /* position in buffer */ uint32_t evrc_min_rate; uint32_t evrc_max_rate; }; static int 
voip_get_media_type(uint32_t mode, unsigned int samp_rate, unsigned int *media_type); static int voip_get_rate_type(uint32_t mode, uint32_t rate, uint32_t *rate_type); static int voip_config_vocoder(struct snd_pcm_substream *substream); static int msm_voip_mode_config_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); static int msm_voip_mode_config_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); static int msm_voip_rate_config_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); static int msm_voip_rate_config_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); static int msm_voip_evrc_min_max_rate_config_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); static int msm_voip_evrc_min_max_rate_config_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); static struct voip_drv_info voip_info; static struct snd_pcm_hardware msm_pcm_hardware = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED), .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_SPECIAL, .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000, .rate_min = 8000, .rate_max = 16000, .channels_min = 1, .channels_max = 1, .buffer_bytes_max = sizeof(struct voip_buf_node) * VOIP_MAX_Q_LEN, .period_bytes_min = VOIP_MIN_VOC_PKT_SIZE, .period_bytes_max = VOIP_MAX_VOC_PKT_SIZE, .periods_min = 2, .periods_max = VOIP_MAX_Q_LEN, .fifo_size = 0, }; static int msm_voip_mute_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int ret = 0; int mute = ucontrol->value.integer.value[0]; int ramp_duration = ucontrol->value.integer.value[1]; if ((mute < 0) || (mute > 1) || (ramp_duration < 0)) { pr_err(" %s Invalid arguments", __func__); ret = -EINVAL; goto done; } pr_debug("%s: mute=%d ramp_duration=%d\n", __func__, mute, ramp_duration); voc_set_tx_mute(voc_get_session_id(VOIP_SESSION_NAME), TX_PATH, mute, ramp_duration); done: return ret; } static int msm_voip_gain_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int ret = 0; int volume = ucontrol->value.integer.value[0]; int ramp_duration = ucontrol->value.integer.value[1]; if ((volume < 0) || (ramp_duration < 0)) { pr_err(" %s Invalid arguments", __func__); ret = -EINVAL; goto done; } pr_debug("%s: volume: %d ramp_duration: %d\n", __func__, volume, ramp_duration); voc_set_rx_vol_step(voc_get_session_id(VOIP_SESSION_NAME), RX_PATH, volume, ramp_duration); done: return ret; } static int msm_voip_dtx_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { mutex_lock(&voip_info.lock); voip_info.dtx_mode = ucontrol->value.integer.value[0]; pr_debug("%s: dtx: %d\n", __func__, voip_info.dtx_mode); mutex_unlock(&voip_info.lock); return 0; } static int msm_voip_dtx_mode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { mutex_lock(&voip_info.lock); ucontrol->value.integer.value[0] = voip_info.dtx_mode; mutex_unlock(&voip_info.lock); return 0; } static struct snd_kcontrol_new msm_voip_controls[] = { SOC_SINGLE_MULTI_EXT("Voip Tx Mute", SND_SOC_NOPM, 0, MAX_RAMP_DURATION, 0, 2, NULL, msm_voip_mute_put), SOC_SINGLE_MULTI_EXT("Voip Rx Gain", SND_SOC_NOPM, 0, MAX_RAMP_DURATION, 0, 2, NULL, msm_voip_gain_put), SOC_SINGLE_EXT("Voip Mode Config", SND_SOC_NOPM, 0, VOIP_MODE_MAX, 0, msm_voip_mode_config_get, msm_voip_mode_config_put), SOC_SINGLE_EXT("Voip Rate Config", SND_SOC_NOPM, 0, VOIP_RATE_MAX, 0, msm_voip_rate_config_get, 
msm_voip_rate_config_put), SOC_SINGLE_MULTI_EXT("Voip Evrc Min Max Rate Config", SND_SOC_NOPM, 0, VOC_1_RATE, 0, 2, msm_voip_evrc_min_max_rate_config_get, msm_voip_evrc_min_max_rate_config_put), SOC_SINGLE_EXT("Voip Dtx Mode", SND_SOC_NOPM, 0, 1, 0, msm_voip_dtx_mode_get, msm_voip_dtx_mode_put), }; static int msm_pcm_voip_probe(struct snd_soc_platform *platform) { snd_soc_add_platform_controls(platform, msm_voip_controls, ARRAY_SIZE(msm_voip_controls)); return 0; } /* sample rate supported */ static unsigned int supported_sample_rates[] = {8000, 16000}; /* capture path */ static void voip_process_ul_pkt(uint8_t *voc_pkt, uint32_t pkt_len, uint32_t timestamp, void *private_data) { struct voip_buf_node *buf_node = NULL; struct voip_drv_info *prtd = private_data; unsigned long dsp_flags; if (prtd->capture_substream == NULL) return; /* Copy up-link packet into out_queue. */ spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags); /* discarding UL packets till start is received */ if (!list_empty(&prtd->free_out_queue) && prtd->capture_start) { buf_node = list_first_entry(&prtd->free_out_queue, struct voip_buf_node, list); list_del(&buf_node->list); switch (prtd->mode) { case MODE_AMR_WB: case MODE_AMR: { /* Remove the DSP frame info header. Header format: * Bits 0-3: Frame rate * Bits 4-7: Frame type */ buf_node->frame.frm_hdr.timestamp = timestamp; buf_node->frame.frm_hdr.frame_type = ((*voc_pkt) & 0xF0) >> 4; voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; buf_node->frame.pktlen = pkt_len - DSP_FRAME_HDR_LEN; memcpy(&buf_node->frame.voc_pkt[0], voc_pkt, buf_node->frame.pktlen); list_add_tail(&buf_node->list, &prtd->out_queue); break; } case MODE_IS127: case MODE_4GV_NB: case MODE_4GV_WB: case MODE_4GV_NW: { /* Remove the DSP frame info header. * Header format: * Bits 0-3: frame rate */ buf_node->frame.frm_hdr.timestamp = timestamp; buf_node->frame.frm_hdr.packet_rate = (*voc_pkt) & 0x0F; voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; buf_node->frame.pktlen = pkt_len - DSP_FRAME_HDR_LEN; memcpy(&buf_node->frame.voc_pkt[0], voc_pkt, buf_node->frame.pktlen); list_add_tail(&buf_node->list, &prtd->out_queue); break; } default: { buf_node->frame.frm_hdr.timestamp = timestamp; buf_node->frame.pktlen = pkt_len; memcpy(&buf_node->frame.voc_pkt[0], voc_pkt, buf_node->frame.pktlen); list_add_tail(&buf_node->list, &prtd->out_queue); } } pr_debug("%s: pkt_len =%d, frame.pktlen=%d, timestamp=%d\n", __func__, pkt_len, buf_node->frame.pktlen, timestamp); prtd->pcm_capture_irq_pos += prtd->pcm_capture_count; spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags); snd_pcm_period_elapsed(prtd->capture_substream); } else { spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags); pr_err("UL data dropped\n"); } wake_up(&prtd->out_wait); } /* playback path */ static void voip_process_dl_pkt(uint8_t *voc_pkt, void *private_data) { struct voip_buf_node *buf_node = NULL; struct voip_drv_info *prtd = private_data; unsigned long dsp_flags; if (prtd->playback_substream == NULL) return; spin_lock_irqsave(&prtd->dsp_lock, dsp_flags); if (!list_empty(&prtd->in_queue) && prtd->playback_start) { buf_node = list_first_entry(&prtd->in_queue, struct voip_buf_node, list); list_del(&buf_node->list); switch (prtd->mode) { case MODE_AMR: case MODE_AMR_WB: { *((uint32_t *)voc_pkt) = buf_node->frame.pktlen + DSP_FRAME_HDR_LEN; /* Advance to the header of voip packet */ voc_pkt = voc_pkt + sizeof(uint32_t); /* * Add the DSP frame info header. 
Header format: * Bits 0-3: Frame rate * Bits 4-7: Frame type */ *voc_pkt = ((buf_node->frame.frm_hdr.frame_type & 0x0F) << 4) | (prtd->rate_type & 0x0F); voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; memcpy(voc_pkt, &buf_node->frame.voc_pkt[0], buf_node->frame.pktlen); list_add_tail(&buf_node->list, &prtd->free_in_queue); break; } case MODE_IS127: case MODE_4GV_NB: case MODE_4GV_WB: case MODE_4GV_NW: { *((uint32_t *)voc_pkt) = buf_node->frame.pktlen + DSP_FRAME_HDR_LEN; /* Advance to the header of voip packet */ voc_pkt = voc_pkt + sizeof(uint32_t); /* * Add the DSP frame info header. Header format: * Bits 0-3 : Frame rate */ *voc_pkt = buf_node->frame.frm_hdr.packet_rate & 0x0F; voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; memcpy(voc_pkt, &buf_node->frame.voc_pkt[0], buf_node->frame.pktlen); list_add_tail(&buf_node->list, &prtd->free_in_queue); break; } default: { *((uint32_t *)voc_pkt) = buf_node->frame.pktlen; voc_pkt = voc_pkt + sizeof(uint32_t); memcpy(voc_pkt, &buf_node->frame.voc_pkt[0], buf_node->frame.pktlen); list_add_tail(&buf_node->list, &prtd->free_in_queue); } } pr_debug("%s: frame.pktlen=%d\n", __func__, buf_node->frame.pktlen); prtd->pcm_playback_irq_pos += prtd->pcm_count; spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags); snd_pcm_period_elapsed(prtd->playback_substream); } else { *((uint32_t *)voc_pkt) = 0; spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags); pr_err("DL data not available\n"); } wake_up(&prtd->in_wait); } static struct snd_pcm_hw_constraint_list constraints_sample_rates = { .count = ARRAY_SIZE(supported_sample_rates), .list = supported_sample_rates, .mask = 0, }; static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; prtd->play_samp_rate = runtime->rate; prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream); prtd->pcm_count = snd_pcm_lib_period_bytes(substream); prtd->pcm_playback_irq_pos = 0; prtd->pcm_playback_buf_pos = 0; return 0; } static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; int ret = 0; prtd->cap_samp_rate = runtime->rate; prtd->pcm_capture_size = snd_pcm_lib_buffer_bytes(substream); prtd->pcm_capture_count = snd_pcm_lib_period_bytes(substream); prtd->pcm_capture_irq_pos = 0; prtd->pcm_capture_buf_pos = 0; return ret; } static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { int ret = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: pr_debug("%s: Trigger start\n", __func__); if ((!prtd->capture_start) && (!prtd->playback_start)) voice_ocmem_process_req(VOICE, true); if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) prtd->capture_start = 1; else prtd->playback_start = 1; break; case SNDRV_PCM_TRIGGER_STOP: pr_debug("SNDRV_PCM_TRIGGER_STOP\n"); if (prtd->capture_start && prtd->playback_start) voice_ocmem_process_req(VOICE, false); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) prtd->playback_start = 0; else prtd->capture_start = 0; break; default: ret = -EINVAL; break; } return ret; } static int msm_pcm_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = &voip_info; int ret = 0; pr_debug("%s, VoIP\n", __func__); mutex_lock(&prtd->lock); runtime->hw = msm_pcm_hardware; ret = 
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &constraints_sample_rates); if (ret < 0) pr_debug("snd_pcm_hw_constraint_list failed\n"); ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) { pr_debug("snd_pcm_hw_constraint_integer failed\n"); goto err; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { prtd->playback_substream = substream; prtd->playback_instance++; } else { prtd->capture_substream = substream; prtd->capture_instance++; } runtime->private_data = prtd; err: mutex_unlock(&prtd->lock); return ret; } static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; struct voip_buf_node *buf_node = NULL; struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; unsigned long dsp_flags; int count = frames_to_bytes(runtime, frames); pr_debug("%s: count = %d, frames=%d\n", __func__, count, (int)frames); ret = wait_event_interruptible_timeout(prtd->in_wait, (!list_empty(&prtd->free_in_queue) || prtd->state == VOIP_STOPPED), 1 * HZ); if (ret > 0) { if (count <= VOIP_MAX_VOC_PKT_SIZE) { spin_lock_irqsave(&prtd->dsp_lock, dsp_flags); buf_node = list_first_entry(&prtd->free_in_queue, struct voip_buf_node, list); list_del(&buf_node->list); spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags); if (prtd->mode == MODE_PCM) { ret = copy_from_user(&buf_node->frame.voc_pkt, buf, count); buf_node->frame.pktlen = count; } else ret = copy_from_user(&buf_node->frame, buf, count); spin_lock_irqsave(&prtd->dsp_lock, dsp_flags); list_add_tail(&buf_node->list, &prtd->in_queue); spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags); } else { pr_err("%s: Write cnt %d is > VOIP_MAX_VOC_PKT_SIZE\n", __func__, count); ret = -ENOMEM; } } else if (ret == 0) { pr_err("%s: No free DL buffs\n", __func__); ret = -ETIMEDOUT; } else { pr_err("%s: playback copy was interrupted %d\n", __func__, ret); } return ret; } static int msm_pcm_capture_copy(struct snd_pcm_substream *substream, int channel, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; int count = 0; struct voip_buf_node *buf_node = NULL; struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; unsigned long dsp_flags; int size; count = frames_to_bytes(runtime, frames); pr_debug("%s: count = %d\n", __func__, count); ret = wait_event_interruptible_timeout(prtd->out_wait, (!list_empty(&prtd->out_queue) || prtd->state == VOIP_STOPPED), 1 * HZ); if (ret > 0) { if (count <= VOIP_MAX_VOC_PKT_SIZE) { spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags); buf_node = list_first_entry(&prtd->out_queue, struct voip_buf_node, list); list_del(&buf_node->list); spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags); if (prtd->mode == MODE_PCM) { ret = copy_to_user(buf, &buf_node->frame.voc_pkt, buf_node->frame.pktlen); } else { size = sizeof(buf_node->frame.frm_hdr) + sizeof(buf_node->frame.pktlen) + buf_node->frame.pktlen; ret = copy_to_user(buf, &buf_node->frame, size); } if (ret) { pr_err("%s: Copy to user retuned %d\n", __func__, ret); ret = -EFAULT; } spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags); list_add_tail(&buf_node->list, &prtd->free_out_queue); spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags); } else { pr_err("%s: Read count %d > VOIP_MAX_VOC_PKT_SIZE\n", __func__, count); ret = -ENOMEM; } } else if (ret == 0) { pr_err("%s: No UL data available\n", __func__); ret = -ETIMEDOUT; } 
else { pr_err("%s: Read was interrupted\n", __func__); ret = -ERESTARTSYS; } return ret; } static int msm_pcm_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_copy(substream, a, hwoff, buf, frames); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames); return ret; } static int msm_pcm_close(struct snd_pcm_substream *substream) { int ret = 0; struct list_head *ptr = NULL; struct list_head *next = NULL; struct voip_buf_node *buf_node = NULL; struct snd_dma_buffer *p_dma_buf, *c_dma_buf; struct snd_pcm_substream *p_substream, *c_substream; struct snd_pcm_runtime *runtime; struct voip_drv_info *prtd; unsigned long dsp_flags; if (substream == NULL) { pr_err("substream is NULL\n"); return -EINVAL; } runtime = substream->runtime; prtd = runtime->private_data; wake_up(&prtd->out_wait); mutex_lock(&prtd->lock); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) prtd->playback_instance--; else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) prtd->capture_instance--; if (!prtd->playback_instance && !prtd->capture_instance) { if (prtd->state == VOIP_STARTED) { prtd->state = VOIP_STOPPED; voc_end_voice_call( voc_get_session_id(VOIP_SESSION_NAME)); voc_register_mvs_cb(NULL, NULL, prtd); } /* release all buffer */ /* release in_queue and free_in_queue */ pr_debug("release all buffer\n"); p_substream = prtd->playback_substream; if (p_substream == NULL) { pr_debug("p_substream is NULL\n"); goto capt; } p_dma_buf = &p_substream->dma_buffer; if (p_dma_buf == NULL) { pr_debug("p_dma_buf is NULL\n"); goto capt; } if (p_dma_buf->area != NULL) { spin_lock_irqsave(&prtd->dsp_lock, dsp_flags); list_for_each_safe(ptr, next, &prtd->in_queue) { buf_node = list_entry(ptr, struct voip_buf_node, list); list_del(&buf_node->list); } list_for_each_safe(ptr, next, &prtd->free_in_queue) { buf_node = list_entry(ptr, struct voip_buf_node, list); list_del(&buf_node->list); } spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags); dma_free_coherent(p_substream->pcm->card->dev, runtime->hw.buffer_bytes_max, p_dma_buf->area, p_dma_buf->addr); p_dma_buf->area = NULL; } /* release out_queue and free_out_queue */ capt: c_substream = prtd->capture_substream; if (c_substream == NULL) { pr_debug("c_substream is NULL\n"); goto done; } c_dma_buf = &c_substream->dma_buffer; if (c_substream == NULL) { pr_debug("c_dma_buf is NULL.\n"); goto done; } if (c_dma_buf->area != NULL) { spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags); list_for_each_safe(ptr, next, &prtd->out_queue) { buf_node = list_entry(ptr, struct voip_buf_node, list); list_del(&buf_node->list); } list_for_each_safe(ptr, next, &prtd->free_out_queue) { buf_node = list_entry(ptr, struct voip_buf_node, list); list_del(&buf_node->list); } spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags); dma_free_coherent(c_substream->pcm->card->dev, runtime->hw.buffer_bytes_max, c_dma_buf->area, c_dma_buf->addr); c_dma_buf->area = NULL; } done: prtd->capture_substream = NULL; prtd->playback_substream = NULL; } mutex_unlock(&prtd->lock); return ret; } static int voip_config_vocoder(struct snd_pcm_substream *substream) { int ret = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; uint32_t media_type = 0; uint32_t rate_type = 0; uint32_t evrc_min_rate_type = 0; uint32_t evrc_max_rate_type = 0; pr_debug("%s(): mode=%d, 
playback sample rate=%d, capture sample rate=%d\n", __func__, prtd->mode, prtd->play_samp_rate, prtd->cap_samp_rate); if ((runtime->format != FORMAT_S16_LE) && ((prtd->mode == MODE_PCM) || (prtd->mode == MODE_AMR) || (prtd->mode == MODE_AMR_WB) || (prtd->mode == MODE_IS127) || (prtd->mode == MODE_4GV_NB) || (prtd->mode == MODE_4GV_WB) || (prtd->mode == MODE_4GV_NW))) { pr_err("%s(): mode:%d and format:%u are not matched\n", __func__, prtd->mode, (uint32_t)runtime->format); ret = -EINVAL; goto done; } ret = voip_get_media_type(prtd->mode, prtd->play_samp_rate, &media_type); if (ret < 0) { pr_err("%s(): fail at getting media_type, ret=%d\n", __func__, ret); ret = -EINVAL; goto done; } pr_debug("%s(): media_type=%d\n", __func__, media_type); if ((prtd->mode == MODE_PCM) || (prtd->mode == MODE_AMR) || (prtd->mode == MODE_AMR_WB)) { ret = voip_get_rate_type(prtd->mode, prtd->rate, &rate_type); if (ret < 0) { pr_err("%s(): fail at getting rate_type, ret=%d\n", __func__, ret); ret = -EINVAL; goto done; } prtd->rate_type = rate_type; pr_debug("rate_type=%d\n", rate_type); } else if ((prtd->mode == MODE_IS127) || (prtd->mode == MODE_4GV_NB) || (prtd->mode == MODE_4GV_WB) || (prtd->mode == MODE_4GV_NW)) { ret = voip_get_rate_type(prtd->mode, prtd->evrc_min_rate, &evrc_min_rate_type); if (ret < 0) { pr_err("%s(): fail at getting min rate, ret=%d\n", __func__, ret); ret = -EINVAL; goto done; } if (evrc_min_rate_type == VOC_0_RATE) evrc_min_rate_type = VOC_8_RATE; ret = voip_get_rate_type(prtd->mode, prtd->evrc_max_rate, &evrc_max_rate_type); if (ret < 0) { pr_err("%s(): fail at getting max rate, ret=%d\n", __func__, ret); ret = -EINVAL; goto done; } if (evrc_max_rate_type == VOC_0_RATE) evrc_max_rate_type = VOC_1_RATE; if (evrc_max_rate_type < evrc_min_rate_type) { pr_err("%s(): Invalid EVRC min max rates: %d, %d\n", __func__, evrc_min_rate_type, evrc_max_rate_type); ret = -EINVAL; goto done; } pr_debug("%s(): min rate=%d, max rate=%d\n", __func__, evrc_min_rate_type, evrc_max_rate_type); } if ((prtd->play_samp_rate == 8000) && (prtd->cap_samp_rate == 8000)) voc_config_vocoder(media_type, rate_type, VSS_NETWORK_ID_VOIP_NB, voip_info.dtx_mode, evrc_min_rate_type, evrc_max_rate_type); else if ((prtd->play_samp_rate == 16000) && (prtd->cap_samp_rate == 16000)) voc_config_vocoder(media_type, rate_type, VSS_NETWORK_ID_VOIP_WB, voip_info.dtx_mode, evrc_min_rate_type, evrc_max_rate_type); else { pr_debug("%s: Invalid rate playback %d, capture %d\n", __func__, prtd->play_samp_rate, prtd->cap_samp_rate); ret = -EINVAL; } done: return ret; } static int msm_pcm_prepare(struct snd_pcm_substream *substream) { int ret = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; mutex_lock(&prtd->lock); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_prepare(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_prepare(substream); if (prtd->playback_instance && prtd->capture_instance && (prtd->state != VOIP_STARTED)) { ret = voip_config_vocoder(substream); if (ret < 0) { pr_err("%s(): fail at configuring vocoder for voip, ret=%d\n", __func__, ret); goto done; } voc_register_mvs_cb(voip_process_ul_pkt, voip_process_dl_pkt, prtd); ret = voc_start_voice_call( voc_get_session_id(VOIP_SESSION_NAME)); if (ret < 0) { pr_err("%s: voc_start_voice_call() failed err %d", __func__, ret); goto done; } prtd->state = VOIP_STARTED; } done: mutex_unlock(&prtd->lock); return ret; } static snd_pcm_uframes_t 
msm_pcm_playback_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; pr_debug("%s\n", __func__); if (prtd->pcm_playback_irq_pos >= prtd->pcm_size) prtd->pcm_playback_irq_pos = 0; return bytes_to_frames(runtime, (prtd->pcm_playback_irq_pos)); } static snd_pcm_uframes_t msm_pcm_capture_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; if (prtd->pcm_capture_irq_pos >= prtd->pcm_capture_size) prtd->pcm_capture_irq_pos = 0; return bytes_to_frames(runtime, (prtd->pcm_capture_irq_pos)); } static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream) { snd_pcm_uframes_t ret = 0; pr_debug("%s\n", __func__); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_pointer(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_pointer(substream); return ret; } static int msm_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { struct snd_pcm_runtime *runtime = substream->runtime; pr_debug("%s\n", __func__); dma_mmap_coherent(substream->pcm->card->dev, vma, runtime->dma_area, runtime->dma_addr, runtime->dma_bytes); return 0; } static int msm_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_dma_buffer *dma_buf = &substream->dma_buffer; struct voip_buf_node *buf_node = NULL; int i = 0, offset = 0; pr_debug("%s: voip\n", __func__); mutex_lock(&voip_info.lock); dma_buf->dev.type = SNDRV_DMA_TYPE_DEV; dma_buf->dev.dev = substream->pcm->card->dev; dma_buf->private_data = NULL; dma_buf->area = dma_alloc_coherent(substream->pcm->card->dev, runtime->hw.buffer_bytes_max, &dma_buf->addr, GFP_KERNEL); if (!dma_buf->area) { pr_err("%s:MSM VOIP dma_alloc failed\n", __func__); mutex_unlock(&voip_info.lock); return -ENOMEM; } dma_buf->bytes = runtime->hw.buffer_bytes_max; memset(dma_buf->area, 0, runtime->hw.buffer_bytes_max); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { for (i = 0; i < VOIP_MAX_Q_LEN; i++) { buf_node = (void *)dma_buf->area + offset; list_add_tail(&buf_node->list, &voip_info.free_in_queue); offset = offset + sizeof(struct voip_buf_node); } } else { for (i = 0; i < VOIP_MAX_Q_LEN; i++) { buf_node = (void *) dma_buf->area + offset; list_add_tail(&buf_node->list, &voip_info.free_out_queue); offset = offset + sizeof(struct voip_buf_node); } } mutex_unlock(&voip_info.lock); snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); return 0; } static int msm_voip_mode_config_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { mutex_lock(&voip_info.lock); ucontrol->value.integer.value[0] = voip_info.mode; mutex_unlock(&voip_info.lock); return 0; } static int msm_voip_mode_config_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { mutex_lock(&voip_info.lock); voip_info.mode = ucontrol->value.integer.value[0]; pr_debug("%s: mode=%d\n", __func__, voip_info.mode); mutex_unlock(&voip_info.lock); return 0; } static int msm_voip_rate_config_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { mutex_lock(&voip_info.lock); ucontrol->value.integer.value[0] = voip_info.rate; mutex_unlock(&voip_info.lock); return 0; } static int msm_voip_rate_config_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { 
mutex_lock(&voip_info.lock); voip_info.rate = ucontrol->value.integer.value[0]; pr_debug("%s: rate=%d\n", __func__, voip_info.rate); mutex_unlock(&voip_info.lock); return 0; } static int msm_voip_evrc_min_max_rate_config_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { mutex_lock(&voip_info.lock); ucontrol->value.integer.value[0] = voip_info.evrc_min_rate; ucontrol->value.integer.value[1] = voip_info.evrc_max_rate; mutex_unlock(&voip_info.lock); return 0; } static int msm_voip_evrc_min_max_rate_config_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { mutex_lock(&voip_info.lock); voip_info.evrc_min_rate = ucontrol->value.integer.value[0]; voip_info.evrc_max_rate = ucontrol->value.integer.value[1]; pr_debug("%s(): evrc_min_rate=%d,evrc_max_rate=%d\n", __func__, voip_info.evrc_min_rate, voip_info.evrc_max_rate); mutex_unlock(&voip_info.lock); return 0; } static int voip_get_rate_type(uint32_t mode, uint32_t rate, uint32_t *rate_type) { int ret = 0; switch (mode) { case MODE_AMR: { switch (rate) { case 4750: *rate_type = AMR_RATE_4750; break; case 5150: *rate_type = AMR_RATE_5150; break; case 5900: *rate_type = AMR_RATE_5900; break; case 6700: *rate_type = AMR_RATE_6700; break; case 7400: *rate_type = AMR_RATE_7400; break; case 7950: *rate_type = AMR_RATE_7950; break; case 10200: *rate_type = AMR_RATE_10200; break; case 12200: *rate_type = AMR_RATE_12200; break; default: pr_err("wrong rate for AMR NB.\n"); ret = -EINVAL; break; } break; } case MODE_AMR_WB: { switch (rate) { case 6600: *rate_type = AMR_RATE_6600 - AMR_RATE_6600; break; case 8850: *rate_type = AMR_RATE_8850 - AMR_RATE_6600; break; case 12650: *rate_type = AMR_RATE_12650 - AMR_RATE_6600; break; case 14250: *rate_type = AMR_RATE_14250 - AMR_RATE_6600; break; case 15850: *rate_type = AMR_RATE_15850 - AMR_RATE_6600; break; case 18250: *rate_type = AMR_RATE_18250 - AMR_RATE_6600; break; case 19850: *rate_type = AMR_RATE_19850 - AMR_RATE_6600; break; case 23050: *rate_type = AMR_RATE_23050 - AMR_RATE_6600; break; case 23850: *rate_type = AMR_RATE_23850 - AMR_RATE_6600; break; default: pr_err("wrong rate for AMR_WB.\n"); ret = -EINVAL; break; } break; } case MODE_PCM: { *rate_type = 0; break; } case MODE_IS127: case MODE_4GV_NB: case MODE_4GV_WB: { switch (rate) { case VOC_0_RATE: case VOC_8_RATE: case VOC_4_RATE: case VOC_2_RATE: case VOC_1_RATE: *rate_type = rate; break; default: pr_err("wrong rate for IS127/4GV_NB/WB.\n"); ret = -EINVAL; break; } break; } case MODE_4GV_NW: { switch (rate) { case VOC_0_RATE: case VOC_8_RATE: case VOC_4_RATE: case VOC_2_RATE: case VOC_1_RATE: case VOC_8_RATE_NC: *rate_type = rate; break; default: pr_err("wrong rate for 4GV_NW.\n"); ret = -EINVAL; break; } break; } default: pr_err("wrong mode type.\n"); ret = -EINVAL; } pr_debug("%s, mode=%d, rate=%u, rate_type=%d\n", __func__, mode, rate, *rate_type); return ret; } static int voip_get_media_type(uint32_t mode, unsigned int samp_rate, unsigned int *media_type) { int ret = 0; pr_debug("%s: mode=%d, samp_rate=%d\n", __func__, mode, samp_rate); switch (mode) { case MODE_AMR: *media_type = VSS_MEDIA_ID_AMR_NB_MODEM; break; case MODE_AMR_WB: *media_type = VSS_MEDIA_ID_AMR_WB_MODEM; break; case MODE_PCM: if (samp_rate == 8000) *media_type = VSS_MEDIA_ID_PCM_NB; else *media_type = VSS_MEDIA_ID_PCM_WB; break; case MODE_IS127: /* EVRC-A */ *media_type = VSS_MEDIA_ID_EVRC_MODEM; break; case MODE_4GV_NB: /* EVRC-B */ *media_type = VSS_MEDIA_ID_4GV_NB_MODEM; break; case MODE_4GV_WB: /* EVRC-WB */ *media_type = 
VSS_MEDIA_ID_4GV_WB_MODEM; break; case MODE_4GV_NW: /* EVRC-NW */ *media_type = VSS_MEDIA_ID_4GV_NW_MODEM; break; default: pr_debug(" input mode is not supported\n"); ret = -EINVAL; } pr_debug("%s: media_type is 0x%x\n", __func__, *media_type); return ret; } static struct snd_pcm_ops msm_pcm_ops = { .open = msm_pcm_open, .copy = msm_pcm_copy, .hw_params = msm_pcm_hw_params, .close = msm_pcm_close, .prepare = msm_pcm_prepare, .trigger = msm_pcm_trigger, .pointer = msm_pcm_pointer, .mmap = msm_pcm_mmap, }; static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd) { struct snd_card *card = rtd->card->snd_card; int ret = 0; pr_debug("msm_asoc_pcm_new\n"); if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_BIT_MASK(32); return ret; } static struct snd_soc_platform_driver msm_soc_platform = { .ops = &msm_pcm_ops, .pcm_new = msm_asoc_pcm_new, .probe = msm_pcm_voip_probe, }; static __devinit int msm_pcm_probe(struct platform_device *pdev) { int rc; if (!is_voc_initialized()) { pr_debug("%s: voice module not initialized yet, deferring probe()\n", __func__); rc = -EPROBE_DEFER; goto done; } rc = voc_alloc_cal_shared_memory(); if (rc == -EPROBE_DEFER) { pr_debug("%s: memory allocation for calibration deferred %d\n", __func__, rc); goto done; } else if (rc < 0) { pr_err("%s: memory allocation for calibration failed %d\n", __func__, rc); } rc = voc_alloc_voip_shared_memory(); if (rc < 0) { pr_err("%s: error allocating shared mem err %d\n", __func__, rc); } if (pdev->dev.of_node) dev_set_name(&pdev->dev, "%s", "msm-voip-dsp"); pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev)); rc = snd_soc_register_platform(&pdev->dev, &msm_soc_platform); done: return rc; } static int msm_pcm_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static const struct of_device_id msm_voip_dt_match[] = { {.compatible = "qcom,msm-voip-dsp"}, {} }; MODULE_DEVICE_TABLE(of, msm_voip_dt_match); static struct platform_driver msm_pcm_driver = { .driver = { .name = "msm-voip-dsp", .owner = THIS_MODULE, .of_match_table = msm_voip_dt_match, }, .probe = msm_pcm_probe, .remove = __devexit_p(msm_pcm_remove), }; static int __init msm_soc_platform_init(void) { memset(&voip_info, 0, sizeof(voip_info)); voip_info.mode = MODE_PCM; mutex_init(&voip_info.lock); spin_lock_init(&voip_info.dsp_lock); spin_lock_init(&voip_info.dsp_ul_lock); init_waitqueue_head(&voip_info.out_wait); init_waitqueue_head(&voip_info.in_wait); INIT_LIST_HEAD(&voip_info.in_queue); INIT_LIST_HEAD(&voip_info.free_in_queue); INIT_LIST_HEAD(&voip_info.out_queue); INIT_LIST_HEAD(&voip_info.free_out_queue); return platform_driver_register(&msm_pcm_driver); } module_init(msm_soc_platform_init); static void __exit msm_soc_platform_exit(void) { platform_driver_unregister(&msm_pcm_driver); } module_exit(msm_soc_platform_exit); MODULE_DESCRIPTION("PCM module platform driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
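As an illustrative aside (not part of the msm-pcm-voip-v2.c file above): the driver registers ALSA mixer controls such as "Voip Mode Config" and "Voip Rate Config", which userspace sets before starting the VoIP streams. The sketch below uses alsa-lib; the card string "hw:0" and the AMR-WB mode/rate values are assumptions for the example, not values mandated by the driver.

/*
 * Userspace sketch, assuming the controls are exposed on card "hw:0".
 */
#include <alsa/asoundlib.h>

static int example_set_voip_mode(unsigned int mode, unsigned int rate)
{
	snd_ctl_t *ctl;
	snd_ctl_elem_value_t *val;
	int err;

	err = snd_ctl_open(&ctl, "hw:0", 0);
	if (err < 0)
		return err;

	snd_ctl_elem_value_alloca(&val);
	snd_ctl_elem_value_set_interface(val, SND_CTL_ELEM_IFACE_MIXER);

	snd_ctl_elem_value_set_name(val, "Voip Mode Config");
	snd_ctl_elem_value_set_integer(val, 0, mode);	/* e.g. 0xD = MODE_AMR_WB */
	err = snd_ctl_elem_write(ctl, val);
	if (err < 0)
		goto out;

	snd_ctl_elem_value_set_name(val, "Voip Rate Config");
	snd_ctl_elem_value_set_integer(val, 0, rate);	/* e.g. 12650 for AMR-WB */
	err = snd_ctl_elem_write(ctl, val);
out:
	snd_ctl_close(ctl);
	return err;
}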
crysehillmes/android_kernel_samsung_klimtlte
drivers/thermal/spear_thermal.c
121
5342
/*
 * SPEAr thermal driver.
 *
 * Copyright (C) 2011-2012 ST Microelectronics
 * Author: Vincenzo Frascino <vincenzo.frascino@st.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spear_thermal.h>
#include <linux/thermal.h>

#define MD_FACTOR	1000

/* SPEAr Thermal Sensor Dev Structure */
struct spear_thermal_dev {
	/* pointer to base address of the thermal sensor */
	void __iomem *thermal_base;
	/* clk structure */
	struct clk *clk;
	/* pointer to thermal flags */
	unsigned int flags;
};

static inline int thermal_get_temp(struct thermal_zone_device *thermal,
				unsigned long *temp)
{
	struct spear_thermal_dev *stdev = thermal->devdata;

	/*
	 * Data are ready to be read after 628 usec from POWERDOWN signal
	 * (PDN) = 1
	 */
	*temp = (readl_relaxed(stdev->thermal_base) & 0x7F) * MD_FACTOR;
	return 0;
}

static struct thermal_zone_device_ops ops = {
	.get_temp = thermal_get_temp,
};

#ifdef CONFIG_PM
static int spear_thermal_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
	struct spear_thermal_dev *stdev = spear_thermal->devdata;
	unsigned int actual_mask = 0;

	/* Disable SPEAr Thermal Sensor */
	actual_mask = readl_relaxed(stdev->thermal_base);
	writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);

	clk_disable(stdev->clk);
	dev_info(dev, "Suspended.\n");

	return 0;
}

static int spear_thermal_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
	struct spear_thermal_dev *stdev = spear_thermal->devdata;
	unsigned int actual_mask = 0;
	int ret = 0;

	ret = clk_enable(stdev->clk);
	if (ret) {
		dev_err(&pdev->dev, "Can't enable clock\n");
		return ret;
	}

	/* Enable SPEAr Thermal Sensor */
	actual_mask = readl_relaxed(stdev->thermal_base);
	writel_relaxed(actual_mask | stdev->flags, stdev->thermal_base);

	dev_info(dev, "Resumed.\n");

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
		spear_thermal_resume);

static int spear_thermal_probe(struct platform_device *pdev)
{
	struct thermal_zone_device *spear_thermal = NULL;
	struct spear_thermal_dev *stdev;
	struct spear_thermal_pdata *pdata;
	int ret = 0;
	struct resource *stres = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!stres) {
		dev_err(&pdev->dev, "memory resource missing\n");
		return -ENODEV;
	}

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "platform data is NULL\n");
		return -EINVAL;
	}

	stdev = devm_kzalloc(&pdev->dev, sizeof(*stdev), GFP_KERNEL);
	if (!stdev) {
		dev_err(&pdev->dev, "kzalloc fail\n");
		return -ENOMEM;
	}

	/* Enable thermal sensor */
	stdev->thermal_base = devm_ioremap(&pdev->dev, stres->start,
			resource_size(stres));
	if (!stdev->thermal_base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -ENOMEM;
	}

	stdev->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(stdev->clk)) {
		dev_err(&pdev->dev, "Can't get clock\n");
		return PTR_ERR(stdev->clk);
	}

	ret = clk_enable(stdev->clk);
	if (ret) {
		dev_err(&pdev->dev, "Can't enable clock\n");
		goto put_clk;
	}

	stdev->flags = pdata->thermal_flags;
	writel_relaxed(stdev->flags, stdev->thermal_base);

	spear_thermal = thermal_zone_device_register("spear_thermal", 0, 0,
				stdev, &ops, 0, 0, 0, 0);
	if (IS_ERR(spear_thermal)) {
		dev_err(&pdev->dev, "thermal zone device is NULL\n");
		ret = PTR_ERR(spear_thermal);
		goto disable_clk;
	}

	platform_set_drvdata(pdev, spear_thermal);

	dev_info(&spear_thermal->device, "Thermal Sensor Loaded at: 0x%p.\n",
			stdev->thermal_base);

	return 0;

disable_clk:
	clk_disable(stdev->clk);
put_clk:
	clk_put(stdev->clk);

	return ret;
}

static int spear_thermal_exit(struct platform_device *pdev)
{
	unsigned int actual_mask = 0;
	struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
	struct spear_thermal_dev *stdev = spear_thermal->devdata;

	thermal_zone_device_unregister(spear_thermal);
	platform_set_drvdata(pdev, NULL);

	/* Disable SPEAr Thermal Sensor */
	actual_mask = readl_relaxed(stdev->thermal_base);
	writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);

	clk_disable(stdev->clk);
	clk_put(stdev->clk);

	return 0;
}

static struct platform_driver spear_thermal_driver = {
	.probe = spear_thermal_probe,
	.remove = spear_thermal_exit,
	.driver = {
		.name = "spear_thermal",
		.owner = THIS_MODULE,
		.pm = &spear_thermal_pm_ops,
	},
};

module_platform_driver(spear_thermal_driver);

MODULE_AUTHOR("Vincenzo Frascino <vincenzo.frascino@st.com>");
MODULE_DESCRIPTION("SPEAr thermal driver");
MODULE_LICENSE("GPL");
gpl-2.0
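As an illustrative aside (not part of the spear_thermal.c file above): the driver expects board code to register a "spear_thermal" platform device carrying a memory resource and a struct spear_thermal_pdata. The sketch below shows that wiring; the register base address and the thermal_flags value are made-up placeholders, not the real SPEAr values.

/*
 * Board-support sketch, assuming placeholder register address and flags.
 */
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spear_thermal.h>

static struct resource example_thermal_resources[] = {
	{
		.start	= 0xe07008c4,			/* placeholder base */
		.end	= 0xe07008c4 + 0x4 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct spear_thermal_pdata example_thermal_pdata = {
	.thermal_flags	= 0x7000,			/* placeholder enable bits */
};

static struct platform_device example_thermal_device = {
	.name		= "spear_thermal",		/* matches the driver name above */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(example_thermal_resources),
	.resource	= example_thermal_resources,
	.dev		= {
		.platform_data = &example_thermal_pdata,
	},
};

/* Machine init code would then call platform_device_register(&example_thermal_device). */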
flzyup/2.6.29-kernel-BFS-LiGux-FLZYUP
drivers/base/platform.c
121
23638
/* * platform.c - platform 'pseudo' bus for legacy devices * * Copyright (c) 2002-3 Patrick Mochel * Copyright (c) 2002-3 Open Source Development Labs * * This file is released under the GPLv2 * * Please see Documentation/driver-model/platform.txt for more * information. */ #include <linux/platform_device.h> #include <linux/module.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/bootmem.h> #include <linux/err.h> #include <linux/slab.h> #include "base.h" #define to_platform_driver(drv) (container_of((drv), struct platform_driver, \ driver)) struct device platform_bus = { .init_name = "platform", }; EXPORT_SYMBOL_GPL(platform_bus); /** * platform_get_resource - get a resource for a device * @dev: platform device * @type: resource type * @num: resource index */ struct resource *platform_get_resource(struct platform_device *dev, unsigned int type, unsigned int num) { int i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; if (type == resource_type(r) && num-- == 0) return r; } return NULL; } EXPORT_SYMBOL_GPL(platform_get_resource); /** * platform_get_irq - get an IRQ for a device * @dev: platform device * @num: IRQ number index */ int platform_get_irq(struct platform_device *dev, unsigned int num) { struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); return r ? r->start : -ENXIO; } EXPORT_SYMBOL_GPL(platform_get_irq); /** * platform_get_resource_byname - get a resource for a device by name * @dev: platform device * @type: resource type * @name: resource name */ struct resource *platform_get_resource_byname(struct platform_device *dev, unsigned int type, char *name) { int i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; if (type == resource_type(r) && !strcmp(r->name, name)) return r; } return NULL; } EXPORT_SYMBOL_GPL(platform_get_resource_byname); /** * platform_get_irq - get an IRQ for a device * @dev: platform device * @name: IRQ name */ int platform_get_irq_byname(struct platform_device *dev, char *name) { struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); return r ? r->start : -ENXIO; } EXPORT_SYMBOL_GPL(platform_get_irq_byname); /** * platform_add_devices - add a numbers of platform devices * @devs: array of platform devices to add * @num: number of platform devices in array */ int platform_add_devices(struct platform_device **devs, int num) { int i, ret = 0; for (i = 0; i < num; i++) { ret = platform_device_register(devs[i]); if (ret) { while (--i >= 0) platform_device_unregister(devs[i]); break; } } return ret; } EXPORT_SYMBOL_GPL(platform_add_devices); struct platform_object { struct platform_device pdev; char name[1]; }; /** * platform_device_put * @pdev: platform device to free * * Free all memory associated with a platform device. This function must * _only_ be externally called in error cases. All other usage is a bug. */ void platform_device_put(struct platform_device *pdev) { if (pdev) put_device(&pdev->dev); } EXPORT_SYMBOL_GPL(platform_device_put); static void platform_device_release(struct device *dev) { struct platform_object *pa = container_of(dev, struct platform_object, pdev.dev); kfree(pa->pdev.dev.platform_data); kfree(pa->pdev.resource); kfree(pa); } /** * platform_device_alloc * @name: base name of the device we're adding * @id: instance id * * Create a platform device object which can have other objects attached * to it, and which will have attached objects freed when it is released. 
*/ struct platform_device *platform_device_alloc(const char *name, int id) { struct platform_object *pa; pa = kzalloc(sizeof(struct platform_object) + strlen(name), GFP_KERNEL); if (pa) { strcpy(pa->name, name); pa->pdev.name = pa->name; pa->pdev.id = id; device_initialize(&pa->pdev.dev); pa->pdev.dev.release = platform_device_release; } return pa ? &pa->pdev : NULL; } EXPORT_SYMBOL_GPL(platform_device_alloc); /** * platform_device_add_resources * @pdev: platform device allocated by platform_device_alloc to add resources to * @res: set of resources that needs to be allocated for the device * @num: number of resources * * Add a copy of the resources to the platform device. The memory * associated with the resources will be freed when the platform device is * released. */ int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num) { struct resource *r; r = kmalloc(sizeof(struct resource) * num, GFP_KERNEL); if (r) { memcpy(r, res, sizeof(struct resource) * num); pdev->resource = r; pdev->num_resources = num; } return r ? 0 : -ENOMEM; } EXPORT_SYMBOL_GPL(platform_device_add_resources); /** * platform_device_add_data * @pdev: platform device allocated by platform_device_alloc to add resources to * @data: platform specific data for this platform device * @size: size of platform specific data * * Add a copy of platform specific data to the platform device's * platform_data pointer. The memory associated with the platform data * will be freed when the platform device is released. */ int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size) { void *d; d = kmalloc(size, GFP_KERNEL); if (d) { memcpy(d, data, size); pdev->dev.platform_data = d; } return d ? 0 : -ENOMEM; } EXPORT_SYMBOL_GPL(platform_device_add_data); /** * platform_device_add - add a platform device to device hierarchy * @pdev: platform device we're adding * * This is part 2 of platform_device_register(), though may be called * separately _iff_ pdev was allocated by platform_device_alloc(). */ int platform_device_add(struct platform_device *pdev) { int i, ret = 0; if (!pdev) return -EINVAL; if (!pdev->dev.parent) pdev->dev.parent = &platform_bus; pdev->dev.bus = &platform_bus_type; if (pdev->id != -1) dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id); else dev_set_name(&pdev->dev, pdev->name); for (i = 0; i < pdev->num_resources; i++) { struct resource *p, *r = &pdev->resource[i]; if (r->name == NULL) r->name = dev_name(&pdev->dev); p = r->parent; if (!p) { if (resource_type(r) == IORESOURCE_MEM) p = &iomem_resource; else if (resource_type(r) == IORESOURCE_IO) p = &ioport_resource; } if (p && insert_resource(p, r)) { printk(KERN_ERR "%s: failed to claim resource %d\n", dev_name(&pdev->dev), i); ret = -EBUSY; goto failed; } } pr_debug("Registering platform device '%s'. Parent at %s\n", dev_name(&pdev->dev), dev_name(pdev->dev.parent)); ret = device_add(&pdev->dev); if (ret == 0) return ret; failed: while (--i >= 0) { struct resource *r = &pdev->resource[i]; unsigned long type = resource_type(r); if (type == IORESOURCE_MEM || type == IORESOURCE_IO) release_resource(r); } return ret; } EXPORT_SYMBOL_GPL(platform_device_add); /** * platform_device_del - remove a platform-level device * @pdev: platform device we're removing * * Note that this function will also release all memory- and port-based * resources owned by the device (@dev->resource). This function must * _only_ be externally called in error cases. All other usage is a bug. 
*/ void platform_device_del(struct platform_device *pdev) { int i; if (pdev) { device_del(&pdev->dev); for (i = 0; i < pdev->num_resources; i++) { struct resource *r = &pdev->resource[i]; unsigned long type = resource_type(r); if (type == IORESOURCE_MEM || type == IORESOURCE_IO) release_resource(r); } } } EXPORT_SYMBOL_GPL(platform_device_del); /** * platform_device_register - add a platform-level device * @pdev: platform device we're adding */ int platform_device_register(struct platform_device *pdev) { device_initialize(&pdev->dev); return platform_device_add(pdev); } EXPORT_SYMBOL_GPL(platform_device_register); /** * platform_device_unregister - unregister a platform-level device * @pdev: platform device we're unregistering * * Unregistration is done in 2 steps. First we release all resources * and remove it from the subsystem, then we drop reference count by * calling platform_device_put(). */ void platform_device_unregister(struct platform_device *pdev) { platform_device_del(pdev); platform_device_put(pdev); } EXPORT_SYMBOL_GPL(platform_device_unregister); /** * platform_device_register_simple * @name: base name of the device we're adding * @id: instance id * @res: set of resources that needs to be allocated for the device * @num: number of resources * * This function creates a simple platform device that requires minimal * resource and memory management. Canned release function freeing memory * allocated for the device allows drivers using such devices to be * unloaded without waiting for the last reference to the device to be * dropped. * * This interface is primarily intended for use with legacy drivers which * probe hardware directly. Because such drivers create sysfs device nodes * themselves, rather than letting system infrastructure handle such device * enumeration tasks, they don't fully conform to the Linux driver model. * In particular, when such drivers are built as modules, they can't be * "hotplugged". */ struct platform_device *platform_device_register_simple(const char *name, int id, struct resource *res, unsigned int num) { struct platform_device *pdev; int retval; pdev = platform_device_alloc(name, id); if (!pdev) { retval = -ENOMEM; goto error; } if (num) { retval = platform_device_add_resources(pdev, res, num); if (retval) goto error; } retval = platform_device_add(pdev); if (retval) goto error; return pdev; error: platform_device_put(pdev); return ERR_PTR(retval); } EXPORT_SYMBOL_GPL(platform_device_register_simple); /** * platform_device_register_data * @parent: parent device for the device we're adding * @name: base name of the device we're adding * @id: instance id * @data: platform specific data for this platform device * @size: size of platform specific data * * This function creates a simple platform device that requires minimal * resource and memory management. Canned release function freeing memory * allocated for the device allows drivers using such devices to be * unloaded without waiting for the last reference to the device to be * dropped. 
*/ struct platform_device *platform_device_register_data( struct device *parent, const char *name, int id, const void *data, size_t size) { struct platform_device *pdev; int retval; pdev = platform_device_alloc(name, id); if (!pdev) { retval = -ENOMEM; goto error; } pdev->dev.parent = parent; if (size) { retval = platform_device_add_data(pdev, data, size); if (retval) goto error; } retval = platform_device_add(pdev); if (retval) goto error; return pdev; error: platform_device_put(pdev); return ERR_PTR(retval); } static int platform_drv_probe(struct device *_dev) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); return drv->probe(dev); } static int platform_drv_probe_fail(struct device *_dev) { return -ENXIO; } static int platform_drv_remove(struct device *_dev) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); return drv->remove(dev); } static void platform_drv_shutdown(struct device *_dev) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); drv->shutdown(dev); } static int platform_drv_suspend(struct device *_dev, pm_message_t state) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); return drv->suspend(dev, state); } static int platform_drv_resume(struct device *_dev) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); return drv->resume(dev); } /** * platform_driver_register * @drv: platform driver structure */ int platform_driver_register(struct platform_driver *drv) { drv->driver.bus = &platform_bus_type; if (drv->probe) drv->driver.probe = platform_drv_probe; if (drv->remove) drv->driver.remove = platform_drv_remove; if (drv->shutdown) drv->driver.shutdown = platform_drv_shutdown; if (drv->suspend) drv->driver.suspend = platform_drv_suspend; if (drv->resume) drv->driver.resume = platform_drv_resume; return driver_register(&drv->driver); } EXPORT_SYMBOL_GPL(platform_driver_register); /** * platform_driver_unregister * @drv: platform driver structure */ void platform_driver_unregister(struct platform_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(platform_driver_unregister); /** * platform_driver_probe - register driver for non-hotpluggable device * @drv: platform driver structure * @probe: the driver probe routine, probably from an __init section * * Use this instead of platform_driver_register() when you know the device * is not hotpluggable and has already been registered, and you want to * remove its run-once probe() infrastructure from memory after the driver * has bound to the device. * * One typical use for this would be with drivers for controllers integrated * into system-on-chip processors, where the controller devices have been * configured as part of board setup. * * Returns zero if the driver registered and bound to a device, else returns * a negative error code and with the driver not registered. */ int __init_or_module platform_driver_probe(struct platform_driver *drv, int (*probe)(struct platform_device *)) { int retval, code; /* temporary section violation during probe() */ drv->probe = probe; retval = code = platform_driver_register(drv); /* Fixup that section violation, being paranoid about code scanning * the list of drivers in order to probe new devices. 
Check to see * if the probe was successful, and make sure any forced probes of * new devices fail. */ spin_lock(&platform_bus_type.p->klist_drivers.k_lock); drv->probe = NULL; if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list)) retval = -ENODEV; drv->driver.probe = platform_drv_probe_fail; spin_unlock(&platform_bus_type.p->klist_drivers.k_lock); if (code != retval) platform_driver_unregister(drv); return retval; } EXPORT_SYMBOL_GPL(platform_driver_probe); /* modalias support enables more hands-off userspace setup: * (a) environment variable lets new-style hotplug events work once system is * fully running: "modprobe $MODALIAS" * (b) sysfs attribute lets new-style coldplug recover from hotplug events * mishandled before system is fully running: "modprobe $(cat modalias)" */ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, char *buf) { struct platform_device *pdev = to_platform_device(dev); int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name); return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; } static struct device_attribute platform_dev_attrs[] = { __ATTR_RO(modalias), __ATTR_NULL, }; static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) { struct platform_device *pdev = to_platform_device(dev); add_uevent_var(env, "MODALIAS=platform:%s", pdev->name); return 0; } /** * platform_match - bind platform device to platform driver. * @dev: device. * @drv: driver. * * Platform device IDs are assumed to be encoded like this: * "<name><instance>", where <name> is a short description of the type of * device, like "pci" or "floppy", and <instance> is the enumerated * instance of the device, like '0' or '42'. Driver IDs are simply * "<name>". So, extract the <name> from the platform_device structure, * and compare it against the name of the driver. Return whether they match * or not. 
*/ static int platform_match(struct device *dev, struct device_driver *drv) { struct platform_device *pdev; pdev = container_of(dev, struct platform_device, dev); return (strcmp(pdev->name, drv->name) == 0); } #ifdef CONFIG_PM_SLEEP static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) { int ret = 0; if (dev->driver && dev->driver->suspend) ret = dev->driver->suspend(dev, mesg); return ret; } static int platform_legacy_suspend_late(struct device *dev, pm_message_t mesg) { struct platform_driver *drv = to_platform_driver(dev->driver); struct platform_device *pdev; int ret = 0; pdev = container_of(dev, struct platform_device, dev); if (dev->driver && drv->suspend_late) ret = drv->suspend_late(pdev, mesg); return ret; } static int platform_legacy_resume_early(struct device *dev) { struct platform_driver *drv = to_platform_driver(dev->driver); struct platform_device *pdev; int ret = 0; pdev = container_of(dev, struct platform_device, dev); if (dev->driver && drv->resume_early) ret = drv->resume_early(pdev); return ret; } static int platform_legacy_resume(struct device *dev) { int ret = 0; if (dev->driver && dev->driver->resume) ret = dev->driver->resume(dev); return ret; } static int platform_pm_prepare(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (drv && drv->pm && drv->pm->prepare) ret = drv->pm->prepare(dev); return ret; } static void platform_pm_complete(struct device *dev) { struct device_driver *drv = dev->driver; if (drv && drv->pm && drv->pm->complete) drv->pm->complete(dev); } #ifdef CONFIG_SUSPEND static int platform_pm_suspend(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->suspend) ret = drv->pm->suspend(dev); } else { ret = platform_legacy_suspend(dev, PMSG_SUSPEND); } return ret; } static int platform_pm_suspend_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->suspend_noirq) ret = drv->pm->suspend_noirq(dev); } else { ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND); } return ret; } static int platform_pm_resume(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->resume) ret = drv->pm->resume(dev); } else { ret = platform_legacy_resume(dev); } return ret; } static int platform_pm_resume_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->resume_noirq) ret = drv->pm->resume_noirq(dev); } else { ret = platform_legacy_resume_early(dev); } return ret; } #else /* !CONFIG_SUSPEND */ #define platform_pm_suspend NULL #define platform_pm_resume NULL #define platform_pm_suspend_noirq NULL #define platform_pm_resume_noirq NULL #endif /* !CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATION static int platform_pm_freeze(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->freeze) ret = drv->pm->freeze(dev); } else { ret = platform_legacy_suspend(dev, PMSG_FREEZE); } return ret; } static int platform_pm_freeze_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->freeze_noirq) ret = drv->pm->freeze_noirq(dev); } else { ret = platform_legacy_suspend_late(dev, PMSG_FREEZE); } return ret; } static int platform_pm_thaw(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if 
(!drv) return 0; if (drv->pm) { if (drv->pm->thaw) ret = drv->pm->thaw(dev); } else { ret = platform_legacy_resume(dev); } return ret; } static int platform_pm_thaw_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->thaw_noirq) ret = drv->pm->thaw_noirq(dev); } else { ret = platform_legacy_resume_early(dev); } return ret; } static int platform_pm_poweroff(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->poweroff) ret = drv->pm->poweroff(dev); } else { ret = platform_legacy_suspend(dev, PMSG_HIBERNATE); } return ret; } static int platform_pm_poweroff_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->poweroff_noirq) ret = drv->pm->poweroff_noirq(dev); } else { ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE); } return ret; } static int platform_pm_restore(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->restore) ret = drv->pm->restore(dev); } else { ret = platform_legacy_resume(dev); } return ret; } static int platform_pm_restore_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->restore_noirq) ret = drv->pm->restore_noirq(dev); } else { ret = platform_legacy_resume_early(dev); } return ret; } #else /* !CONFIG_HIBERNATION */ #define platform_pm_freeze NULL #define platform_pm_thaw NULL #define platform_pm_poweroff NULL #define platform_pm_restore NULL #define platform_pm_freeze_noirq NULL #define platform_pm_thaw_noirq NULL #define platform_pm_poweroff_noirq NULL #define platform_pm_restore_noirq NULL #endif /* !CONFIG_HIBERNATION */ static struct dev_pm_ops platform_dev_pm_ops = { .prepare = platform_pm_prepare, .complete = platform_pm_complete, .suspend = platform_pm_suspend, .resume = platform_pm_resume, .freeze = platform_pm_freeze, .thaw = platform_pm_thaw, .poweroff = platform_pm_poweroff, .restore = platform_pm_restore, .suspend_noirq = platform_pm_suspend_noirq, .resume_noirq = platform_pm_resume_noirq, .freeze_noirq = platform_pm_freeze_noirq, .thaw_noirq = platform_pm_thaw_noirq, .poweroff_noirq = platform_pm_poweroff_noirq, .restore_noirq = platform_pm_restore_noirq, }; #define PLATFORM_PM_OPS_PTR (&platform_dev_pm_ops) #else /* !CONFIG_PM_SLEEP */ #define PLATFORM_PM_OPS_PTR NULL #endif /* !CONFIG_PM_SLEEP */ struct bus_type platform_bus_type = { .name = "platform", .dev_attrs = platform_dev_attrs, .match = platform_match, .uevent = platform_uevent, .pm = PLATFORM_PM_OPS_PTR, }; EXPORT_SYMBOL_GPL(platform_bus_type); int __init platform_bus_init(void) { int error; error = device_register(&platform_bus); if (error) return error; error = bus_register(&platform_bus_type); if (error) device_unregister(&platform_bus); return error; } #ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK u64 dma_get_required_mask(struct device *dev) { u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT); u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT)); u64 mask; if (!high_totalram) { /* convert to mask just covering totalram */ low_totalram = (1 << (fls(low_totalram) - 1)); low_totalram += low_totalram - 1; mask = low_totalram; } else { high_totalram = (1 << (fls(high_totalram) - 1)); high_totalram += high_totalram - 1; mask = (((u64)high_totalram) << 32) + 0xffffffff; } return mask; } 
EXPORT_SYMBOL_GPL(dma_get_required_mask); #endif
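/*
 * Illustrative sketch (not part of the original file): how board setup code
 * would typically use the platform device helpers defined above.  The device
 * name "demo-uart", its resource table and the demo_uart_pdata structure are
 * hypothetical; only the platform_device_*() calls come from this file, and
 * the usual <linux/platform_device.h> declarations are assumed.
 */
#if 0	/* example only, never compiled */
static struct resource demo_uart_resources[] = {
	{
		.start	= 0x10000000,
		.end	= 0x10000fff,
		.flags	= IORESOURCE_MEM,	/* claimed against iomem_resource in platform_device_add() */
	},
};

struct demo_uart_pdata {
	unsigned int baud;
};

static int __init demo_board_init(void)
{
	struct demo_uart_pdata pdata = { .baud = 115200 };
	struct platform_device *pdev;
	int ret;

	/* allocate first, then attach copies of the resources and platform data */
	pdev = platform_device_alloc("demo-uart", 0);
	if (!pdev)
		return -ENOMEM;

	ret = platform_device_add_resources(pdev, demo_uart_resources,
					    ARRAY_SIZE(demo_uart_resources));
	if (!ret)
		ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
	if (!ret)
		ret = platform_device_add(pdev);	/* device becomes "demo-uart.0" */
	if (ret)
		platform_device_put(pdev);		/* drops the copies made above */
	return ret;
}
#endif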
gpl-2.0
dragonbane0/dolphin
Externals/wxWidgets3/src/common/mimecmn.cpp
121
20407
///////////////////////////////////////////////////////////////////////////// // Name: src/common/mimecmn.cpp // Purpose: classes and functions to manage MIME types // Author: Vadim Zeitlin // Modified by: // Chris Elliott (biol75@york.ac.uk) 5 Dec 00: write support for Win32 // Created: 23.09.98 // Copyright: (c) 1998 Vadim Zeitlin <zeitlin@dptmaths.ens-cachan.fr> // Licence: wxWindows licence (part of wxExtra library) ///////////////////////////////////////////////////////////////////////////// // ============================================================================ // declarations // ============================================================================ // ---------------------------------------------------------------------------- // headers // ---------------------------------------------------------------------------- // for compilers that support precompilation, includes "wx.h". #include "wx/wxprec.h" #ifdef __BORLANDC__ #pragma hdrstop #endif #if wxUSE_MIMETYPE #include "wx/mimetype.h" #ifndef WX_PRECOMP #include "wx/dynarray.h" #include "wx/string.h" #include "wx/intl.h" #include "wx/log.h" #include "wx/module.h" #include "wx/crt.h" #endif //WX_PRECOMP #include "wx/file.h" #include "wx/iconloc.h" #include "wx/confbase.h" // other standard headers #include <ctype.h> // implementation classes: #if defined(__WINDOWS__) #include "wx/msw/mimetype.h" #elif ( defined(__DARWIN__) ) #include "wx/osx/mimetype.h" #elif defined(__WXPM__) || defined (__EMX__) #include "wx/os2/mimetype.h" #undef __UNIX__ #elif defined(__DOS__) #include "wx/msdos/mimetype.h" #else // Unix #include "wx/unix/mimetype.h" #endif // ============================================================================ // common classes // ============================================================================ // ---------------------------------------------------------------------------- // wxMimeTypeCommands // ---------------------------------------------------------------------------- void wxMimeTypeCommands::AddOrReplaceVerb(const wxString& verb, const wxString& cmd) { int n = m_verbs.Index(verb, false /* ignore case */); if ( n == wxNOT_FOUND ) { m_verbs.Add(verb); m_commands.Add(cmd); } else { m_commands[n] = cmd; } } wxString wxMimeTypeCommands::GetCommandForVerb(const wxString& verb, size_t *idx) const { wxString s; int n = m_verbs.Index(verb); if ( n != wxNOT_FOUND ) { s = m_commands[(size_t)n]; if ( idx ) *idx = n; } else if ( idx ) { // different from any valid index *idx = (size_t)-1; } return s; } wxString wxMimeTypeCommands::GetVerbCmd(size_t n) const { return m_verbs[n] + wxT('=') + m_commands[n]; } // ---------------------------------------------------------------------------- // wxFileTypeInfo // ---------------------------------------------------------------------------- void wxFileTypeInfo::DoVarArgInit(const wxString& mimeType, const wxString& openCmd, const wxString& printCmd, const wxString& desc, va_list argptr) { m_mimeType = mimeType; m_openCmd = openCmd; m_printCmd = printCmd; m_desc = desc; for ( ;; ) { // icc gives this warning in its own va_arg() macro, argh #ifdef __INTELC__ #pragma warning(push) #pragma warning(disable: 1684) #endif wxArgNormalizedString ext(WX_VA_ARG_STRING(argptr)); #ifdef __INTELC__ #pragma warning(pop) #endif if ( !ext ) { // NULL terminates the list break; } m_exts.Add(ext.GetString()); } } void wxFileTypeInfo::VarArgInit(const wxString *mimeType, const wxString *openCmd, const wxString *printCmd, const wxString *desc, ...) 
{ va_list argptr; va_start(argptr, desc); DoVarArgInit(*mimeType, *openCmd, *printCmd, *desc, argptr); va_end(argptr); } wxFileTypeInfo::wxFileTypeInfo(const wxArrayString& sArray) { m_mimeType = sArray [0u]; m_openCmd = sArray [1u]; m_printCmd = sArray [2u]; m_desc = sArray [3u]; size_t count = sArray.GetCount(); for ( size_t i = 4; i < count; i++ ) { m_exts.Add(sArray[i]); } } #include "wx/arrimpl.cpp" WX_DEFINE_OBJARRAY(wxArrayFileTypeInfo) // ============================================================================ // implementation of the wrapper classes // ============================================================================ // ---------------------------------------------------------------------------- // wxFileType // ---------------------------------------------------------------------------- /* static */ wxString wxFileType::ExpandCommand(const wxString& command, const wxFileType::MessageParameters& params) { bool hasFilename = false; // We consider that only the file names with spaces in them need to be // handled specially. This is not perfect, but this can be done easily // under all platforms while handling the file names with quotes in them, // for example, needs to be done differently. const bool needToQuoteFilename = params.GetFileName().find_first_of(" \t") != wxString::npos; wxString str; for ( const wxChar *pc = command.c_str(); *pc != wxT('\0'); pc++ ) { if ( *pc == wxT('%') ) { switch ( *++pc ) { case wxT('s'): // don't quote the file name if it's already quoted: notice // that we check for a quote following it and not preceding // it as at least under Windows we can have commands // containing "file://%s" (with quotes) in them so the // argument may be quoted even if there is no quote // directly before "%s" itself if ( needToQuoteFilename && pc[1] != '"' ) str << wxT('"') << params.GetFileName() << wxT('"'); else str << params.GetFileName(); hasFilename = true; break; case wxT('t'): // '%t' expands into MIME type (quote it too just to be // consistent) str << wxT('\'') << params.GetMimeType() << wxT('\''); break; case wxT('{'): { const wxChar *pEnd = wxStrchr(pc, wxT('}')); if ( pEnd == NULL ) { wxString mimetype; wxLogWarning(_("Unmatched '{' in an entry for mime type %s."), params.GetMimeType().c_str()); str << wxT("%{"); } else { wxString param(pc + 1, pEnd - pc - 1); str << wxT('\'') << params.GetParamValue(param) << wxT('\''); pc = pEnd; } } break; case wxT('n'): case wxT('F'): // TODO %n is the number of parts, %F is an array containing // the names of temp files these parts were written to // and their mime types. break; default: wxLogDebug(wxT("Unknown field %%%c in command '%s'."), *pc, command.c_str()); str << *pc; } } else { str << *pc; } } // metamail(1) man page states that if the mailcap entry doesn't have '%s' // the program will accept the data on stdin so normally we should append // "< %s" to the end of the command in such case, but not all commands // behave like this, in particular a common test is 'test -n "$DISPLAY"' // and appending "< %s" to this command makes the test fail... I don't // know of the correct solution, try to guess what we have to do. 
// test now carried out on reading file so test should never get here if ( !hasFilename && !str.empty() #ifdef __UNIX__ && !str.StartsWith(wxT("test ")) #endif // Unix ) { str << wxT(" < "); if ( needToQuoteFilename ) str << '"'; str << params.GetFileName(); if ( needToQuoteFilename ) str << '"'; } return str; } wxFileType::wxFileType(const wxFileTypeInfo& info) { m_info = &info; m_impl = NULL; } wxFileType::wxFileType() { m_info = NULL; m_impl = new wxFileTypeImpl; } wxFileType::~wxFileType() { if ( m_impl ) delete m_impl; } bool wxFileType::GetExtensions(wxArrayString& extensions) { if ( m_info ) { extensions = m_info->GetExtensions(); return true; } return m_impl->GetExtensions(extensions); } bool wxFileType::GetMimeType(wxString *mimeType) const { wxCHECK_MSG( mimeType, false, wxT("invalid parameter in GetMimeType") ); if ( m_info ) { *mimeType = m_info->GetMimeType(); return true; } return m_impl->GetMimeType(mimeType); } bool wxFileType::GetMimeTypes(wxArrayString& mimeTypes) const { if ( m_info ) { mimeTypes.Clear(); mimeTypes.Add(m_info->GetMimeType()); return true; } return m_impl->GetMimeTypes(mimeTypes); } bool wxFileType::GetIcon(wxIconLocation *iconLoc) const { if ( m_info ) { if ( iconLoc ) { iconLoc->SetFileName(m_info->GetIconFile()); #ifdef __WINDOWS__ iconLoc->SetIndex(m_info->GetIconIndex()); #endif // __WINDOWS__ } return true; } return m_impl->GetIcon(iconLoc); } bool wxFileType::GetIcon(wxIconLocation *iconloc, const MessageParameters& params) const { if ( !GetIcon(iconloc) ) { return false; } // we may have "%s" in the icon location string, at least under Windows, so // expand this if ( iconloc ) { iconloc->SetFileName(ExpandCommand(iconloc->GetFileName(), params)); } return true; } bool wxFileType::GetDescription(wxString *desc) const { wxCHECK_MSG( desc, false, wxT("invalid parameter in GetDescription") ); if ( m_info ) { *desc = m_info->GetDescription(); return true; } return m_impl->GetDescription(desc); } bool wxFileType::GetOpenCommand(wxString *openCmd, const wxFileType::MessageParameters& params) const { wxCHECK_MSG( openCmd, false, wxT("invalid parameter in GetOpenCommand") ); if ( m_info ) { *openCmd = ExpandCommand(m_info->GetOpenCommand(), params); return true; } return m_impl->GetOpenCommand(openCmd, params); } wxString wxFileType::GetOpenCommand(const wxString& filename) const { wxString cmd; if ( !GetOpenCommand(&cmd, filename) ) { // return empty string to indicate an error cmd.clear(); } return cmd; } bool wxFileType::GetPrintCommand(wxString *printCmd, const wxFileType::MessageParameters& params) const { wxCHECK_MSG( printCmd, false, wxT("invalid parameter in GetPrintCommand") ); if ( m_info ) { *printCmd = ExpandCommand(m_info->GetPrintCommand(), params); return true; } return m_impl->GetPrintCommand(printCmd, params); } size_t wxFileType::GetAllCommands(wxArrayString *verbs, wxArrayString *commands, const wxFileType::MessageParameters& params) const { if ( verbs ) verbs->Clear(); if ( commands ) commands->Clear(); #if defined (__WINDOWS__) || defined(__UNIX__) return m_impl->GetAllCommands(verbs, commands, params); #else // !__WINDOWS__ || __UNIX__ // we don't know how to retrieve all commands, so just try the 2 we know // about size_t count = 0; wxString cmd; if ( GetOpenCommand(&cmd, params) ) { if ( verbs ) verbs->Add(wxT("Open")); if ( commands ) commands->Add(cmd); count++; } if ( GetPrintCommand(&cmd, params) ) { if ( verbs ) verbs->Add(wxT("Print")); if ( commands ) commands->Add(cmd); count++; } return count; #endif // __WINDOWS__/| 
__UNIX__ } bool wxFileType::Unassociate() { #if defined(__WINDOWS__) return m_impl->Unassociate(); #elif defined(__UNIX__) return m_impl->Unassociate(this); #else wxFAIL_MSG( wxT("not implemented") ); // TODO return false; #endif } bool wxFileType::SetCommand(const wxString& cmd, const wxString& verb, bool overwriteprompt) { #if defined (__WINDOWS__) || defined(__UNIX__) return m_impl->SetCommand(cmd, verb, overwriteprompt); #else wxUnusedVar(cmd); wxUnusedVar(verb); wxUnusedVar(overwriteprompt); wxFAIL_MSG(wxT("not implemented")); return false; #endif } bool wxFileType::SetDefaultIcon(const wxString& cmd, int index) { wxString sTmp = cmd; #ifdef __WINDOWS__ // VZ: should we do this? // chris elliott : only makes sense in MS windows if ( sTmp.empty() ) GetOpenCommand(&sTmp, wxFileType::MessageParameters(wxEmptyString, wxEmptyString)); #endif wxCHECK_MSG( !sTmp.empty(), false, wxT("need the icon file") ); #if defined (__WINDOWS__) || defined(__UNIX__) return m_impl->SetDefaultIcon (cmd, index); #else wxUnusedVar(index); wxFAIL_MSG(wxT("not implemented")); return false; #endif } // ---------------------------------------------------------------------------- // wxMimeTypesManagerFactory // ---------------------------------------------------------------------------- wxMimeTypesManagerFactory *wxMimeTypesManagerFactory::m_factory = NULL; /* static */ void wxMimeTypesManagerFactory::Set(wxMimeTypesManagerFactory *factory) { delete m_factory; m_factory = factory; } /* static */ wxMimeTypesManagerFactory *wxMimeTypesManagerFactory::Get() { if ( !m_factory ) m_factory = new wxMimeTypesManagerFactory; return m_factory; } wxMimeTypesManagerImpl *wxMimeTypesManagerFactory::CreateMimeTypesManagerImpl() { return new wxMimeTypesManagerImpl; } // ---------------------------------------------------------------------------- // wxMimeTypesManager // ---------------------------------------------------------------------------- void wxMimeTypesManager::EnsureImpl() { if ( !m_impl ) m_impl = wxMimeTypesManagerFactory::Get()->CreateMimeTypesManagerImpl(); } bool wxMimeTypesManager::IsOfType(const wxString& mimeType, const wxString& wildcard) { wxASSERT_MSG( mimeType.Find(wxT('*')) == wxNOT_FOUND, wxT("first MIME type can't contain wildcards") ); // all comparaisons are case insensitive (2nd arg of IsSameAs() is false) if ( wildcard.BeforeFirst(wxT('/')). IsSameAs(mimeType.BeforeFirst(wxT('/')), false) ) { wxString strSubtype = wildcard.AfterFirst(wxT('/')); if ( strSubtype == wxT("*") || strSubtype.IsSameAs(mimeType.AfterFirst(wxT('/')), false) ) { // matches (either exactly or it's a wildcard) return true; } } return false; } wxMimeTypesManager::wxMimeTypesManager() { m_impl = NULL; } wxMimeTypesManager::~wxMimeTypesManager() { if ( m_impl ) delete m_impl; } bool wxMimeTypesManager::Unassociate(wxFileType *ft) { EnsureImpl(); #if defined(__UNIX__) && !defined(__CYGWIN__) && !defined(__WINE__) return m_impl->Unassociate(ft); #else return ft->Unassociate(); #endif } wxFileType * wxMimeTypesManager::Associate(const wxFileTypeInfo& ftInfo) { EnsureImpl(); #if defined(__WINDOWS__) || defined(__UNIX__) return m_impl->Associate(ftInfo); #else // other platforms wxUnusedVar(ftInfo); wxFAIL_MSG( wxT("not implemented") ); // TODO return NULL; #endif // platforms } wxFileType * wxMimeTypesManager::GetFileTypeFromExtension(const wxString& ext) { EnsureImpl(); wxString::const_iterator i = ext.begin(); const wxString::const_iterator end = ext.end(); wxString extWithoutDot; if ( i != end && *i == '.' 
) extWithoutDot.assign(++i, ext.end()); else extWithoutDot = ext; wxCHECK_MSG( !ext.empty(), NULL, wxT("extension can't be empty") ); wxFileType *ft = m_impl->GetFileTypeFromExtension(extWithoutDot); if ( !ft ) { // check the fallbacks // // TODO linear search is potentially slow, perhaps we should use a // sorted array? size_t count = m_fallbacks.GetCount(); for ( size_t n = 0; n < count; n++ ) { if ( m_fallbacks[n].GetExtensions().Index(ext) != wxNOT_FOUND ) { ft = new wxFileType(m_fallbacks[n]); break; } } } return ft; } wxFileType * wxMimeTypesManager::GetFileTypeFromMimeType(const wxString& mimeType) { EnsureImpl(); wxFileType *ft = m_impl->GetFileTypeFromMimeType(mimeType); if ( !ft ) { // check the fallbacks // // TODO linear search is potentially slow, perhaps we should use a // sorted array? size_t count = m_fallbacks.GetCount(); for ( size_t n = 0; n < count; n++ ) { if ( wxMimeTypesManager::IsOfType(mimeType, m_fallbacks[n].GetMimeType()) ) { ft = new wxFileType(m_fallbacks[n]); break; } } } return ft; } void wxMimeTypesManager::AddFallbacks(const wxFileTypeInfo *filetypes) { EnsureImpl(); for ( const wxFileTypeInfo *ft = filetypes; ft && ft->IsValid(); ft++ ) { AddFallback(*ft); } } size_t wxMimeTypesManager::EnumAllFileTypes(wxArrayString& mimetypes) { EnsureImpl(); size_t countAll = m_impl->EnumAllFileTypes(mimetypes); // add the fallback filetypes size_t count = m_fallbacks.GetCount(); for ( size_t n = 0; n < count; n++ ) { if ( mimetypes.Index(m_fallbacks[n].GetMimeType()) == wxNOT_FOUND ) { mimetypes.Add(m_fallbacks[n].GetMimeType()); countAll++; } } return countAll; } void wxMimeTypesManager::Initialize(int mcapStyle, const wxString& sExtraDir) { #if defined(__UNIX__) && !defined(__CYGWIN__) && !defined(__WINE__) EnsureImpl(); m_impl->Initialize(mcapStyle, sExtraDir); #else (void)mcapStyle; (void)sExtraDir; #endif // Unix } // and this function clears all the data from the manager void wxMimeTypesManager::ClearData() { #if defined(__UNIX__) && !defined(__CYGWIN__) && !defined(__WINE__) EnsureImpl(); m_impl->ClearData(); #endif // Unix } // ---------------------------------------------------------------------------- // global data and wxMimeTypeCmnModule // ---------------------------------------------------------------------------- // private object static wxMimeTypesManager gs_mimeTypesManager; // and public pointer wxMimeTypesManager *wxTheMimeTypesManager = &gs_mimeTypesManager; class wxMimeTypeCmnModule: public wxModule { public: wxMimeTypeCmnModule() : wxModule() { } virtual bool OnInit() { return true; } virtual void OnExit() { wxMimeTypesManagerFactory::Set(NULL); if ( gs_mimeTypesManager.m_impl != NULL ) { wxDELETE(gs_mimeTypesManager.m_impl); gs_mimeTypesManager.m_fallbacks.Clear(); } } DECLARE_DYNAMIC_CLASS(wxMimeTypeCmnModule) }; IMPLEMENT_DYNAMIC_CLASS(wxMimeTypeCmnModule, wxModule) #endif // wxUSE_MIMETYPE
gpl-2.0
kaostao/linux
drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
377
8585
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include <subdev/bios.h> #include <subdev/bios/pll.h> #include <subdev/timer.h> #include "pll.h" #include "nva3.h" struct nva3_clock_priv { struct nouveau_clock base; struct nva3_clock_info eng[nv_clk_src_max]; }; static u32 read_clk(struct nva3_clock_priv *, int, bool); static u32 read_pll(struct nva3_clock_priv *, int, u32); static u32 read_vco(struct nva3_clock_priv *priv, int clk) { u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4)); if ((sctl & 0x00000030) != 0x00000030) return read_pll(priv, 0x41, 0x00e820); return read_pll(priv, 0x42, 0x00e8a0); } static u32 read_clk(struct nva3_clock_priv *priv, int clk, bool ignore_en) { u32 sctl, sdiv, sclk; /* refclk for the 0xe8xx plls is a fixed frequency */ if (clk >= 0x40) { if (nv_device(priv)->chipset == 0xaf) { /* no joke.. seriously.. sigh.. */ return nv_rd32(priv, 0x00471c) * 1000; } return nv_device(priv)->crystal; } sctl = nv_rd32(priv, 0x4120 + (clk * 4)); if (!ignore_en && !(sctl & 0x00000100)) return 0; switch (sctl & 0x00003000) { case 0x00000000: return nv_device(priv)->crystal; case 0x00002000: if (sctl & 0x00000040) return 108000; return 100000; case 0x00003000: sclk = read_vco(priv, clk); sdiv = ((sctl & 0x003f0000) >> 16) + 2; return (sclk * 2) / sdiv; default: return 0; } } static u32 read_pll(struct nva3_clock_priv *priv, int clk, u32 pll) { u32 ctrl = nv_rd32(priv, pll + 0); u32 sclk = 0, P = 1, N = 1, M = 1; if (!(ctrl & 0x00000008)) { if (ctrl & 0x00000001) { u32 coef = nv_rd32(priv, pll + 4); M = (coef & 0x000000ff) >> 0; N = (coef & 0x0000ff00) >> 8; P = (coef & 0x003f0000) >> 16; /* no post-divider on these.. 
*/ if ((pll & 0x00ff00) == 0x00e800) P = 1; sclk = read_clk(priv, 0x00 + clk, false); } } else { sclk = read_clk(priv, 0x10 + clk, false); } if (M * P) return sclk * N / (M * P); return 0; } static int nva3_clock_read(struct nouveau_clock *clk, enum nv_clk_src src) { struct nva3_clock_priv *priv = (void *)clk; switch (src) { case nv_clk_src_crystal: return nv_device(priv)->crystal; case nv_clk_src_href: return 100000; case nv_clk_src_core: return read_pll(priv, 0x00, 0x4200); case nv_clk_src_shader: return read_pll(priv, 0x01, 0x4220); case nv_clk_src_mem: return read_pll(priv, 0x02, 0x4000); case nv_clk_src_disp: return read_clk(priv, 0x20, false); case nv_clk_src_vdec: return read_clk(priv, 0x21, false); case nv_clk_src_daemon: return read_clk(priv, 0x25, false); default: nv_error(clk, "invalid clock source %d\n", src); return -EINVAL; } } int nva3_clock_info(struct nouveau_clock *clock, int clk, u32 pll, u32 khz, struct nva3_clock_info *info) { struct nouveau_bios *bios = nouveau_bios(clock); struct nva3_clock_priv *priv = (void *)clock; struct nvbios_pll limits; u32 oclk, sclk, sdiv; int P, N, M, diff; int ret; info->pll = 0; info->clk = 0; switch (khz) { case 27000: info->clk = 0x00000100; return khz; case 100000: info->clk = 0x00002100; return khz; case 108000: info->clk = 0x00002140; return khz; default: sclk = read_vco(priv, clk); sdiv = min((sclk * 2) / (khz - 2999), (u32)65); /* if the clock has a PLL attached, and we can get a within * [-2, 3) MHz of a divider, we'll disable the PLL and use * the divider instead. * * divider can go as low as 2, limited here because NVIDIA * and the VBIOS on my NVA8 seem to prefer using the PLL * for 810MHz - is there a good reason? */ if (sdiv > 4) { oclk = (sclk * 2) / sdiv; diff = khz - oclk; if (!pll || (diff >= -2000 && diff < 3000)) { info->clk = (((sdiv - 2) << 16) | 0x00003100); return oclk; } } if (!pll) return -ERANGE; break; } ret = nvbios_pll_parse(bios, pll, &limits); if (ret) return ret; limits.refclk = read_clk(priv, clk - 0x10, true); if (!limits.refclk) return -EINVAL; ret = nva3_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P); if (ret >= 0) { info->clk = nv_rd32(priv, 0x4120 + (clk * 4)); info->pll = (P << 16) | (N << 8) | M; } return ret ? 
ret : -ERANGE; } static int calc_clk(struct nva3_clock_priv *priv, struct nouveau_cstate *cstate, int clk, u32 pll, int idx) { int ret = nva3_clock_info(&priv->base, clk, pll, cstate->domain[idx], &priv->eng[idx]); if (ret >= 0) return 0; return ret; } static void prog_pll(struct nva3_clock_priv *priv, int clk, u32 pll, int idx) { struct nva3_clock_info *info = &priv->eng[idx]; const u32 src0 = 0x004120 + (clk * 4); const u32 src1 = 0x004160 + (clk * 4); const u32 ctrl = pll + 0; const u32 coef = pll + 4; if (info->pll) { nv_mask(priv, src0, 0x00000101, 0x00000101); nv_wr32(priv, coef, info->pll); nv_mask(priv, ctrl, 0x00000015, 0x00000015); nv_mask(priv, ctrl, 0x00000010, 0x00000000); nv_wait(priv, ctrl, 0x00020000, 0x00020000); nv_mask(priv, ctrl, 0x00000010, 0x00000010); nv_mask(priv, ctrl, 0x00000008, 0x00000000); nv_mask(priv, src1, 0x00000100, 0x00000000); nv_mask(priv, src1, 0x00000001, 0x00000000); } else { nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk); nv_mask(priv, ctrl, 0x00000018, 0x00000018); udelay(20); nv_mask(priv, ctrl, 0x00000001, 0x00000000); nv_mask(priv, src0, 0x00000100, 0x00000000); nv_mask(priv, src0, 0x00000001, 0x00000000); } } static void prog_clk(struct nva3_clock_priv *priv, int clk, int idx) { struct nva3_clock_info *info = &priv->eng[idx]; nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk); } static int nva3_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate) { struct nva3_clock_priv *priv = (void *)clk; int ret; if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) || (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) || (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) || (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec))) return ret; return 0; } static int nva3_clock_prog(struct nouveau_clock *clk) { struct nva3_clock_priv *priv = (void *)clk; prog_pll(priv, 0x00, 0x004200, nv_clk_src_core); prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader); prog_clk(priv, 0x20, nv_clk_src_disp); prog_clk(priv, 0x21, nv_clk_src_vdec); return 0; } static void nva3_clock_tidy(struct nouveau_clock *clk) { } static struct nouveau_clocks nva3_domain[] = { { nv_clk_src_crystal, 0xff }, { nv_clk_src_href , 0xff }, { nv_clk_src_core , 0x00, 0, "core", 1000 }, { nv_clk_src_shader , 0x01, 0, "shader", 1000 }, { nv_clk_src_mem , 0x02, 0, "memory", 1000 }, { nv_clk_src_vdec , 0x03 }, { nv_clk_src_disp , 0x04 }, { nv_clk_src_max } }; static int nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nva3_clock_priv *priv; int ret; ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, &priv); *pobject = nv_object(priv); if (ret) return ret; priv->base.read = nva3_clock_read; priv->base.calc = nva3_clock_calc; priv->base.prog = nva3_clock_prog; priv->base.tidy = nva3_clock_tidy; return 0; } struct nouveau_oclass nva3_clock_oclass = { .handle = NV_SUBDEV(CLOCK, 0xa3), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nva3_clock_ctor, .dtor = _nouveau_clock_dtor, .init = _nouveau_clock_init, .fini = _nouveau_clock_fini, }, };
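/*
 * Illustrative sketch (not part of the original file): the source-divider
 * arithmetic used by nva3_clock_info() above, pulled out as a worked example.
 * The code picks sdiv = min((vco * 2) / (khz - 2999), 65), only considers the
 * divider when sdiv > 4, and accepts it if the resulting clock lands within
 * [-2000, 3000) kHz of the request; otherwise it falls back to the PLL.  The
 * helper name and the numbers in the comment are made up; only the formula
 * comes from the code above.
 */
#if 0	/* example only, never compiled */
static u32 demo_pick_divider(u32 vco_khz, u32 want_khz)
{
	u32 sdiv = min((vco_khz * 2) / (want_khz - 2999), (u32)65);
	u32 oclk;
	int diff;

	if (sdiv <= 4)
		return 0;			/* divider too small, use the PLL path */

	oclk = (vco_khz * 2) / sdiv;
	diff = want_khz - oclk;

	/* e.g. vco = 2000000 kHz, want = 405000 kHz: sdiv = 9, oclk = 444444,
	 * diff = -39444 -> outside [-2000, 3000), so the caller would program
	 * the PLL instead of the divider.
	 */
	if (diff >= -2000 && diff < 3000)
		return oclk;

	return 0;
}
#endif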
gpl-2.0
oceanfly/linux
drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
633
1976
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include "dmacnv50.h" #include "rootnv50.h" #include <nvif/class.h> const struct nv50_disp_mthd_list g94_disp_core_mthd_sor = { .mthd = 0x0040, .addr = 0x000008, .data = { { 0x0600, 0x610794 }, {} } }; const struct nv50_disp_chan_mthd g94_disp_core_chan_mthd = { .name = "Core", .addr = 0x000000, .prev = 0x000004, .data = { { "Global", 1, &nv50_disp_core_mthd_base }, { "DAC", 3, &g84_disp_core_mthd_dac }, { "SOR", 4, &g94_disp_core_mthd_sor }, { "PIOR", 3, &nv50_disp_core_mthd_pior }, { "HEAD", 2, &g84_disp_core_mthd_head }, {} } }; const struct nv50_disp_dmac_oclass g94_disp_core_oclass = { .base.oclass = GT206_DISP_CORE_CHANNEL_DMA, .base.minver = 0, .base.maxver = 0, .ctor = nv50_disp_core_new, .func = &nv50_disp_core_func, .mthd = &g94_disp_core_chan_mthd, .chid = 0, };
gpl-2.0
pjknkda/linux
drivers/dma-buf/reservation.c
633
11493
/* * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst) * * Based on bo.c which bears the following copyright notice, * but is dual licensed: * * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ /* * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> */ #include <linux/reservation.h> #include <linux/export.h> DEFINE_WW_CLASS(reservation_ww_class); EXPORT_SYMBOL(reservation_ww_class); struct lock_class_key reservation_seqcount_class; EXPORT_SYMBOL(reservation_seqcount_class); const char reservation_seqcount_string[] = "reservation_seqcount"; EXPORT_SYMBOL(reservation_seqcount_string); /* * Reserve space to add a shared fence to a reservation_object, * must be called with obj->lock held. 
*/ int reservation_object_reserve_shared(struct reservation_object *obj) { struct reservation_object_list *fobj, *old; u32 max; old = reservation_object_get_list(obj); if (old && old->shared_max) { if (old->shared_count < old->shared_max) { /* perform an in-place update */ kfree(obj->staged); obj->staged = NULL; return 0; } else max = old->shared_max * 2; } else max = 4; /* * resize obj->staged or allocate if it doesn't exist, * noop if already correct size */ fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]), GFP_KERNEL); if (!fobj) return -ENOMEM; obj->staged = fobj; fobj->shared_max = max; return 0; } EXPORT_SYMBOL(reservation_object_reserve_shared); static void reservation_object_add_shared_inplace(struct reservation_object *obj, struct reservation_object_list *fobj, struct fence *fence) { u32 i; fence_get(fence); preempt_disable(); write_seqcount_begin(&obj->seq); for (i = 0; i < fobj->shared_count; ++i) { struct fence *old_fence; old_fence = rcu_dereference_protected(fobj->shared[i], reservation_object_held(obj)); if (old_fence->context == fence->context) { /* memory barrier is added by write_seqcount_begin */ RCU_INIT_POINTER(fobj->shared[i], fence); write_seqcount_end(&obj->seq); preempt_enable(); fence_put(old_fence); return; } } /* * memory barrier is added by write_seqcount_begin, * fobj->shared_count is protected by this lock too */ RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence); fobj->shared_count++; write_seqcount_end(&obj->seq); preempt_enable(); } static void reservation_object_add_shared_replace(struct reservation_object *obj, struct reservation_object_list *old, struct reservation_object_list *fobj, struct fence *fence) { unsigned i; struct fence *old_fence = NULL; fence_get(fence); if (!old) { RCU_INIT_POINTER(fobj->shared[0], fence); fobj->shared_count = 1; goto done; } /* * no need to bump fence refcounts, rcu_read access * requires the use of kref_get_unless_zero, and the * references from the old struct are carried over to * the new. */ fobj->shared_count = old->shared_count; for (i = 0; i < old->shared_count; ++i) { struct fence *check; check = rcu_dereference_protected(old->shared[i], reservation_object_held(obj)); if (!old_fence && check->context == fence->context) { old_fence = check; RCU_INIT_POINTER(fobj->shared[i], fence); } else RCU_INIT_POINTER(fobj->shared[i], check); } if (!old_fence) { RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence); fobj->shared_count++; } done: preempt_disable(); write_seqcount_begin(&obj->seq); /* * RCU_INIT_POINTER can be used here, * seqcount provides the necessary barriers */ RCU_INIT_POINTER(obj->fence, fobj); write_seqcount_end(&obj->seq); preempt_enable(); if (old) kfree_rcu(old, rcu); if (old_fence) fence_put(old_fence); } /* * Add a fence to a shared slot, obj->lock must be held, and * reservation_object_reserve_shared_fence has been called. 
*/ void reservation_object_add_shared_fence(struct reservation_object *obj, struct fence *fence) { struct reservation_object_list *old, *fobj = obj->staged; old = reservation_object_get_list(obj); obj->staged = NULL; if (!fobj) { BUG_ON(old->shared_count >= old->shared_max); reservation_object_add_shared_inplace(obj, old, fence); } else reservation_object_add_shared_replace(obj, old, fobj, fence); } EXPORT_SYMBOL(reservation_object_add_shared_fence); void reservation_object_add_excl_fence(struct reservation_object *obj, struct fence *fence) { struct fence *old_fence = reservation_object_get_excl(obj); struct reservation_object_list *old; u32 i = 0; old = reservation_object_get_list(obj); if (old) i = old->shared_count; if (fence) fence_get(fence); preempt_disable(); write_seqcount_begin(&obj->seq); /* write_seqcount_begin provides the necessary memory barrier */ RCU_INIT_POINTER(obj->fence_excl, fence); if (old) old->shared_count = 0; write_seqcount_end(&obj->seq); preempt_enable(); /* inplace update, no shared fences */ while (i--) fence_put(rcu_dereference_protected(old->shared[i], reservation_object_held(obj))); if (old_fence) fence_put(old_fence); } EXPORT_SYMBOL(reservation_object_add_excl_fence); int reservation_object_get_fences_rcu(struct reservation_object *obj, struct fence **pfence_excl, unsigned *pshared_count, struct fence ***pshared) { unsigned shared_count = 0; unsigned retry = 1; struct fence **shared = NULL, *fence_excl = NULL; int ret = 0; while (retry) { struct reservation_object_list *fobj; unsigned seq; seq = read_seqcount_begin(&obj->seq); rcu_read_lock(); fobj = rcu_dereference(obj->fence); if (fobj) { struct fence **nshared; size_t sz = sizeof(*shared) * fobj->shared_max; nshared = krealloc(shared, sz, GFP_NOWAIT | __GFP_NOWARN); if (!nshared) { rcu_read_unlock(); nshared = krealloc(shared, sz, GFP_KERNEL); if (nshared) { shared = nshared; continue; } ret = -ENOMEM; shared_count = 0; break; } shared = nshared; memcpy(shared, fobj->shared, sz); shared_count = fobj->shared_count; } else shared_count = 0; fence_excl = rcu_dereference(obj->fence_excl); retry = read_seqcount_retry(&obj->seq, seq); if (retry) goto unlock; if (!fence_excl || fence_get_rcu(fence_excl)) { unsigned i; for (i = 0; i < shared_count; ++i) { if (fence_get_rcu(shared[i])) continue; /* uh oh, refcount failed, abort and retry */ while (i--) fence_put(shared[i]); if (fence_excl) { fence_put(fence_excl); fence_excl = NULL; } retry = 1; break; } } else retry = 1; unlock: rcu_read_unlock(); } *pshared_count = shared_count; if (shared_count) *pshared = shared; else { *pshared = NULL; kfree(shared); } *pfence_excl = fence_excl; return ret; } EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); long reservation_object_wait_timeout_rcu(struct reservation_object *obj, bool wait_all, bool intr, unsigned long timeout) { struct fence *fence; unsigned seq, shared_count, i = 0; long ret = timeout; if (!timeout) return reservation_object_test_signaled_rcu(obj, wait_all); retry: fence = NULL; shared_count = 0; seq = read_seqcount_begin(&obj->seq); rcu_read_lock(); if (wait_all) { struct reservation_object_list *fobj = rcu_dereference(obj->fence); if (fobj) shared_count = fobj->shared_count; if (read_seqcount_retry(&obj->seq, seq)) goto unlock_retry; for (i = 0; i < shared_count; ++i) { struct fence *lfence = rcu_dereference(fobj->shared[i]); if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) continue; if (!fence_get_rcu(lfence)) goto unlock_retry; if (fence_is_signaled(lfence)) { fence_put(lfence); continue; } 
fence = lfence; break; } } if (!shared_count) { struct fence *fence_excl = rcu_dereference(obj->fence_excl); if (read_seqcount_retry(&obj->seq, seq)) goto unlock_retry; if (fence_excl && !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) { if (!fence_get_rcu(fence_excl)) goto unlock_retry; if (fence_is_signaled(fence_excl)) fence_put(fence_excl); else fence = fence_excl; } } rcu_read_unlock(); if (fence) { ret = fence_wait_timeout(fence, intr, ret); fence_put(fence); if (ret > 0 && wait_all && (i + 1 < shared_count)) goto retry; } return ret; unlock_retry: rcu_read_unlock(); goto retry; } EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu); static inline int reservation_object_test_signaled_single(struct fence *passed_fence) { struct fence *fence, *lfence = passed_fence; int ret = 1; if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { fence = fence_get_rcu(lfence); if (!fence) return -1; ret = !!fence_is_signaled(fence); fence_put(fence); } return ret; } bool reservation_object_test_signaled_rcu(struct reservation_object *obj, bool test_all) { unsigned seq, shared_count; int ret = true; retry: shared_count = 0; seq = read_seqcount_begin(&obj->seq); rcu_read_lock(); if (test_all) { unsigned i; struct reservation_object_list *fobj = rcu_dereference(obj->fence); if (fobj) shared_count = fobj->shared_count; if (read_seqcount_retry(&obj->seq, seq)) goto unlock_retry; for (i = 0; i < shared_count; ++i) { struct fence *fence = rcu_dereference(fobj->shared[i]); ret = reservation_object_test_signaled_single(fence); if (ret < 0) goto unlock_retry; else if (!ret) break; } /* * There could be a read_seqcount_retry here, but nothing cares * about whether it's the old or newer fence pointers that are * signaled. That race could still have happened after checking * read_seqcount_retry. If you care, use ww_mutex_lock. */ } if (!shared_count) { struct fence *fence_excl = rcu_dereference(obj->fence_excl); if (read_seqcount_retry(&obj->seq, seq)) goto unlock_retry; if (fence_excl) { ret = reservation_object_test_signaled_single( fence_excl); if (ret < 0) goto unlock_retry; } } rcu_read_unlock(); return ret; unlock_retry: rcu_read_unlock(); goto retry; } EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
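/*
 * Illustrative sketch (not part of the original file): the expected calling
 * sequence for attaching a shared fence.  Both helpers above require
 * obj->lock (a ww_mutex) to be held, and a slot must be reserved before the
 * fence is added because the add path itself is not allowed to fail.  The
 * function name and the single-object (NULL ww_acquire_ctx) locking are
 * illustrative only.
 */
#if 0	/* example only, never compiled */
static int demo_attach_shared_fence(struct reservation_object *obj,
				    struct fence *fence)
{
	int ret;

	ww_mutex_lock(&obj->lock, NULL);	/* simple single-object locking */

	ret = reservation_object_reserve_shared(obj);	/* may allocate, can fail */
	if (!ret)
		reservation_object_add_shared_fence(obj, fence);	/* cannot fail */

	ww_mutex_unlock(&obj->lock);
	return ret;
}
#endif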
gpl-2.0
sleekmason/cyanogenmod12
drivers/staging/prima/CORE/VOSS/src/vos_types.c
1401
6496
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /**========================================================================= \file vos_Types.c \brief virtual Operating System Servies (vOS) Basic type definitions Copyright 2008 (c) Qualcomm, Incorporated. All Rights Reserved. Qualcomm Confidential and Proprietary. ========================================================================*/ /* $Header$ */ /*-------------------------------------------------------------------------- Include Files ------------------------------------------------------------------------*/ #include "vos_types.h" #include "vos_trace.h" //#include "wlan_libra_config.h" /*-------------------------------------------------------------------------- Preprocessor definitions and constants ------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- Type declarations ------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- \brief vos_atomic_set_U32() - set a U32 variable atomically \param pTarget - pointer to the v_U32_t to set. \param value - the value to set in the v_U32_t variable. \return This function returns the value previously in the v_U32_t before the new value is set. 
\sa vos_atomic_increment_U32(), vos_atomic_decrement_U32() --------------------------------------------------------------------------*/ v_U32_t vos_atomic_set_U32( v_U32_t *pTarget, v_U32_t value ) { v_U32_t oldval; unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } local_irq_save(flags); oldval = *pTarget; *pTarget = value; local_irq_restore(flags); // v_U32_t prev = atomic_read(pTarget); // atomic_set(pTarget, value); return oldval; } /*---------------------------------------------------------------------------- \brief vos_atomic_increment_U32() - Increment a U32 variable atomically \param pTarget - pointer to the v_U32_t to increment. \return This function returns the value of the variable after the increment occurs. \sa vos_atomic_decrement_U32(), vos_atomic_set_U32() --------------------------------------------------------------------------*/ v_U32_t vos_atomic_increment_U32( v_U32_t *pTarget ) { unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } local_irq_save(flags); ++*pTarget; local_irq_restore(flags); return *pTarget; // return atomic_inc_return(pTarget); } /*---------------------------------------------------------------------------- \brief vos_atomic_decrement_U32() - Decrement a U32 variable atomically \param pTarget - pointer to the v_U32_t to decrement. \return This function returns the value of the variable after the decrement occurs. \sa vos_atomic_increment_U32(), vos_atomic_set_U32() --------------------------------------------------------------------------*/ v_U32_t vos_atomic_decrement_U32( v_U32_t *pTarget ) { unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } // return atomic_dec_return(pTarget); local_irq_save(flags); --*pTarget; local_irq_restore(flags); return (*pTarget); } v_U32_t vos_atomic_increment_U32_by_value( v_U32_t *pTarget, v_U32_t value ) { unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } local_irq_save(flags); *pTarget += value ; local_irq_restore(flags); return (*pTarget); } v_U32_t vos_atomic_decrement_U32_by_value( v_U32_t *pTarget, v_U32_t value ) { unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } local_irq_save(flags); *pTarget -= value ; local_irq_restore(flags); return (*pTarget); } v_U32_t vos_get_skip_ssid_check(void) { /**This is needed by only AMSS for interoperatability **/ return 1; } v_U32_t vos_get_skip_11e_check(void) { /* this is needed only for AMSS for interopratability **/ return 1; }
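/*
 * Illustrative sketch (not part of the original file): typical use of the
 * helpers above for a counter shared between contexts.  The counter name and
 * the surrounding logic are made up; only the vos_atomic_*() calls and the
 * VOS_TRACE() usage come from this file.
 */
#if 0 /* example only, never compiled */
static v_U32_t gDemoPendingFrames = 0;

static void demo_frame_queued(void)
{
   /* vos_atomic_increment_U32() returns the post-increment value */
   v_U32_t pending = vos_atomic_increment_U32(&gDemoPendingFrames);

   if (pending > 100)
      VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "%s: %u frames pending", __func__, pending);
}

static void demo_frame_completed(void)
{
   vos_atomic_decrement_U32(&gDemoPendingFrames);
}

static void demo_reset(void)
{
   /* vos_atomic_set_U32() returns the value that was replaced */
   v_U32_t old = vos_atomic_set_U32(&gDemoPendingFrames, 0);
   (void)old;
}
#endif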
gpl-2.0
pressy4pie/kernel_lge_f6mt
arch/mips/kernel/kspd.c
2681
9492
/* * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/unistd.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/fs.h> #include <linux/syscalls.h> #include <linux/workqueue.h> #include <linux/errno.h> #include <linux/list.h> #include <asm/vpe.h> #include <asm/rtlx.h> #include <asm/kspd.h> static struct workqueue_struct *workqueue; static struct work_struct work; extern unsigned long cpu_khz; struct mtsp_syscall { int cmd; unsigned char abi; unsigned char size; }; struct mtsp_syscall_ret { int retval; int errno; }; struct mtsp_syscall_generic { int arg0; int arg1; int arg2; int arg3; int arg4; int arg5; int arg6; }; static struct list_head kspd_notifylist; static int sp_stopping; /* these should match with those in the SDE kit */ #define MTSP_SYSCALL_BASE 0 #define MTSP_SYSCALL_EXIT (MTSP_SYSCALL_BASE + 0) #define MTSP_SYSCALL_OPEN (MTSP_SYSCALL_BASE + 1) #define MTSP_SYSCALL_READ (MTSP_SYSCALL_BASE + 2) #define MTSP_SYSCALL_WRITE (MTSP_SYSCALL_BASE + 3) #define MTSP_SYSCALL_CLOSE (MTSP_SYSCALL_BASE + 4) #define MTSP_SYSCALL_LSEEK32 (MTSP_SYSCALL_BASE + 5) #define MTSP_SYSCALL_ISATTY (MTSP_SYSCALL_BASE + 6) #define MTSP_SYSCALL_GETTIME (MTSP_SYSCALL_BASE + 7) #define MTSP_SYSCALL_PIPEFREQ (MTSP_SYSCALL_BASE + 8) #define MTSP_SYSCALL_GETTOD (MTSP_SYSCALL_BASE + 9) #define MTSP_SYSCALL_IOCTL (MTSP_SYSCALL_BASE + 10) #define MTSP_O_RDONLY 0x0000 #define MTSP_O_WRONLY 0x0001 #define MTSP_O_RDWR 0x0002 #define MTSP_O_NONBLOCK 0x0004 #define MTSP_O_APPEND 0x0008 #define MTSP_O_SHLOCK 0x0010 #define MTSP_O_EXLOCK 0x0020 #define MTSP_O_ASYNC 0x0040 /* XXX: check which of these is actually O_SYNC vs O_DSYNC */ #define MTSP_O_FSYNC O_SYNC #define MTSP_O_NOFOLLOW 0x0100 #define MTSP_O_SYNC 0x0080 #define MTSP_O_CREAT 0x0200 #define MTSP_O_TRUNC 0x0400 #define MTSP_O_EXCL 0x0800 #define MTSP_O_BINARY 0x8000 extern int tclimit; struct apsp_table { int sp; int ap; }; /* we might want to do the mode flags too */ struct apsp_table open_flags_table[] = { { MTSP_O_RDWR, O_RDWR }, { MTSP_O_WRONLY, O_WRONLY }, { MTSP_O_CREAT, O_CREAT }, { MTSP_O_TRUNC, O_TRUNC }, { MTSP_O_NONBLOCK, O_NONBLOCK }, { MTSP_O_APPEND, O_APPEND }, { MTSP_O_NOFOLLOW, O_NOFOLLOW } }; struct apsp_table syscall_command_table[] = { { MTSP_SYSCALL_OPEN, __NR_open }, { MTSP_SYSCALL_CLOSE, __NR_close }, { MTSP_SYSCALL_READ, __NR_read }, { MTSP_SYSCALL_WRITE, __NR_write }, { MTSP_SYSCALL_LSEEK32, __NR_lseek }, { MTSP_SYSCALL_IOCTL, __NR_ioctl } }; static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3) { register long int _num __asm__("$2") = num; register long int _arg0 __asm__("$4") = arg0; register long int _arg1 __asm__("$5") = arg1; register long int _arg2 __asm__("$6") = arg2; register long int _arg3 __asm__("$7") = arg3; mm_segment_t old_fs; old_fs = get_fs(); 
set_fs(KERNEL_DS); __asm__ __volatile__ ( " syscall \n" : "=r" (_num), "=r" (_arg3) : "r" (_num), "r" (_arg0), "r" (_arg1), "r" (_arg2), "r" (_arg3)); set_fs(old_fs); /* $a3 is error flag */ if (_arg3) return -_num; return _num; } static int translate_syscall_command(int cmd) { int i; int ret = -1; for (i = 0; i < ARRAY_SIZE(syscall_command_table); i++) { if ((cmd == syscall_command_table[i].sp)) return syscall_command_table[i].ap; } return ret; } static unsigned int translate_open_flags(int flags) { int i; unsigned int ret = 0; for (i = 0; i < ARRAY_SIZE(open_flags_table); i++) { if( (flags & open_flags_table[i].sp) ) { ret |= open_flags_table[i].ap; } } return ret; } static int sp_setfsuidgid(uid_t uid, gid_t gid) { struct cred *new; new = prepare_creds(); if (!new) return -ENOMEM; new->fsuid = uid; new->fsgid = gid; commit_creds(new); return 0; } /* * Expects a request to be on the sysio channel. Reads it. Decides whether * its a linux syscall and runs it, or whatever. Puts the return code back * into the request and sends the whole thing back. */ void sp_work_handle_request(void) { struct mtsp_syscall sc; struct mtsp_syscall_generic generic; struct mtsp_syscall_ret ret; struct kspd_notifications *n; unsigned long written; mm_segment_t old_fs; struct timeval tv; struct timezone tz; int err, cmd; char *vcwd; int size; ret.retval = -1; old_fs = get_fs(); set_fs(KERNEL_DS); if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall))) { set_fs(old_fs); printk(KERN_ERR "Expected request but nothing to read\n"); return; } size = sc.size; if (size) { if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size)) { set_fs(old_fs); printk(KERN_ERR "Expected request but nothing to read\n"); return; } } /* Run the syscall at the privilege of the user who loaded the SP program */ if (vpe_getuid(tclimit)) { err = sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit)); if (!err) pr_err("Change of creds failed\n"); } switch (sc.cmd) { /* needs the flags argument translating from SDE kit to linux */ case MTSP_SYSCALL_PIPEFREQ: ret.retval = cpu_khz * 1000; ret.errno = 0; break; case MTSP_SYSCALL_GETTOD: memset(&tz, 0, sizeof(tz)); if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, (int)&tz, 0, 0)) == 0) ret.retval = tv.tv_sec; break; case MTSP_SYSCALL_EXIT: list_for_each_entry(n, &kspd_notifylist, list) n->kspd_sp_exit(tclimit); sp_stopping = 1; printk(KERN_DEBUG "KSPD got exit syscall from SP exitcode %d\n", generic.arg0); break; case MTSP_SYSCALL_OPEN: generic.arg1 = translate_open_flags(generic.arg1); vcwd = vpe_getcwd(tclimit); /* change to cwd of the process that loaded the SP program */ old_fs = get_fs(); set_fs(KERNEL_DS); sys_chdir(vcwd); set_fs(old_fs); sc.cmd = __NR_open; /* fall through */ default: if ((sc.cmd >= __NR_Linux) && (sc.cmd <= (__NR_Linux + __NR_Linux_syscalls)) ) cmd = sc.cmd; else cmd = translate_syscall_command(sc.cmd); if (cmd >= 0) { ret.retval = sp_syscall(cmd, generic.arg0, generic.arg1, generic.arg2, generic.arg3); } else printk(KERN_WARNING "KSPD: Unknown SP syscall number %d\n", sc.cmd); break; } /* switch */ if (vpe_getuid(tclimit)) { err = sp_setfsuidgid(0, 0); if (!err) pr_err("restoring old creds failed\n"); } old_fs = get_fs(); set_fs(KERNEL_DS); written = rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(ret)); set_fs(old_fs); if (written < sizeof(ret)) printk("KSPD: sp_work_handle_request failed to send to SP\n"); } static void sp_cleanup(void) { struct files_struct *files = current->files; int i, j; struct fdtable *fdt; j = 0; /* * It is safe to dereference the 
fd table without RCU or * ->file_lock */ fdt = files_fdtable(files); for (;;) { unsigned long set; i = j * __NFDBITS; if (i >= fdt->max_fds) break; set = fdt->open_fds[j++]; while (set) { if (set & 1) { struct file * file = xchg(&fdt->fd[i], NULL); if (file) filp_close(file, files); } i++; set >>= 1; } } /* Put daemon cwd back to root to avoid umount problems */ sys_chdir("/"); } static int channel_open; /* the work handler */ static void sp_work(struct work_struct *unused) { if (!channel_open) { if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) { printk("KSPD: unable to open sp channel\n"); sp_stopping = 1; } else { channel_open++; printk(KERN_DEBUG "KSPD: SP channel opened\n"); } } else { /* wait for some data, allow it to sleep */ rtlx_read_poll(RTLX_CHANNEL_SYSIO, 1); /* Check we haven't been woken because we are stopping */ if (!sp_stopping) sp_work_handle_request(); } if (!sp_stopping) queue_work(workqueue, &work); else sp_cleanup(); } static void startwork(int vpe) { sp_stopping = channel_open = 0; if (workqueue == NULL) { if ((workqueue = create_singlethread_workqueue("kspd")) == NULL) { printk(KERN_ERR "unable to start kspd\n"); return; } INIT_WORK(&work, sp_work); } queue_work(workqueue, &work); } static void stopwork(int vpe) { sp_stopping = 1; printk(KERN_DEBUG "KSPD: SP stopping\n"); } void kspd_notify(struct kspd_notifications *notify) { list_add(&notify->list, &kspd_notifylist); } static struct vpe_notifications notify; static int kspd_module_init(void) { INIT_LIST_HEAD(&kspd_notifylist); notify.start = startwork; notify.stop = stopwork; vpe_notify(tclimit, &notify); return 0; } static void kspd_module_exit(void) { } module_init(kspd_module_init); module_exit(kspd_module_exit); MODULE_DESCRIPTION("MIPS KSPD"); MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc."); MODULE_LICENSE("GPL");
gpl-2.0
jonypx09/new_kernel_kylessopen
net/sched/sch_mq.c
2937
5745
/* * net/sched/sch_mq.c Classful multiqueue dummy scheduler * * Copyright (c) 2009 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <net/netlink.h> #include <net/pkt_sched.h> struct mq_sched { struct Qdisc **qdiscs; }; static void mq_destroy(struct Qdisc *sch) { struct net_device *dev = qdisc_dev(sch); struct mq_sched *priv = qdisc_priv(sch); unsigned int ntx; if (!priv->qdiscs) return; for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++) qdisc_destroy(priv->qdiscs[ntx]); kfree(priv->qdiscs); } static int mq_init(struct Qdisc *sch, struct nlattr *opt) { struct net_device *dev = qdisc_dev(sch); struct mq_sched *priv = qdisc_priv(sch); struct netdev_queue *dev_queue; struct Qdisc *qdisc; unsigned int ntx; if (sch->parent != TC_H_ROOT) return -EOPNOTSUPP; if (!netif_is_multiqueue(dev)) return -EOPNOTSUPP; /* pre-allocate qdiscs, attachment can't fail */ priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), GFP_KERNEL); if (priv->qdiscs == NULL) return -ENOMEM; for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { dev_queue = netdev_get_tx_queue(dev, ntx); qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops, TC_H_MAKE(TC_H_MAJ(sch->handle), TC_H_MIN(ntx + 1))); if (qdisc == NULL) goto err; priv->qdiscs[ntx] = qdisc; } sch->flags |= TCQ_F_MQROOT; return 0; err: mq_destroy(sch); return -ENOMEM; } static void mq_attach(struct Qdisc *sch) { struct net_device *dev = qdisc_dev(sch); struct mq_sched *priv = qdisc_priv(sch); struct Qdisc *qdisc; unsigned int ntx; for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { qdisc = priv->qdiscs[ntx]; qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc); if (qdisc) qdisc_destroy(qdisc); } kfree(priv->qdiscs); priv->qdiscs = NULL; } static int mq_dump(struct Qdisc *sch, struct sk_buff *skb) { struct net_device *dev = qdisc_dev(sch); struct Qdisc *qdisc; unsigned int ntx; sch->q.qlen = 0; memset(&sch->bstats, 0, sizeof(sch->bstats)); memset(&sch->qstats, 0, sizeof(sch->qstats)); for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; spin_lock_bh(qdisc_lock(qdisc)); sch->q.qlen += qdisc->q.qlen; sch->bstats.bytes += qdisc->bstats.bytes; sch->bstats.packets += qdisc->bstats.packets; sch->qstats.qlen += qdisc->qstats.qlen; sch->qstats.backlog += qdisc->qstats.backlog; sch->qstats.drops += qdisc->qstats.drops; sch->qstats.requeues += qdisc->qstats.requeues; sch->qstats.overlimits += qdisc->qstats.overlimits; spin_unlock_bh(qdisc_lock(qdisc)); } return 0; } static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl) { struct net_device *dev = qdisc_dev(sch); unsigned long ntx = cl - 1; if (ntx >= dev->num_tx_queues) return NULL; return netdev_get_tx_queue(dev, ntx); } static struct netdev_queue *mq_select_queue(struct Qdisc *sch, struct tcmsg *tcm) { unsigned int ntx = TC_H_MIN(tcm->tcm_parent); struct netdev_queue *dev_queue = mq_queue_get(sch, ntx); if (!dev_queue) { struct net_device *dev = qdisc_dev(sch); return netdev_get_tx_queue(dev, 0); } return dev_queue; } static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, struct Qdisc **old) { struct netdev_queue *dev_queue = mq_queue_get(sch, cl); struct net_device *dev = qdisc_dev(sch); 
if (dev->flags & IFF_UP) dev_deactivate(dev); *old = dev_graft_qdisc(dev_queue, new); if (dev->flags & IFF_UP) dev_activate(dev); return 0; } static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl) { struct netdev_queue *dev_queue = mq_queue_get(sch, cl); return dev_queue->qdisc_sleeping; } static unsigned long mq_get(struct Qdisc *sch, u32 classid) { unsigned int ntx = TC_H_MIN(classid); if (!mq_queue_get(sch, ntx)) return 0; return ntx; } static void mq_put(struct Qdisc *sch, unsigned long cl) { } static int mq_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { struct netdev_queue *dev_queue = mq_queue_get(sch, cl); tcm->tcm_parent = TC_H_ROOT; tcm->tcm_handle |= TC_H_MIN(cl); tcm->tcm_info = dev_queue->qdisc_sleeping->handle; return 0; } static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct gnet_dump *d) { struct netdev_queue *dev_queue = mq_queue_get(sch, cl); sch = dev_queue->qdisc_sleeping; sch->qstats.qlen = sch->q.qlen; if (gnet_stats_copy_basic(d, &sch->bstats) < 0 || gnet_stats_copy_queue(d, &sch->qstats) < 0) return -1; return 0; } static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg) { struct net_device *dev = qdisc_dev(sch); unsigned int ntx; if (arg->stop) return; arg->count = arg->skip; for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) { if (arg->fn(sch, ntx + 1, arg) < 0) { arg->stop = 1; break; } arg->count++; } } static const struct Qdisc_class_ops mq_class_ops = { .select_queue = mq_select_queue, .graft = mq_graft, .leaf = mq_leaf, .get = mq_get, .put = mq_put, .walk = mq_walk, .dump = mq_dump_class, .dump_stats = mq_dump_class_stats, }; struct Qdisc_ops mq_qdisc_ops __read_mostly = { .cl_ops = &mq_class_ops, .id = "mq", .priv_size = sizeof(struct mq_sched), .init = mq_init, .destroy = mq_destroy, .attach = mq_attach, .dump = mq_dump, .owner = THIS_MODULE, };
gpl-2.0
parheliamm/i939u2
lib/kref.c
3193
2667
/* * kref.c - library routines for handling generic reference counted objects * * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2004 IBM Corp. * * based on lib/kobject.c which was: * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org> * * This file is released under the GPLv2. * */ #include <linux/kref.h> #include <linux/module.h> #include <linux/slab.h> /** * kref_init - initialize object. * @kref: object in question. */ void kref_init(struct kref *kref) { atomic_set(&kref->refcount, 1); smp_mb(); } /** * kref_get - increment refcount for object. * @kref: object. */ void kref_get(struct kref *kref) { WARN_ON(!atomic_read(&kref->refcount)); atomic_inc(&kref->refcount); smp_mb__after_atomic_inc(); } /** * kref_put - decrement refcount for object. * @kref: object. * @release: pointer to the function that will clean up the object when the * last reference to the object is released. * This pointer is required, and it is not acceptable to pass kfree * in as this function. * * Decrement the refcount, and if 0, call release(). * Return 1 if the object was removed, otherwise return 0. Beware, if this * function returns 0, you still can not count on the kref from remaining in * memory. Only use the return value if you want to see if the kref is now * gone, not present. */ int kref_put(struct kref *kref, void (*release)(struct kref *kref)) { WARN_ON(release == NULL); WARN_ON(release == (void (*)(struct kref *))kfree); if (atomic_dec_and_test(&kref->refcount)) { release(kref); return 1; } return 0; } /** * kref_sub - subtract a number of refcounts for object. * @kref: object. * @count: Number of recounts to subtract. * @release: pointer to the function that will clean up the object when the * last reference to the object is released. * This pointer is required, and it is not acceptable to pass kfree * in as this function. * * Subtract @count from the refcount, and if 0, call release(). * Return 1 if the object was removed, otherwise return 0. Beware, if this * function returns 0, you still can not count on the kref from remaining in * memory. Only use the return value if you want to see if the kref is now * gone, not present. */ int kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *kref)) { WARN_ON(release == NULL); WARN_ON(release == (void (*)(struct kref *))kfree); if (atomic_sub_and_test((int) count, &kref->refcount)) { release(kref); return 1; } return 0; } EXPORT_SYMBOL(kref_init); EXPORT_SYMBOL(kref_get); EXPORT_SYMBOL(kref_put); EXPORT_SYMBOL(kref_sub);
gpl-2.0
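The kref helpers in the file above are normally embedded inside a larger object: the object starts life with one reference from kref_init(), callers take and drop references with kref_get()/kref_put(), and the release callback passed to kref_put() frees the object when the count hits zero. A minimal sketch of that pattern follows; the structure name, fields, and helper names are illustrative only, not taken from lib/kref.c.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical reference-counted object embedding a struct kref. */
struct demo_obj {
	struct kref ref;
	int payload;
};

/* Called by kref_put() when the last reference is dropped. */
static void demo_release(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, ref);

	kfree(obj);
}

static struct demo_obj *demo_create(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->ref);	/* refcount starts at 1 */
	return obj;
}

static void demo_get(struct demo_obj *obj)
{
	kref_get(&obj->ref);
}

static void demo_put(struct demo_obj *obj)
{
	kref_put(&obj->ref, demo_release);	/* may free obj */
}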
glewarne/testing
arch/mips/loongson/common/bonito-irq.c
3449
1413
/* * Copyright 2001 MontaVista Software Inc. * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net * Copyright (C) 2000, 2001 Ralf Baechle (ralf@gnu.org) * * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/interrupt.h> #include <linux/compiler.h> #include <loongson.h> static inline void bonito_irq_enable(struct irq_data *d) { LOONGSON_INTENSET = (1 << (d->irq - LOONGSON_IRQ_BASE)); mmiowb(); } static inline void bonito_irq_disable(struct irq_data *d) { LOONGSON_INTENCLR = (1 << (d->irq - LOONGSON_IRQ_BASE)); mmiowb(); } static struct irq_chip bonito_irq_type = { .name = "bonito_irq", .irq_mask = bonito_irq_disable, .irq_unmask = bonito_irq_enable, }; static struct irqaction __maybe_unused dma_timeout_irqaction = { .handler = no_action, .name = "dma_timeout", }; void bonito_irq_init(void) { u32 i; for (i = LOONGSON_IRQ_BASE; i < LOONGSON_IRQ_BASE + 32; i++) irq_set_chip_and_handler(i, &bonito_irq_type, handle_level_irq); #ifdef CONFIG_CPU_LOONGSON2E setup_irq(LOONGSON_IRQ_BASE + 10, &dma_timeout_irqaction); #endif }
gpl-2.0
bestgames1/android_kernel_samsung_kylepro
arch/alpha/mm/fault.c
3961
5797
/* * linux/arch/alpha/mm/fault.c * * Copyright (C) 1995 Linus Torvalds */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <asm/io.h> #define __EXTERN_INLINE inline #include <asm/mmu_context.h> #include <asm/tlbflush.h> #undef __EXTERN_INLINE #include <linux/signal.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/module.h> #include <asm/uaccess.h> extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *); /* * Force a new ASN for a task. */ #ifndef CONFIG_SMP unsigned long last_asn = ASN_FIRST_VERSION; #endif void __load_new_mm_context(struct mm_struct *next_mm) { unsigned long mmc; struct pcb_struct *pcb; mmc = __get_new_mm_context(next_mm, smp_processor_id()); next_mm->context[smp_processor_id()] = mmc; pcb = &current_thread_info()->pcb; pcb->asn = mmc & HARDWARE_ASN_MASK; pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; __reload_thread(pcb); } /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to handle_mm_fault(). * * mmcsr: * 0 = translation not valid * 1 = access violation * 2 = fault-on-read * 3 = fault-on-execute * 4 = fault-on-write * * cause: * -1 = instruction fetch * 0 = load * 1 = store * * Registers $9 through $15 are saved in a block just prior to `regs' and * are saved and restored around the call to allow exception code to * modify them. */ /* Macro for exception fixup code to access integer registers. */ #define dpf_reg(r) \ (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ (r) <= 18 ? (r)+8 : (r)-10]) asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs) { struct vm_area_struct * vma; struct mm_struct *mm = current->mm; const struct exception_table_entry *fixup; int fault, si_code = SEGV_MAPERR; siginfo_t info; /* As of EV6, a load into $31/$f31 is a prefetch, and never faults (or is suppressed by the PALcode). Support that for older CPUs by ignoring such an instruction. */ if (cause == 0) { unsigned int insn; __get_user(insn, (unsigned int __user *)regs->pc); if ((insn >> 21 & 0x1f) == 0x1f && /* ldq ldl ldt lds ldg ldf ldwu ldbu */ (1ul << (insn >> 26) & 0x30f00001400ul)) { regs->pc += 4; return; } } /* If we're in an interrupt context, or have no user context, we must not take the fault. */ if (!mm || in_atomic()) goto no_context; #ifdef CONFIG_ALPHA_LARGE_VMALLOC if (address >= TASK_SIZE) goto vmalloc_fault; #endif down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) goto bad_area; if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* Ok, we have a good vm_area for this memory access, so we can handle it. */ good_area: si_code = SEGV_ACCERR; if (cause < 0) { if (!(vma->vm_flags & VM_EXEC)) goto bad_area; } else if (!cause) { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) goto bad_area; } else { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; } /* If for any reason at all we couldn't handle the fault, make sure we exit gracefully rather than endlessly redo the fault. */ fault = handle_mm_fault(mm, vma, address, cause > 0 ? 
FAULT_FLAG_WRITE : 0); up_read(&mm->mmap_sem); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (fault & VM_FAULT_MAJOR) current->maj_flt++; else current->min_flt++; return; /* Something tried to access memory that isn't in our memory map. Fix it, but check if it's kernel or user first. */ bad_area: up_read(&mm->mmap_sem); if (user_mode(regs)) goto do_sigsegv; no_context: /* Are we prepared to handle this fault as an exception? */ if ((fixup = search_exception_tables(regs->pc)) != 0) { unsigned long newpc; newpc = fixup_exception(dpf_reg, fixup, regs->pc); regs->pc = newpc; return; } /* Oops. The kernel tried to access some bad page. We'll have to terminate things with extreme prejudice. */ printk(KERN_ALERT "Unable to handle kernel paging request at " "virtual address %016lx\n", address); die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16); do_exit(SIGKILL); /* We ran out of memory, or some other thing happened to us that made us unable to handle the page fault gracefully. */ out_of_memory: if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: /* Send a sigbus, regardless of whether we were in kernel or user mode. */ info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRERR; info.si_addr = (void __user *) address; force_sig_info(SIGBUS, &info, current); if (!user_mode(regs)) goto no_context; return; do_sigsegv: info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = si_code; info.si_addr = (void __user *) address; force_sig_info(SIGSEGV, &info, current); return; #ifdef CONFIG_ALPHA_LARGE_VMALLOC vmalloc_fault: if (user_mode(regs)) goto do_sigsegv; else { /* Synchronize this task's top level page-table with the "reference" page table from init. */ long index = pgd_index(address); pgd_t *pgd, *pgd_k; pgd = current->active_mm->pgd + index; pgd_k = swapper_pg_dir + index; if (!pgd_present(*pgd) && pgd_present(*pgd_k)) { pgd_val(*pgd) = pgd_val(*pgd_k); return; } goto no_context; } #endif }
gpl-2.0
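Userspace sees the SIGSEGV raised by the do_sigsegv path above through an ordinary sigaction handler; si_addr and si_code carry exactly the values the fault handler fills into the siginfo (SEGV_MAPERR or SEGV_ACCERR plus the faulting address). A minimal, architecture-independent sketch; the deliberate NULL store is only there to provoke a fault for demonstration.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* SA_SIGINFO handler: the kernel fills si_addr with the faulting address
 * and si_code with SEGV_MAPERR or SEGV_ACCERR, as in do_page_fault above.
 * fprintf() is not async-signal-safe; acceptable for a one-shot demo. */
static void segv_handler(int sig, siginfo_t *info, void *ucontext)
{
	fprintf(stderr, "SIGSEGV at %p, si_code=%d\n",
		info->si_addr, info->si_code);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0 = 0;		/* provoke the fault */
	return 0;
}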
projectz201408/zkernel
arch/um/drivers/slirp_kern.c
4729
2652
/* * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL. */ #include <linux/if_arp.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/string.h> #include <net_kern.h> #include <net_user.h> #include "slirp.h" struct slirp_init { struct arg_list_dummy_wrapper argw; /* XXX should be simpler... */ }; void slirp_init(struct net_device *dev, void *data) { struct uml_net_private *private; struct slirp_data *spri; struct slirp_init *init = data; int i; private = netdev_priv(dev); spri = (struct slirp_data *) private->user; spri->argw = init->argw; spri->pid = -1; spri->slave = -1; spri->dev = dev; slip_proto_init(&spri->slip); dev->hard_header_len = 0; dev->header_ops = NULL; dev->addr_len = 0; dev->type = ARPHRD_SLIP; dev->tx_queue_len = 256; dev->flags = IFF_NOARP; printk("SLIRP backend - command line:"); for (i = 0; spri->argw.argv[i] != NULL; i++) printk(" '%s'",spri->argw.argv[i]); printk("\n"); } static unsigned short slirp_protocol(struct sk_buff *skbuff) { return htons(ETH_P_IP); } static int slirp_read(int fd, struct sk_buff *skb, struct uml_net_private *lp) { return slirp_user_read(fd, skb_mac_header(skb), skb->dev->mtu, (struct slirp_data *) &lp->user); } static int slirp_write(int fd, struct sk_buff *skb, struct uml_net_private *lp) { return slirp_user_write(fd, skb->data, skb->len, (struct slirp_data *) &lp->user); } const struct net_kern_info slirp_kern_info = { .init = slirp_init, .protocol = slirp_protocol, .read = slirp_read, .write = slirp_write, }; static int slirp_setup(char *str, char **mac_out, void *data) { struct slirp_init *init = data; int i=0; *init = ((struct slirp_init) { .argw = { { "slirp", NULL } } }); str = split_if_spec(str, mac_out, NULL); if (str == NULL) /* no command line given after MAC addr */ return 1; do { if (i >= SLIRP_MAX_ARGS - 1) { printk(KERN_WARNING "slirp_setup: truncating slirp " "arguments\n"); break; } init->argw.argv[i++] = str; while(*str && *str!=',') { if (*str == '_') *str=' '; str++; } if (*str != ',') break; *str++ = '\0'; } while (1); init->argw.argv[i] = NULL; return 1; } static struct transport slirp_transport = { .list = LIST_HEAD_INIT(slirp_transport.list), .name = "slirp", .setup = slirp_setup, .user = &slirp_user_info, .kern = &slirp_kern_info, .private_size = sizeof(struct slirp_data), .setup_size = sizeof(struct slirp_init), }; static int register_slirp(void) { register_transport(&slirp_transport); return 0; } late_initcall(register_slirp);
gpl-2.0
zparallax/amplitude_kernel_tw
arch/arm/mach-at91/board-sam9g20ek.c
4729
9780
/* * Copyright (C) 2005 SAN People * Copyright (C) 2008 Atmel * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/at73c213.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/clk.h> #include <linux/regulator/machine.h> #include <linux/regulator/fixed.h> #include <linux/regulator/consumer.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/board.h> #include <mach/at91sam9_smc.h> #include <mach/system_rev.h> #include "sam9_smc.h" #include "generic.h" /* * board revision encoding * bit 0: * 0 => 1 sd/mmc slot * 1 => 2 sd/mmc slots connectors (board from revision C) */ #define HAVE_2MMC (1 << 0) static int inline ek_have_2mmc(void) { return machine_is_at91sam9g20ek_2mmc() || (system_rev & HAVE_2MMC); } static void __init ek_init_early(void) { /* Initialize processor: 18.432 MHz crystal */ at91_initialize(18432000); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD | ATMEL_UART_RI); /* USART1 on ttyS2. (Rx, Tx, RTS, CTS) */ at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } /* * USB Host port */ static struct at91_usbh_data __initdata ek_usbh_data = { .ports = 2, .vbus_pin = {-EINVAL, -EINVAL}, .overcurrent_pin= {-EINVAL, -EINVAL}, }; /* * USB Device port */ static struct at91_udc_data __initdata ek_udc_data = { .vbus_pin = AT91_PIN_PC5, .pullup_pin = -EINVAL, /* pull-up driven by UDC */ }; /* * SPI devices. 
*/ static struct spi_board_info ek_spi_devices[] = { #if !(defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_AT91)) { /* DataFlash chip */ .modalias = "mtd_dataflash", .chip_select = 1, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, }, #if defined(CONFIG_MTD_AT91_DATAFLASH_CARD) { /* DataFlash card */ .modalias = "mtd_dataflash", .chip_select = 0, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, }, #endif #endif }; /* * MACB Ethernet device */ static struct macb_platform_data __initdata ek_macb_data = { .phy_irq_pin = AT91_PIN_PA7, .is_rmii = 1, }; static void __init ek_add_device_macb(void) { if (ek_have_2mmc()) ek_macb_data.phy_irq_pin = AT91_PIN_PB0; at91_add_device_eth(&ek_macb_data); } /* * NAND flash */ static struct mtd_partition __initdata ek_nand_partition[] = { { .name = "Bootstrap", .offset = 0, .size = 4 * SZ_1M, }, { .name = "Partition 1", .offset = MTDPART_OFS_NXTBLK, .size = 60 * SZ_1M, }, { .name = "Partition 2", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, }, }; /* det_pin is not connected */ static struct atmel_nand_data __initdata ek_nand_data = { .ale = 21, .cle = 22, .rdy_pin = AT91_PIN_PC13, .enable_pin = AT91_PIN_PC14, .det_pin = -EINVAL, .ecc_mode = NAND_ECC_SOFT, .on_flash_bbt = 1, .parts = ek_nand_partition, .num_parts = ARRAY_SIZE(ek_nand_partition), }; static struct sam9_smc_config __initdata ek_nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 2, .ncs_write_setup = 0, .nwe_setup = 2, .ncs_read_pulse = 4, .nrd_pulse = 4, .ncs_write_pulse = 4, .nwe_pulse = 4, .read_cycle = 7, .write_cycle = 7, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE, .tdf_cycles = 3, }; static void __init ek_add_device_nand(void) { ek_nand_data.bus_width_16 = board_have_nand_16bit(); /* setup bus-width (8 or 16) */ if (ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; else ek_nand_smc_config.mode |= AT91_SMC_DBW_8; /* configure chip-select 3 (NAND) */ sam9_smc_configure(0, 3, &ek_nand_smc_config); at91_add_device_nand(&ek_nand_data); } /* * MCI (SD/MMC) * wp_pin and vcc_pin are not connected */ #if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) static struct mci_platform_data __initdata ek_mmc_data = { .slot[1] = { .bus_width = 4, .detect_pin = AT91_PIN_PC9, .wp_pin = -EINVAL, }, }; #else static struct at91_mmc_data __initdata ek_mmc_data = { .slot_b = 1, /* Only one slot so use slot B */ .wire4 = 1, .det_pin = AT91_PIN_PC9, .wp_pin = -EINVAL, .vcc_pin = -EINVAL, }; #endif static void __init ek_add_device_mmc(void) { #if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) if (ek_have_2mmc()) { ek_mmc_data.slot[0].bus_width = 4; ek_mmc_data.slot[0].detect_pin = AT91_PIN_PC2; ek_mmc_data.slot[0].wp_pin = -1; } at91_add_device_mci(0, &ek_mmc_data); #else at91_add_device_mmc(0, &ek_mmc_data); #endif } /* * LEDs */ static struct gpio_led ek_leds[] = { { /* "bottom" led, green, userled1 to be defined */ .name = "ds5", .gpio = AT91_PIN_PA6, .active_low = 1, .default_trigger = "none", }, { /* "power" led, yellow */ .name = "ds1", .gpio = AT91_PIN_PA9, .default_trigger = "heartbeat", } }; static void __init ek_add_device_gpio_leds(void) { if (ek_have_2mmc()) { ek_leds[0].gpio = AT91_PIN_PB8; ek_leds[1].gpio = AT91_PIN_PB9; } at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds)); } /* * GPIO Buttons */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button ek_buttons[] = { { .gpio = AT91_PIN_PA30, .code = BTN_3, .desc = "Button 3", .active_low = 1, .wakeup = 
1, }, { .gpio = AT91_PIN_PA31, .code = BTN_4, .desc = "Button 4", .active_low = 1, .wakeup = 1, } }; static struct gpio_keys_platform_data ek_button_data = { .buttons = ek_buttons, .nbuttons = ARRAY_SIZE(ek_buttons), }; static struct platform_device ek_button_device = { .name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { .platform_data = &ek_button_data, } }; static void __init ek_add_device_buttons(void) { at91_set_gpio_input(AT91_PIN_PA30, 1); /* btn3 */ at91_set_deglitch(AT91_PIN_PA30, 1); at91_set_gpio_input(AT91_PIN_PA31, 1); /* btn4 */ at91_set_deglitch(AT91_PIN_PA31, 1); platform_device_register(&ek_button_device); } #else static void __init ek_add_device_buttons(void) {} #endif #if defined(CONFIG_REGULATOR_FIXED_VOLTAGE) || defined(CONFIG_REGULATOR_FIXED_VOLTAGE_MODULE) static struct regulator_consumer_supply ek_audio_consumer_supplies[] = { REGULATOR_SUPPLY("AVDD", "0-001b"), REGULATOR_SUPPLY("HPVDD", "0-001b"), REGULATOR_SUPPLY("DBVDD", "0-001b"), REGULATOR_SUPPLY("DCVDD", "0-001b"), }; static struct regulator_init_data ek_avdd_reg_init_data = { .constraints = { .name = "3V3", .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, .consumer_supplies = ek_audio_consumer_supplies, .num_consumer_supplies = ARRAY_SIZE(ek_audio_consumer_supplies), }; static struct fixed_voltage_config ek_vdd_pdata = { .supply_name = "board-3V3", .microvolts = 3300000, .gpio = -EINVAL, .enabled_at_boot = 0, .init_data = &ek_avdd_reg_init_data, }; static struct platform_device ek_voltage_regulator = { .name = "reg-fixed-voltage", .id = -1, .num_resources = 0, .dev = { .platform_data = &ek_vdd_pdata, }, }; static void __init ek_add_regulators(void) { platform_device_register(&ek_voltage_regulator); } #else static void __init ek_add_regulators(void) {} #endif static struct i2c_board_info __initdata ek_i2c_devices[] = { { I2C_BOARD_INFO("24c512", 0x50) }, { I2C_BOARD_INFO("wm8731", 0x1b) }, }; static void __init ek_board_init(void) { /* Serial */ at91_add_device_serial(); /* USB Host */ at91_add_device_usbh(&ek_usbh_data); /* USB Device */ at91_add_device_udc(&ek_udc_data); /* SPI */ at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices)); /* NAND */ ek_add_device_nand(); /* Ethernet */ ek_add_device_macb(); /* Regulators */ ek_add_regulators(); /* MMC */ ek_add_device_mmc(); /* I2C */ at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices)); /* LEDs */ ek_add_device_gpio_leds(); /* Push Buttons */ ek_add_device_buttons(); /* PCK0 provides MCLK to the WM8731 */ at91_set_B_periph(AT91_PIN_PC1, 0); /* SSC (for WM8731) */ at91_add_device_ssc(AT91SAM9260_ID_SSC, ATMEL_SSC_TX); } MACHINE_START(AT91SAM9G20EK, "Atmel AT91SAM9G20-EK") /* Maintainer: Atmel */ .timer = &at91sam926x_timer, .map_io = at91_map_io, .init_early = ek_init_early, .init_irq = at91_init_irq_default, .init_machine = ek_board_init, MACHINE_END MACHINE_START(AT91SAM9G20EK_2MMC, "Atmel AT91SAM9G20-EK 2 MMC Slot Mod") /* Maintainer: Atmel */ .timer = &at91sam926x_timer, .map_io = at91_map_io, .init_early = ek_init_early, .init_irq = at91_init_irq_default, .init_machine = ek_board_init, MACHINE_END
gpl-2.0
dennes544/aosp_kernel_lge_hammerhead_dennes544
drivers/input/serio/q40kbd.c
4985
4917
/* * Copyright (c) 2000-2001 Vojtech Pavlik * * Based on the work of: * Richard Zidlicky <Richard.Zidlicky@stud.informatik.uni-erlangen.de> */ /* * Q40 PS/2 keyboard controller driver for Linux/m68k */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/module.h> #include <linux/init.h> #include <linux/serio.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/bitops.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/q40_master.h> #include <asm/irq.h> #include <asm/q40ints.h> #define DRV_NAME "q40kbd" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Q40 PS/2 keyboard controller driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); struct q40kbd { struct serio *port; spinlock_t lock; }; static irqreturn_t q40kbd_interrupt(int irq, void *dev_id) { struct q40kbd *q40kbd = dev_id; unsigned long flags; spin_lock_irqsave(&q40kbd->lock, flags); if (Q40_IRQ_KEYB_MASK & master_inb(INTERRUPT_REG)) serio_interrupt(q40kbd->port, master_inb(KEYCODE_REG), 0); master_outb(-1, KEYBOARD_UNLOCK_REG); spin_unlock_irqrestore(&q40kbd->lock, flags); return IRQ_HANDLED; } /* * q40kbd_flush() flushes all data that may be in the keyboard buffers */ static void q40kbd_flush(struct q40kbd *q40kbd) { int maxread = 100; unsigned long flags; spin_lock_irqsave(&q40kbd->lock, flags); while (maxread-- && (Q40_IRQ_KEYB_MASK & master_inb(INTERRUPT_REG))) master_inb(KEYCODE_REG); spin_unlock_irqrestore(&q40kbd->lock, flags); } static void q40kbd_stop(void) { master_outb(0, KEY_IRQ_ENABLE_REG); master_outb(-1, KEYBOARD_UNLOCK_REG); } /* * q40kbd_open() is called when a port is open by the higher layer. * It allocates the interrupt and enables in in the chip. 
*/ static int q40kbd_open(struct serio *port) { struct q40kbd *q40kbd = port->port_data; q40kbd_flush(q40kbd); /* off we go */ master_outb(-1, KEYBOARD_UNLOCK_REG); master_outb(1, KEY_IRQ_ENABLE_REG); return 0; } static void q40kbd_close(struct serio *port) { struct q40kbd *q40kbd = port->port_data; q40kbd_stop(); q40kbd_flush(q40kbd); } static int __devinit q40kbd_probe(struct platform_device *pdev) { struct q40kbd *q40kbd; struct serio *port; int error; q40kbd = kzalloc(sizeof(struct q40kbd), GFP_KERNEL); port = kzalloc(sizeof(struct serio), GFP_KERNEL); if (!q40kbd || !port) { error = -ENOMEM; goto err_free_mem; } q40kbd->port = port; spin_lock_init(&q40kbd->lock); port->id.type = SERIO_8042; port->open = q40kbd_open; port->close = q40kbd_close; port->port_data = q40kbd; port->dev.parent = &pdev->dev; strlcpy(port->name, "Q40 Kbd Port", sizeof(port->name)); strlcpy(port->phys, "Q40", sizeof(port->phys)); q40kbd_stop(); error = request_irq(Q40_IRQ_KEYBOARD, q40kbd_interrupt, 0, DRV_NAME, q40kbd); if (error) { dev_err(&pdev->dev, "Can't get irq %d.\n", Q40_IRQ_KEYBOARD); goto err_free_mem; } serio_register_port(q40kbd->port); platform_set_drvdata(pdev, q40kbd); printk(KERN_INFO "serio: Q40 kbd registered\n"); return 0; err_free_mem: kfree(port); kfree(q40kbd); return error; } static int __devexit q40kbd_remove(struct platform_device *pdev) { struct q40kbd *q40kbd = platform_get_drvdata(pdev); /* * q40kbd_close() will be called as part of unregistering * and will ensure that IRQ is turned off, so it is safe * to unregister port first and free IRQ later. */ serio_unregister_port(q40kbd->port); free_irq(Q40_IRQ_KEYBOARD, q40kbd); kfree(q40kbd); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver q40kbd_driver = { .driver = { .name = "q40kbd", .owner = THIS_MODULE, }, .remove = __devexit_p(q40kbd_remove), }; static int __init q40kbd_init(void) { return platform_driver_probe(&q40kbd_driver, q40kbd_probe); } static void __exit q40kbd_exit(void) { platform_driver_unregister(&q40kbd_driver); } module_init(q40kbd_init); module_exit(q40kbd_exit);
gpl-2.0
zcop/android_kernel_lge_d1lsk
drivers/i2c/busses/i2c-simtec.c
7545
3938
/* * Copyright (C) 2005 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * Simtec Generic I2C Controller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> struct simtec_i2c_data { struct resource *ioarea; void __iomem *reg; struct i2c_adapter adap; struct i2c_algo_bit_data bit; }; #define CMD_SET_SDA (1<<2) #define CMD_SET_SCL (1<<3) #define STATE_SDA (1<<0) #define STATE_SCL (1<<1) /* i2c bit-bus functions */ static void simtec_i2c_setsda(void *pw, int state) { struct simtec_i2c_data *pd = pw; writeb(CMD_SET_SDA | (state ? STATE_SDA : 0), pd->reg); } static void simtec_i2c_setscl(void *pw, int state) { struct simtec_i2c_data *pd = pw; writeb(CMD_SET_SCL | (state ? STATE_SCL : 0), pd->reg); } static int simtec_i2c_getsda(void *pw) { struct simtec_i2c_data *pd = pw; return readb(pd->reg) & STATE_SDA ? 1 : 0; } static int simtec_i2c_getscl(void *pw) { struct simtec_i2c_data *pd = pw; return readb(pd->reg) & STATE_SCL ? 
1 : 0; } /* device registration */ static int simtec_i2c_probe(struct platform_device *dev) { struct simtec_i2c_data *pd; struct resource *res; int size; int ret; pd = kzalloc(sizeof(struct simtec_i2c_data), GFP_KERNEL); if (pd == NULL) { dev_err(&dev->dev, "cannot allocate private data\n"); return -ENOMEM; } platform_set_drvdata(dev, pd); res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&dev->dev, "cannot find IO resource\n"); ret = -ENOENT; goto err; } size = resource_size(res); pd->ioarea = request_mem_region(res->start, size, dev->name); if (pd->ioarea == NULL) { dev_err(&dev->dev, "cannot request IO\n"); ret = -ENXIO; goto err; } pd->reg = ioremap(res->start, size); if (pd->reg == NULL) { dev_err(&dev->dev, "cannot map IO\n"); ret = -ENXIO; goto err_res; } /* setup the private data */ pd->adap.owner = THIS_MODULE; pd->adap.algo_data = &pd->bit; pd->adap.dev.parent = &dev->dev; strlcpy(pd->adap.name, "Simtec I2C", sizeof(pd->adap.name)); pd->bit.data = pd; pd->bit.setsda = simtec_i2c_setsda; pd->bit.setscl = simtec_i2c_setscl; pd->bit.getsda = simtec_i2c_getsda; pd->bit.getscl = simtec_i2c_getscl; pd->bit.timeout = HZ; pd->bit.udelay = 20; ret = i2c_bit_add_bus(&pd->adap); if (ret) goto err_all; return 0; err_all: iounmap(pd->reg); err_res: release_resource(pd->ioarea); kfree(pd->ioarea); err: kfree(pd); return ret; } static int simtec_i2c_remove(struct platform_device *dev) { struct simtec_i2c_data *pd = platform_get_drvdata(dev); i2c_del_adapter(&pd->adap); iounmap(pd->reg); release_resource(pd->ioarea); kfree(pd->ioarea); kfree(pd); return 0; } /* device driver */ static struct platform_driver simtec_i2c_driver = { .driver = { .name = "simtec-i2c", .owner = THIS_MODULE, }, .probe = simtec_i2c_probe, .remove = simtec_i2c_remove, }; module_platform_driver(simtec_i2c_driver); MODULE_DESCRIPTION("Simtec Generic I2C Bus driver"); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:simtec-i2c");
gpl-2.0
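Once a bit-banged adapter like the one above has been registered with i2c_bit_add_bus(), devices behind it are reachable from userspace through the i2c-dev character device. A minimal sketch of that usage; the bus number, slave address 0x50, and register offset are placeholders, not values from the driver above.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

int main(void)
{
	int fd = open("/dev/i2c-0", O_RDWR);	/* bus number is a placeholder */
	uint8_t reg = 0x00, val;

	if (fd < 0)
		return 1;
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0)	/* select the target chip */
		return 1;
	if (write(fd, &reg, 1) != 1)		/* set the register pointer */
		return 1;
	if (read(fd, &val, 1) != 1)		/* read one byte back */
		return 1;
	printf("reg 0x%02x = 0x%02x\n", reg, val);
	close(fd);
	return 0;
}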
piccolo-dev/aquaris-M5
arch/openrisc/kernel/signal.c
1402
9359
/* * OpenRISC signal.c * * Linux architectural port borrowing liberally from similar works of * others. All original copyrights apply as per the original source * declaration. * * Modifications for the OpenRISC architecture: * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/tracehook.h> #include <asm/processor.h> #include <asm/ucontext.h> #include <asm/uaccess.h> #define DEBUG_SIG 0 struct rt_sigframe { struct siginfo *pinfo; void *puc; struct siginfo info; struct ucontext uc; unsigned char retcode[16]; /* trampoline code */ }; static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc) { unsigned int err = 0; /* Alwys make any pending restarted system call return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* * Restore the regs from &sc->regs. * (sc is already checked for VERIFY_READ since the sigframe was * checked in sys_sigreturn previously) */ if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long))) goto badframe; if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long))) goto badframe; if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long))) goto badframe; /* make sure the SM-bit is cleared so user-mode cannot fool us */ regs->sr &= ~SPR_SR_SM; /* TODO: the other ports use regs->orig_XX to disable syscall checks * after this completes, but we don't use that mechanism. maybe we can * use it now ? */ return err; badframe: return 1; } asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp; sigset_t set; /* * Since we stacked the signal on a dword boundary, * then frame should be dword aligned here. If it's * not, then the user is trying to mess with us. */ if (((long)frame) & 3) goto badframe; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return regs->gpr[11]; badframe: force_sig(SIGSEGV, current); return 0; } /* * Set up a signal frame. */ static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, unsigned long mask) { int err = 0; /* copy the regs */ err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long)); err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long)); err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long)); /* then some other stuff */ err |= __put_user(mask, &sc->oldmask); return err; } static inline unsigned long align_sigframe(unsigned long sp) { return sp & ~3UL; } /* * Work out where the signal frame should go. It's either on the user stack * or the alternate stack. 
*/ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) { unsigned long sp = regs->sp; int onsigstack = on_sig_stack(sp); /* redzone */ sp -= STACK_FRAME_OVERHEAD; /* This is the X/Open sanctioned signal stack switching. */ if ((ka->sa.sa_flags & SA_ONSTACK) && !onsigstack) { if (current->sas_ss_size) sp = current->sas_ss_sp + current->sas_ss_size; } sp = align_sigframe(sp - frame_size); /* * If we are on the alternate signal stack and would overflow it, don't. * Return an always-bogus address instead so we will die with SIGSEGV. */ if (onsigstack && !likely(on_sig_stack(sp))) return (void __user *)-1L; return (void __user *)sp; } /* grab and setup a signal frame. * * basically we stack a lot of state info, and arrange for the * user-mode program to return to the kernel using either a * trampoline which performs the syscall sigreturn, or a provided * user-mode trampoline. */ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe *frame; unsigned long return_ip; int err = 0; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user(&frame->uc, &frame->puc); if (ka->sa.sa_flags & SA_SIGINFO) err |= copy_siginfo_to_user(&frame->info, info); if (err) goto give_sigsegv; /* Clear all the bits of the ucontext we don't use. */ err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); err |= __save_altstack(&frame->uc.uc_stack, regs->sp); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; /* trampoline - the desired return ip is the retcode itself */ return_ip = (unsigned long)&frame->retcode; /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */ err |= __put_user(0xa960, (short *)(frame->retcode + 0)); err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2)); err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); if (err) goto give_sigsegv; /* TODO what is the current->exec_domain stuff and invmap ? */ /* Set up registers for signal handler */ regs->pc = (unsigned long)ka->sa.sa_handler; /* what we enter NOW */ regs->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */ regs->gpr[3] = (unsigned long)sig; /* arg 1: signo */ regs->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */ regs->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */ /* actually move the usp to reflect the stacked frame */ regs->sp = (unsigned long)frame; return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } static inline void handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs) { int ret; ret = setup_rt_frame(sig, ka, info, sigmask_to_save(), regs); if (ret) return; signal_delivered(sig, info, ka, regs, test_thread_flag(TIF_SINGLESTEP)); } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. * * Also note that the regs structure given here as an argument, is the latest * pushed pt_regs. 
It may or may not be the same as the first pushed registers * when the initial usermode->kernelmode transition took place. Therefore * we can use user_mode(regs) to see if we came directly from kernel or user * mode below. */ void do_signal(struct pt_regs *regs) { siginfo_t info; int signr; struct k_sigaction ka; /* * We want the common case to go fast, which * is why we may in certain cases get here from * kernel mode. Just return without doing anything * if so. */ if (!user_mode(regs)) return; signr = get_signal_to_deliver(&info, &ka, regs, NULL); /* If we are coming out of a syscall then we need * to check if the syscall was interrupted and wants to be * restarted after handling the signal. If so, the original * syscall number is put back into r11 and the PC rewound to * point at the l.sys instruction that resulted in the * original syscall. Syscall results other than the four * below mean that the syscall executed to completion and no * restart is necessary. */ if (regs->orig_gpr11) { int restart = 0; switch (regs->gpr[11]) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: /* Restart if there is no signal handler */ restart = (signr <= 0); break; case -ERESTARTSYS: /* Restart if there no signal handler or * SA_RESTART flag is set */ restart = (signr <= 0 || (ka.sa.sa_flags & SA_RESTART)); break; case -ERESTARTNOINTR: /* Always restart */ restart = 1; break; } if (restart) { if (regs->gpr[11] == -ERESTART_RESTARTBLOCK) regs->gpr[11] = __NR_restart_syscall; else regs->gpr[11] = regs->orig_gpr11; regs->pc -= 4; } else { regs->gpr[11] = -EINTR; } } if (signr <= 0) { /* no signal to deliver so we just put the saved sigmask * back */ restore_saved_sigmask(); } else { /* signr > 0 */ /* Whee! Actually deliver the signal. */ handle_signal(signr, &info, &ka, regs); } return; } asmlinkage void do_notify_resume(struct pt_regs *regs) { if (current_thread_info()->flags & _TIF_SIGPENDING) do_signal(regs); if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); } }
gpl-2.0
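get_sigframe() above switches to the stack registered with sigaltstack() whenever the handler was installed with SA_ONSTACK, and setup_rt_frame() records that stack via __save_altstack(). The corresponding userspace side looks roughly like the sketch below; the signal number and allocation size are arbitrary choices for illustration.

#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_stack_handler(int sig, siginfo_t *info, void *uc)
{
	/* Runs on the alternate stack installed below. */
	write(2, "handler on altstack\n", 20);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);			/* what __save_altstack() records */

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_stack_handler;
	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;	/* request the alternate stack */
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	return 0;
}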
CyanogenMod/kernel-omap
sound/core/seq/seq_dummy.c
1914
6755
/* * ALSA sequencer MIDI-through client * Copyright (c) 1999-2000 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/slab.h> #include <linux/moduleparam.h> #include <sound/core.h> #include "seq_clientmgr.h" #include <sound/initval.h> #include <sound/asoundef.h> /* Sequencer MIDI-through client This gives a simple midi-through client. All the normal input events are redirected to output port immediately. The routing can be done via aconnect program in alsa-utils. Each client has a static client number 62 (= SNDRV_SEQ_CLIENT_DUMMY). If you want to auto-load this module, you may add the following alias in your /etc/conf.modules file. alias snd-seq-client-62 snd-seq-dummy The module is loaded on demand for client 62, or /proc/asound/seq/ is accessed. If you don't need this module to be loaded, alias snd-seq-client-62 as "off". This will help modprobe. The number of ports to be created can be specified via the module parameter "ports". For example, to create four ports, add the following option in /etc/modprobe.conf: option snd-seq-dummy ports=4 The modle option "duplex=1" enables duplex operation to the port. In duplex mode, a pair of ports are created instead of single port, and events are tunneled between pair-ports. For example, input to port A is sent to output port of another port B and vice versa. In duplex mode, each port has DUPLEX capability. */ MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("ALSA sequencer MIDI-through client"); MODULE_LICENSE("GPL"); MODULE_ALIAS("snd-seq-client-" __stringify(SNDRV_SEQ_CLIENT_DUMMY)); static int ports = 1; static int duplex; module_param(ports, int, 0444); MODULE_PARM_DESC(ports, "number of ports to be created"); module_param(duplex, bool, 0444); MODULE_PARM_DESC(duplex, "create DUPLEX ports"); struct snd_seq_dummy_port { int client; int port; int duplex; int connect; }; static int my_client = -1; /* * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events * to subscribers. * Note: this callback is called only after all subscribers are removed. 
*/ static int dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info) { struct snd_seq_dummy_port *p; int i; struct snd_seq_event ev; p = private_data; memset(&ev, 0, sizeof(ev)); if (p->duplex) ev.source.port = p->connect; else ev.source.port = p->port; ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; ev.type = SNDRV_SEQ_EVENT_CONTROLLER; for (i = 0; i < 16; i++) { ev.data.control.channel = i; ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF; snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS; snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); } return 0; } /* * event input callback - just redirect events to subscribers */ static int dummy_input(struct snd_seq_event *ev, int direct, void *private_data, int atomic, int hop) { struct snd_seq_dummy_port *p; struct snd_seq_event tmpev; p = private_data; if (ev->source.client == SNDRV_SEQ_CLIENT_SYSTEM || ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR) return 0; /* ignore system messages */ tmpev = *ev; if (p->duplex) tmpev.source.port = p->connect; else tmpev.source.port = p->port; tmpev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; return snd_seq_kernel_client_dispatch(p->client, &tmpev, atomic, hop); } /* * free_private callback */ static void dummy_free(void *private_data) { kfree(private_data); } /* * create a port */ static struct snd_seq_dummy_port __init * create_port(int idx, int type) { struct snd_seq_port_info pinfo; struct snd_seq_port_callback pcb; struct snd_seq_dummy_port *rec; if ((rec = kzalloc(sizeof(*rec), GFP_KERNEL)) == NULL) return NULL; rec->client = my_client; rec->duplex = duplex; rec->connect = 0; memset(&pinfo, 0, sizeof(pinfo)); pinfo.addr.client = my_client; if (duplex) sprintf(pinfo.name, "Midi Through Port-%d:%c", idx, (type ? 
'B' : 'A')); else sprintf(pinfo.name, "Midi Through Port-%d", idx); pinfo.capability = SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ; pinfo.capability |= SNDRV_SEQ_PORT_CAP_WRITE | SNDRV_SEQ_PORT_CAP_SUBS_WRITE; if (duplex) pinfo.capability |= SNDRV_SEQ_PORT_CAP_DUPLEX; pinfo.type = SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC | SNDRV_SEQ_PORT_TYPE_SOFTWARE | SNDRV_SEQ_PORT_TYPE_PORT; memset(&pcb, 0, sizeof(pcb)); pcb.owner = THIS_MODULE; pcb.unuse = dummy_unuse; pcb.event_input = dummy_input; pcb.private_free = dummy_free; pcb.private_data = rec; pinfo.kernel = &pcb; if (snd_seq_kernel_client_ctl(my_client, SNDRV_SEQ_IOCTL_CREATE_PORT, &pinfo) < 0) { kfree(rec); return NULL; } rec->port = pinfo.addr.port; return rec; } /* * register client and create ports */ static int __init register_client(void) { struct snd_seq_dummy_port *rec1, *rec2; int i; if (ports < 1) { snd_printk(KERN_ERR "invalid number of ports %d\n", ports); return -EINVAL; } /* create client */ my_client = snd_seq_create_kernel_client(NULL, SNDRV_SEQ_CLIENT_DUMMY, "Midi Through"); if (my_client < 0) return my_client; /* create ports */ for (i = 0; i < ports; i++) { rec1 = create_port(i, 0); if (rec1 == NULL) { snd_seq_delete_kernel_client(my_client); return -ENOMEM; } if (duplex) { rec2 = create_port(i, 1); if (rec2 == NULL) { snd_seq_delete_kernel_client(my_client); return -ENOMEM; } rec1->connect = rec2->port; rec2->connect = rec1->port; } } return 0; } /* * delete client if exists */ static void __exit delete_client(void) { if (my_client >= 0) snd_seq_delete_kernel_client(my_client); } /* * Init part */ static int __init alsa_seq_dummy_init(void) { int err; snd_seq_autoload_lock(); err = register_client(); snd_seq_autoload_unlock(); return err; } static void __exit alsa_seq_dummy_exit(void) { delete_client(); } module_init(alsa_seq_dummy_init) module_exit(alsa_seq_dummy_exit)
gpl-2.0
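The routing the comment block above describes (normally set up with aconnect) can also be established programmatically from a small alsa-lib client. A sketch follows, assuming the static client number 62 (SNDRV_SEQ_CLIENT_DUMMY) and port 0 created by this module; the client and port names are arbitrary. Link with -lasound.

#include <alsa/asoundlib.h>

int main(void)
{
	snd_seq_t *seq;
	int port;

	if (snd_seq_open(&seq, "default", SND_SEQ_OPEN_OUTPUT, 0) < 0)
		return 1;
	snd_seq_set_client_name(seq, "through-demo");

	/* Readable port so events can be routed onward to subscribers. */
	port = snd_seq_create_simple_port(seq, "out",
			SND_SEQ_PORT_CAP_READ | SND_SEQ_PORT_CAP_SUBS_READ,
			SND_SEQ_PORT_TYPE_MIDI_GENERIC);
	if (port < 0)
		return 1;

	/* Client 62 is the Midi Through client created by this module. */
	if (snd_seq_connect_to(seq, port, 62, 0) < 0)
		return 1;

	snd_seq_close(seq);
	return 0;
}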
fideoman/Alucard-Kernel-jfltexx
drivers/mfd/pm8821-core.c
2170
9556
/* * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/msm_ssbi.h> #include <linux/mfd/core.h> #include <linux/mfd/pm8xxx/pm8821.h> #include <linux/mfd/pm8xxx/core.h> #define REG_HWREV 0x002 /* PMIC4 revision */ #define REG_HWREV_2 0x0E8 /* PMIC4 revision 2 */ #define REG_MPP_BASE 0x050 #define REG_IRQ_BASE 0x100 #define REG_TEMP_ALARM_CTRL 0x01B #define REG_TEMP_ALARM_PWM 0x09B #define PM8821_VERSION_MASK 0xFFF0 #define PM8821_VERSION_VALUE 0x0BF0 #define PM8821_REVISION_MASK 0x000F #define SINGLE_IRQ_RESOURCE(_name, _irq) \ { \ .name = _name, \ .start = _irq, \ .end = _irq, \ .flags = IORESOURCE_IRQ, \ } struct pm8821 { struct device *dev; struct pm_irq_chip *irq_chip; u32 rev_registers; }; static int pm8821_readb(const struct device *dev, u16 addr, u8 *val) { const struct pm8xxx_drvdata *pm8821_drvdata = dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; return msm_ssbi_read(pmic->dev->parent, addr, val, 1); } static int pm8821_writeb(const struct device *dev, u16 addr, u8 val) { const struct pm8xxx_drvdata *pm8821_drvdata = dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; return msm_ssbi_write(pmic->dev->parent, addr, &val, 1); } static int pm8821_read_buf(const struct device *dev, u16 addr, u8 *buf, int cnt) { const struct pm8xxx_drvdata *pm8821_drvdata = dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; return msm_ssbi_read(pmic->dev->parent, addr, buf, cnt); } static int pm8821_write_buf(const struct device *dev, u16 addr, u8 *buf, int cnt) { const struct pm8xxx_drvdata *pm8821_drvdata = dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; return msm_ssbi_write(pmic->dev->parent, addr, buf, cnt); } static int pm8821_read_irq_stat(const struct device *dev, int irq) { const struct pm8xxx_drvdata *pm8821_drvdata = dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; return pm8821_get_irq_stat(pmic->irq_chip, irq); } static enum pm8xxx_version pm8821_get_version(const struct device *dev) { const struct pm8xxx_drvdata *pm8821_drvdata = dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; enum pm8xxx_version version = -ENODEV; if ((pmic->rev_registers & PM8821_VERSION_MASK) == PM8821_VERSION_VALUE) version = PM8XXX_VERSION_8821; return version; } static int pm8821_get_revision(const struct device *dev) { const struct pm8xxx_drvdata *pm8821_drvdata = dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; return pmic->rev_registers & PM8821_REVISION_MASK; } static struct pm8xxx_drvdata pm8821_drvdata = { .pmic_readb = pm8821_readb, .pmic_writeb = pm8821_writeb, .pmic_read_buf = pm8821_read_buf, .pmic_write_buf = pm8821_write_buf, .pmic_read_irq_stat = pm8821_read_irq_stat, .pmic_get_version = pm8821_get_version, .pmic_get_revision = 
pm8821_get_revision, }; static const struct resource mpp_cell_resources[] __devinitconst = { { .start = PM8821_IRQ_BLOCK_BIT(PM8821_MPP_BLOCK_START, 0), .end = PM8821_IRQ_BLOCK_BIT(PM8821_MPP_BLOCK_START, 0) + PM8821_NR_MPPS - 1, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell mpp_cell __devinitdata = { .name = PM8XXX_MPP_DEV_NAME, .id = 1, .resources = mpp_cell_resources, .num_resources = ARRAY_SIZE(mpp_cell_resources), }; static struct mfd_cell debugfs_cell __devinitdata = { .name = "pm8xxx-debug", .id = 1, .platform_data = "pm8821-dbg", .pdata_size = sizeof("pm8821-dbg"), }; static const struct resource thermal_alarm_cell_resources[] __devinitconst = { SINGLE_IRQ_RESOURCE("pm8821_tempstat_irq", PM8821_TEMPSTAT_IRQ), SINGLE_IRQ_RESOURCE("pm8821_overtemp_irq", PM8821_OVERTEMP_IRQ), }; static struct pm8xxx_tm_core_data thermal_alarm_cdata = { .adc_type = PM8XXX_TM_ADC_NONE, .reg_addr_temp_alarm_ctrl = REG_TEMP_ALARM_CTRL, .reg_addr_temp_alarm_pwm = REG_TEMP_ALARM_PWM, .tm_name = "pm8821_tz", .irq_name_temp_stat = "pm8821_tempstat_irq", .irq_name_over_temp = "pm8821_overtemp_irq", .default_no_adc_temp = 37000, }; static struct mfd_cell thermal_alarm_cell __devinitdata = { .name = PM8XXX_TM_DEV_NAME, .id = 1, .resources = thermal_alarm_cell_resources, .num_resources = ARRAY_SIZE(thermal_alarm_cell_resources), .platform_data = &thermal_alarm_cdata, .pdata_size = sizeof(struct pm8xxx_tm_core_data), }; static int __devinit pm8821_add_subdevices(const struct pm8821_platform_data *pdata, struct pm8821 *pmic) { int ret = 0, irq_base = 0; struct pm_irq_chip *irq_chip; if (pdata->irq_pdata) { pdata->irq_pdata->irq_cdata.nirqs = PM8821_NR_IRQS; pdata->irq_pdata->irq_cdata.base_addr = REG_IRQ_BASE; irq_base = pdata->irq_pdata->irq_base; irq_chip = pm8821_irq_init(pmic->dev, pdata->irq_pdata); if (IS_ERR(irq_chip)) { pr_err("Failed to init interrupts ret=%ld\n", PTR_ERR(irq_chip)); return PTR_ERR(irq_chip); } pmic->irq_chip = irq_chip; } if (pdata->mpp_pdata) { pdata->mpp_pdata->core_data.nmpps = PM8821_NR_MPPS; pdata->mpp_pdata->core_data.base_addr = REG_MPP_BASE; mpp_cell.platform_data = pdata->mpp_pdata; mpp_cell.pdata_size = sizeof(struct pm8xxx_mpp_platform_data); ret = mfd_add_devices(pmic->dev, 0, &mpp_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add mpp subdevice ret=%d\n", ret); goto bail; } } ret = mfd_add_devices(pmic->dev, 0, &debugfs_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add debugfs subdevice ret=%d\n", ret); goto bail; } ret = mfd_add_devices(pmic->dev, 0, &thermal_alarm_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add thermal alarm subdevice ret=%d\n", ret); goto bail; } return 0; bail: if (pmic->irq_chip) { pm8821_irq_exit(pmic->irq_chip); pmic->irq_chip = NULL; } return ret; } static const char * const pm8821_rev_names[] = { [PM8XXX_REVISION_8821_TEST] = "test", [PM8XXX_REVISION_8821_1p0] = "1.0", [PM8XXX_REVISION_8821_2p0] = "2.0", [PM8XXX_REVISION_8821_2p1] = "2.1", }; static int __devinit pm8821_probe(struct platform_device *pdev) { const struct pm8821_platform_data *pdata = pdev->dev.platform_data; const char *revision_name = "unknown"; struct pm8821 *pmic; enum pm8xxx_version version; int revision; int rc; u8 val; if (!pdata) { pr_err("missing platform data\n"); return -EINVAL; } pmic = kzalloc(sizeof(struct pm8821), GFP_KERNEL); if (!pmic) { pr_err("Cannot alloc pm8821 struct\n"); return -ENOMEM; } /* Read PMIC chip revision */ rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val)); if (rc) { pr_err("Failed to read hw rev reg 
%d:rc=%d\n", REG_HWREV, rc); goto err_read_rev; } pr_info("PMIC revision 1: PM8821 rev %02X\n", val); pmic->rev_registers = val; /* Read PMIC chip revision 2 */ rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV_2, &val, sizeof(val)); if (rc) { pr_err("Failed to read hw rev 2 reg %d:rc=%d\n", REG_HWREV_2, rc); goto err_read_rev; } pr_info("PMIC revision 2: PM8821 rev %02X\n", val); pmic->rev_registers |= val << BITS_PER_BYTE; pmic->dev = &pdev->dev; pm8821_drvdata.pm_chip_data = pmic; platform_set_drvdata(pdev, &pm8821_drvdata); /* Print out human readable version and revision names. */ version = pm8xxx_get_version(pmic->dev); if (version == PM8XXX_VERSION_8821) { revision = pm8xxx_get_revision(pmic->dev); if (revision >= 0 && revision < ARRAY_SIZE(pm8821_rev_names)) revision_name = pm8821_rev_names[revision]; pr_info("PMIC version: PM8821 ver %s\n", revision_name); } else { WARN_ON(version != PM8XXX_VERSION_8821); } rc = pm8821_add_subdevices(pdata, pmic); if (rc) { pr_err("Cannot add subdevices rc=%d\n", rc); goto err; } return 0; err: mfd_remove_devices(pmic->dev); platform_set_drvdata(pdev, NULL); err_read_rev: kfree(pmic); return rc; } static int __devexit pm8821_remove(struct platform_device *pdev) { struct pm8xxx_drvdata *drvdata; struct pm8821 *pmic = NULL; drvdata = platform_get_drvdata(pdev); if (drvdata) pmic = drvdata->pm_chip_data; if (pmic) { if (pmic->dev) mfd_remove_devices(pmic->dev); if (pmic->irq_chip) pm8821_irq_exit(pmic->irq_chip); } platform_set_drvdata(pdev, NULL); kfree(pmic); return 0; } static struct platform_driver pm8821_driver = { .probe = pm8821_probe, .remove = __devexit_p(pm8821_remove), .driver = { .name = "pm8821-core", .owner = THIS_MODULE, }, }; static int __init pm8821_init(void) { return platform_driver_register(&pm8821_driver); } postcore_initcall(pm8821_init); static void __exit pm8821_exit(void) { platform_driver_unregister(&pm8821_driver); } module_exit(pm8821_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PMIC 8821 core driver"); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:pm8821-core");
gpl-2.0
kazukioishi/android_kernel_samsung_klte
arch/arm/mm/cache-pl310-erp.c
2170
7448
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>

#define MODULE_NAME "pl310_erp"

struct pl310_drv_data {
	unsigned int irq;
	unsigned int ecntr;
	unsigned int parrt;
	unsigned int parrd;
	unsigned int errwd;
	unsigned int errwt;
	unsigned int errrt;
	unsigned int errrd;
	unsigned int slverr;
	unsigned int decerr;
	void __iomem *base;
	unsigned int intr_mask_reg;
};

#define ECNTR BIT(0)
#define PARRT BIT(1)
#define PARRD BIT(2)
#define ERRWT BIT(3)
#define ERRWD BIT(4)
#define ERRRT BIT(5)
#define ERRRD BIT(6)
#define SLVERR BIT(7)
#define DECERR BIT(8)

static irqreturn_t pl310_erp_irq(int irq, void *dev_id)
{
	struct pl310_drv_data *p = platform_get_drvdata(dev_id);
	uint16_t mask_int_stat, int_clear = 0, error = 0;

	mask_int_stat = readl_relaxed(p->base + L2X0_MASKED_INTR_STAT);

	if (mask_int_stat & ECNTR) {
		pr_alert("Event Counter1/0 Overflow Increment error\n");
		p->ecntr++;
		int_clear = mask_int_stat & ECNTR;
	}

	if (mask_int_stat & PARRT) {
		pr_alert("Read parity error on L2 Tag RAM\n");
		p->parrt++;
		error = 1;
		int_clear = mask_int_stat & PARRT;
	}

	if (mask_int_stat & PARRD) {
		pr_alert("Read parity error on L2 Data RAM\n");
		p->parrd++;
		error = 1;
		int_clear = mask_int_stat & PARRD;
	}

	if (mask_int_stat & ERRWT) {
		pr_alert("Write error on L2 Tag RAM\n");
		p->errwt++;
		int_clear = mask_int_stat & ERRWT;
	}

	if (mask_int_stat & ERRWD) {
		pr_alert("Write error on L2 Data RAM\n");
		p->errwd++;
		int_clear = mask_int_stat & ERRWD;
	}

	if (mask_int_stat & ERRRT) {
		pr_alert("Read error on L2 Tag RAM\n");
		p->errrt++;
		int_clear = mask_int_stat & ERRRT;
	}

	if (mask_int_stat & ERRRD) {
		pr_alert("Read error on L2 Data RAM\n");
		p->errrd++;
		int_clear = mask_int_stat & ERRRD;
	}

	if (mask_int_stat & DECERR) {
		pr_alert("L2 master port decode error\n");
		p->decerr++;
		int_clear = mask_int_stat & DECERR;
	}

	if (mask_int_stat & SLVERR) {
		pr_alert("L2 slave port error\n");
		p->slverr++;
		int_clear = mask_int_stat & SLVERR;
	}

	writel_relaxed(int_clear, p->base + L2X0_INTR_CLEAR);
	/* Make sure the interrupts are cleared */
	mb();

	/* WARNING will be thrown whenever we receive any L2 interrupt.
	 * Other than parity on tag/data ram, irrespective of the bits
	 * set we will throw a warning.
	 */
	WARN_ON(!error);

	/* Panic in case we encounter parity error in TAG/DATA Ram */
	BUG_ON(error);

	return IRQ_HANDLED;
}

static void pl310_mask_int(struct pl310_drv_data *p, bool enable)
{
	/* L2CC register contents need to be saved
	 * as its power rail will be removed during suspend */
	if (enable)
		p->intr_mask_reg = 0x1FF;
	else
		p->intr_mask_reg = 0x0;

	writel_relaxed(p->intr_mask_reg, p->base + L2X0_INTR_MASK);
	/* Make sure Mask is updated */
	mb();

	pr_debug("Mask interrupt 0x%x\n",
			readl_relaxed(p->base + L2X0_INTR_MASK));
}

static int pl310_erp_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct pl310_drv_data *p = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE,
			"L2CC Interrupt Number:\t\t\t%d\n"\
			"Event Counter1/0 Overflow Increment:\t%u\n"\
			"Parity Error on L2 Tag RAM (Read):\t%u\n"\
			"Parity Error on L2 Data RAM (Read):\t%u\n"\
			"Error on L2 Tag RAM (Write):\t\t%u\n"\
			"Error on L2 Data RAM (Write):\t\t%u\n"\
			"Error on L2 Tag RAM (Read):\t\t%u\n"\
			"Error on L2 Data RAM (Read):\t\t%u\n"\
			"Slave Error from L3 Port:\t\t%u\n"\
			"Decode Error from L3 Port:\t\t%u\n",
			p->irq, p->ecntr, p->parrt, p->parrd,
			p->errwt, p->errwd, p->errrt, p->errrd,
			p->slverr, p->decerr);
}

static DEVICE_ATTR(cache_erp, 0664, pl310_erp_show, NULL);

static int __init pl310_create_sysfs(struct device *dev)
{
	/* create a sysfs entry at
	 * /sys/devices/platform/pl310_erp/cache_erp */
	return device_create_file(dev, &dev_attr_cache_erp);
}

static int __devinit pl310_cache_erp_probe(struct platform_device *pdev)
{
	struct resource *r;
	struct pl310_drv_data *drv_data;
	int ret;

	drv_data = devm_kzalloc(&pdev->dev,
			sizeof(struct pl310_drv_data), GFP_KERNEL);
	if (drv_data == NULL) {
		dev_err(&pdev->dev, "cannot allocate memory\n");
		ret = -ENOMEM;
		goto error;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "No L2 base address\n");
		ret = -ENODEV;
		goto error;
	}

	if (!devm_request_mem_region(&pdev->dev, r->start,
			resource_size(r), "erp")) {
		ret = -EBUSY;
		goto error;
	}

	drv_data->base = devm_ioremap_nocache(&pdev->dev, r->start,
			resource_size(r));
	if (!drv_data->base) {
		dev_err(&pdev->dev, "failed to ioremap 0x%x\n", r->start);
		ret = -ENOMEM;
		goto error;
	}

	dev_dbg(&pdev->dev, "L2CC base 0x%p\n", drv_data->base);

	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "l2_irq");
	if (!r) {
		dev_err(&pdev->dev, "No L2 IRQ resource\n");
		ret = -ENODEV;
		goto error;
	}

	drv_data->irq = r->start;
	ret = devm_request_irq(&pdev->dev, drv_data->irq, pl310_erp_irq,
			IRQF_TRIGGER_RISING, "l2cc_intr", pdev);
	if (ret) {
		dev_err(&pdev->dev, "request irq for L2 interrupt failed\n");
		goto error;
	}

	platform_set_drvdata(pdev, drv_data);

	pl310_mask_int(drv_data, true);

	ret = pl310_create_sysfs(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to create sysfs entry\n");
		goto sysfs_err;
	}

	return 0;

sysfs_err:
	platform_set_drvdata(pdev, NULL);
	pl310_mask_int(drv_data, false);
error:
	return ret;
}

static int __devexit pl310_cache_erp_remove(struct platform_device *pdev)
{
	struct pl310_drv_data *p = platform_get_drvdata(pdev);

	pl310_mask_int(p, false);
	device_remove_file(&pdev->dev, &dev_attr_cache_erp);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

#ifdef CONFIG_PM
static int pl310_suspend(struct device *dev)
{
	struct pl310_drv_data *p = dev_get_drvdata(dev);

	disable_irq(p->irq);

	return 0;
}

static int pl310_resume_early(struct device *dev)
{
	struct pl310_drv_data *p = dev_get_drvdata(dev);

	pl310_mask_int(p, true);
	enable_irq(p->irq);

	return 0;
}

static const struct dev_pm_ops pl310_cache_pm_ops = {
	.suspend = pl310_suspend,
	.resume_early = pl310_resume_early,
};
#endif

static struct platform_driver pl310_cache_erp_driver = {
	.probe = pl310_cache_erp_probe,
	.remove = __devexit_p(pl310_cache_erp_remove),
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &pl310_cache_pm_ops,
#endif
	},
};

static int __init pl310_cache_erp_init(void)
{
	return platform_driver_register(&pl310_cache_erp_driver);
}
module_init(pl310_cache_erp_init);

static void __exit pl310_cache_erp_exit(void)
{
	platform_driver_unregister(&pl310_cache_erp_driver);
}
module_exit(pl310_cache_erp_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PL310 cache error reporting driver");
gpl-2.0
championswimmer/android_kernel_sony_huashan
arch/arm/mach-iop13xx/iq81340sc.c
4730
2656
/* * iq81340sc board support * Copyright (c) 2005-2006, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include <linux/pci.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach/pci.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/pci.h> #include <asm/mach/time.h> #include <mach/time.h> extern int init_atu; static int __init iq81340sc_atux_map_irq(struct pci_dev *dev, u8 idsel, u8 pin) { WARN_ON(idsel < 1 || idsel > 2); switch (idsel) { case 1: switch (pin) { case 1: return ATUX_INTB; case 2: return ATUX_INTC; case 3: return ATUX_INTD; case 4: return ATUX_INTA; default: return -1; } case 2: switch (pin) { case 1: return ATUX_INTC; case 2: return ATUX_INTC; case 3: return ATUX_INTC; case 4: return ATUX_INTC; default: return -1; } default: return -1; } } static struct hw_pci iq81340sc_pci __initdata = { .swizzle = pci_std_swizzle, .nr_controllers = 0, .setup = iop13xx_pci_setup, .scan = iop13xx_scan_bus, .map_irq = iq81340sc_atux_map_irq, .preinit = iop13xx_pci_init }; static int __init iq81340sc_pci_init(void) { iop13xx_atu_select(&iq81340sc_pci); pci_common_init(&iq81340sc_pci); iop13xx_map_pci_memory(); return 0; } static void __init iq81340sc_init(void) { iop13xx_platform_init(); iq81340sc_pci_init(); iop13xx_add_tpmi_devices(); } static void __init iq81340sc_timer_init(void) { unsigned long bus_freq = iop13xx_core_freq() / iop13xx_xsi_bus_ratio(); printk(KERN_DEBUG "%s: bus frequency: %lu\n", __func__, bus_freq); iop_init_time(bus_freq); } static struct sys_timer iq81340sc_timer = { .init = iq81340sc_timer_init, }; MACHINE_START(IQ81340SC, "Intel IQ81340SC") /* Maintainer: Dan Williams <dan.j.williams@intel.com> */ .atag_offset = 0x100, .init_early = iop13xx_init_early, .map_io = iop13xx_map_io, .init_irq = iop13xx_init_irq, .timer = &iq81340sc_timer, .init_machine = iq81340sc_init, .restart = iop13xx_restart, MACHINE_END
gpl-2.0
SOKP/kernel_oneplus_msm8974
arch/arm/mach-at91/board-dt.c
4730
1609
/* * Setup code for AT91SAM Evaluation Kits with Device Tree support * * Covers: * AT91SAM9G45-EKES board * * AT91SAM9M10-EKES board * * AT91SAM9M10G45-EK board * * Copyright (C) 2011 Atmel, * 2011 Nicolas Ferre <nicolas.ferre@atmel.com> * * Licensed under GPLv2 or later. */ #include <linux/types.h> #include <linux/init.h> #include <linux/module.h> #include <linux/gpio.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <mach/board.h> #include <asm/setup.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include "generic.h" static const struct of_device_id irq_of_match[] __initconst = { { .compatible = "atmel,at91rm9200-aic", .data = at91_aic_of_init }, { .compatible = "atmel,at91rm9200-gpio", .data = at91_gpio_of_irq_setup }, { .compatible = "atmel,at91sam9x5-gpio", .data = at91_gpio_of_irq_setup }, { /*sentinel*/ } }; static void __init at91_dt_init_irq(void) { of_irq_init(irq_of_match); } static void __init at91_dt_device_init(void) { of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } static const char *at91_dt_board_compat[] __initdata = { "atmel,at91sam9m10g45ek", "atmel,at91sam9x5ek", "calao,usb-a9g20", NULL }; DT_MACHINE_START(at91sam_dt, "Atmel AT91SAM (Device Tree)") /* Maintainer: Atmel */ .timer = &at91sam926x_timer, .map_io = at91_map_io, .init_early = at91_dt_initialize, .init_irq = at91_dt_init_irq, .init_machine = at91_dt_device_init, .dt_compat = at91_dt_board_compat, MACHINE_END
gpl-2.0
cuzz1369/android_kernel_lge_g3
arch/arm/mach-at91/board-sam9rlek.c
4730
7002
/* * Copyright (C) 2005 SAN People * Copyright (C) 2007 Atmel Corporation * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/types.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/fb.h> #include <linux/clk.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <video/atmel_lcdc.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/board.h> #include <mach/at91sam9_smc.h> #include <mach/at91_shdwc.h> #include "sam9_smc.h" #include "generic.h" static void __init ek_init_early(void) { /* Initialize processor: 12.000 MHz crystal */ at91_initialize(12000000); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, CTS, RTS) */ at91_register_uart(AT91SAM9RL_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } /* * USB HS Device port */ static struct usba_platform_data __initdata ek_usba_udc_data = { .vbus_pin = AT91_PIN_PA8, }; /* * MCI (SD/MMC) */ static struct at91_mmc_data __initdata ek_mmc_data = { .wire4 = 1, .det_pin = AT91_PIN_PA15, .wp_pin = -EINVAL, .vcc_pin = -EINVAL, }; /* * NAND flash */ static struct mtd_partition __initdata ek_nand_partition[] = { { .name = "Partition 1", .offset = 0, .size = SZ_256K, }, { .name = "Partition 2", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, }, }; static struct atmel_nand_data __initdata ek_nand_data = { .ale = 21, .cle = 22, .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PD17, .enable_pin = AT91_PIN_PB6, .ecc_mode = NAND_ECC_SOFT, .on_flash_bbt = 1, .parts = ek_nand_partition, .num_parts = ARRAY_SIZE(ek_nand_partition), }; static struct sam9_smc_config __initdata ek_nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 1, .ncs_write_setup = 0, .nwe_setup = 1, .ncs_read_pulse = 3, .nrd_pulse = 3, .ncs_write_pulse = 3, .nwe_pulse = 3, .read_cycle = 5, .write_cycle = 5, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8, .tdf_cycles = 2, }; static void __init ek_add_device_nand(void) { /* configure chip-select 3 (NAND) */ sam9_smc_configure(0, 3, &ek_nand_smc_config); at91_add_device_nand(&ek_nand_data); } /* * SPI devices */ static struct spi_board_info ek_spi_devices[] = { { /* DataFlash chip */ .modalias = "mtd_dataflash", .chip_select = 0, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, }, }; /* * LCD Controller */ #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE) static struct fb_videomode at91_tft_vga_modes[] = { { .name = "TX09D50VM1CCA @ 60", .refresh = 60, .xres = 240, .yres = 320, .pixclock = KHZ2PICOS(4965), .left_margin = 1, .right_margin = 33, .upper_margin = 1, .lower_margin = 0, .hsync_len = 5, .vsync_len = 1, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED, }, }; static struct fb_monspecs at91fb_default_monspecs = { .manufacturer = "HIT", .monitor = "TX09D50VM1CCA", .modedb = at91_tft_vga_modes, .modedb_len = ARRAY_SIZE(at91_tft_vga_modes), .hfmin = 15000, .hfmax = 64000, .vfmin = 50, .vfmax = 150, }; #define AT91SAM9RL_DEFAULT_LCDCON2 (ATMEL_LCDC_MEMOR_LITTLE \ | ATMEL_LCDC_DISTYPE_TFT \ | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE) static 
void at91_lcdc_power_control(int on) { if (on) at91_set_gpio_value(AT91_PIN_PC1, 0); /* power up */ else at91_set_gpio_value(AT91_PIN_PC1, 1); /* power down */ } /* Driver datas */ static struct atmel_lcdfb_info __initdata ek_lcdc_data = { .lcdcon_is_backlight = true, .default_bpp = 16, .default_dmacon = ATMEL_LCDC_DMAEN, .default_lcdcon2 = AT91SAM9RL_DEFAULT_LCDCON2, .default_monspecs = &at91fb_default_monspecs, .atmel_lcdfb_power_control = at91_lcdc_power_control, .guard_time = 1, .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB, }; #else static struct atmel_lcdfb_info __initdata ek_lcdc_data; #endif /* * AC97 * reset_pin is not connected: NRST */ static struct ac97c_platform_data ek_ac97_data = { .reset_pin = -EINVAL, }; /* * LEDs */ static struct gpio_led ek_leds[] = { { /* "bottom" led, green, userled1 to be defined */ .name = "ds1", .gpio = AT91_PIN_PD15, .active_low = 1, .default_trigger = "none", }, { /* "bottom" led, green, userled2 to be defined */ .name = "ds2", .gpio = AT91_PIN_PD16, .active_low = 1, .default_trigger = "none", }, { /* "power" led, yellow */ .name = "ds3", .gpio = AT91_PIN_PD14, .default_trigger = "heartbeat", } }; /* * Touchscreen */ static struct at91_tsadcc_data ek_tsadcc_data = { .adc_clock = 1000000, .pendet_debounce = 0x0f, .ts_sample_hold_time = 0x03, }; /* * GPIO Buttons */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button ek_buttons[] = { { .gpio = AT91_PIN_PB0, .code = BTN_2, .desc = "Right Click", .active_low = 1, .wakeup = 1, }, { .gpio = AT91_PIN_PB1, .code = BTN_1, .desc = "Left Click", .active_low = 1, .wakeup = 1, } }; static struct gpio_keys_platform_data ek_button_data = { .buttons = ek_buttons, .nbuttons = ARRAY_SIZE(ek_buttons), }; static struct platform_device ek_button_device = { .name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { .platform_data = &ek_button_data, } }; static void __init ek_add_device_buttons(void) { at91_set_gpio_input(AT91_PIN_PB1, 1); /* btn1 */ at91_set_deglitch(AT91_PIN_PB1, 1); at91_set_gpio_input(AT91_PIN_PB0, 1); /* btn2 */ at91_set_deglitch(AT91_PIN_PB0, 1); platform_device_register(&ek_button_device); } #else static void __init ek_add_device_buttons(void) {} #endif static void __init ek_board_init(void) { /* Serial */ at91_add_device_serial(); /* USB HS */ at91_add_device_usba(&ek_usba_udc_data); /* I2C */ at91_add_device_i2c(NULL, 0); /* NAND */ ek_add_device_nand(); /* SPI */ at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices)); /* MMC */ at91_add_device_mmc(0, &ek_mmc_data); /* LCD Controller */ at91_add_device_lcdc(&ek_lcdc_data); /* AC97 */ at91_add_device_ac97(&ek_ac97_data); /* Touch Screen Controller */ at91_add_device_tsadcc(&ek_tsadcc_data); /* LEDs */ at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds)); /* Push Buttons */ ek_add_device_buttons(); } MACHINE_START(AT91SAM9RLEK, "Atmel AT91SAM9RL-EK") /* Maintainer: Atmel */ .timer = &at91sam926x_timer, .map_io = at91_map_io, .init_early = ek_init_early, .init_irq = at91_init_irq_default, .init_machine = ek_board_init, MACHINE_END
gpl-2.0
zombi-x/android_kernel_htc_m7
arch/arm/mach-ixp4xx/goramo_mlr.c
4730
12484
/* * Goramo MultiLink router platform code * Copyright (C) 2006-2009 Krzysztof Halasa <khc@pm.waw.pl> */ #include <linux/delay.h> #include <linux/hdlc.h> #include <linux/i2c-gpio.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/serial_8250.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> #include <asm/mach/pci.h> #define SLOT_ETHA 0x0B /* IDSEL = AD21 */ #define SLOT_ETHB 0x0C /* IDSEL = AD20 */ #define SLOT_MPCI 0x0D /* IDSEL = AD19 */ #define SLOT_NEC 0x0E /* IDSEL = AD18 */ /* GPIO lines */ #define GPIO_SCL 0 #define GPIO_SDA 1 #define GPIO_STR 2 #define GPIO_IRQ_NEC 3 #define GPIO_IRQ_ETHA 4 #define GPIO_IRQ_ETHB 5 #define GPIO_HSS0_DCD_N 6 #define GPIO_HSS1_DCD_N 7 #define GPIO_UART0_DCD 8 #define GPIO_UART1_DCD 9 #define GPIO_HSS0_CTS_N 10 #define GPIO_HSS1_CTS_N 11 #define GPIO_IRQ_MPCI 12 #define GPIO_HSS1_RTS_N 13 #define GPIO_HSS0_RTS_N 14 /* GPIO15 is not connected */ /* Control outputs from 74HC4094 */ #define CONTROL_HSS0_CLK_INT 0 #define CONTROL_HSS1_CLK_INT 1 #define CONTROL_HSS0_DTR_N 2 #define CONTROL_HSS1_DTR_N 3 #define CONTROL_EXT 4 #define CONTROL_AUTO_RESET 5 #define CONTROL_PCI_RESET_N 6 #define CONTROL_EEPROM_WC_N 7 /* offsets from start of flash ROM = 0x50000000 */ #define CFG_ETH0_ADDRESS 0x40 /* 6 bytes */ #define CFG_ETH1_ADDRESS 0x46 /* 6 bytes */ #define CFG_REV 0x4C /* u32 */ #define CFG_SDRAM_SIZE 0x50 /* u32 */ #define CFG_SDRAM_CONF 0x54 /* u32 */ #define CFG_SDRAM_MODE 0x58 /* u32 */ #define CFG_SDRAM_REFRESH 0x5C /* u32 */ #define CFG_HW_BITS 0x60 /* u32 */ #define CFG_HW_USB_PORTS 0x00000007 /* 0 = no NEC chip, 1-5 = ports # */ #define CFG_HW_HAS_PCI_SLOT 0x00000008 #define CFG_HW_HAS_ETH0 0x00000010 #define CFG_HW_HAS_ETH1 0x00000020 #define CFG_HW_HAS_HSS0 0x00000040 #define CFG_HW_HAS_HSS1 0x00000080 #define CFG_HW_HAS_UART0 0x00000100 #define CFG_HW_HAS_UART1 0x00000200 #define CFG_HW_HAS_EEPROM 0x00000400 #define FLASH_CMD_READ_ARRAY 0xFF #define FLASH_CMD_READ_ID 0x90 #define FLASH_SER_OFF 0x102 /* 0x81 in 16-bit mode */ static u32 hw_bits = 0xFFFFFFFD; /* assume all hardware present */; static u8 control_value; static void set_scl(u8 value) { gpio_line_set(GPIO_SCL, !!value); udelay(3); } static void set_sda(u8 value) { gpio_line_set(GPIO_SDA, !!value); udelay(3); } static void set_str(u8 value) { gpio_line_set(GPIO_STR, !!value); udelay(3); } static inline void set_control(int line, int value) { if (value) control_value |= (1 << line); else control_value &= ~(1 << line); } static void output_control(void) { int i; gpio_line_config(GPIO_SCL, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_SDA, IXP4XX_GPIO_OUT); for (i = 0; i < 8; i++) { set_scl(0); set_sda(control_value & (0x80 >> i)); /* MSB first */ set_scl(1); /* active edge */ } set_str(1); set_str(0); set_scl(0); set_sda(1); /* Be ready for START */ set_scl(1); } static void (*set_carrier_cb_tab[2])(void *pdev, int carrier); static int hss_set_clock(int port, unsigned int clock_type) { int ctrl_int = port ? CONTROL_HSS1_CLK_INT : CONTROL_HSS0_CLK_INT; switch (clock_type) { case CLOCK_DEFAULT: case CLOCK_EXT: set_control(ctrl_int, 0); output_control(); return CLOCK_EXT; case CLOCK_INT: set_control(ctrl_int, 1); output_control(); return CLOCK_INT; default: return -EINVAL; } } static irqreturn_t hss_dcd_irq(int irq, void *pdev) { int i, port = (irq == IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N)); gpio_line_get(port ? 
GPIO_HSS1_DCD_N : GPIO_HSS0_DCD_N, &i); set_carrier_cb_tab[port](pdev, !i); return IRQ_HANDLED; } static int hss_open(int port, void *pdev, void (*set_carrier_cb)(void *pdev, int carrier)) { int i, irq; if (!port) irq = IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N); else irq = IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N); gpio_line_get(port ? GPIO_HSS1_DCD_N : GPIO_HSS0_DCD_N, &i); set_carrier_cb(pdev, !i); set_carrier_cb_tab[!!port] = set_carrier_cb; if ((i = request_irq(irq, hss_dcd_irq, 0, "IXP4xx HSS", pdev)) != 0) { printk(KERN_ERR "ixp4xx_hss: failed to request IRQ%i (%i)\n", irq, i); return i; } set_control(port ? CONTROL_HSS1_DTR_N : CONTROL_HSS0_DTR_N, 0); output_control(); gpio_line_set(port ? GPIO_HSS1_RTS_N : GPIO_HSS0_RTS_N, 0); return 0; } static void hss_close(int port, void *pdev) { free_irq(port ? IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N) : IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), pdev); set_carrier_cb_tab[!!port] = NULL; /* catch bugs */ set_control(port ? CONTROL_HSS1_DTR_N : CONTROL_HSS0_DTR_N, 1); output_control(); gpio_line_set(port ? GPIO_HSS1_RTS_N : GPIO_HSS0_RTS_N, 1); } /* Flash memory */ static struct flash_platform_data flash_data = { .map_name = "cfi_probe", .width = 2, }; static struct resource flash_resource = { .flags = IORESOURCE_MEM, }; static struct platform_device device_flash = { .name = "IXP4XX-Flash", .id = 0, .dev = { .platform_data = &flash_data }, .num_resources = 1, .resource = &flash_resource, }; /* I^2C interface */ static struct i2c_gpio_platform_data i2c_data = { .sda_pin = GPIO_SDA, .scl_pin = GPIO_SCL, }; static struct platform_device device_i2c = { .name = "i2c-gpio", .id = 0, .dev = { .platform_data = &i2c_data }, }; /* IXP425 2 UART ports */ static struct resource uart_resources[] = { { .start = IXP4XX_UART1_BASE_PHYS, .end = IXP4XX_UART1_BASE_PHYS + 0x0fff, .flags = IORESOURCE_MEM, }, { .start = IXP4XX_UART2_BASE_PHYS, .end = IXP4XX_UART2_BASE_PHYS + 0x0fff, .flags = IORESOURCE_MEM, } }; static struct plat_serial8250_port uart_data[] = { { .mapbase = IXP4XX_UART1_BASE_PHYS, .membase = (char __iomem *)IXP4XX_UART1_BASE_VIRT + REG_OFFSET, .irq = IRQ_IXP4XX_UART1, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, .regshift = 2, .uartclk = IXP4XX_UART_XTAL, }, { .mapbase = IXP4XX_UART2_BASE_PHYS, .membase = (char __iomem *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET, .irq = IRQ_IXP4XX_UART2, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, .regshift = 2, .uartclk = IXP4XX_UART_XTAL, }, { }, }; static struct platform_device device_uarts = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev.platform_data = uart_data, .num_resources = 2, .resource = uart_resources, }; /* Built-in 10/100 Ethernet MAC interfaces */ static struct eth_plat_info eth_plat[] = { { .phy = 0, .rxq = 3, .txreadyq = 32, }, { .phy = 1, .rxq = 4, .txreadyq = 33, } }; static struct platform_device device_eth_tab[] = { { .name = "ixp4xx_eth", .id = IXP4XX_ETH_NPEB, .dev.platform_data = eth_plat, }, { .name = "ixp4xx_eth", .id = IXP4XX_ETH_NPEC, .dev.platform_data = eth_plat + 1, } }; /* IXP425 2 synchronous serial ports */ static struct hss_plat_info hss_plat[] = { { .set_clock = hss_set_clock, .open = hss_open, .close = hss_close, .txreadyq = 34, }, { .set_clock = hss_set_clock, .open = hss_open, .close = hss_close, .txreadyq = 35, } }; static struct platform_device device_hss_tab[] = { { .name = "ixp4xx_hss", .id = 0, .dev.platform_data = hss_plat, }, { .name = "ixp4xx_hss", .id = 1, .dev.platform_data = hss_plat + 1, } }; static struct platform_device *device_tab[6] __initdata = { 
&device_flash, /* index 0 */ }; static inline u8 __init flash_readb(u8 __iomem *flash, u32 addr) { #ifdef __ARMEB__ return __raw_readb(flash + addr); #else return __raw_readb(flash + (addr ^ 3)); #endif } static inline u16 __init flash_readw(u8 __iomem *flash, u32 addr) { #ifdef __ARMEB__ return __raw_readw(flash + addr); #else return __raw_readw(flash + (addr ^ 2)); #endif } static void __init gmlr_init(void) { u8 __iomem *flash; int i, devices = 1; /* flash */ ixp4xx_sys_init(); if ((flash = ioremap(IXP4XX_EXP_BUS_BASE_PHYS, 0x80)) == NULL) printk(KERN_ERR "goramo-mlr: unable to access system" " configuration data\n"); else { system_rev = __raw_readl(flash + CFG_REV); hw_bits = __raw_readl(flash + CFG_HW_BITS); for (i = 0; i < ETH_ALEN; i++) { eth_plat[0].hwaddr[i] = flash_readb(flash, CFG_ETH0_ADDRESS + i); eth_plat[1].hwaddr[i] = flash_readb(flash, CFG_ETH1_ADDRESS + i); } __raw_writew(FLASH_CMD_READ_ID, flash); system_serial_high = flash_readw(flash, FLASH_SER_OFF); system_serial_high <<= 16; system_serial_high |= flash_readw(flash, FLASH_SER_OFF + 2); system_serial_low = flash_readw(flash, FLASH_SER_OFF + 4); system_serial_low <<= 16; system_serial_low |= flash_readw(flash, FLASH_SER_OFF + 6); __raw_writew(FLASH_CMD_READ_ARRAY, flash); iounmap(flash); } switch (hw_bits & (CFG_HW_HAS_UART0 | CFG_HW_HAS_UART1)) { case CFG_HW_HAS_UART0: memset(&uart_data[1], 0, sizeof(uart_data[1])); device_uarts.num_resources = 1; break; case CFG_HW_HAS_UART1: device_uarts.dev.platform_data = &uart_data[1]; device_uarts.resource = &uart_resources[1]; device_uarts.num_resources = 1; break; } if (hw_bits & (CFG_HW_HAS_UART0 | CFG_HW_HAS_UART1)) device_tab[devices++] = &device_uarts; /* max index 1 */ if (hw_bits & CFG_HW_HAS_ETH0) device_tab[devices++] = &device_eth_tab[0]; /* max index 2 */ if (hw_bits & CFG_HW_HAS_ETH1) device_tab[devices++] = &device_eth_tab[1]; /* max index 3 */ if (hw_bits & CFG_HW_HAS_HSS0) device_tab[devices++] = &device_hss_tab[0]; /* max index 4 */ if (hw_bits & CFG_HW_HAS_HSS1) device_tab[devices++] = &device_hss_tab[1]; /* max index 5 */ if (hw_bits & CFG_HW_HAS_EEPROM) device_tab[devices++] = &device_i2c; /* max index 6 */ gpio_line_config(GPIO_SCL, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_SDA, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_STR, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_HSS0_RTS_N, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_HSS1_RTS_N, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_HSS0_DCD_N, IXP4XX_GPIO_IN); gpio_line_config(GPIO_HSS1_DCD_N, IXP4XX_GPIO_IN); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH); set_control(CONTROL_HSS0_DTR_N, 1); set_control(CONTROL_HSS1_DTR_N, 1); set_control(CONTROL_EEPROM_WC_N, 1); set_control(CONTROL_PCI_RESET_N, 1); output_control(); msleep(1); /* Wait for PCI devices to initialize */ flash_resource.start = IXP4XX_EXP_BUS_BASE(0); flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1; platform_add_devices(device_tab, devices); } #ifdef CONFIG_PCI static void __init gmlr_pci_preinit(void) { irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI), IRQ_TYPE_LEVEL_LOW); ixp4xx_pci_preinit(); } static void __init gmlr_pci_postinit(void) { if ((hw_bits & CFG_HW_USB_PORTS) >= 2 && (hw_bits & CFG_HW_USB_PORTS) < 5) { /* need to adjust number of 
USB ports on NEC chip */ u32 value, addr = BIT(32 - SLOT_NEC) | 0xE0; if (!ixp4xx_pci_read(addr, NP_CMD_CONFIGREAD, &value)) { value &= ~7; value |= (hw_bits & CFG_HW_USB_PORTS); ixp4xx_pci_write(addr, NP_CMD_CONFIGWRITE, value); } } } static int __init gmlr_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch(slot) { case SLOT_ETHA: return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA); case SLOT_ETHB: return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB); case SLOT_NEC: return IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC); default: return IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI); } } static struct hw_pci gmlr_hw_pci __initdata = { .nr_controllers = 1, .preinit = gmlr_pci_preinit, .postinit = gmlr_pci_postinit, .swizzle = pci_std_swizzle, .setup = ixp4xx_setup, .scan = ixp4xx_scan_bus, .map_irq = gmlr_map_irq, }; static int __init gmlr_pci_init(void) { if (machine_is_goramo_mlr() && (hw_bits & (CFG_HW_USB_PORTS | CFG_HW_HAS_PCI_SLOT))) pci_common_init(&gmlr_hw_pci); return 0; } subsys_initcall(gmlr_pci_init); #endif /* CONFIG_PCI */ MACHINE_START(GORAMO_MLR, "MultiLink") /* Maintainer: Krzysztof Halasa */ .map_io = ixp4xx_map_io, .init_early = ixp4xx_init_early, .init_irq = ixp4xx_init_irq, .timer = &ixp4xx_timer, .atag_offset = 0x100, .init_machine = gmlr_init, #if defined(CONFIG_PCI) .dma_zone_size = SZ_64M, #endif .restart = ixp4xx_restart, MACHINE_END
gpl-2.0
sktjdgns1189/android_kernel_pantech_ef63l
arch/arm/mach-mxs/mach-apx4devkit.c
4730
7429
/* * Copyright (C) 2011-2012 * Lauri Hintsala, Bluegiga, <lauri.hintsala@bluegiga.com> * Veli-Pekka Peltola, Bluegiga, <veli-pekka.peltola@bluegiga.com> * * based on: mach-mx28evk.c * Copyright 2010 Freescale Semiconductor, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/leds.h> #include <linux/clk.h> #include <linux/i2c.h> #include <linux/regulator/machine.h> #include <linux/regulator/fixed.h> #include <linux/micrel_phy.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <mach/common.h> #include <mach/digctl.h> #include <mach/iomux-mx28.h> #include "devices-mx28.h" #define APX4DEVKIT_GPIO_USERLED MXS_GPIO_NR(3, 28) static const iomux_cfg_t apx4devkit_pads[] __initconst = { /* duart */ MX28_PAD_PWM0__DUART_RX | MXS_PAD_CTRL, MX28_PAD_PWM1__DUART_TX | MXS_PAD_CTRL, /* auart0 */ MX28_PAD_AUART0_RX__AUART0_RX | MXS_PAD_CTRL, MX28_PAD_AUART0_TX__AUART0_TX | MXS_PAD_CTRL, MX28_PAD_AUART0_CTS__AUART0_CTS | MXS_PAD_CTRL, MX28_PAD_AUART0_RTS__AUART0_RTS | MXS_PAD_CTRL, /* auart1 */ MX28_PAD_AUART1_RX__AUART1_RX | MXS_PAD_CTRL, MX28_PAD_AUART1_TX__AUART1_TX | MXS_PAD_CTRL, /* auart2 */ MX28_PAD_SSP2_SCK__AUART2_RX | MXS_PAD_CTRL, MX28_PAD_SSP2_MOSI__AUART2_TX | MXS_PAD_CTRL, /* auart3 */ MX28_PAD_SSP2_MISO__AUART3_RX | MXS_PAD_CTRL, MX28_PAD_SSP2_SS0__AUART3_TX | MXS_PAD_CTRL, #define MXS_PAD_FEC (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP) /* fec0 */ MX28_PAD_ENET0_MDC__ENET0_MDC | MXS_PAD_FEC, MX28_PAD_ENET0_MDIO__ENET0_MDIO | MXS_PAD_FEC, MX28_PAD_ENET0_RX_EN__ENET0_RX_EN | MXS_PAD_FEC, MX28_PAD_ENET0_RXD0__ENET0_RXD0 | MXS_PAD_FEC, MX28_PAD_ENET0_RXD1__ENET0_RXD1 | MXS_PAD_FEC, MX28_PAD_ENET0_TX_EN__ENET0_TX_EN | MXS_PAD_FEC, MX28_PAD_ENET0_TXD0__ENET0_TXD0 | MXS_PAD_FEC, MX28_PAD_ENET0_TXD1__ENET0_TXD1 | MXS_PAD_FEC, MX28_PAD_ENET_CLK__CLKCTRL_ENET | MXS_PAD_FEC, /* i2c */ MX28_PAD_I2C0_SCL__I2C0_SCL, MX28_PAD_I2C0_SDA__I2C0_SDA, /* mmc0 */ MX28_PAD_SSP0_DATA0__SSP0_D0 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA1__SSP0_D1 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA2__SSP0_D2 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA3__SSP0_D3 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA4__SSP0_D4 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA5__SSP0_D5 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA6__SSP0_D6 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA7__SSP0_D7 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_CMD__SSP0_CMD | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), MX28_PAD_SSP0_SCK__SSP0_SCK | (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), /* led */ MX28_PAD_PWM3__GPIO_3_28 | MXS_PAD_CTRL, /* saif0 & saif1 */ MX28_PAD_SAIF0_MCLK__SAIF0_MCLK | (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SAIF0_LRCLK__SAIF0_LRCLK | (MXS_PAD_12MA | 
MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SAIF0_BITCLK__SAIF0_BITCLK | (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SAIF0_SDATA0__SAIF0_SDATA0 | (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SAIF1_SDATA0__SAIF1_SDATA0 | (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), }; /* led */ static const struct gpio_led apx4devkit_leds[] __initconst = { { .name = "user-led", .default_trigger = "heartbeat", .gpio = APX4DEVKIT_GPIO_USERLED, }, }; static const struct gpio_led_platform_data apx4devkit_led_data __initconst = { .leds = apx4devkit_leds, .num_leds = ARRAY_SIZE(apx4devkit_leds), }; static const struct fec_platform_data mx28_fec_pdata __initconst = { .phy = PHY_INTERFACE_MODE_RMII, }; static const struct mxs_mmc_platform_data apx4devkit_mmc_pdata __initconst = { .wp_gpio = -EINVAL, .flags = SLOTF_4_BIT_CAPABLE, }; static const struct i2c_board_info apx4devkit_i2c_boardinfo[] __initconst = { { I2C_BOARD_INFO("sgtl5000", 0x0a) }, /* ASoC */ { I2C_BOARD_INFO("pcf8563", 0x51) }, /* RTC */ }; #if defined(CONFIG_REGULATOR_FIXED_VOLTAGE) || \ defined(CONFIG_REGULATOR_FIXED_VOLTAGE_MODULE) static struct regulator_consumer_supply apx4devkit_audio_consumer_supplies[] = { REGULATOR_SUPPLY("VDDA", "0-000a"), REGULATOR_SUPPLY("VDDIO", "0-000a"), }; static struct regulator_init_data apx4devkit_vdd_reg_init_data = { .constraints = { .name = "3V3", .always_on = 1, }, .consumer_supplies = apx4devkit_audio_consumer_supplies, .num_consumer_supplies = ARRAY_SIZE(apx4devkit_audio_consumer_supplies), }; static struct fixed_voltage_config apx4devkit_vdd_pdata = { .supply_name = "board-3V3", .microvolts = 3300000, .gpio = -EINVAL, .enabled_at_boot = 1, .init_data = &apx4devkit_vdd_reg_init_data, }; static struct platform_device apx4devkit_voltage_regulator = { .name = "reg-fixed-voltage", .id = -1, .num_resources = 0, .dev = { .platform_data = &apx4devkit_vdd_pdata, }, }; static void __init apx4devkit_add_regulators(void) { platform_device_register(&apx4devkit_voltage_regulator); } #else static void __init apx4devkit_add_regulators(void) {} #endif static const struct mxs_saif_platform_data apx4devkit_mxs_saif_pdata[] __initconst = { /* working on EXTMSTR0 mode (saif0 master, saif1 slave) */ { .master_mode = 1, .master_id = 0, }, { .master_mode = 0, .master_id = 0, }, }; static int apx4devkit_phy_fixup(struct phy_device *phy) { phy->dev_flags |= MICREL_PHY_50MHZ_CLK; return 0; } static void __init apx4devkit_init(void) { mxs_iomux_setup_multiple_pads(apx4devkit_pads, ARRAY_SIZE(apx4devkit_pads)); mx28_add_duart(); mx28_add_auart0(); mx28_add_auart1(); mx28_add_auart2(); mx28_add_auart3(); /* * Register fixup for the Micrel KS8031 PHY clock * (shares same ID with KS8051) */ phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK, apx4devkit_phy_fixup); mx28_add_fec(0, &mx28_fec_pdata); mx28_add_mxs_mmc(0, &apx4devkit_mmc_pdata); gpio_led_register_device(0, &apx4devkit_led_data); mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); mx28_add_saif(0, &apx4devkit_mxs_saif_pdata[0]); mx28_add_saif(1, &apx4devkit_mxs_saif_pdata[1]); apx4devkit_add_regulators(); mx28_add_mxs_i2c(0); i2c_register_board_info(0, apx4devkit_i2c_boardinfo, ARRAY_SIZE(apx4devkit_i2c_boardinfo)); mxs_add_platform_device("mxs-sgtl5000", 0, NULL, 0, NULL, 0); } static void __init apx4devkit_timer_init(void) { mx28_clocks_init(); } static struct sys_timer apx4devkit_timer = { .init = apx4devkit_timer_init, }; MACHINE_START(APX4DEVKIT, "Bluegiga APX4 Development Kit") .map_io = mx28_map_io, .init_irq = mx28_init_irq, .timer = 
&apx4devkit_timer, .init_machine = apx4devkit_init, .restart = mxs_restart, MACHINE_END
gpl-2.0
Dazzozo/huawei-kernel-3.4
arch/arm/mach-at91/board-kb9202.c
4730
3692
/* * linux/arch/arm/mach-at91/board-kb9202.c * * Copyright (c) 2005 kb_admin * KwikByte, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/board.h> #include <mach/cpu.h> #include <mach/at91rm9200_mc.h> #include <mach/at91_ramc.h> #include "generic.h" static void __init kb9202_init_early(void) { /* Set cpu type: PQFP */ at91rm9200_set_type(ARCH_REVISON_9200_PQFP); /* Initialize processor: 10 MHz crystal */ at91_initialize(10000000); /* Set up the LEDs */ at91_init_leds(AT91_PIN_PC19, AT91_PIN_PC18); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1 (Rx & Tx only) */ at91_register_uart(AT91RM9200_ID_US0, 1, 0); /* USART1 on ttyS2 (Rx & Tx only) - IRDA (optional) */ at91_register_uart(AT91RM9200_ID_US1, 2, 0); /* USART3 on ttyS3 (Rx, Tx, CTS, RTS) - RS485 (optional) */ at91_register_uart(AT91RM9200_ID_US3, 3, ATMEL_UART_CTS | ATMEL_UART_RTS); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } static struct macb_platform_data __initdata kb9202_eth_data = { .phy_irq_pin = AT91_PIN_PB29, .is_rmii = 0, }; static struct at91_usbh_data __initdata kb9202_usbh_data = { .ports = 1, .vbus_pin = {-EINVAL, -EINVAL}, .overcurrent_pin= {-EINVAL, -EINVAL}, }; static struct at91_udc_data __initdata kb9202_udc_data = { .vbus_pin = AT91_PIN_PB24, .pullup_pin = AT91_PIN_PB22, }; static struct at91_mmc_data __initdata kb9202_mmc_data = { .det_pin = AT91_PIN_PB2, .slot_b = 0, .wire4 = 1, .wp_pin = -EINVAL, .vcc_pin = -EINVAL, }; static struct mtd_partition __initdata kb9202_nand_partition[] = { { .name = "nand_fs", .offset = 0, .size = MTDPART_SIZ_FULL, }, }; static struct atmel_nand_data __initdata kb9202_nand_data = { .ale = 22, .cle = 21, .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PC29, .enable_pin = AT91_PIN_PC28, .ecc_mode = NAND_ECC_SOFT, .parts = kb9202_nand_partition, .num_parts = ARRAY_SIZE(kb9202_nand_partition), }; static void __init kb9202_board_init(void) { /* Serial */ at91_add_device_serial(); /* Ethernet */ at91_add_device_eth(&kb9202_eth_data); /* USB Host */ at91_add_device_usbh(&kb9202_usbh_data); /* USB Device */ at91_add_device_udc(&kb9202_udc_data); /* MMC */ at91_add_device_mmc(0, &kb9202_mmc_data); /* I2C */ at91_add_device_i2c(NULL, 0); /* SPI */ at91_add_device_spi(NULL, 0); /* NAND */ at91_add_device_nand(&kb9202_nand_data); } MACHINE_START(KB9200, "KB920x") /* Maintainer: KwikByte, Inc. 
*/ .timer = &at91rm9200_timer, .map_io = at91_map_io, .init_early = kb9202_init_early, .init_irq = at91_init_irq_default, .init_machine = kb9202_board_init, MACHINE_END
gpl-2.0
burakgon/E7_Elite_kernel
kernel/arch/mips/vr41xx/common/giu.c
4986
2825
/* * NEC VR4100 series GIU platform device. * * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/errno.h> #include <linux/init.h> #include <linux/smp.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <asm/cpu.h> #include <asm/vr41xx/giu.h> #include <asm/vr41xx/irq.h> static struct resource giu_50pins_pullupdown_resource[] __initdata = { { .start = 0x0b000100, .end = 0x0b00011f, .flags = IORESOURCE_MEM, }, { .start = 0x0b0002e0, .end = 0x0b0002e3, .flags = IORESOURCE_MEM, }, { .start = GIUINT_IRQ, .end = GIUINT_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct resource giu_36pins_resource[] __initdata = { { .start = 0x0f000140, .end = 0x0f00015f, .flags = IORESOURCE_MEM, }, { .start = GIUINT_IRQ, .end = GIUINT_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct resource giu_48pins_resource[] __initdata = { { .start = 0x0f000140, .end = 0x0f000167, .flags = IORESOURCE_MEM, }, { .start = GIUINT_IRQ, .end = GIUINT_IRQ, .flags = IORESOURCE_IRQ, }, }; static int __init vr41xx_giu_add(void) { struct platform_device *pdev; struct resource *res; unsigned int num; int retval; pdev = platform_device_alloc("GIU", -1); if (!pdev) return -ENOMEM; switch (current_cpu_type()) { case CPU_VR4111: case CPU_VR4121: pdev->id = GPIO_50PINS_PULLUPDOWN; res = giu_50pins_pullupdown_resource; num = ARRAY_SIZE(giu_50pins_pullupdown_resource); break; case CPU_VR4122: case CPU_VR4131: pdev->id = GPIO_36PINS; res = giu_36pins_resource; num = ARRAY_SIZE(giu_36pins_resource); break; case CPU_VR4133: pdev->id = GPIO_48PINS_EDGE_SELECT; res = giu_48pins_resource; num = ARRAY_SIZE(giu_48pins_resource); break; default: retval = -ENODEV; goto err_free_device; } retval = platform_device_add_resources(pdev, res, num); if (retval) goto err_free_device; retval = platform_device_add(pdev); if (retval) goto err_free_device; return 0; err_free_device: platform_device_put(pdev); return retval; } device_initcall(vr41xx_giu_add);
gpl-2.0
luisetex84/android_kernel_lenovo_kingdom_row
drivers/crypto/omap-sham.c
4986
31816
/* * Cryptographic API. * * Support for OMAP SHA1/MD5 HW acceleration. * * Copyright (c) 2010 Nokia Corporation * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Some ideas are from old omap-sha1-md5.c driver. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/err.h> #include <linux/device.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/crypto.h> #include <linux/cryptohash.h> #include <crypto/scatterwalk.h> #include <crypto/algapi.h> #include <crypto/sha.h> #include <crypto/hash.h> #include <crypto/internal/hash.h> #include <plat/cpu.h> #include <plat/dma.h> #include <mach/irqs.h> #define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04)) #define SHA_REG_DIN(x) (0x1C + ((x) * 0x04)) #define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE #define MD5_DIGEST_SIZE 16 #define SHA_REG_DIGCNT 0x14 #define SHA_REG_CTRL 0x18 #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5) #define SHA_REG_CTRL_CLOSE_HASH (1 << 4) #define SHA_REG_CTRL_ALGO_CONST (1 << 3) #define SHA_REG_CTRL_ALGO (1 << 2) #define SHA_REG_CTRL_INPUT_READY (1 << 1) #define SHA_REG_CTRL_OUTPUT_READY (1 << 0) #define SHA_REG_REV 0x5C #define SHA_REG_REV_MAJOR 0xF0 #define SHA_REG_REV_MINOR 0x0F #define SHA_REG_MASK 0x60 #define SHA_REG_MASK_DMA_EN (1 << 3) #define SHA_REG_MASK_IT_EN (1 << 2) #define SHA_REG_MASK_SOFTRESET (1 << 1) #define SHA_REG_AUTOIDLE (1 << 0) #define SHA_REG_SYSSTATUS 0x64 #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0) #define DEFAULT_TIMEOUT_INTERVAL HZ /* mostly device flags */ #define FLAGS_BUSY 0 #define FLAGS_FINAL 1 #define FLAGS_DMA_ACTIVE 2 #define FLAGS_OUTPUT_READY 3 #define FLAGS_INIT 4 #define FLAGS_CPU 5 #define FLAGS_DMA_READY 6 /* context flags */ #define FLAGS_FINUP 16 #define FLAGS_SG 17 #define FLAGS_SHA1 18 #define FLAGS_HMAC 19 #define FLAGS_ERROR 20 #define OP_UPDATE 1 #define OP_FINAL 2 #define OMAP_ALIGN_MASK (sizeof(u32)-1) #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) #define BUFLEN PAGE_SIZE struct omap_sham_dev; struct omap_sham_reqctx { struct omap_sham_dev *dd; unsigned long flags; unsigned long op; u8 digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED; size_t digcnt; size_t bufcnt; size_t buflen; dma_addr_t dma_addr; /* walk state */ struct scatterlist *sg; unsigned int offset; /* offset in current sg */ unsigned int total; /* total request */ u8 buffer[0] OMAP_ALIGNED; }; struct omap_sham_hmac_ctx { struct crypto_shash *shash; u8 ipad[SHA1_MD5_BLOCK_SIZE]; u8 opad[SHA1_MD5_BLOCK_SIZE]; }; struct omap_sham_ctx { struct omap_sham_dev *dd; unsigned long flags; /* fallback stuff */ struct crypto_shash *fallback; struct omap_sham_hmac_ctx base[0]; }; #define OMAP_SHAM_QUEUE_LENGTH 1 struct omap_sham_dev { struct list_head list; unsigned long phys_base; struct device *dev; void __iomem *io_base; int irq; struct clk *iclk; spinlock_t lock; int err; int dma; int dma_lch; struct tasklet_struct done_task; unsigned long flags; struct crypto_queue queue; struct ahash_request *req; }; struct omap_sham_drv { struct list_head dev_list; spinlock_t lock; unsigned long flags; }; static struct omap_sham_drv sham = { .dev_list = 
LIST_HEAD_INIT(sham.dev_list), .lock = __SPIN_LOCK_UNLOCKED(sham.lock), }; static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset) { return __raw_readl(dd->io_base + offset); } static inline void omap_sham_write(struct omap_sham_dev *dd, u32 offset, u32 value) { __raw_writel(value, dd->io_base + offset); } static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address, u32 value, u32 mask) { u32 val; val = omap_sham_read(dd, address); val &= ~mask; val |= value; omap_sham_write(dd, address, val); } static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit) { unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL; while (!(omap_sham_read(dd, offset) & bit)) { if (time_is_before_jiffies(timeout)) return -ETIMEDOUT; } return 0; } static void omap_sham_copy_hash(struct ahash_request *req, int out) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); u32 *hash = (u32 *)ctx->digest; int i; /* MD5 is almost unused. So copy sha1 size to reduce code */ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { if (out) hash[i] = omap_sham_read(ctx->dd, SHA_REG_DIGEST(i)); else omap_sham_write(ctx->dd, SHA_REG_DIGEST(i), hash[i]); } } static void omap_sham_copy_ready_hash(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); u32 *in = (u32 *)ctx->digest; u32 *hash = (u32 *)req->result; int i; if (!hash) return; if (likely(ctx->flags & BIT(FLAGS_SHA1))) { /* SHA1 results are in big endian */ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) hash[i] = be32_to_cpu(in[i]); } else { /* MD5 results are in little endian */ for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++) hash[i] = le32_to_cpu(in[i]); } } static int omap_sham_hw_init(struct omap_sham_dev *dd) { clk_enable(dd->iclk); if (!test_bit(FLAGS_INIT, &dd->flags)) { omap_sham_write_mask(dd, SHA_REG_MASK, SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); if (omap_sham_wait(dd, SHA_REG_SYSSTATUS, SHA_REG_SYSSTATUS_RESETDONE)) return -ETIMEDOUT; set_bit(FLAGS_INIT, &dd->flags); dd->err = 0; } return 0; } static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, int final, int dma) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); u32 val = length << 5, mask; if (likely(ctx->digcnt)) omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt); omap_sham_write_mask(dd, SHA_REG_MASK, SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0), SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN); /* * Setting ALGO_CONST only for the first iteration * and CLOSE_HASH only for the last one. 
*/ if (ctx->flags & BIT(FLAGS_SHA1)) val |= SHA_REG_CTRL_ALGO; if (!ctx->digcnt) val |= SHA_REG_CTRL_ALGO_CONST; if (final) val |= SHA_REG_CTRL_CLOSE_HASH; mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH | SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH; omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); } static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, size_t length, int final) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); int count, len32; const u32 *buffer = (const u32 *)buf; dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", ctx->digcnt, length, final); omap_sham_write_ctrl(dd, length, final, 0); /* should be non-zero before next lines to disable clocks later */ ctx->digcnt += length; if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY)) return -ETIMEDOUT; if (final) set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ set_bit(FLAGS_CPU, &dd->flags); len32 = DIV_ROUND_UP(length, sizeof(u32)); for (count = 0; count < len32; count++) omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]); return -EINPROGRESS; } static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, size_t length, int final) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); int len32; dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", ctx->digcnt, length, final); len32 = DIV_ROUND_UP(length, sizeof(u32)); omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC_PREFETCH); omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, dma_addr, 0, 0); omap_sham_write_ctrl(dd, length, final, 1); ctx->digcnt += length; if (final) set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ set_bit(FLAGS_DMA_ACTIVE, &dd->flags); omap_start_dma(dd->dma_lch); return -EINPROGRESS; } static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx, const u8 *data, size_t length) { size_t count = min(length, ctx->buflen - ctx->bufcnt); count = min(count, ctx->total); if (count <= 0) return 0; memcpy(ctx->buffer + ctx->bufcnt, data, count); ctx->bufcnt += count; return count; } static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) { size_t count; while (ctx->sg) { count = omap_sham_append_buffer(ctx, sg_virt(ctx->sg) + ctx->offset, ctx->sg->length - ctx->offset); if (!count) break; ctx->offset += count; ctx->total -= count; if (ctx->offset == ctx->sg->length) { ctx->sg = sg_next(ctx->sg); if (ctx->sg) ctx->offset = 0; else ctx->total = 0; } } return 0; } static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, struct omap_sham_reqctx *ctx, size_t length, int final) { ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, DMA_TO_DEVICE); if (dma_mapping_error(dd->dev, ctx->dma_addr)) { dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); return -EINVAL; } ctx->flags &= ~BIT(FLAGS_SG); /* next call does not fail... 
so no unmap in the case of error */ return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final); } static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); unsigned int final; size_t count; omap_sham_append_sg(ctx); final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", ctx->bufcnt, ctx->digcnt, final); if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { count = ctx->bufcnt; ctx->bufcnt = 0; return omap_sham_xmit_dma_map(dd, ctx, count, final); } return 0; } /* Start address alignment */ #define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32))) /* SHA1 block size alignment */ #define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE)) static int omap_sham_update_dma_start(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); unsigned int length, final, tail; struct scatterlist *sg; if (!ctx->total) return 0; if (ctx->bufcnt || ctx->offset) return omap_sham_update_dma_slow(dd); dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", ctx->digcnt, ctx->bufcnt, ctx->total); sg = ctx->sg; if (!SG_AA(sg)) return omap_sham_update_dma_slow(dd); if (!sg_is_last(sg) && !SG_SA(sg)) /* size is not SHA1_BLOCK_SIZE aligned */ return omap_sham_update_dma_slow(dd); length = min(ctx->total, sg->length); if (sg_is_last(sg)) { if (!(ctx->flags & BIT(FLAGS_FINUP))) { /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */ tail = length & (SHA1_MD5_BLOCK_SIZE - 1); /* without finup() we need one block to close hash */ if (!tail) tail = SHA1_MD5_BLOCK_SIZE; length -= tail; } } if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { dev_err(dd->dev, "dma_map_sg error\n"); return -EINVAL; } ctx->flags |= BIT(FLAGS_SG); ctx->total -= length; ctx->offset = length; /* offset where to start slow */ final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; /* next call does not fail... 
so no unmap in the case of error */ return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final); } static int omap_sham_update_cpu(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); int bufcnt; omap_sham_append_sg(ctx); bufcnt = ctx->bufcnt; ctx->bufcnt = 0; return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1); } static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); omap_stop_dma(dd->dma_lch); if (ctx->flags & BIT(FLAGS_SG)) { dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); if (ctx->sg->length == ctx->offset) { ctx->sg = sg_next(ctx->sg); if (ctx->sg) ctx->offset = 0; } } else { dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, DMA_TO_DEVICE); } return 0; } static int omap_sham_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_dev *dd = NULL, *tmp; spin_lock_bh(&sham.lock); if (!tctx->dd) { list_for_each_entry(tmp, &sham.dev_list, list) { dd = tmp; break; } tctx->dd = dd; } else { dd = tctx->dd; } spin_unlock_bh(&sham.lock); ctx->dd = dd; ctx->flags = 0; dev_dbg(dd->dev, "init: digest size: %d\n", crypto_ahash_digestsize(tfm)); if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) ctx->flags |= BIT(FLAGS_SHA1); ctx->bufcnt = 0; ctx->digcnt = 0; ctx->buflen = BUFLEN; if (tctx->flags & BIT(FLAGS_HMAC)) { struct omap_sham_hmac_ctx *bctx = tctx->base; memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; ctx->flags |= BIT(FLAGS_HMAC); } return 0; } static int omap_sham_update_req(struct omap_sham_dev *dd) { struct ahash_request *req = dd->req; struct omap_sham_reqctx *ctx = ahash_request_ctx(req); int err; dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0); if (ctx->flags & BIT(FLAGS_CPU)) err = omap_sham_update_cpu(dd); else err = omap_sham_update_dma_start(dd); /* wait for dma completion before can take more data */ dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); return err; } static int omap_sham_final_req(struct omap_sham_dev *dd) { struct ahash_request *req = dd->req; struct omap_sham_reqctx *ctx = ahash_request_ctx(req); int err = 0, use_dma = 1; if (ctx->bufcnt <= 64) /* faster to handle last block with cpu */ use_dma = 0; if (use_dma) err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1); else err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); ctx->bufcnt = 0; dev_dbg(dd->dev, "final_req: err: %d\n", err); return err; } static int omap_sham_finish_hmac(struct ahash_request *req) { struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct omap_sham_hmac_ctx *bctx = tctx->base; int bs = crypto_shash_blocksize(bctx->shash); int ds = crypto_shash_digestsize(bctx->shash); struct { struct shash_desc shash; char ctx[crypto_shash_descsize(bctx->shash)]; } desc; desc.shash.tfm = bctx->shash; desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */ return crypto_shash_init(&desc.shash) ?: crypto_shash_update(&desc.shash, bctx->opad, bs) ?: crypto_shash_finup(&desc.shash, req->result, ds, req->result); } static int omap_sham_finish(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_dev *dd = ctx->dd; int err = 0; if (ctx->digcnt) { omap_sham_copy_ready_hash(req); if (ctx->flags & BIT(FLAGS_HMAC)) err = 
omap_sham_finish_hmac(req); } dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); return err; } static void omap_sham_finish_req(struct ahash_request *req, int err) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_dev *dd = ctx->dd; if (!err) { omap_sham_copy_hash(req, 1); if (test_bit(FLAGS_FINAL, &dd->flags)) err = omap_sham_finish(req); } else { ctx->flags |= BIT(FLAGS_ERROR); } /* atomic operation is not needed here */ dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY)); clk_disable(dd->iclk); if (req->base.complete) req->base.complete(&req->base, err); /* handle new request */ tasklet_schedule(&dd->done_task); } static int omap_sham_handle_queue(struct omap_sham_dev *dd, struct ahash_request *req) { struct crypto_async_request *async_req, *backlog; struct omap_sham_reqctx *ctx; unsigned long flags; int err = 0, ret = 0; spin_lock_irqsave(&dd->lock, flags); if (req) ret = ahash_enqueue_request(&dd->queue, req); if (test_bit(FLAGS_BUSY, &dd->flags)) { spin_unlock_irqrestore(&dd->lock, flags); return ret; } backlog = crypto_get_backlog(&dd->queue); async_req = crypto_dequeue_request(&dd->queue); if (async_req) set_bit(FLAGS_BUSY, &dd->flags); spin_unlock_irqrestore(&dd->lock, flags); if (!async_req) return ret; if (backlog) backlog->complete(backlog, -EINPROGRESS); req = ahash_request_cast(async_req); dd->req = req; ctx = ahash_request_ctx(req); dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", ctx->op, req->nbytes); err = omap_sham_hw_init(dd); if (err) goto err1; omap_set_dma_dest_params(dd->dma_lch, 0, OMAP_DMA_AMODE_CONSTANT, dd->phys_base + SHA_REG_DIN(0), 0, 16); omap_set_dma_dest_burst_mode(dd->dma_lch, OMAP_DMA_DATA_BURST_16); omap_set_dma_src_burst_mode(dd->dma_lch, OMAP_DMA_DATA_BURST_4); if (ctx->digcnt) /* request has changed - restore hash */ omap_sham_copy_hash(req, 0); if (ctx->op == OP_UPDATE) { err = omap_sham_update_req(dd); if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP))) /* no final() after finup() */ err = omap_sham_final_req(dd); } else if (ctx->op == OP_FINAL) { err = omap_sham_final_req(dd); } err1: if (err != -EINPROGRESS) /* done_task will not finish it, so do it here */ omap_sham_finish_req(req, err); dev_dbg(dd->dev, "exit, err: %d\n", err); return ret; } static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct omap_sham_dev *dd = tctx->dd; ctx->op = op; return omap_sham_handle_queue(dd, req); } static int omap_sham_update(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); if (!req->nbytes) return 0; ctx->total = req->nbytes; ctx->sg = req->src; ctx->offset = 0; if (ctx->flags & BIT(FLAGS_FINUP)) { if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { /* * OMAP HW accel works only with buffers >= 9 * will switch to bypass in final() * final has the same request and data */ omap_sham_append_sg(ctx); return 0; } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) { /* * faster to use CPU for short transfers */ ctx->flags |= BIT(FLAGS_CPU); } } else if (ctx->bufcnt + ctx->total < ctx->buflen) { omap_sham_append_sg(ctx); return 0; } return omap_sham_enqueue(req, OP_UPDATE); } static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags, const u8 *data, unsigned int len, u8 *out) { struct { struct shash_desc shash; char 
ctx[crypto_shash_descsize(shash)]; } desc; desc.shash.tfm = shash; desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_shash_digest(&desc.shash, data, len, out); } static int omap_sham_final_shash(struct ahash_request *req) { struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct omap_sham_reqctx *ctx = ahash_request_ctx(req); return omap_sham_shash_digest(tctx->fallback, req->base.flags, ctx->buffer, ctx->bufcnt, req->result); } static int omap_sham_final(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); ctx->flags |= BIT(FLAGS_FINUP); if (ctx->flags & BIT(FLAGS_ERROR)) return 0; /* uncompleted hash is not needed */ /* OMAP HW accel works only with buffers >= 9 */ /* HMAC is always >= 9 because ipad == block size */ if ((ctx->digcnt + ctx->bufcnt) < 9) return omap_sham_final_shash(req); else if (ctx->bufcnt) return omap_sham_enqueue(req, OP_FINAL); /* copy ready hash (+ finalize hmac) */ return omap_sham_finish(req); } static int omap_sham_finup(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); int err1, err2; ctx->flags |= BIT(FLAGS_FINUP); err1 = omap_sham_update(req); if (err1 == -EINPROGRESS || err1 == -EBUSY) return err1; /* * final() always has to be called to clean up resources, * even if update() failed, except EINPROGRESS */ err2 = omap_sham_final(req); return err1 ?: err2; } static int omap_sham_digest(struct ahash_request *req) { return omap_sham_init(req) ?: omap_sham_finup(req); } static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct omap_sham_hmac_ctx *bctx = tctx->base; int bs = crypto_shash_blocksize(bctx->shash); int ds = crypto_shash_digestsize(bctx->shash); int err, i; err = crypto_shash_setkey(tctx->fallback, key, keylen); if (err) return err; if (keylen > bs) { err = omap_sham_shash_digest(bctx->shash, crypto_shash_get_flags(bctx->shash), key, keylen, bctx->ipad); if (err) return err; keylen = ds; } else { memcpy(bctx->ipad, key, keylen); } memset(bctx->ipad + keylen, 0, bs - keylen); memcpy(bctx->opad, bctx->ipad, bs); for (i = 0; i < bs; i++) { bctx->ipad[i] ^= 0x36; bctx->opad[i] ^= 0x5c; } return err; } static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) { struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); const char *alg_name = crypto_tfm_alg_name(tfm); /* Allocate a fallback and abort if it failed.
*/ tctx->fallback = crypto_alloc_shash(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(tctx->fallback)) { pr_err("omap-sham: fallback driver '%s' " "could not be loaded.\n", alg_name); return PTR_ERR(tctx->fallback); } crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct omap_sham_reqctx) + BUFLEN); if (alg_base) { struct omap_sham_hmac_ctx *bctx = tctx->base; tctx->flags |= BIT(FLAGS_HMAC); bctx->shash = crypto_alloc_shash(alg_base, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(bctx->shash)) { pr_err("omap-sham: base driver '%s' " "could not be loaded.\n", alg_base); crypto_free_shash(tctx->fallback); return PTR_ERR(bctx->shash); } } return 0; } static int omap_sham_cra_init(struct crypto_tfm *tfm) { return omap_sham_cra_init_alg(tfm, NULL); } static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm) { return omap_sham_cra_init_alg(tfm, "sha1"); } static int omap_sham_cra_md5_init(struct crypto_tfm *tfm) { return omap_sham_cra_init_alg(tfm, "md5"); } static void omap_sham_cra_exit(struct crypto_tfm *tfm) { struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); crypto_free_shash(tctx->fallback); tctx->fallback = NULL; if (tctx->flags & BIT(FLAGS_HMAC)) { struct omap_sham_hmac_ctx *bctx = tctx->base; crypto_free_shash(bctx->shash); } } static struct ahash_alg algs[] = { { .init = omap_sham_init, .update = omap_sham_update, .final = omap_sham_final, .finup = omap_sham_finup, .digest = omap_sham_digest, .halg.digestsize = SHA1_DIGEST_SIZE, .halg.base = { .cra_name = "sha1", .cra_driver_name = "omap-sha1", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, } }, { .init = omap_sham_init, .update = omap_sham_update, .final = omap_sham_final, .finup = omap_sham_finup, .digest = omap_sham_digest, .halg.digestsize = MD5_DIGEST_SIZE, .halg.base = { .cra_name = "md5", .cra_driver_name = "omap-md5", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, } }, { .init = omap_sham_init, .update = omap_sham_update, .final = omap_sham_final, .finup = omap_sham_finup, .digest = omap_sham_digest, .setkey = omap_sham_setkey, .halg.digestsize = SHA1_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha1)", .cra_driver_name = "omap-hmac-sha1", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx) + sizeof(struct omap_sham_hmac_ctx), .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_sha1_init, .cra_exit = omap_sham_cra_exit, } }, { .init = omap_sham_init, .update = omap_sham_update, .final = omap_sham_final, .finup = omap_sham_finup, .digest = omap_sham_digest, .setkey = omap_sham_setkey, .halg.digestsize = MD5_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(md5)", .cra_driver_name = "omap-hmac-md5", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = 
sizeof(struct omap_sham_ctx) + sizeof(struct omap_sham_hmac_ctx), .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_md5_init, .cra_exit = omap_sham_cra_exit, } } }; static void omap_sham_done_task(unsigned long data) { struct omap_sham_dev *dd = (struct omap_sham_dev *)data; int err = 0; if (!test_bit(FLAGS_BUSY, &dd->flags)) { omap_sham_handle_queue(dd, NULL); return; } if (test_bit(FLAGS_CPU, &dd->flags)) { if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) goto finish; } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { omap_sham_update_dma_stop(dd); if (dd->err) { err = dd->err; goto finish; } } if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { /* hash or semi-hash ready */ clear_bit(FLAGS_DMA_READY, &dd->flags); err = omap_sham_update_dma_start(dd); if (err != -EINPROGRESS) goto finish; } } return; finish: dev_dbg(dd->dev, "update done: err: %d\n", err); /* finish current request */ omap_sham_finish_req(dd->req, err); } static irqreturn_t omap_sham_irq(int irq, void *dev_id) { struct omap_sham_dev *dd = dev_id; if (unlikely(test_bit(FLAGS_FINAL, &dd->flags))) /* final -> allow device to go to power-saving mode */ omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH); omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY, SHA_REG_CTRL_OUTPUT_READY); omap_sham_read(dd, SHA_REG_CTRL); if (!test_bit(FLAGS_BUSY, &dd->flags)) { dev_warn(dd->dev, "Interrupt when no active requests.\n"); return IRQ_HANDLED; } set_bit(FLAGS_OUTPUT_READY, &dd->flags); tasklet_schedule(&dd->done_task); return IRQ_HANDLED; } static void omap_sham_dma_callback(int lch, u16 ch_status, void *data) { struct omap_sham_dev *dd = data; if (ch_status != OMAP_DMA_BLOCK_IRQ) { pr_err("omap-sham DMA error status: 0x%hx\n", ch_status); dd->err = -EIO; clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */ } set_bit(FLAGS_DMA_READY, &dd->flags); tasklet_schedule(&dd->done_task); } static int omap_sham_dma_init(struct omap_sham_dev *dd) { int err; dd->dma_lch = -1; err = omap_request_dma(dd->dma, dev_name(dd->dev), omap_sham_dma_callback, dd, &dd->dma_lch); if (err) { dev_err(dd->dev, "Unable to request DMA channel\n"); return err; } return 0; } static void omap_sham_dma_cleanup(struct omap_sham_dev *dd) { if (dd->dma_lch >= 0) { omap_free_dma(dd->dma_lch); dd->dma_lch = -1; } } static int __devinit omap_sham_probe(struct platform_device *pdev) { struct omap_sham_dev *dd; struct device *dev = &pdev->dev; struct resource *res; int err, i, j; dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL); if (dd == NULL) { dev_err(dev, "unable to alloc data struct.\n"); err = -ENOMEM; goto data_err; } dd->dev = dev; platform_set_drvdata(pdev, dd); INIT_LIST_HEAD(&dd->list); spin_lock_init(&dd->lock); tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); dd->irq = -1; /* Get the base address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "no MEM resource info\n"); err = -ENODEV; goto res_err; } dd->phys_base = res->start; /* Get the DMA */ res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!res) { dev_err(dev, "no DMA resource info\n"); err = -ENODEV; goto res_err; } dd->dma = res->start; /* Get the IRQ */ dd->irq = platform_get_irq(pdev, 0); if (dd->irq < 0) { dev_err(dev, "no IRQ resource info\n"); err = dd->irq; goto res_err; } err = request_irq(dd->irq, omap_sham_irq, IRQF_TRIGGER_LOW,
dev_name(dev), dd); if (err) { dev_err(dev, "unable to request irq.\n"); goto res_err; } err = omap_sham_dma_init(dd); if (err) goto dma_err; /* Initializing the clock */ dd->iclk = clk_get(dev, "ick"); if (IS_ERR(dd->iclk)) { dev_err(dev, "clock initialization failed.\n"); err = PTR_ERR(dd->iclk); goto clk_err; } dd->io_base = ioremap(dd->phys_base, SZ_4K); if (!dd->io_base) { dev_err(dev, "can't ioremap\n"); err = -ENOMEM; goto io_err; } clk_enable(dd->iclk); dev_info(dev, "hw accel on OMAP rev %u.%u\n", (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4, omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR); clk_disable(dd->iclk); spin_lock(&sham.lock); list_add_tail(&dd->list, &sham.dev_list); spin_unlock(&sham.lock); for (i = 0; i < ARRAY_SIZE(algs); i++) { err = crypto_register_ahash(&algs[i]); if (err) goto err_algs; } return 0; err_algs: for (j = 0; j < i; j++) crypto_unregister_ahash(&algs[j]); iounmap(dd->io_base); io_err: clk_put(dd->iclk); clk_err: omap_sham_dma_cleanup(dd); dma_err: if (dd->irq >= 0) free_irq(dd->irq, dd); res_err: kfree(dd); dd = NULL; data_err: dev_err(dev, "initialization failed.\n"); return err; } static int __devexit omap_sham_remove(struct platform_device *pdev) { static struct omap_sham_dev *dd; int i; dd = platform_get_drvdata(pdev); if (!dd) return -ENODEV; spin_lock(&sham.lock); list_del(&dd->list); spin_unlock(&sham.lock); for (i = 0; i < ARRAY_SIZE(algs); i++) crypto_unregister_ahash(&algs[i]); tasklet_kill(&dd->done_task); iounmap(dd->io_base); clk_put(dd->iclk); omap_sham_dma_cleanup(dd); if (dd->irq >= 0) free_irq(dd->irq, dd); kfree(dd); dd = NULL; return 0; } static struct platform_driver omap_sham_driver = { .probe = omap_sham_probe, .remove = omap_sham_remove, .driver = { .name = "omap-sham", .owner = THIS_MODULE, }, }; static int __init omap_sham_mod_init(void) { pr_info("loading %s driver\n", "omap-sham"); if (!cpu_class_is_omap2() || (omap_type() != OMAP2_DEVICE_TYPE_SEC && omap_type() != OMAP2_DEVICE_TYPE_EMU)) { pr_err("Unsupported cpu\n"); return -ENODEV; } return platform_driver_register(&omap_sham_driver); } static void __exit omap_sham_mod_exit(void) { platform_driver_unregister(&omap_sham_driver); } module_init(omap_sham_mod_init); module_exit(omap_sham_mod_exit); MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Dmitry Kasatkin");
gpl-2.0
MassStash/m8whl_sense
drivers/staging/cxt1e1/comet_tables.c
7802
28568
/*----------------------------------------------------------------------------- * comet_tables.c - waveform tables for the PM4351 'COMET' * * Copyright (C) 2003-2005 SBE, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * For further information, contact via email: support@sbei.com * SBE, Inc. San Ramon, California U.S.A. *----------------------------------------------------------------------------- */ #include <linux/types.h> /***************************************************************************** * * Array names: * * TWVLongHaul0DB * TWVLongHaul7_5DB * TWVLongHaul15DB * TWVLongHaul22_5DB * TWVShortHaul0 * TWVShortHaul1 * TWVShortHaul2 * TWVShortHaul3 * TWVShortHaul4 * TWVShortHaul5 * TWV_E1_120Ohm * TWV_E1_75Ohm <not supported> * T1_Equalizer * E1_Equalizer * *****************************************************************************/ u_int8_t TWVLongHaul0DB[25][5] =/* T1 Long Haul 0 DB */ { {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x20, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x32, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3E, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x3D, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x3C, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x3B, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x38, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x37, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x36, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x34, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x4C, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x0C} /* PMC's suggested value */ /* {0x14} Output Amplitude */ }; u_int8_t TWVLongHaul7_5DB[25][5] = /* T1 Long Haul 7.5 DB */ { {0x00, 0x10, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x01, 0x0E, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x02, 0x0C, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x04, 0x0A, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x08, 0x08, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x0C, 0x06, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x10, 0x04, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x16, 0x02, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x1A, 0x01, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x1E, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x22, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x2B, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x24, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x20, 0x00, 
0x00, 0x00, 0x00}, /* Sample 19 */ {0x1C, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x18, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x14, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x12, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x07} /* PMC's suggested value */ /* { 0x0A } Output Amplitude */ }; u_int8_t TWVLongHaul15DB[25][5] = /* T1 Long Haul 15 DB */ { {0x00, 0x2A, 0x09, 0x01, 0x00}, /* Sample 0 */ {0x00, 0x28, 0x08, 0x01, 0x00}, /* Sample 1 */ {0x00, 0x26, 0x08, 0x01, 0x00}, /* Sample 2 */ {0x00, 0x24, 0x07, 0x01, 0x00}, /* Sample 3 */ {0x01, 0x22, 0x07, 0x01, 0x00}, /* Sample 4 */ {0x02, 0x20, 0x06, 0x01, 0x00}, /* Sample 5 */ {0x04, 0x1E, 0x06, 0x01, 0x00}, /* Sample 6 */ {0x07, 0x1C, 0x05, 0x00, 0x00}, /* Sample 7 */ {0x0A, 0x1B, 0x05, 0x00, 0x00}, /* Sample 8 */ {0x0D, 0x19, 0x05, 0x00, 0x00}, /* Sample 9 */ {0x10, 0x18, 0x04, 0x00, 0x00}, /* Sample 10 */ {0x14, 0x16, 0x04, 0x00, 0x00}, /* Sample 11 */ {0x18, 0x15, 0x04, 0x00, 0x00}, /* Sample 12 */ {0x1B, 0x13, 0x03, 0x00, 0x00}, /* Sample 13 */ {0x1E, 0x12, 0x03, 0x00, 0x00}, /* Sample 14 */ {0x21, 0x10, 0x03, 0x00, 0x00}, /* Sample 15 */ {0x24, 0x0F, 0x03, 0x00, 0x00}, /* Sample 16 */ {0x27, 0x0D, 0x03, 0x00, 0x00}, /* Sample 17 */ {0x2A, 0x0D, 0x02, 0x00, 0x00}, /* Sample 18 */ {0x2D, 0x0B, 0x02, 0x00, 0x00}, /* Sample 19 */ {0x30, 0x0B, 0x02, 0x00, 0x00}, /* Sample 20 */ {0x30, 0x0A, 0x02, 0x00, 0x00}, /* Sample 21 */ {0x2E, 0x0A, 0x02, 0x00, 0x00}, /* Sample 22 */ {0x2C, 0x09, 0x02, 0x00, 0x00}, /* Sample 23 */ {0x03} /* Output Amplitude */ }; u_int8_t TWVLongHaul22_5DB[25][5] = /* T1 Long Haul 22.5 DB */ { {0x00, 0x1F, 0x16, 0x06, 0x01}, /* Sample 0 */ {0x00, 0x20, 0x15, 0x05, 0x01}, /* Sample 1 */ {0x00, 0x21, 0x15, 0x05, 0x01}, /* Sample 2 */ {0x00, 0x22, 0x14, 0x05, 0x01}, /* Sample 3 */ {0x00, 0x22, 0x13, 0x04, 0x00}, /* Sample 4 */ {0x00, 0x23, 0x12, 0x04, 0x00}, /* Sample 5 */ {0x01, 0x23, 0x12, 0x04, 0x00}, /* Sample 6 */ {0x01, 0x24, 0x11, 0x03, 0x00}, /* Sample 7 */ {0x01, 0x23, 0x10, 0x03, 0x00}, /* Sample 8 */ {0x02, 0x23, 0x10, 0x03, 0x00}, /* Sample 9 */ {0x03, 0x22, 0x0F, 0x03, 0x00}, /* Sample 10 */ {0x05, 0x22, 0x0E, 0x03, 0x00}, /* Sample 11 */ {0x07, 0x21, 0x0E, 0x02, 0x00}, /* Sample 12 */ {0x09, 0x20, 0x0D, 0x02, 0x00}, /* Sample 13 */ {0x0B, 0x1E, 0x0C, 0x02, 0x00}, /* Sample 14 */ {0x0E, 0x1D, 0x0C, 0x02, 0x00}, /* Sample 15 */ {0x10, 0x1B, 0x0B, 0x02, 0x00}, /* Sample 16 */ {0x13, 0x1B, 0x0A, 0x02, 0x00}, /* Sample 17 */ {0x15, 0x1A, 0x0A, 0x02, 0x00}, /* Sample 18 */ {0x17, 0x19, 0x09, 0x01, 0x00}, /* Sample 19 */ {0x19, 0x19, 0x08, 0x01, 0x00}, /* Sample 20 */ {0x1B, 0x18, 0x08, 0x01, 0x00}, /* Sample 21 */ {0x1D, 0x17, 0x07, 0x01, 0x00}, /* Sample 22 */ {0x1E, 0x17, 0x06, 0x01, 0x00}, /* Sample 23 */ {0x02} /* Output Amplitude */ }; u_int8_t TWVShortHaul0[25][5] = /* T1 Short Haul 0 - 110 ft */ { {0x00, 0x45, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x20, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x3C, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x3B, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x38, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x37, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x36, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x34, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x29, 
0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x59, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x55, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x50, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x4D, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x48, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x0C} /* Output Amplitude */ }; u_int8_t TWVShortHaul1[25][5] = /* T1 Short Haul 110 - 220 ft */ { {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x36, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x34, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x30, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x2F, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x2E, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x2B, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x68, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x54, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x10} /* Output Amplitude */ }; u_int8_t TWVShortHaul2[25][5] = /* T1 Short Haul 220 - 330 ft */ { {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3A, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3A, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x38, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x30, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x2F, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x2E, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x2B, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x23, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x6C, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x60, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x11} /* Output Amplitude */ }; u_int8_t TWVShortHaul3[25][5] = /* T1 Short Haul 330 - 440 ft */ { {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x2F, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x2E, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x2B, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x2A, 0x00, 0x00, 0x00, 
0x00}, /* Sample 11 */ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x19, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x60, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x12} /* Output Amplitude */ }; u_int8_t TWVShortHaul4[25][5] = /* T1 Short Haul 440 - 550 ft */ { {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x30, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x2B, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x27, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x24, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x14} /* Output Amplitude */ }; u_int8_t TWVShortHaul5[25][5] = /* T1 Short Haul 550 - 660 ft */ { {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x3F, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x30, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x27, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x25, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x24, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x5F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x50, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x15} /* Output Amplitude */ }; u_int8_t TWV_E1_120Ohm[25][5] = /* E1 120 Ohm */ { {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x0A, 0x00, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3F, 0x00, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3F, 0x00, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x38, 0x00, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x36, 0x00, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x36, 
0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x0C} /* PMC's suggested value */ /* { 0x10 } Output Amplitude */ }; u_int8_t TWV_E1_75Ohm[25][5] = /* E1 75 Ohm */ { #ifdef PMCC4_DOES_NOT_SUPPORT {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x0A, 0x00, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x32, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x14, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ #endif {0x0C} /* Output Amplitude */ }; u_int32_t T1_Equalizer[256] = /* T1 Receiver Equalizer */ { 0x03FE1840, 0x03F61840, 0x03EE1840, 0x03E61840, /* 000 - 003 */ 0x03DE1840, 0x03D61840, 0x03D61840, 0x03D61840, /* 004 - 007 */ 0x03CE1840, 0x03CE1840, 0x03CE1840, 0x03CE1840, /* 008 - 011 */ 0x03C61840, 0x03C61840, 0x03C61840, 0x0BBE1840, /* 012 - 015 */ 0x0BBE1840, 0x0BBE1840, 0x0BBE1840, 0x0BB61840, /* 016 - 019 */ 0x0BB61840, 0x0BB61840, 0x0BB61840, 0x13AE1838, /* 020 - 023 */ 0x13AE183C, 0x13AE1840, 0x13AE1840, 0x13AE1840, /* 024 - 027 */ 0x13AE1840, 0x1BB618B8, 0x1BAE18B8, 0x1BAE18BC, /* 028 - 031 */ 0x1BAE18C0, 0x1BAE18C0, 0x23A618C0, 0x23A618C0, /* 032 - 035 */ 0x23A618C0, 0x23A618C0, 0x23A618C0, 0x239E18C0, /* 036 - 039 */ 0x239E18C0, 0x239E18C0, 0x239E18C0, 0x239E18C0, /* 040 - 043 */ 0x2B9618C0, 0x2B9618C0, 0x2B9618C0, 0x33961940, /* 044 - 047 */ 0x37961940, 0x37961940, 0x37961940, 0x3F9E19C0, /* 048 - 051 */ 0x3F9E19C0, 0x3F9E19C0, 0x3FA61A40, 0x3FA61A40, /* 052 - 055 */ 0x3FA61A40, 0x3FA61A40, 0x3F9619C0, 0x3F9619C0, /* 056 - 059 */ 0x3F9619C0, 0x3F9619C0, 0x479E1A40, 0x479E1A40, /* 060 - 063 */ 0x479E1A40, 0x47961A40, 0x47961A40, 0x47961A40, /* 064 - 067 */ 0x47961A40, 0x4F8E1A40, 0x4F8E1A40, 0x4F8E1A40, /* 068 - 071 */ 0x4F8E1A40, 0x4F8E1A40, 0x57861A40, 0x57861A40, /* 072 - 075 */ 0x57861A40, 0x57861A40, 0x57861A40, 0x5F861AC0, /* 076 - 079 */ 0x5F861AC0, 0x5F861AC0, 0x5F861AC0, 0x5F861AC0, /* 080 - 083 */ 0x5F861AC0, 0x5F7E1AC0, 0x5F7E1AC0, 0x5F7E1AC0, /* 084 - 087 
*/ 0x5F7E1AC0, 0x5F7E1AC0, 0x677E2AC0, 0x677E2AC0, /* 088 - 091 */ 0x677E2AC0, 0x677E2AC0, 0x67762AC0, 0x67762AC0, /* 092 - 095 */ 0x67762AC0, 0x67762AC0, 0x67762AC0, 0x6F6E2AC0, /* 096 - 099 */ 0x6F6E2AC0, 0x6F6E2AC0, 0x6F6E2AC0, 0x776E3AC0, /* 100 - 103 */ 0x776E3AC0, 0x776E3AC0, 0x776E3AC0, 0x7F663AC0, /* 104 - 107 */ 0x7F663AC0, 0x7F664AC0, 0x7F664AC0, 0x7F664AC0, /* 108 - 111 */ 0x7F664AC0, 0x87665AC0, 0x87665AC0, 0x87665AC0, /* 112 - 115 */ 0x87665AC0, 0x87665AC0, 0x875E5AC0, 0x875E5AC0, /* 116 - 119 */ 0x875E5AC0, 0x875E5AC0, 0x875E5AC0, 0x8F5E6AC0, /* 120 - 123 */ 0x8F5E6AC0, 0x8F5E6AC0, 0x8F5E6AC0, 0x975E7AC0, /* 124 - 127 */ 0x975E7AC0, 0x975E7AC0, 0x975E7AC0, 0x9F5E8AC0, /* 128 - 131 */ 0x9F5E8AC0, 0x9F5E8AC0, 0x9F5E8AC0, 0x9F5E8AC0, /* 132 - 135 */ 0xA7569AC0, 0xA7569AC0, 0xA7569AC0, 0xA7569AC0, /* 136 - 139 */ 0xA756AAC0, 0xA756AAC0, 0xA756AAC0, 0xAF4EAAC0, /* 140 - 143 */ 0xAF4EAAC0, 0xAF4EAAC0, 0xAF4EAAC0, 0xAF4EAAC0, /* 144 - 147 */ 0xB746AAC0, 0xB746AAC0, 0xB746AAC0, 0xB746AAC0, /* 148 - 151 */ 0xB746AAC0, 0xB746AAC0, 0xB746AAC0, 0xB746BAC0, /* 152 - 155 */ 0xB746BAC0, 0xB746BAC0, 0xBF4EBB40, 0xBF4EBB40, /* 156 - 159 */ 0xBF4EBB40, 0xBF4EBB40, 0xBF4EBB40, 0xBF4EBB40, /* 160 - 163 */ 0xBF4EBB40, 0xBF4EBB40, 0xBF4EBB40, 0xBE46CB40, /* 164 - 167 */ 0xBE46CB40, 0xBE46CB40, 0xBE46CB40, 0xBE46CB40, /* 168 - 171 */ 0xBE46CB40, 0xBE46DB40, 0xBE46DB40, 0xBE46DB40, /* 172 - 175 */ 0xC63ECB40, 0xC63ECB40, 0xC63EDB40, 0xC63EDB40, /* 176 - 179 */ 0xC63EDB40, 0xC644DB40, 0xC644DB40, 0xC644DB40, /* 180 - 183 */ 0xC644DB40, 0xC63CDB40, 0xC63CDB40, 0xC63CDB40, /* 184 - 187 */ 0xC63CDB40, 0xD634DB40, 0xD634DB40, 0xD634DB40, /* 188 - 191 */ 0xD634DB40, 0xD634DB40, 0xDE2CDB3C, 0xDE2CDB3C, /* 192 - 195 */ 0xDE2CDB3C, 0xE62CDB40, 0xE62CDB40, 0xE62CDB40, /* 196 - 199 */ 0xE62CDB40, 0xE62CDB40, 0xE62CEB40, 0xE62CEB40, /* 200 - 203 */ 0xE62CEB40, 0xEE2CFB40, 0xEE2CFB40, 0xEE2CFB40, /* 204 - 207 */ 0xEE2D0B40, 0xEE2D0B40, 0xEE2D0B40, 0xEE2D0B40, /* 208 - 211 */ 0xEE2D0B40, 0xF5250B38, 0xF5250B3C, 0xF5250B40, /* 212 - 215 */ 0xF5251B40, 0xF5251B40, 0xF5251B40, 0xF5251B40, /* 216 - 219 */ 0xF5251B40, 0xFD252B40, 0xFD252B40, 0xFD252B40, /* 220 - 223 */ 0xFD252B40, 0xFD252740, 0xFD252740, 0xFD252740, /* 224 - 227 */ 0xFD252340, 0xFD252340, 0xFD252340, 0xFD253340, /* 228 - 231 */ 0xFD253340, 0xFD253340, 0xFD253340, 0xFD253340, /* 232 - 235 */ 0xFD253340, 0xFD253340, 0xFD253340, 0xFC254340, /* 236 - 239 */ 0xFD254340, 0xFD254340, 0xFD254344, 0xFC254348, /* 240 - 243 */ 0xFC25434C, 0xFD2543BC, 0xFD2543C0, 0xFC2543C0, /* 244 - 247 */ 0xFC2343C0, 0xFC2343C0, 0xFD2343C0, 0xFC2143C0, /* 248 - 251 */ 0xFC2143C0, 0xFC2153C0, 0xFD2153C0, 0xFC2153C0 /* 252 - 255 */ }; u_int32_t E1_Equalizer[256] = /* E1 Receiver Equalizer */ { 0x07DE182C, 0x07DE182C, 0x07D6182C, 0x07D6182C, /* 000 - 003 */ 0x07D6182C, 0x07CE182C, 0x07CE182C, 0x07CE182C, /* 004 - 007 */ 0x07C6182C, 0x07C6182C, 0x07C6182C, 0x07BE182C, /* 008 - 011 */ 0x07BE182C, 0x07BE182C, 0x07BE182C, 0x07BE182C, /* 012 - 015 */ 0x07B6182C, 0x07B6182C, 0x07B6182C, 0x07B6182C, /* 016 - 019 */ 0x07B6182C, 0x07AE182C, 0x07AE182C, 0x07AE182C, /* 020 - 023 */ 0x07AE182C, 0x07AE182C, 0x07B618AC, 0x07AE18AC, /* 024 - 027 */ 0x07AE18AC, 0x07AE18AC, 0x07AE18AC, 0x07A618AC, /* 028 - 031 */ 0x07A618AC, 0x07A618AC, 0x07A618AC, 0x079E18AC, /* 032 - 035 */ 0x07A6192C, 0x07A6192C, 0x07A6192C, 0x0FA6192C, /* 036 - 039 */ 0x0FA6192C, 0x0F9E192C, 0x0F9E192C, 0x0F9E192C, /* 040 - 043 */ 0x179E192C, 0x17A619AC, 0x179E19AC, 0x179E19AC, /* 044 - 047 */ 0x179619AC, 0x1F9619AC, 
0x1F9619AC, 0x1F8E19AC, /* 048 - 051 */ 0x1F8E19AC, 0x1F8E19AC, 0x278E19AC, 0x278E1A2C, /* 052 - 055 */ 0x278E1A2C, 0x278E1A2C, 0x278E1A2C, 0x2F861A2C, /* 056 - 059 */ 0x2F861A2C, 0x2F861A2C, 0x2F7E1A2C, 0x2F7E1A2C, /* 060 - 063 */ 0x2F7E1A2C, 0x377E1A2C, 0x377E1AAC, 0x377E1AAC, /* 064 - 067 */ 0x377E1AAC, 0x377E1AAC, 0x3F7E2AAC, 0x3F7E2AAC, /* 068 - 071 */ 0x3F762AAC, 0x3F862B2C, 0x3F7E2B2C, 0x477E2B2C, /* 072 - 075 */ 0x477E2F2C, 0x477E2F2C, 0x477E2F2C, 0x47762F2C, /* 076 - 079 */ 0x4F762F2C, 0x4F762F2C, 0x4F6E2F2C, 0x4F6E2F2C, /* 080 - 083 */ 0x4F6E2F2C, 0x576E2F2C, 0x576E2F2C, 0x576E3F2C, /* 084 - 087 */ 0x576E3F2C, 0x576E3F2C, 0x5F6E3F2C, 0x5F6E4F2C, /* 088 - 091 */ 0x5F6E4F2C, 0x5F6E4F2C, 0x5F664F2C, 0x67664F2C, /* 092 - 095 */ 0x67664F2C, 0x675E4F2C, 0x675E4F2C, 0x67664F2C, /* 096 - 099 */ 0x67664F2C, 0x67665F2C, 0x6F6E5F2C, 0x6F6E6F2C, /* 100 - 103 */ 0x6F6E6F2C, 0x6F6E7F2C, 0x6F6E7F2C, 0x6F6E7F2C, /* 104 - 107 */ 0x77667F2C, 0x77667F2C, 0x775E6F2C, 0x775E7F2C, /* 108 - 111 */ 0x775E7F2C, 0x7F5E7F2C, 0x7F5E8F2C, 0x7F5E8F2C, /* 112 - 115 */ 0x7F5E8F2C, 0x87568F2C, 0x87568F2C, 0x87568F2C, /* 116 - 119 */ 0x874E8F2C, 0x874E8F2C, 0x874E8F2C, 0x8F4E9F2C, /* 120 - 123 */ 0x8F4E9F2C, 0x8F4EAF2C, 0x8F4EAF2C, 0x8F4EAF2C, /* 124 - 127 */ 0x974EAF2C, 0x974EAF2C, 0x974EAB2C, 0x974EAB2C, /* 128 - 131 */ 0x974EAB2C, 0x9F4EAB2C, 0x9F4EBB2C, 0x9F4EBB2C, /* 132 - 135 */ 0x9F4EBB2C, 0x9F4ECB2C, 0xA74ECB2C, 0xA74ECB2C, /* 136 - 139 */ 0xA746CB2C, 0xA746CB2C, 0xA746CB2C, 0xA746DB2C, /* 140 - 143 */ 0xAF46DB2C, 0xAF46EB2C, 0xAF46EB2C, 0xAF4EEB2C, /* 144 - 147 */ 0xAE4EEB2C, 0xAE4EEB2C, 0xB546FB2C, 0xB554FB2C, /* 148 - 151 */ 0xB54CEB2C, 0xB554FB2C, 0xB554FB2C, 0xBD54FB2C, /* 152 - 155 */ 0xBD4CFB2C, 0xBD4CFB2C, 0xBD4CFB2C, 0xBD44EB2C, /* 156 - 159 */ 0xC544FB2C, 0xC544FB2C, 0xC544FB2C, 0xC5450B2C, /* 160 - 163 */ 0xC5450B2C, 0xC5450B2C, 0xCD450B2C, 0xCD450B2C, /* 164 - 167 */ 0xCD3D0B2C, 0xCD3D0B2C, 0xCD3D0B2C, 0xD53D0B2C, /* 168 - 171 */ 0xD53D0B2C, 0xD53D1B2C, 0xD53D1B2C, 0xD53D1B2C, /* 172 - 175 */ 0xDD3D1B2C, 0xDD3D1B2C, 0xDD351B2C, 0xDD351B2C, /* 176 - 179 */ 0xDD351B2C, 0xE5351B2C, 0xE5351B2C, 0xE52D1B2C, /* 180 - 183 */ 0xE52D1B2C, 0xE52D3B2C, 0xED2D4B2C, 0xED2D1BA8, /* 184 - 187 */ 0xED2D1BAC, 0xED2D17AC, 0xED2D17AC, 0xED2D27AC, /* 188 - 191 */ 0xF52D27AC, 0xF52D27AC, 0xF52D2BAC, 0xF52D2BAC, /* 192 - 195 */ 0xF52D2BAC, 0xFD2D2BAC, 0xFD2B2BAC, 0xFD2B2BAC, /* 196 - 199 */ 0xFD2B2BAC, 0xFD2B2BAC, 0xFD232BAC, 0xFD232BAC, /* 200 - 203 */ 0xFD232BAC, 0xFD212BAC, 0xFD212BAC, 0xFD292BAC, /* 204 - 207 */ 0xFD292BAC, 0xFD2927AC, 0xFD2937AC, 0xFD2923AC, /* 208 - 211 */ 0xFD2923AC, 0xFD2923AC, 0xFD2923AC, 0xFD2123AC, /* 212 - 215 */ 0xFD2123AC, 0xFD2123AC, 0xFD2133AC, 0xFD2133AC, /* 216 - 219 */ 0xFD2133AC, 0xFD2143AC, 0xFD2143AC, 0xFD2143AC, /* 220 - 223 */ 0xFC2143AC, 0xFC2143AC, 0xFC1943AC, 0xFC1943AC, /* 224 - 227 */ 0xFC1943AC, 0xFC1943AC, 0xFC1953AC, 0xFC1953AC, /* 228 - 231 */ 0xFC1953AC, 0xFC1953AC, 0xFC1963AC, 0xFC1963AC, /* 232 - 235 */ 0xFC1963AC, 0xFC1973AC, 0xFC1973AC, 0xFC1973AC, /* 236 - 239 */ 0xFC1973AC, 0xFC1973AC, 0xFC1983AC, 0xFC1983AC, /* 240 - 243 */ 0xFC1983AC, 0xFC1983AC, 0xFC1983AC, 0xFC1993AC, /* 244 - 247 */ 0xFC1993AC, 0xFC1993AC, 0xFC19A3AC, 0xFC19A3AC, /* 248 - 251 */ 0xFC19B3AC, 0xFC19B3AC, 0xFC19B3AC, 0xFC19B3AC /* 252 - 255 */ }; /*** End-of-Files ***/
gpl-2.0
BenzoPlayer/kernel_asus_fugu
drivers/mtd/rfd_ftl.c
7802
18731
/* * rfd_ftl.c -- resident flash disk (flash translation layer) * * Copyright © 2005 Sean Young <sean@mess.org> * * This type of flash translation layer (FTL) is used by the Embedded BIOS * by General Software. It is known as the Resident Flash Disk (RFD), see: * * http://www.gensw.com/pages/prod/bios/rfd.htm * * based on ftl.c */ #include <linux/hdreg.h> #include <linux/init.h> #include <linux/mtd/blktrans.h> #include <linux/mtd/mtd.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/module.h> #include <asm/types.h> static int block_size = 0; module_param(block_size, int, 0); MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size"); #define PREFIX "rfd_ftl: " /* This major has been assigned by device@lanana.org */ #ifndef RFD_FTL_MAJOR #define RFD_FTL_MAJOR 256 #endif /* Maximum number of partitions in an FTL region */ #define PART_BITS 4 /* An erase unit should start with this value */ #define RFD_MAGIC 0x9193 /* the second value is 0xffff or 0xffc8; function unknown */ /* the third value is always 0xffff, ignored */ /* next is an array of mapping for each corresponding sector */ #define HEADER_MAP_OFFSET 3 #define SECTOR_DELETED 0x0000 #define SECTOR_ZERO 0xfffe #define SECTOR_FREE 0xffff #define SECTOR_SIZE 512 #define SECTORS_PER_TRACK 63 struct block { enum { BLOCK_OK, BLOCK_ERASING, BLOCK_ERASED, BLOCK_UNUSED, BLOCK_FAILED } state; int free_sectors; int used_sectors; int erases; u_long offset; }; struct partition { struct mtd_blktrans_dev mbd; u_int block_size; /* size of erase unit */ u_int total_blocks; /* number of erase units */ u_int header_sectors_per_block; /* header sectors in erase unit */ u_int data_sectors_per_block; /* data sectors in erase unit */ u_int sector_count; /* sectors in translated disk */ u_int header_size; /* bytes in header sector */ int reserved_block; /* block next up for reclaim */ int current_block; /* block to write to */ u16 *header_cache; /* cached header */ int is_reclaiming; int cylinders; int errors; u_long *sector_map; struct block *blocks; }; static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf); static int build_block_map(struct partition *part, int block_no) { struct block *block = &part->blocks[block_no]; int i; block->offset = part->block_size * block_no; if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) { block->state = BLOCK_UNUSED; return -ENOENT; } block->state = BLOCK_OK; for (i=0; i<part->data_sectors_per_block; i++) { u16 entry; entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]); if (entry == SECTOR_DELETED) continue; if (entry == SECTOR_FREE) { block->free_sectors++; continue; } if (entry == SECTOR_ZERO) entry = 0; if (entry >= part->sector_count) { printk(KERN_WARNING PREFIX "'%s': unit #%d: entry %d corrupt, " "sector %d out of range\n", part->mbd.mtd->name, block_no, i, entry); continue; } if (part->sector_map[entry] != -1) { printk(KERN_WARNING PREFIX "'%s': more than one entry for sector %d\n", part->mbd.mtd->name, entry); part->errors = 1; continue; } part->sector_map[entry] = block->offset + (i + part->header_sectors_per_block) * SECTOR_SIZE; block->used_sectors++; } if (block->free_sectors == part->data_sectors_per_block) part->reserved_block = block_no; return 0; } static int scan_header(struct partition *part) { int sectors_per_block; int i, rc = -ENOMEM; int blocks_found; size_t retlen; sectors_per_block = part->block_size / SECTOR_SIZE; part->total_blocks = (u32)part->mbd.mtd->size / part->block_size; if 
(part->total_blocks < 2) return -ENOENT; /* each erase block has three bytes header, followed by the map */ part->header_sectors_per_block = ((HEADER_MAP_OFFSET + sectors_per_block) * sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE; part->data_sectors_per_block = sectors_per_block - part->header_sectors_per_block; part->header_size = (HEADER_MAP_OFFSET + part->data_sectors_per_block) * sizeof(u16); part->cylinders = (part->data_sectors_per_block * (part->total_blocks - 1) - 1) / SECTORS_PER_TRACK; part->sector_count = part->cylinders * SECTORS_PER_TRACK; part->current_block = -1; part->reserved_block = -1; part->is_reclaiming = 0; part->header_cache = kmalloc(part->header_size, GFP_KERNEL); if (!part->header_cache) goto err; part->blocks = kcalloc(part->total_blocks, sizeof(struct block), GFP_KERNEL); if (!part->blocks) goto err; part->sector_map = vmalloc(part->sector_count * sizeof(u_long)); if (!part->sector_map) { printk(KERN_ERR PREFIX "'%s': unable to allocate memory for " "sector map", part->mbd.mtd->name); goto err; } for (i=0; i<part->sector_count; i++) part->sector_map[i] = -1; for (i=0, blocks_found=0; i<part->total_blocks; i++) { rc = mtd_read(part->mbd.mtd, i * part->block_size, part->header_size, &retlen, (u_char *)part->header_cache); if (!rc && retlen != part->header_size) rc = -EIO; if (rc) goto err; if (!build_block_map(part, i)) blocks_found++; } if (blocks_found == 0) { printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n", part->mbd.mtd->name); rc = -ENOENT; goto err; } if (part->reserved_block == -1) { printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n", part->mbd.mtd->name); part->errors = 1; } return 0; err: vfree(part->sector_map); kfree(part->header_cache); kfree(part->blocks); return rc; } static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf) { struct partition *part = (struct partition*)dev; u_long addr; size_t retlen; int rc; if (sector >= part->sector_count) return -EIO; addr = part->sector_map[sector]; if (addr != -1) { rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, (u_char *)buf); if (!rc && retlen != SECTOR_SIZE) rc = -EIO; if (rc) { printk(KERN_WARNING PREFIX "error reading '%s' at " "0x%lx\n", part->mbd.mtd->name, addr); return rc; } } else memset(buf, 0, SECTOR_SIZE); return 0; } static void erase_callback(struct erase_info *erase) { struct partition *part; u16 magic; int i, rc; size_t retlen; part = (struct partition*)erase->priv; i = (u32)erase->addr / part->block_size; if (i >= part->total_blocks || part->blocks[i].offset != erase->addr || erase->addr > UINT_MAX) { printk(KERN_ERR PREFIX "erase callback for unknown offset %llx " "on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name); return; } if (erase->state != MTD_ERASE_DONE) { printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', " "state %d\n", (unsigned long long)erase->addr, part->mbd.mtd->name, erase->state); part->blocks[i].state = BLOCK_FAILED; part->blocks[i].free_sectors = 0; part->blocks[i].used_sectors = 0; kfree(erase); return; } magic = cpu_to_le16(RFD_MAGIC); part->blocks[i].state = BLOCK_ERASED; part->blocks[i].free_sectors = part->data_sectors_per_block; part->blocks[i].used_sectors = 0; part->blocks[i].erases++; rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic), &retlen, (u_char *)&magic); if (!rc && retlen != sizeof(magic)) rc = -EIO; if (rc) { printk(KERN_ERR PREFIX "'%s': unable to write RFD " "header at 0x%lx\n", part->mbd.mtd->name, part->blocks[i].offset); part->blocks[i].state = 
BLOCK_FAILED; } else part->blocks[i].state = BLOCK_OK; kfree(erase); } static int erase_block(struct partition *part, int block) { struct erase_info *erase; int rc = -ENOMEM; erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL); if (!erase) goto err; erase->mtd = part->mbd.mtd; erase->callback = erase_callback; erase->addr = part->blocks[block].offset; erase->len = part->block_size; erase->priv = (u_long)part; part->blocks[block].state = BLOCK_ERASING; part->blocks[block].free_sectors = 0; rc = mtd_erase(part->mbd.mtd, erase); if (rc) { printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' " "failed\n", (unsigned long long)erase->addr, (unsigned long long)erase->len, part->mbd.mtd->name); kfree(erase); } err: return rc; } static int move_block_contents(struct partition *part, int block_no, u_long *old_sector) { void *sector_data; u16 *map; size_t retlen; int i, rc = -ENOMEM; part->is_reclaiming = 1; sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL); if (!sector_data) goto err3; map = kmalloc(part->header_size, GFP_KERNEL); if (!map) goto err2; rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset, part->header_size, &retlen, (u_char *)map); if (!rc && retlen != part->header_size) rc = -EIO; if (rc) { printk(KERN_ERR PREFIX "error reading '%s' at " "0x%lx\n", part->mbd.mtd->name, part->blocks[block_no].offset); goto err; } for (i=0; i<part->data_sectors_per_block; i++) { u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]); u_long addr; if (entry == SECTOR_FREE || entry == SECTOR_DELETED) continue; if (entry == SECTOR_ZERO) entry = 0; /* already warned about and ignored in build_block_map() */ if (entry >= part->sector_count) continue; addr = part->blocks[block_no].offset + (i + part->header_sectors_per_block) * SECTOR_SIZE; if (*old_sector == addr) { *old_sector = -1; if (!part->blocks[block_no].used_sectors--) { rc = erase_block(part, block_no); break; } continue; } rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, sector_data); if (!rc && retlen != SECTOR_SIZE) rc = -EIO; if (rc) { printk(KERN_ERR PREFIX "'%s': Unable to " "read sector for relocation\n", part->mbd.mtd->name); goto err; } rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part, entry, sector_data); if (rc) goto err; } err: kfree(map); err2: kfree(sector_data); err3: part->is_reclaiming = 0; return rc; } static int reclaim_block(struct partition *part, u_long *old_sector) { int block, best_block, score, old_sector_block; int rc; /* we have a race if sync doesn't exist */ mtd_sync(part->mbd.mtd); score = 0x7fffffff; /* MAX_INT */ best_block = -1; if (*old_sector != -1) old_sector_block = *old_sector / part->block_size; else old_sector_block = -1; for (block=0; block<part->total_blocks; block++) { int this_score; if (block == part->reserved_block) continue; /* * Postpone reclaiming if there is a free sector as * more removed sectors is more efficient (have to move * less). 
*/ if (part->blocks[block].free_sectors) return 0; this_score = part->blocks[block].used_sectors; if (block == old_sector_block) this_score--; else { /* no point in moving a full block */ if (part->blocks[block].used_sectors == part->data_sectors_per_block) continue; } this_score += part->blocks[block].erases; if (this_score < score) { best_block = block; score = this_score; } } if (best_block == -1) return -ENOSPC; part->current_block = -1; part->reserved_block = best_block; pr_debug("reclaim_block: reclaiming block #%d with %d used " "%d free sectors\n", best_block, part->blocks[best_block].used_sectors, part->blocks[best_block].free_sectors); if (part->blocks[best_block].used_sectors) rc = move_block_contents(part, best_block, old_sector); else rc = erase_block(part, best_block); return rc; } /* * IMPROVE: It would be best to choose the block with the most deleted sectors, * because if we fill that one up first it'll have the most chance of having * the least live sectors at reclaim. */ static int find_free_block(struct partition *part) { int block, stop; block = part->current_block == -1 ? jiffies % part->total_blocks : part->current_block; stop = block; do { if (part->blocks[block].free_sectors && block != part->reserved_block) return block; if (part->blocks[block].state == BLOCK_UNUSED) erase_block(part, block); if (++block >= part->total_blocks) block = 0; } while (block != stop); return -1; } static int find_writable_block(struct partition *part, u_long *old_sector) { int rc, block; size_t retlen; block = find_free_block(part); if (block == -1) { if (!part->is_reclaiming) { rc = reclaim_block(part, old_sector); if (rc) goto err; block = find_free_block(part); } if (block == -1) { rc = -ENOSPC; goto err; } } rc = mtd_read(part->mbd.mtd, part->blocks[block].offset, part->header_size, &retlen, (u_char *)part->header_cache); if (!rc && retlen != part->header_size) rc = -EIO; if (rc) { printk(KERN_ERR PREFIX "'%s': unable to read header at " "0x%lx\n", part->mbd.mtd->name, part->blocks[block].offset); goto err; } part->current_block = block; err: return rc; } static int mark_sector_deleted(struct partition *part, u_long old_addr) { int block, offset, rc; u_long addr; size_t retlen; u16 del = cpu_to_le16(SECTOR_DELETED); block = old_addr / part->block_size; offset = (old_addr % part->block_size) / SECTOR_SIZE - part->header_sectors_per_block; addr = part->blocks[block].offset + (HEADER_MAP_OFFSET + offset) * sizeof(u16); rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen, (u_char *)&del); if (!rc && retlen != sizeof(del)) rc = -EIO; if (rc) { printk(KERN_ERR PREFIX "error writing '%s' at " "0x%lx\n", part->mbd.mtd->name, addr); if (rc) goto err; } if (block == part->current_block) part->header_cache[offset + HEADER_MAP_OFFSET] = del; part->blocks[block].used_sectors--; if (!part->blocks[block].used_sectors && !part->blocks[block].free_sectors) rc = erase_block(part, block); err: return rc; } static int find_free_sector(const struct partition *part, const struct block *block) { int i, stop; i = stop = part->data_sectors_per_block - block->free_sectors; do { if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]) == SECTOR_FREE) return i; if (++i == part->data_sectors_per_block) i = 0; } while(i != stop); return -1; } static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr) { struct partition *part = (struct partition*)dev; struct block *block; u_long addr; int i; int rc; size_t retlen; u16 entry; if (part->current_block == -1 || 
!part->blocks[part->current_block].free_sectors) { rc = find_writable_block(part, old_addr); if (rc) goto err; } block = &part->blocks[part->current_block]; i = find_free_sector(part, block); if (i < 0) { rc = -ENOSPC; goto err; } addr = (i + part->header_sectors_per_block) * SECTOR_SIZE + block->offset; rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, (u_char *)buf); if (!rc && retlen != SECTOR_SIZE) rc = -EIO; if (rc) { printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n", part->mbd.mtd->name, addr); if (rc) goto err; } part->sector_map[sector] = addr; entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector); part->header_cache[i + HEADER_MAP_OFFSET] = entry; addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16); rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen, (u_char *)&entry); if (!rc && retlen != sizeof(entry)) rc = -EIO; if (rc) { printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n", part->mbd.mtd->name, addr); if (rc) goto err; } block->used_sectors++; block->free_sectors--; err: return rc; } static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf) { struct partition *part = (struct partition*)dev; u_long old_addr; int i; int rc = 0; pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector); if (part->reserved_block == -1) { rc = -EACCES; goto err; } if (sector >= part->sector_count) { rc = -EIO; goto err; } old_addr = part->sector_map[sector]; for (i=0; i<SECTOR_SIZE; i++) { if (!buf[i]) continue; rc = do_writesect(dev, sector, buf, &old_addr); if (rc) goto err; break; } if (i == SECTOR_SIZE) part->sector_map[sector] = -1; if (old_addr != -1) rc = mark_sector_deleted(part, old_addr); err: return rc; } static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo) { struct partition *part = (struct partition*)dev; geo->heads = 1; geo->sectors = SECTORS_PER_TRACK; geo->cylinders = part->cylinders; return 0; } static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) { struct partition *part; if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX) return; part = kzalloc(sizeof(struct partition), GFP_KERNEL); if (!part) return; part->mbd.mtd = mtd; if (block_size) part->block_size = block_size; else { if (!mtd->erasesize) { printk(KERN_WARNING PREFIX "please provide block_size"); goto out; } else part->block_size = mtd->erasesize; } if (scan_header(part) == 0) { part->mbd.size = part->sector_count; part->mbd.tr = tr; part->mbd.devnum = -1; if (!(mtd->flags & MTD_WRITEABLE)) part->mbd.readonly = 1; else if (part->errors) { printk(KERN_WARNING PREFIX "'%s': errors found, " "setting read-only\n", mtd->name); part->mbd.readonly = 1; } printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n", mtd->name, mtd->type, mtd->flags); if (!add_mtd_blktrans_dev((void*)part)) return; } out: kfree(part); } static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev) { struct partition *part = (struct partition*)dev; int i; for (i=0; i<part->total_blocks; i++) { pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n", part->mbd.mtd->name, i, part->blocks[i].erases); } del_mtd_blktrans_dev(dev); vfree(part->sector_map); kfree(part->header_cache); kfree(part->blocks); } static struct mtd_blktrans_ops rfd_ftl_tr = { .name = "rfd", .major = RFD_FTL_MAJOR, .part_bits = PART_BITS, .blksize = SECTOR_SIZE, .readsect = rfd_ftl_readsect, .writesect = rfd_ftl_writesect, .getgeo = rfd_ftl_getgeo, .add_mtd = rfd_ftl_add_mtd, .remove_dev = rfd_ftl_remove_dev, .owner = THIS_MODULE, }; static int __init 
init_rfd_ftl(void) { return register_mtd_blktrans(&rfd_ftl_tr); } static void __exit cleanup_rfd_ftl(void) { deregister_mtd_blktrans(&rfd_ftl_tr); } module_init(init_rfd_ftl); module_exit(cleanup_rfd_ftl); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sean Young <sean@mess.org>"); MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, " "used by General Software's Embedded BIOS");
gpl-2.0
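The scan_header() routine in rfd_ftl.c above derives the erase-unit geometry (how many sectors hold the header map versus data) purely from the block size. A minimal user-space sketch of that arithmetic, assuming a 64 KiB erase unit together with the file's 512-byte sectors, is:

/* Stand-alone sketch of the RFD geometry arithmetic used by scan_header();
 * the 64 KiB erase-unit size is an assumed example value, not taken from
 * the driver itself. */
#include <stdio.h>

#define SECTOR_SIZE       512
#define HEADER_MAP_OFFSET 3

int main(void)
{
        unsigned int block_size = 64 * 1024;                  /* assumed erase unit */
        unsigned int sectors_per_block = block_size / SECTOR_SIZE;         /* 128 */
        /* the header holds the magic words plus one u16 map entry per sector,
         * rounded up to whole sectors */
        unsigned int header_sectors =
                ((HEADER_MAP_OFFSET + sectors_per_block) * sizeof(unsigned short) +
                 SECTOR_SIZE - 1) / SECTOR_SIZE;                           /*   1 */
        unsigned int data_sectors = sectors_per_block - header_sectors;    /* 127 */

        printf("sectors/block=%u header=%u data=%u\n",
               sectors_per_block, header_sectors, data_sectors);
        return 0;
}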
mpe/powerpc
drivers/char/ipmi/kcs_bmc.c
379
4757
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2015-2018, Intel Corporation. * Copyright (c) 2021, IBM Corp. */ #include <linux/device.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include "kcs_bmc.h" /* Implement both the device and client interfaces here */ #include "kcs_bmc_device.h" #include "kcs_bmc_client.h" /* Record registered devices and drivers */ static DEFINE_MUTEX(kcs_bmc_lock); static LIST_HEAD(kcs_bmc_devices); static LIST_HEAD(kcs_bmc_drivers); /* Consumer data access */ u8 kcs_bmc_read_data(struct kcs_bmc_device *kcs_bmc) { return kcs_bmc->ops->io_inputb(kcs_bmc, kcs_bmc->ioreg.idr); } EXPORT_SYMBOL(kcs_bmc_read_data); void kcs_bmc_write_data(struct kcs_bmc_device *kcs_bmc, u8 data) { kcs_bmc->ops->io_outputb(kcs_bmc, kcs_bmc->ioreg.odr, data); } EXPORT_SYMBOL(kcs_bmc_write_data); u8 kcs_bmc_read_status(struct kcs_bmc_device *kcs_bmc) { return kcs_bmc->ops->io_inputb(kcs_bmc, kcs_bmc->ioreg.str); } EXPORT_SYMBOL(kcs_bmc_read_status); void kcs_bmc_write_status(struct kcs_bmc_device *kcs_bmc, u8 data) { kcs_bmc->ops->io_outputb(kcs_bmc, kcs_bmc->ioreg.str, data); } EXPORT_SYMBOL(kcs_bmc_write_status); void kcs_bmc_update_status(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 val) { kcs_bmc->ops->io_updateb(kcs_bmc, kcs_bmc->ioreg.str, mask, val); } EXPORT_SYMBOL(kcs_bmc_update_status); irqreturn_t kcs_bmc_handle_event(struct kcs_bmc_device *kcs_bmc) { struct kcs_bmc_client *client; irqreturn_t rc = IRQ_NONE; spin_lock(&kcs_bmc->lock); client = kcs_bmc->client; if (client) rc = client->ops->event(client); spin_unlock(&kcs_bmc->lock); return rc; } EXPORT_SYMBOL(kcs_bmc_handle_event); int kcs_bmc_enable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client) { int rc; spin_lock_irq(&kcs_bmc->lock); if (kcs_bmc->client) { rc = -EBUSY; } else { u8 mask = KCS_BMC_EVENT_TYPE_IBF; kcs_bmc->client = client; kcs_bmc_update_event_mask(kcs_bmc, mask, mask); rc = 0; } spin_unlock_irq(&kcs_bmc->lock); return rc; } EXPORT_SYMBOL(kcs_bmc_enable_device); void kcs_bmc_disable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client) { spin_lock_irq(&kcs_bmc->lock); if (client == kcs_bmc->client) { u8 mask = KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE; kcs_bmc_update_event_mask(kcs_bmc, mask, 0); kcs_bmc->client = NULL; } spin_unlock_irq(&kcs_bmc->lock); } EXPORT_SYMBOL(kcs_bmc_disable_device); int kcs_bmc_add_device(struct kcs_bmc_device *kcs_bmc) { struct kcs_bmc_driver *drv; int error = 0; int rc; spin_lock_init(&kcs_bmc->lock); kcs_bmc->client = NULL; mutex_lock(&kcs_bmc_lock); list_add(&kcs_bmc->entry, &kcs_bmc_devices); list_for_each_entry(drv, &kcs_bmc_drivers, entry) { rc = drv->ops->add_device(kcs_bmc); if (!rc) continue; dev_err(kcs_bmc->dev, "Failed to add chardev for KCS channel %d: %d", kcs_bmc->channel, rc); error = rc; } mutex_unlock(&kcs_bmc_lock); return error; } EXPORT_SYMBOL(kcs_bmc_add_device); void kcs_bmc_remove_device(struct kcs_bmc_device *kcs_bmc) { struct kcs_bmc_driver *drv; int rc; mutex_lock(&kcs_bmc_lock); list_del(&kcs_bmc->entry); list_for_each_entry(drv, &kcs_bmc_drivers, entry) { rc = drv->ops->remove_device(kcs_bmc); if (rc) dev_err(kcs_bmc->dev, "Failed to remove chardev for KCS channel %d: %d", kcs_bmc->channel, rc); } mutex_unlock(&kcs_bmc_lock); } EXPORT_SYMBOL(kcs_bmc_remove_device); void kcs_bmc_register_driver(struct kcs_bmc_driver *drv) { struct kcs_bmc_device *kcs_bmc; int rc; mutex_lock(&kcs_bmc_lock); list_add(&drv->entry, &kcs_bmc_drivers); list_for_each_entry(kcs_bmc, 
&kcs_bmc_devices, entry) { rc = drv->ops->add_device(kcs_bmc); if (rc) dev_err(kcs_bmc->dev, "Failed to add driver for KCS channel %d: %d", kcs_bmc->channel, rc); } mutex_unlock(&kcs_bmc_lock); } EXPORT_SYMBOL(kcs_bmc_register_driver); void kcs_bmc_unregister_driver(struct kcs_bmc_driver *drv) { struct kcs_bmc_device *kcs_bmc; int rc; mutex_lock(&kcs_bmc_lock); list_del(&drv->entry); list_for_each_entry(kcs_bmc, &kcs_bmc_devices, entry) { rc = drv->ops->remove_device(kcs_bmc); if (rc) dev_err(kcs_bmc->dev, "Failed to remove driver for KCS channel %d: %d", kcs_bmc->channel, rc); } mutex_unlock(&kcs_bmc_lock); } EXPORT_SYMBOL(kcs_bmc_unregister_driver); void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events) { kcs_bmc->ops->irq_mask_update(kcs_bmc, mask, events); } EXPORT_SYMBOL(kcs_bmc_update_event_mask); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>"); MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>"); MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");
gpl-2.0
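kcs_bmc.c above keeps two lists guarded by kcs_bmc_lock (registered devices and registered chardev drivers) and cross-calls add_device()/remove_device() whenever either side registers, so ordering does not matter. A hypothetical user-space reduction of that cross-registration pattern, with locking omitted and all names invented for illustration:

/* Hypothetical reduction of the device/driver cross-registration pattern
 * used above; names and types are invented, and the mutex is left out. */
#include <stdio.h>

#define MAX 4

struct demo_device { int channel; };
struct demo_driver { const char *name; };

static struct demo_device *devices[MAX];
static struct demo_driver *drivers[MAX];
static int ndevices, ndrivers;

/* When a device appears, every already-registered driver gets to bind it. */
static void demo_add_device(struct demo_device *dev)
{
        devices[ndevices++] = dev;
        for (int i = 0; i < ndrivers; i++)
                printf("driver %s binds channel %d\n",
                       drivers[i]->name, dev->channel);
}

/* When a driver registers, it is offered every already-present device. */
static void demo_register_driver(struct demo_driver *drv)
{
        drivers[ndrivers++] = drv;
        for (int i = 0; i < ndevices; i++)
                printf("driver %s binds channel %d\n",
                       drv->name, devices[i]->channel);
}

int main(void)
{
        struct demo_device dev = { .channel = 3 };
        struct demo_driver drv = { .name = "cdev" };

        demo_add_device(&dev);        /* no drivers yet, nothing bound */
        demo_register_driver(&drv);   /* binds the already-present device */
        return 0;
}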
1N4148/OpenWRT_CC
tools/firmware-utils/src/mkporayfw.c
379
16853
/* * Builder/viewer/extractor utility for Poray firmware image files * * Copyright (C) 2013 Michel Stempin <michel.stempin@wanadoo.fr> * Copyright (C) 2013 Felix Kaechele <felix@fetzig.org> * Copyright (C) 2013 <admin@openschemes.com> * * This tool is based on: * TP-Link firmware upgrade tool. * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org> * * Itself based on: * TP-Link WR941 V2 firmware checksum fixing tool. * Copyright (C) 2008,2009 Wang Jian <lark@linux.net.cn> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <unistd.h> #include <libgen.h> #include <getopt.h> #include <stdarg.h> #include <errno.h> #include <sys/stat.h> #include <arpa/inet.h> #include <netinet/in.h> #if (__BYTE_ORDER == __BIG_ENDIAN) # define HOST_TO_BE32(x) (x) # define BE32_TO_HOST(x) (x) # define HOST_TO_LE32(x) bswap_32(x) # define LE32_TO_HOST(x) bswap_32(x) #else # define HOST_TO_BE32(x) bswap_32(x) # define BE32_TO_HOST(x) bswap_32(x) # define HOST_TO_LE32(x) (x) # define LE32_TO_HOST(x) (x) #endif /* Fixed header flags */ #define HEADER_FLAGS 0x020e0000 /* Recognized Hardware ID magic */ #define HWID_HAME_MPR_A1_L8 0x32473352 #define HWID_PORAY_R50B 0x31353033 #define HWID_PORAY_R50D 0x33353033 #define HWID_PORAY_R50E 0x34353033 #define HWID_PORAY_M3 0x31353335 #define HWID_PORAY_M4 0x32353335 #define HWID_PORAY_Q3 0x33353335 #define HWID_PORAY_X5_X6 0x35353335 #define HWID_PORAY_X8 0x36353335 #define HWID_PORAY_X1 0x38353335 #define HWID_NEXX_WT1520 0x30353332 #define HWID_NEXX_WT3020 0x30323033 #define HWID_A5_V11 0x32473352 /* Recognized XOR obfuscation keys */ #define KEY_HAME 0 #define KEY_PORAY_1 1 #define KEY_PORAY_2 2 #define KEY_PORAY_3 3 #define KEY_PORAY_4 4 #define KEY_NEXX_1 5 #define KEY_NEXX_2 6 #define KEY_A5_V11 7 /* XOR key length */ #define KEY_LEN 15 struct file_info { char *file_name; /* Name of the file */ uint32_t file_size; /* Length of the file */ }; struct fw_header { uint32_t hw_id; /* Hardware id */ uint32_t firmware_len; /* Firmware data length */ uint32_t flags; /* Header flags */ uint8_t pad[16]; } __attribute__ ((packed)); struct flash_layout { char *id; uint32_t fw_max_len; }; struct board_info { char *id; uint32_t hw_id; char *layout_id; uint32_t key; }; /* * Globals */ static char *ofname; static char *progname; static char *board_id; static struct board_info *board; static char *layout_id; static struct flash_layout *layout; static char *opt_hw_id; static uint32_t hw_id; static struct file_info firmware_info; static uint32_t firmware_len = 0; static int inspect = 0; static int extract = 0; static uint8_t key[][KEY_LEN] = { {0xC8, 0x3C, 0x3A, 0x93, 0xA2, 0x95, 0xC3, 0x63, 0x48, 0x45, 0x58, 0x09, 0x12, 0x03, 0x08}, {0x89, 0x6B, 0x5A, 0x93, 0x92, 0x95, 0xC3, 0x63, 0xD0, 0xA3, 0x9C, 0x92, 0x2E, 0xE6, 0xC7}, {0xC9, 0x1C, 0x3A, 0x93, 0x92, 0x95, 0xC3, 0x63, 0xD0, 0xA3, 0x9C, 0x92, 0x2E, 0xE6, 0xC7}, {0x19, 0x1B, 0x3A, 0x93, 0x92, 0x95, 0xC3, 0x63, 0xD0, 0xA3, 0x9C, 0x92, 0x2E, 0xE6, 0xC7}, {0x79, 0x7B, 0x7A, 0x93, 0x92, 0x95, 0xC3, 0x63, 0xD0, 0xA3, 0x9C, 0x92, 0x2E, 0xE6, 0xC7}, {0x19, 0x1C, 0x4A, 0x93, 0x96, 0x95, 0xC3, 0x63, 0xD0, 0xA3, 0x9C, 0x92, 0x2E, 0x16, 0xC6}, {0x39, 0x1C, 0x4A, 0x93, 0x96, 0x95, 0xC3, 0x63, 0xD0, 0xA3, 0x9C, 0x92, 0x2E, 0x16, 0xC6}, {0xC8, 0x3C, 0x3A, 0x93, 0xA2, 0x95, 0xC3, 0x63, 0x48, 0x45, 0x58, 0x09, 0x20, 0x11, 0x08}, 
}; static struct flash_layout layouts[] = { { .id = "4M", .fw_max_len = 0x3c0000, }, { .id = "8M", .fw_max_len = 0x7c0000, }, { /* terminating entry */ } }; static struct board_info boards[] = { { .id = "A5-V11", .hw_id = HWID_A5_V11, .layout_id = "4M", .key = KEY_A5_V11, }, { .id = "MPR-A1", .hw_id = HWID_HAME_MPR_A1_L8, .layout_id = "4M", .key = KEY_HAME, }, { .id = "MPR-L8", .hw_id = HWID_HAME_MPR_A1_L8, .layout_id = "4M", .key = KEY_HAME, }, { .id = "R50B", .hw_id = HWID_PORAY_R50B, .layout_id = "4M", .key = KEY_PORAY_2, }, { .id = "R50D", .hw_id = HWID_PORAY_R50D, .layout_id = "4M", .key = KEY_PORAY_3, }, { .id = "R50E", .hw_id = HWID_PORAY_R50E, .layout_id = "4M", .key = KEY_PORAY_4, }, { .id = "M3", .hw_id = HWID_PORAY_M3, .layout_id = "4M", .key = KEY_PORAY_1, }, { .id = "M4", .hw_id = HWID_PORAY_M4, .layout_id = "4M", .key = KEY_PORAY_1, }, { .id = "Q3", .hw_id = HWID_PORAY_Q3, .layout_id = "4M", .key = KEY_PORAY_1, }, { .id = "X5 or X6", .hw_id = HWID_PORAY_X5_X6, .layout_id = "8M", .key = KEY_PORAY_1, }, { .id = "X5", .hw_id = HWID_PORAY_X5_X6, .layout_id = "8M", .key = KEY_PORAY_1, }, { .id = "X6", .hw_id = HWID_PORAY_X5_X6, .layout_id = "8M", .key = KEY_PORAY_1, }, { .id = "X8", .hw_id = HWID_PORAY_X8, .layout_id = "8M", .key = KEY_PORAY_1, }, { .id = "X1", .hw_id = HWID_PORAY_X1, .layout_id = "8M", .key = KEY_PORAY_1, }, { .id = "WT1520", .hw_id = HWID_NEXX_WT1520, .layout_id = "4M", .key = KEY_NEXX_1, }, { .id = "WT1520", .hw_id = HWID_NEXX_WT1520, .layout_id = "8M", .key = KEY_NEXX_1, }, { .id = "WT3020", .hw_id = HWID_NEXX_WT3020, .layout_id = "4M", .key = KEY_NEXX_2, }, { .id = "WT3020", .hw_id = HWID_NEXX_WT3020, .layout_id = "8M", .key = KEY_NEXX_2, }, { /* terminating entry */ } }; /* * Message macros */ #define ERR(fmt, ...) do { \ fflush(0); \ fprintf(stderr, "[%s] *** error: " fmt "\n", \ progname, ## __VA_ARGS__ ); \ } while (0) #define ERRS(fmt, ...) do { \ int save = errno; \ fflush(0); \ fprintf(stderr, "[%s] *** error: " fmt ":%s\n", \ progname, ## __VA_ARGS__, strerror(save)); \ } while (0) #define DBG(fmt, ...) do { \ fprintf(stderr, "[%s] " fmt "\n", progname, ## __VA_ARGS__ ); \ } while (0) /* * Find a board by its name */ static struct board_info *find_board(char *id) { struct board_info *ret; struct board_info *board; ret = NULL; for (board = boards; board->id != NULL; board++){ if (strcasecmp(id, board->id) == 0) { ret = board; break; } }; return ret; } /* * Find a board by its hardware ID */ static struct board_info *find_board_by_hwid(uint32_t hw_id) { struct board_info *board; for (board = boards; board->id != NULL; board++) { if (hw_id == board->hw_id) return board; }; return NULL; } /* * Find a Flash memory layout by its name */ static struct flash_layout *find_layout(char *id) { struct flash_layout *ret; struct flash_layout *l; ret = NULL; for (l = layouts; l->id != NULL; l++){ if (strcasecmp(id, l->id) == 0) { ret = l; break; } }; return ret; } /* * Display usage */ static void usage(int status) { FILE *stream = (status != EXIT_SUCCESS) ? 
stderr : stdout; fprintf(stream, "Usage: %s [OPTIONS...]\n", progname); fprintf(stream, "\n" "Options:\n" " -B <board> create image for the board specified with <board>\n" " -H <hwid> use hardware id specified with <hwid>\n" " -F <id> use flash layout specified with <id>\n" " -f <file> read firmware image from the file <file>\n" " -o <file> write output to the file <file>\n" " -i inspect given firmware file (requires -f)\n" " -x extract combined kernel and rootfs while inspecting (implies -i)\n" " -h show this screen\n" ); exit(status); } /* * Get file statistics */ static int get_file_stat(struct file_info *fdata) { struct stat st; int res; if (fdata->file_name == NULL) { return 0; } res = stat(fdata->file_name, &st); if (res){ ERRS("stat failed on %s", fdata->file_name); return res; } fdata->file_size = st.st_size; return 0; } /* * Read file into buffer */ static int read_to_buf(struct file_info *fdata, uint8_t *buf) { FILE *f; int ret = EXIT_FAILURE; f = fopen(fdata->file_name, "rb"); if (f == NULL) { ERRS("could not open \"%s\" for reading", fdata->file_name); goto out; } errno = 0; fread(buf, fdata->file_size, 1, f); if (errno != 0) { ERRS("unable to read from file \"%s\"", fdata->file_name); goto out_close; } ret = EXIT_SUCCESS; out_close: fclose(f); out: return ret; } /* * Check command line options */ static int check_options(void) { int ret; if (firmware_info.file_name == NULL) { ERR("no firmware image specified"); return -1; } ret = get_file_stat(&firmware_info); if (ret) return ret; if (inspect) return 0; if (board_id == NULL && opt_hw_id == NULL) { ERR("either board or hardware id must be specified"); return -1; } if (board_id) { board = find_board(board_id); if (board == NULL) { ERR("unknown/unsupported board id \"%s\"", board_id); return -1; } if (layout_id == NULL) { layout_id = board->layout_id; } hw_id = board->hw_id; } else { hw_id = strtoul(opt_hw_id, NULL, 0); board = find_board_by_hwid(hw_id); if (layout_id == NULL) { layout_id = board->layout_id; } } layout = find_layout(layout_id); if (layout == NULL) { ERR("unknown flash layout \"%s\"", layout_id); return -1; } firmware_len = firmware_info.file_size; if (firmware_info.file_size > layout->fw_max_len - sizeof (struct fw_header)) { ERR("firmware image is too big"); return -1; } if (ofname == NULL) { ERR("no output file specified"); return -1; } return 0; } /* * Fill in firmware header */ static void fill_header(uint8_t *buf) { struct fw_header *hdr = (struct fw_header *) buf; memset(hdr, 0, sizeof (struct fw_header)); hdr->hw_id = HOST_TO_LE32(hw_id); hdr->firmware_len = HOST_TO_LE32(firmware_len); hdr->flags = HOST_TO_LE32(HEADER_FLAGS); } /* * Compute firmware checksum */ static uint16_t checksum_fw(uint8_t *data, int len) { int i; int32_t checksum = 0; for (i = 0; i < len - 1; i += 2) { checksum += (data[i + 1] << 8) | data[i]; } if (i < len) { checksum += data[i]; } checksum = checksum + (checksum >> 16) + 0xffff; checksum = ~(checksum + (checksum >> 16)) & 0xffff; return (uint16_t) checksum; } /* * (De)obfuscate firmware using an XOR operation with a fixed length key */ static void xor_fw(uint8_t *data, int len) { int i; for (i = 0; i <= len; i++) { data[i] ^= key[board->key][i % KEY_LEN]; } } /* * Write firmware to file */ static int write_fw(uint8_t *data, int len) { FILE *f; int ret = EXIT_FAILURE; f = fopen(ofname, "wb"); if (f == NULL) { ERRS("could not open \"%s\" for writing", ofname); goto out; } errno = 0; fwrite(data, len, 1, f); if (errno) { ERRS("unable to write output file"); goto out_flush; } 
DBG("firmware file \"%s\" completed", ofname); ret = EXIT_SUCCESS; out_flush: fflush(f); fclose(f); if (ret != EXIT_SUCCESS) { unlink(ofname); } out: return ret; } /* * Build firmware file */ static int build_fw(void) { int buflen; uint8_t *buf, *p; int ret = EXIT_FAILURE; int writelen = 0; uint16_t checksum; buflen = layout->fw_max_len; buf = (uint8_t *) malloc(buflen); if (!buf) { ERR("no memory for buffer\n"); goto out; } memset(buf, 0xff, buflen); p = buf + sizeof (struct fw_header); ret = read_to_buf(&firmware_info, p); if (ret) { goto out_free_buf; } writelen = sizeof (struct fw_header) + firmware_len + 2; /* Fill in header */ fill_header(buf); /* Compute firmware checksum */ checksum = checksum_fw(buf + sizeof (struct fw_header), firmware_len); /* Cannot use network order function because checksum is not word-aligned */ buf[writelen - 1] = checksum >> 8; buf[writelen - 2] = checksum & 0xff; /* XOR obfuscate firmware */ xor_fw(buf + sizeof (struct fw_header), firmware_len + 2); /* Write firmware file */ ret = write_fw(buf, writelen); if (ret) { goto out_free_buf; } ret = EXIT_SUCCESS; out_free_buf: free(buf); out: return ret; } /* Helper functions to inspect_fw() representing different output formats */ static inline void inspect_fw_pstr(char *label, char *str) { printf("%-23s: %s\n", label, str); } static inline void inspect_fw_phex(char *label, uint32_t val) { printf("%-23s: 0x%08x\n", label, val); } static inline void inspect_fw_phexpost(char *label, uint32_t val, char *post) { printf("%-23s: 0x%08x (%s)\n", label, val, post); } static inline void inspect_fw_phexdef(char *label, uint32_t val, uint32_t defval) { printf("%-23s: 0x%08x ", label, val); if (val == defval) { printf("(== OpenWrt default)\n"); } else { printf("(OpenWrt default: 0x%08x)\n", defval); } } static inline void inspect_fw_phexexp(char *label, uint32_t val, uint32_t expval) { printf("%-23s: 0x%08x ", label, val); if (val == expval) { printf("(ok)\n"); } else { printf("(expected: 0x%08x)\n", expval); } } static inline void inspect_fw_phexdec(char *label, uint32_t val) { printf("%-23s: 0x%08x / %8u bytes\n", label, val, val); } static inline void inspect_fw_pchecksum(char *label, uint16_t val, uint16_t expval) { printf("%-23s: 0x%04x ", label, val); if (val == expval) { printf("(ok)\n"); } else { printf("(expected: 0x%04x)\n", expval); } } static int inspect_fw(void) { uint8_t *buf; struct fw_header *hdr; int ret = EXIT_FAILURE; uint16_t computed_checksum, file_checksum; buf = (uint8_t *) malloc(firmware_info.file_size); if (!buf) { ERR("no memory for buffer!\n"); goto out; } ret = read_to_buf(&firmware_info, buf); if (ret) { goto out_free_buf; } hdr = (struct fw_header *)buf; inspect_fw_pstr("File name", firmware_info.file_name); inspect_fw_phexdec("File size", firmware_info.file_size); printf("\n"); inspect_fw_phexdec("Header size", sizeof (struct fw_header)); board = find_board_by_hwid(LE32_TO_HOST(hdr->hw_id)); if (board) { layout = find_layout(board->layout_id); inspect_fw_phexpost("Hardware ID", LE32_TO_HOST( hdr->hw_id), board->id); } else { inspect_fw_phexpost("Hardware ID", LE32_TO_HOST(hdr->hw_id), "unknown"); } inspect_fw_phexdec("Firmware data length", LE32_TO_HOST(hdr->firmware_len)); inspect_fw_phexexp("Flags", LE32_TO_HOST(hdr->flags), HEADER_FLAGS); printf("\n"); /* XOR unobfuscate firmware */ xor_fw(buf + sizeof (struct fw_header), LE32_TO_HOST(hdr->firmware_len) + 2); /* Compute firmware checksum */ computed_checksum = checksum_fw(buf + sizeof (struct fw_header), LE32_TO_HOST(hdr->firmware_len)); 
/* Cannot use network order function because checksum is not word-aligned */ file_checksum = (buf[firmware_info.file_size - 1] << 8) | buf[firmware_info.file_size - 2]; inspect_fw_pchecksum("Firmware checksum", computed_checksum, file_checksum); /* Verify checksum */ if (computed_checksum != file_checksum) { ret = -1; ERR("checksums do not match"); goto out_free_buf; } printf("\n"); if (extract) { FILE *fp; char *filename; if (ofname == NULL) { filename = malloc(strlen(firmware_info.file_name) + 10); sprintf(filename, "%s-firmware", firmware_info.file_name); } else { filename = ofname; } printf("Extracting firmware to \"%s\"...\n", filename); fp = fopen(filename, "wb"); if (fp) { if (!fwrite(buf + sizeof (struct fw_header), LE32_TO_HOST(hdr->firmware_len), 1, fp)) { ERRS("error in fwrite(): %s", strerror(errno)); } fclose(fp); } else { ERRS("error in fopen(): %s", strerror(errno)); } if (ofname == NULL) { free(filename); } printf("\n"); } out_free_buf: free(buf); out: return ret; } /* * Main entry point */ int main(int argc, char *argv[]) { int ret = EXIT_FAILURE; progname = basename(argv[0]); int c; while ((c = getopt(argc, argv, "B:H:F:f:o:ixh")) != -1) { switch (c) { case 'B': board_id = optarg; break; case 'H': opt_hw_id = optarg; break; case 'F': layout_id = optarg; break; case 'f': firmware_info.file_name = optarg; break; case 'o': ofname = optarg; break; case 'i': inspect = 1; break; case 'x': inspect = 1; extract = 1; break; case 'h': usage(EXIT_SUCCESS); break; default: usage(EXIT_FAILURE); break; } } ret = check_options(); if (ret) { goto out; } if (!inspect) { ret = build_fw(); } else { ret = inspect_fw(); } out: return ret; }
gpl-2.0
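In mkporayfw.c above, checksum_fw() folds 16-bit little-endian words into a one's-complement style sum, and xor_fw() (de)obfuscates the payload with a repeating 15-byte key, so applying it twice restores the original bytes. A small stand-alone demonstration of both, using a made-up key rather than any of the board keys in the table above:

/* Stand-alone demo of the checksum and the XOR round trip; the key bytes
 * here are made up and do not correspond to any real board key. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEY_LEN 15

static const uint8_t demo_key[KEY_LEN] = {
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
        0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
};

/* Same folding as checksum_fw(): sum 16-bit LE words, then fold the carries. */
static uint16_t demo_checksum(const uint8_t *data, int len)
{
        int32_t sum = 0;
        int i;

        for (i = 0; i < len - 1; i += 2)
                sum += (data[i + 1] << 8) | data[i];
        if (i < len)
                sum += data[i];
        sum = sum + (sum >> 16) + 0xffff;
        return (uint16_t)(~(sum + (sum >> 16)) & 0xffff);
}

static void demo_xor(uint8_t *data, int len)
{
        for (int i = 0; i < len; i++)
                data[i] ^= demo_key[i % KEY_LEN];
}

int main(void)
{
        uint8_t buf[] = "example firmware payload";
        uint8_t orig[sizeof(buf)];

        memcpy(orig, buf, sizeof(buf));
        printf("checksum: 0x%04x\n", demo_checksum(buf, sizeof(buf)));
        demo_xor(buf, sizeof(buf));   /* obfuscate */
        demo_xor(buf, sizeof(buf));   /* de-obfuscate */
        printf("xor round trip %s\n",
               memcmp(buf, orig, sizeof(buf)) ? "FAILED" : "ok");
        return 0;
}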
radiohap/prd
drivers/media/platform/s5p-tv/mixer_drv.c
635
12411
/* * Samsung TV Mixer driver * * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. * * Tomasz Stanislawski, <t.stanislaws@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published * by the Free Software Foundiation. either version 2 of the License, * or (at your option) any later version */ #include "mixer.h" #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/fb.h> #include <linux/delay.h> #include <linux/pm_runtime.h> #include <linux/clk.h> MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>"); MODULE_DESCRIPTION("Samsung MIXER"); MODULE_LICENSE("GPL"); /* --------- DRIVER PARAMETERS ---------- */ static struct mxr_output_conf mxr_output_conf[] = { { .output_name = "S5P HDMI connector", .module_name = "s5p-hdmi", .cookie = 1, }, { .output_name = "S5P SDO connector", .module_name = "s5p-sdo", .cookie = 0, }, }; void mxr_get_mbus_fmt(struct mxr_device *mdev, struct v4l2_mbus_framefmt *mbus_fmt) { struct v4l2_subdev *sd; struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; int ret; mutex_lock(&mdev->mutex); sd = to_outsd(mdev); ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); *mbus_fmt = fmt.format; WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name); mutex_unlock(&mdev->mutex); } void mxr_streamer_get(struct mxr_device *mdev) { mutex_lock(&mdev->mutex); ++mdev->n_streamer; mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer); if (mdev->n_streamer == 1) { struct v4l2_subdev *sd = to_outsd(mdev); struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; struct v4l2_mbus_framefmt *mbus_fmt = &fmt.format; struct mxr_resources *res = &mdev->res; int ret; if (to_output(mdev)->cookie == 0) clk_set_parent(res->sclk_mixer, res->sclk_dac); else clk_set_parent(res->sclk_mixer, res->sclk_hdmi); mxr_reg_s_output(mdev, to_output(mdev)->cookie); ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name); ret = v4l2_subdev_call(sd, video, s_stream, 1); WARN(ret, "starting stream failed for output %s\n", sd->name); mxr_reg_set_mbus_fmt(mdev, mbus_fmt); mxr_reg_streamon(mdev); ret = mxr_reg_wait4vsync(mdev); WARN(ret, "failed to get vsync (%d) from output\n", ret); } mutex_unlock(&mdev->mutex); mxr_reg_dump(mdev); /* FIXME: what to do when streaming fails? 
*/ } void mxr_streamer_put(struct mxr_device *mdev) { mutex_lock(&mdev->mutex); --mdev->n_streamer; mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer); if (mdev->n_streamer == 0) { int ret; struct v4l2_subdev *sd = to_outsd(mdev); mxr_reg_streamoff(mdev); /* vsync applies Mixer setup */ ret = mxr_reg_wait4vsync(mdev); WARN(ret, "failed to get vsync (%d) from output\n", ret); ret = v4l2_subdev_call(sd, video, s_stream, 0); WARN(ret, "stopping stream failed for output %s\n", sd->name); } WARN(mdev->n_streamer < 0, "negative number of streamers (%d)\n", mdev->n_streamer); mutex_unlock(&mdev->mutex); mxr_reg_dump(mdev); } void mxr_output_get(struct mxr_device *mdev) { mutex_lock(&mdev->mutex); ++mdev->n_output; mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output); /* turn on auxiliary driver */ if (mdev->n_output == 1) v4l2_subdev_call(to_outsd(mdev), core, s_power, 1); mutex_unlock(&mdev->mutex); } void mxr_output_put(struct mxr_device *mdev) { mutex_lock(&mdev->mutex); --mdev->n_output; mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output); /* turn on auxiliary driver */ if (mdev->n_output == 0) v4l2_subdev_call(to_outsd(mdev), core, s_power, 0); WARN(mdev->n_output < 0, "negative number of output users (%d)\n", mdev->n_output); mutex_unlock(&mdev->mutex); } int mxr_power_get(struct mxr_device *mdev) { int ret = pm_runtime_get_sync(mdev->dev); /* returning 1 means that power is already enabled, * so zero success be returned */ if (IS_ERR_VALUE(ret)) return ret; return 0; } void mxr_power_put(struct mxr_device *mdev) { pm_runtime_put_sync(mdev->dev); } /* --------- RESOURCE MANAGEMENT -------------*/ static int mxr_acquire_plat_resources(struct mxr_device *mdev, struct platform_device *pdev) { struct resource *res; int ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr"); if (res == NULL) { mxr_err(mdev, "get memory resource failed.\n"); ret = -ENXIO; goto fail; } mdev->res.mxr_regs = ioremap(res->start, resource_size(res)); if (mdev->res.mxr_regs == NULL) { mxr_err(mdev, "register mapping failed.\n"); ret = -ENXIO; goto fail; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp"); if (res == NULL) { mxr_err(mdev, "get memory resource failed.\n"); ret = -ENXIO; goto fail_mxr_regs; } mdev->res.vp_regs = ioremap(res->start, resource_size(res)); if (mdev->res.vp_regs == NULL) { mxr_err(mdev, "register mapping failed.\n"); ret = -ENXIO; goto fail_mxr_regs; } res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq"); if (res == NULL) { mxr_err(mdev, "get interrupt resource failed.\n"); ret = -ENXIO; goto fail_vp_regs; } ret = request_irq(res->start, mxr_irq_handler, 0, "s5p-mixer", mdev); if (ret) { mxr_err(mdev, "request interrupt failed.\n"); goto fail_vp_regs; } mdev->res.irq = res->start; return 0; fail_vp_regs: iounmap(mdev->res.vp_regs); fail_mxr_regs: iounmap(mdev->res.mxr_regs); fail: return ret; } static void mxr_resource_clear_clocks(struct mxr_resources *res) { res->mixer = ERR_PTR(-EINVAL); res->vp = ERR_PTR(-EINVAL); res->sclk_mixer = ERR_PTR(-EINVAL); res->sclk_hdmi = ERR_PTR(-EINVAL); res->sclk_dac = ERR_PTR(-EINVAL); } static void mxr_release_plat_resources(struct mxr_device *mdev) { free_irq(mdev->res.irq, mdev); iounmap(mdev->res.vp_regs); iounmap(mdev->res.mxr_regs); } static void mxr_release_clocks(struct mxr_device *mdev) { struct mxr_resources *res = &mdev->res; if (!IS_ERR(res->sclk_dac)) clk_put(res->sclk_dac); if (!IS_ERR(res->sclk_hdmi)) clk_put(res->sclk_hdmi); if (!IS_ERR(res->sclk_mixer)) clk_put(res->sclk_mixer); if (!IS_ERR(res->vp)) 
clk_put(res->vp); if (!IS_ERR(res->mixer)) clk_put(res->mixer); } static int mxr_acquire_clocks(struct mxr_device *mdev) { struct mxr_resources *res = &mdev->res; struct device *dev = mdev->dev; mxr_resource_clear_clocks(res); res->mixer = clk_get(dev, "mixer"); if (IS_ERR(res->mixer)) { mxr_err(mdev, "failed to get clock 'mixer'\n"); goto fail; } res->vp = clk_get(dev, "vp"); if (IS_ERR(res->vp)) { mxr_err(mdev, "failed to get clock 'vp'\n"); goto fail; } res->sclk_mixer = clk_get(dev, "sclk_mixer"); if (IS_ERR(res->sclk_mixer)) { mxr_err(mdev, "failed to get clock 'sclk_mixer'\n"); goto fail; } res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); if (IS_ERR(res->sclk_hdmi)) { mxr_err(mdev, "failed to get clock 'sclk_hdmi'\n"); goto fail; } res->sclk_dac = clk_get(dev, "sclk_dac"); if (IS_ERR(res->sclk_dac)) { mxr_err(mdev, "failed to get clock 'sclk_dac'\n"); goto fail; } return 0; fail: mxr_release_clocks(mdev); return -ENODEV; } static int mxr_acquire_resources(struct mxr_device *mdev, struct platform_device *pdev) { int ret; ret = mxr_acquire_plat_resources(mdev, pdev); if (ret) goto fail; ret = mxr_acquire_clocks(mdev); if (ret) goto fail_plat; mxr_info(mdev, "resources acquired\n"); return 0; fail_plat: mxr_release_plat_resources(mdev); fail: mxr_err(mdev, "resources acquire failed\n"); return ret; } static void mxr_release_resources(struct mxr_device *mdev) { mxr_release_clocks(mdev); mxr_release_plat_resources(mdev); memset(&mdev->res, 0, sizeof(mdev->res)); mxr_resource_clear_clocks(&mdev->res); } static void mxr_release_layers(struct mxr_device *mdev) { int i; for (i = 0; i < ARRAY_SIZE(mdev->layer); ++i) if (mdev->layer[i]) mxr_layer_release(mdev->layer[i]); } static int mxr_acquire_layers(struct mxr_device *mdev, struct mxr_platform_data *pdata) { mdev->layer[0] = mxr_graph_layer_create(mdev, 0); mdev->layer[1] = mxr_graph_layer_create(mdev, 1); mdev->layer[2] = mxr_vp_layer_create(mdev, 0); if (!mdev->layer[0] || !mdev->layer[1] || !mdev->layer[2]) { mxr_err(mdev, "failed to acquire layers\n"); goto fail; } return 0; fail: mxr_release_layers(mdev); return -ENODEV; } /* ---------- POWER MANAGEMENT ----------- */ static int mxr_runtime_resume(struct device *dev) { struct mxr_device *mdev = to_mdev(dev); struct mxr_resources *res = &mdev->res; int ret; mxr_dbg(mdev, "resume - start\n"); mutex_lock(&mdev->mutex); /* turn clocks on */ ret = clk_prepare_enable(res->mixer); if (ret < 0) { dev_err(mdev->dev, "clk_prepare_enable(mixer) failed\n"); goto fail; } ret = clk_prepare_enable(res->vp); if (ret < 0) { dev_err(mdev->dev, "clk_prepare_enable(vp) failed\n"); goto fail_mixer; } ret = clk_prepare_enable(res->sclk_mixer); if (ret < 0) { dev_err(mdev->dev, "clk_prepare_enable(sclk_mixer) failed\n"); goto fail_vp; } /* apply default configuration */ mxr_reg_reset(mdev); mxr_dbg(mdev, "resume - finished\n"); mutex_unlock(&mdev->mutex); return 0; fail_vp: clk_disable_unprepare(res->vp); fail_mixer: clk_disable_unprepare(res->mixer); fail: mutex_unlock(&mdev->mutex); dev_err(mdev->dev, "resume failed\n"); return ret; } static int mxr_runtime_suspend(struct device *dev) { struct mxr_device *mdev = to_mdev(dev); struct mxr_resources *res = &mdev->res; mxr_dbg(mdev, "suspend - start\n"); mutex_lock(&mdev->mutex); /* turn clocks off */ clk_disable_unprepare(res->sclk_mixer); clk_disable_unprepare(res->vp); clk_disable_unprepare(res->mixer); mutex_unlock(&mdev->mutex); mxr_dbg(mdev, "suspend - finished\n"); return 0; } static const struct dev_pm_ops mxr_pm_ops = { .runtime_suspend = 
mxr_runtime_suspend, .runtime_resume = mxr_runtime_resume, }; /* --------- DRIVER INITIALIZATION ---------- */ static int mxr_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mxr_platform_data *pdata = dev->platform_data; struct mxr_device *mdev; int ret; /* mdev does not exist yet so no mxr_dbg is used */ dev_info(dev, "probe start\n"); mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) { dev_err(dev, "not enough memory.\n"); ret = -ENOMEM; goto fail; } /* setup pointer to master device */ mdev->dev = dev; mutex_init(&mdev->mutex); spin_lock_init(&mdev->reg_slock); init_waitqueue_head(&mdev->event_queue); /* acquire resources: regs, irqs, clocks, regulators */ ret = mxr_acquire_resources(mdev, pdev); if (ret) goto fail_mem; /* configure resources for video output */ ret = mxr_acquire_video(mdev, mxr_output_conf, ARRAY_SIZE(mxr_output_conf)); if (ret) goto fail_resources; /* configure layers */ ret = mxr_acquire_layers(mdev, pdata); if (ret) goto fail_video; pm_runtime_enable(dev); mxr_info(mdev, "probe successful\n"); return 0; fail_video: mxr_release_video(mdev); fail_resources: mxr_release_resources(mdev); fail_mem: kfree(mdev); fail: dev_info(dev, "probe failed\n"); return ret; } static int mxr_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mxr_device *mdev = to_mdev(dev); pm_runtime_disable(dev); mxr_release_layers(mdev); mxr_release_video(mdev); mxr_release_resources(mdev); kfree(mdev); dev_info(dev, "remove successful\n"); return 0; } static struct platform_driver mxr_driver __refdata = { .probe = mxr_probe, .remove = mxr_remove, .driver = { .name = MXR_DRIVER_NAME, .pm = &mxr_pm_ops, } }; static int __init mxr_init(void) { int i, ret; static const char banner[] __initconst = "Samsung TV Mixer driver, " "(c) 2010-2011 Samsung Electronics Co., Ltd.\n"; pr_info("%s\n", banner); /* Loading auxiliary modules */ for (i = 0; i < ARRAY_SIZE(mxr_output_conf); ++i) request_module(mxr_output_conf[i].module_name); ret = platform_driver_register(&mxr_driver); if (ret != 0) { pr_err("s5p-tv: registration of MIXER driver failed\n"); return -ENXIO; } return 0; } module_init(mxr_init); static void __exit mxr_exit(void) { platform_driver_unregister(&mxr_driver); } module_exit(mxr_exit);
gpl-2.0
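mxr_runtime_resume() above enables the mixer, vp and sclk_mixer clocks in a fixed order and unwinds through goto labels when any step fails, while mxr_runtime_suspend() disables them in reverse. A generic sketch of that acquire-in-order / release-in-reverse pattern, with placeholder resources instead of real clocks:

/* Generic sketch of the ordered-acquire / reverse-release pattern used by
 * the runtime PM callbacks above; the resource names are placeholders. */
#include <stdio.h>

static int enable(const char *name)   { printf("enable %s\n", name);  return 0; }
static void disable(const char *name) { printf("disable %s\n", name); }

static int demo_resume(void)
{
        int ret;

        ret = enable("mixer");
        if (ret)
                goto fail;
        ret = enable("vp");
        if (ret)
                goto fail_mixer;
        ret = enable("sclk_mixer");
        if (ret)
                goto fail_vp;
        return 0;

fail_vp:
        disable("vp");
fail_mixer:
        disable("mixer");
fail:
        return ret;
}

static void demo_suspend(void)
{
        /* reverse order of demo_resume() */
        disable("sclk_mixer");
        disable("vp");
        disable("mixer");
}

int main(void)
{
        if (!demo_resume())
                demo_suspend();
        return 0;
}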
Basler/linux-usb-zerocopy
fs/sysfs/symlink.c
2171
4901
/* * fs/sysfs/symlink.c - sysfs symlink implementation * * Copyright (c) 2001-3 Patrick Mochel * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * * This file is released under the GPLv2. * * Please see Documentation/filesystems/sysfs.txt for more information. */ #include <linux/fs.h> #include <linux/module.h> #include <linux/kobject.h> #include <linux/mutex.h> #include <linux/security.h> #include "sysfs.h" static int sysfs_do_create_link_sd(struct kernfs_node *parent, struct kobject *target_kobj, const char *name, int warn) { struct kernfs_node *kn, *target = NULL; BUG_ON(!name || !parent); /* * We don't own @target_kobj and it may be removed at any time. * Synchronize using sysfs_symlink_target_lock. See * sysfs_remove_dir() for details. */ spin_lock(&sysfs_symlink_target_lock); if (target_kobj->sd) { target = target_kobj->sd; kernfs_get(target); } spin_unlock(&sysfs_symlink_target_lock); if (!target) return -ENOENT; kn = kernfs_create_link(parent, name, target); kernfs_put(target); if (!IS_ERR(kn)) return 0; if (warn && PTR_ERR(kn) == -EEXIST) sysfs_warn_dup(parent, name); return PTR_ERR(kn); } /** * sysfs_create_link_sd - create symlink to a given object. * @kn: directory we're creating the link in. * @target: object we're pointing to. * @name: name of the symlink. */ int sysfs_create_link_sd(struct kernfs_node *kn, struct kobject *target, const char *name) { return sysfs_do_create_link_sd(kn, target, name, 1); } static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target, const char *name, int warn) { struct kernfs_node *parent = NULL; if (!kobj) parent = sysfs_root_kn; else parent = kobj->sd; if (!parent) return -EFAULT; return sysfs_do_create_link_sd(parent, target, name, warn); } /** * sysfs_create_link - create symlink between two objects. * @kobj: object whose directory we're creating the link in. * @target: object we're pointing to. * @name: name of the symlink. */ int sysfs_create_link(struct kobject *kobj, struct kobject *target, const char *name) { return sysfs_do_create_link(kobj, target, name, 1); } EXPORT_SYMBOL_GPL(sysfs_create_link); /** * sysfs_create_link_nowarn - create symlink between two objects. * @kobj: object whose directory we're creating the link in. * @target: object we're pointing to. * @name: name of the symlink. * * This function does the same as sysfs_create_link(), but it * doesn't warn if the link already exists. */ int sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target, const char *name) { return sysfs_do_create_link(kobj, target, name, 0); } /** * sysfs_delete_link - remove symlink in object's directory. * @kobj: object we're acting for. * @targ: object we're pointing to. * @name: name of the symlink to remove. * * Unlike sysfs_remove_link sysfs_delete_link has enough information * to successfully delete symlinks in tagged directories. */ void sysfs_delete_link(struct kobject *kobj, struct kobject *targ, const char *name) { const void *ns = NULL; /* * We don't own @target and it may be removed at any time. * Synchronize using sysfs_symlink_target_lock. See * sysfs_remove_dir() for details. */ spin_lock(&sysfs_symlink_target_lock); if (targ->sd && kernfs_ns_enabled(kobj->sd)) ns = targ->sd->ns; spin_unlock(&sysfs_symlink_target_lock); kernfs_remove_by_name_ns(kobj->sd, name, ns); } /** * sysfs_remove_link - remove symlink in object's directory. * @kobj: object we're acting for. * @name: name of the symlink to remove. 
*/ void sysfs_remove_link(struct kobject *kobj, const char *name) { struct kernfs_node *parent = NULL; if (!kobj) parent = sysfs_root_kn; else parent = kobj->sd; kernfs_remove_by_name(parent, name); } EXPORT_SYMBOL_GPL(sysfs_remove_link); /** * sysfs_rename_link_ns - rename symlink in object's directory. * @kobj: object we're acting for. * @targ: object we're pointing to. * @old: previous name of the symlink. * @new: new name of the symlink. * @new_ns: new namespace of the symlink. * * A helper function for the common rename symlink idiom. */ int sysfs_rename_link_ns(struct kobject *kobj, struct kobject *targ, const char *old, const char *new, const void *new_ns) { struct kernfs_node *parent, *kn = NULL; const void *old_ns = NULL; int result; if (!kobj) parent = sysfs_root_kn; else parent = kobj->sd; if (targ->sd) old_ns = targ->sd->ns; result = -ENOENT; kn = kernfs_find_and_get_ns(parent, old, old_ns); if (!kn) goto out; result = -EINVAL; if (kernfs_type(kn) != KERNFS_LINK) goto out; if (kn->symlink.target_kn->priv != targ) goto out; result = kernfs_rename_ns(kn, parent, new, new_ns); out: kernfs_put(kn); return result; } EXPORT_SYMBOL_GPL(sysfs_rename_link_ns);
gpl-2.0
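The record above carries the sysfs symlink API (sysfs_create_link(), sysfs_create_link_nowarn(), sysfs_remove_link() and the _ns/_sd variants). A minimal usage sketch follows; only the sysfs calls come from the file above, while the example_* helpers and the "peer" link name are hypothetical.

/* Minimal sketch, not from the file above: a hypothetical driver creating
 * and removing a "peer" symlink in its own sysfs directory. */
#include <linux/kobject.h>
#include <linux/sysfs.h>

static int example_link_peer(struct kobject *self, struct kobject *peer)
{
	/* creates <self>/peer pointing at peer's directory;
	 * warns and returns -EEXIST if the name is already taken */
	return sysfs_create_link(self, peer, "peer");
}

static void example_unlink_peer(struct kobject *self)
{
	sysfs_remove_link(self, "peer");
}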
septazzz/Lonas_KL_GT-I9300
tools/perf/util/ui/browser.c
2939
7800
#include "libslang.h" #include "ui.h" #include <linux/compiler.h> #include <linux/list.h> #include <linux/rbtree.h> #include <stdlib.h> #include <sys/ttydefaults.h> #include "browser.h" #include "helpline.h" #include "../color.h" #include "../util.h" #include <stdio.h> static int ui_browser__percent_color(double percent, bool current) { if (current) return HE_COLORSET_SELECTED; if (percent >= MIN_RED) return HE_COLORSET_TOP; if (percent >= MIN_GREEN) return HE_COLORSET_MEDIUM; return HE_COLORSET_NORMAL; } void ui_browser__set_color(struct ui_browser *self __used, int color) { SLsmg_set_color(color); } void ui_browser__set_percent_color(struct ui_browser *self, double percent, bool current) { int color = ui_browser__percent_color(percent, current); ui_browser__set_color(self, color); } void ui_browser__gotorc(struct ui_browser *self, int y, int x) { SLsmg_gotorc(self->y + y, self->x + x); } void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence) { struct list_head *head = self->entries; struct list_head *pos; switch (whence) { case SEEK_SET: pos = head->next; break; case SEEK_CUR: pos = self->top; break; case SEEK_END: pos = head->prev; break; default: return; } if (offset > 0) { while (offset-- != 0) pos = pos->next; } else { while (offset++ != 0) pos = pos->prev; } self->top = pos; } void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence) { struct rb_root *root = self->entries; struct rb_node *nd; switch (whence) { case SEEK_SET: nd = rb_first(root); break; case SEEK_CUR: nd = self->top; break; case SEEK_END: nd = rb_last(root); break; default: return; } if (offset > 0) { while (offset-- != 0) nd = rb_next(nd); } else { while (offset++ != 0) nd = rb_prev(nd); } self->top = nd; } unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self) { struct rb_node *nd; int row = 0; if (self->top == NULL) self->top = rb_first(self->entries); nd = self->top; while (nd != NULL) { ui_browser__gotorc(self, row, 0); self->write(self, nd, row); if (++row == self->height) break; nd = rb_next(nd); } return row; } bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row) { return self->top_idx + row == self->index; } void ui_browser__refresh_dimensions(struct ui_browser *self) { int cols, rows; newtGetScreenSize(&cols, &rows); self->width = cols - 1; self->height = rows - 2; self->y = 1; self->x = 0; } void ui_browser__reset_index(struct ui_browser *self) { self->index = self->top_idx = 0; self->seek(self, 0, SEEK_SET); } void ui_browser__add_exit_key(struct ui_browser *self, int key) { newtFormAddHotKey(self->form, key); } void ui_browser__add_exit_keys(struct ui_browser *self, int keys[]) { int i = 0; while (keys[i] && i < 64) { ui_browser__add_exit_key(self, keys[i]); ++i; } } void __ui_browser__show_title(struct ui_browser *browser, const char *title) { SLsmg_gotorc(0, 0); ui_browser__set_color(browser, NEWT_COLORSET_ROOT); slsmg_write_nstring(title, browser->width); } void ui_browser__show_title(struct ui_browser *browser, const char *title) { pthread_mutex_lock(&ui__lock); __ui_browser__show_title(browser, title); pthread_mutex_unlock(&ui__lock); } int ui_browser__show(struct ui_browser *self, const char *title, const char *helpline, ...) 
{ va_list ap; int keys[] = { NEWT_KEY_UP, NEWT_KEY_DOWN, NEWT_KEY_PGUP, NEWT_KEY_PGDN, NEWT_KEY_HOME, NEWT_KEY_END, ' ', NEWT_KEY_LEFT, NEWT_KEY_ESCAPE, 'q', CTRL('c'), 0 }; if (self->form != NULL) newtFormDestroy(self->form); ui_browser__refresh_dimensions(self); self->form = newtForm(NULL, NULL, 0); if (self->form == NULL) return -1; self->sb = newtVerticalScrollbar(self->width, 1, self->height, HE_COLORSET_NORMAL, HE_COLORSET_SELECTED); if (self->sb == NULL) return -1; pthread_mutex_lock(&ui__lock); __ui_browser__show_title(self, title); ui_browser__add_exit_keys(self, keys); newtFormAddComponent(self->form, self->sb); va_start(ap, helpline); ui_helpline__vpush(helpline, ap); va_end(ap); pthread_mutex_unlock(&ui__lock); return 0; } void ui_browser__hide(struct ui_browser *self) { pthread_mutex_lock(&ui__lock); newtFormDestroy(self->form); self->form = NULL; ui_helpline__pop(); pthread_mutex_unlock(&ui__lock); } int ui_browser__refresh(struct ui_browser *self) { int row; pthread_mutex_lock(&ui__lock); newtScrollbarSet(self->sb, self->index, self->nr_entries - 1); row = self->refresh(self); ui_browser__set_color(self, HE_COLORSET_NORMAL); SLsmg_fill_region(self->y + row, self->x, self->height - row, self->width, ' '); pthread_mutex_unlock(&ui__lock); return 0; } int ui_browser__run(struct ui_browser *self) { struct newtExitStruct es; if (ui_browser__refresh(self) < 0) return -1; while (1) { off_t offset; newtFormRun(self->form, &es); if (es.reason != NEWT_EXIT_HOTKEY) break; switch (es.u.key) { case NEWT_KEY_DOWN: if (self->index == self->nr_entries - 1) break; ++self->index; if (self->index == self->top_idx + self->height) { ++self->top_idx; self->seek(self, +1, SEEK_CUR); } break; case NEWT_KEY_UP: if (self->index == 0) break; --self->index; if (self->index < self->top_idx) { --self->top_idx; self->seek(self, -1, SEEK_CUR); } break; case NEWT_KEY_PGDN: case ' ': if (self->top_idx + self->height > self->nr_entries - 1) break; offset = self->height; if (self->index + offset > self->nr_entries - 1) offset = self->nr_entries - 1 - self->index; self->index += offset; self->top_idx += offset; self->seek(self, +offset, SEEK_CUR); break; case NEWT_KEY_PGUP: if (self->top_idx == 0) break; if (self->top_idx < self->height) offset = self->top_idx; else offset = self->height; self->index -= offset; self->top_idx -= offset; self->seek(self, -offset, SEEK_CUR); break; case NEWT_KEY_HOME: ui_browser__reset_index(self); break; case NEWT_KEY_END: offset = self->height - 1; if (offset >= self->nr_entries) offset = self->nr_entries - 1; self->index = self->nr_entries - 1; self->top_idx = self->index - offset; self->seek(self, -offset, SEEK_END); break; default: return es.u.key; } if (ui_browser__refresh(self) < 0) return -1; } return -1; } unsigned int ui_browser__list_head_refresh(struct ui_browser *self) { struct list_head *pos; struct list_head *head = self->entries; int row = 0; if (self->top == NULL || self->top == self->entries) self->top = head->next; pos = self->top; list_for_each_from(pos, head) { ui_browser__gotorc(self, row, 0); self->write(self, pos, row); if (++row == self->height) break; } return row; } static struct newtPercentTreeColors { const char *topColorFg, *topColorBg; const char *mediumColorFg, *mediumColorBg; const char *normalColorFg, *normalColorBg; const char *selColorFg, *selColorBg; const char *codeColorFg, *codeColorBg; } defaultPercentTreeColors = { "red", "lightgray", "green", "lightgray", "black", "lightgray", "lightgray", "magenta", "blue", "lightgray", }; void 
ui_browser__init(void) { struct newtPercentTreeColors *c = &defaultPercentTreeColors; sltt_set_color(HE_COLORSET_TOP, NULL, c->topColorFg, c->topColorBg); sltt_set_color(HE_COLORSET_MEDIUM, NULL, c->mediumColorFg, c->mediumColorBg); sltt_set_color(HE_COLORSET_NORMAL, NULL, c->normalColorFg, c->normalColorBg); sltt_set_color(HE_COLORSET_SELECTED, NULL, c->selColorFg, c->selColorBg); sltt_set_color(HE_COLORSET_CODE, NULL, c->codeColorFg, c->codeColorBg); }
gpl-2.0
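As a rough illustration of how the callbacks in the record above fit together, here is a hedged sketch of driving a list_head-backed browser. struct ui_browser itself is defined in browser.h (not shown in this record), so the field names are inferred from the usage above, and my_write_entry, my_list and my_nr_entries are hypothetical.

/* Sketch only: wiring ui_browser over a list_head, assuming the fields and
 * callbacks visible in the file above (entries, seek, refresh, write,
 * nr_entries). */
static void my_write_entry(struct ui_browser *self, void *entry, int row)
{
	/* render one row of the entry, e.g. via slsmg_write_nstring() */
}

static int browse_my_list(struct list_head *my_list, unsigned int my_nr_entries)
{
	struct ui_browser menu = {
		.entries    = my_list,
		.seek       = ui_browser__list_head_seek,
		.refresh    = ui_browser__list_head_refresh,
		.write      = my_write_entry,
		.nr_entries = my_nr_entries,
	};
	int key;

	if (ui_browser__show(&menu, "my entries", "Press 'q' to exit") < 0)
		return -1;
	key = ui_browser__run(&menu);
	ui_browser__hide(&menu);
	return key;
}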
Serranove/android_kernel_samsung_serranovelte
net/rds/ib_cm.c
3451
23780
/* * Copyright (c) 2006 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/ratelimit.h> #include "rds.h" #include "ib.h" static char *rds_ib_event_type_strings[] = { #define RDS_IB_EVENT_STRING(foo) \ [IB_EVENT_##foo] = __stringify(IB_EVENT_##foo) RDS_IB_EVENT_STRING(CQ_ERR), RDS_IB_EVENT_STRING(QP_FATAL), RDS_IB_EVENT_STRING(QP_REQ_ERR), RDS_IB_EVENT_STRING(QP_ACCESS_ERR), RDS_IB_EVENT_STRING(COMM_EST), RDS_IB_EVENT_STRING(SQ_DRAINED), RDS_IB_EVENT_STRING(PATH_MIG), RDS_IB_EVENT_STRING(PATH_MIG_ERR), RDS_IB_EVENT_STRING(DEVICE_FATAL), RDS_IB_EVENT_STRING(PORT_ACTIVE), RDS_IB_EVENT_STRING(PORT_ERR), RDS_IB_EVENT_STRING(LID_CHANGE), RDS_IB_EVENT_STRING(PKEY_CHANGE), RDS_IB_EVENT_STRING(SM_CHANGE), RDS_IB_EVENT_STRING(SRQ_ERR), RDS_IB_EVENT_STRING(SRQ_LIMIT_REACHED), RDS_IB_EVENT_STRING(QP_LAST_WQE_REACHED), RDS_IB_EVENT_STRING(CLIENT_REREGISTER), #undef RDS_IB_EVENT_STRING }; static char *rds_ib_event_str(enum ib_event_type type) { return rds_str_array(rds_ib_event_type_strings, ARRAY_SIZE(rds_ib_event_type_strings), type); }; /* * Set the selected protocol version */ static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version) { conn->c_version = version; } /* * Set up flow control */ static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits) { struct rds_ib_connection *ic = conn->c_transport_data; if (rds_ib_sysctl_flow_control && credits != 0) { /* We're doing flow control */ ic->i_flowctl = 1; rds_ib_send_add_credits(conn, credits); } else { ic->i_flowctl = 0; } } /* * Tune RNR behavior. Without flow control, we use a rather * low timeout, but not the absolute minimum - this should * be tunable. * * We already set the RNR retry count to 7 (which is the * smallest infinite number :-) above. * If flow control is off, we want to change this back to 0 * so that we learn quickly when our credit accounting is * buggy. * * Caller passes in a qp_attr pointer - don't waste stack spacv * by allocation this twice. 
*/ static void rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr) { int ret; attr->min_rnr_timer = IB_RNR_TIMER_000_32; ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER); if (ret) printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret); } /* * Connection established. * We get here for both outgoing and incoming connection. */ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event) { const struct rds_ib_connect_private *dp = NULL; struct rds_ib_connection *ic = conn->c_transport_data; struct ib_qp_attr qp_attr; int err; if (event->param.conn.private_data_len >= sizeof(*dp)) { dp = event->param.conn.private_data; /* make sure it isn't empty data */ if (dp->dp_protocol_major) { rds_ib_set_protocol(conn, RDS_PROTOCOL(dp->dp_protocol_major, dp->dp_protocol_minor)); rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit)); } } if (conn->c_version < RDS_PROTOCOL(3,1)) { printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed," " no longer supported\n", &conn->c_faddr, RDS_PROTOCOL_MAJOR(conn->c_version), RDS_PROTOCOL_MINOR(conn->c_version)); rds_conn_destroy(conn); return; } else { printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n", &conn->c_faddr, RDS_PROTOCOL_MAJOR(conn->c_version), RDS_PROTOCOL_MINOR(conn->c_version), ic->i_flowctl ? ", flow control" : ""); } /* * Init rings and fill recv. this needs to wait until protocol negotiation * is complete, since ring layout is different from 3.0 to 3.1. */ rds_ib_send_init_ring(ic); rds_ib_recv_init_ring(ic); /* Post receive buffers - as a side effect, this will update * the posted credit count. */ rds_ib_recv_refill(conn, 1); /* Tune RNR behavior */ rds_ib_tune_rnr(ic, &qp_attr); qp_attr.qp_state = IB_QPS_RTS; err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE); if (err) printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err); /* update ib_device with this local ipaddr */ err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr); if (err) printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n", err); /* If the peer gave us the last packet it saw, process this as if * we had received a regular ACK. 
*/ if (dp && dp->dp_ack_seq) rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL); rds_connect_complete(conn); } static void rds_ib_cm_fill_conn_param(struct rds_connection *conn, struct rdma_conn_param *conn_param, struct rds_ib_connect_private *dp, u32 protocol_version, u32 max_responder_resources, u32 max_initiator_depth) { struct rds_ib_connection *ic = conn->c_transport_data; struct rds_ib_device *rds_ibdev = ic->rds_ibdev; memset(conn_param, 0, sizeof(struct rdma_conn_param)); conn_param->responder_resources = min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources); conn_param->initiator_depth = min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth); conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7); conn_param->rnr_retry_count = 7; if (dp) { memset(dp, 0, sizeof(*dp)); dp->dp_saddr = conn->c_laddr; dp->dp_daddr = conn->c_faddr; dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version); dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version); dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS); dp->dp_ack_seq = rds_ib_piggyb_ack(ic); /* Advertise flow control */ if (ic->i_flowctl) { unsigned int credits; credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)); dp->dp_credit = cpu_to_be32(credits); atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits); } conn_param->private_data = dp; conn_param->private_data_len = sizeof(*dp); } } static void rds_ib_cq_event_handler(struct ib_event *event, void *data) { rdsdebug("event %u (%s) data %p\n", event->event, rds_ib_event_str(event->event), data); } static void rds_ib_qp_event_handler(struct ib_event *event, void *data) { struct rds_connection *conn = data; struct rds_ib_connection *ic = conn->c_transport_data; rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event, rds_ib_event_str(event->event)); switch (event->event) { case IB_EVENT_COMM_EST: rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); break; default: rdsdebug("Fatal QP Event %u (%s) " "- connection %pI4->%pI4, reconnecting\n", event->event, rds_ib_event_str(event->event), &conn->c_laddr, &conn->c_faddr); rds_conn_drop(conn); break; } } /* * This needs to be very careful to not leave IS_ERR pointers around for * cleanup to trip over. */ static int rds_ib_setup_qp(struct rds_connection *conn) { struct rds_ib_connection *ic = conn->c_transport_data; struct ib_device *dev = ic->i_cm_id->device; struct ib_qp_init_attr attr; struct rds_ib_device *rds_ibdev; int ret; /* * It's normal to see a null device if an incoming connection races * with device removal, so we don't print a warning. 
*/ rds_ibdev = rds_ib_get_client_data(dev); if (!rds_ibdev) return -EOPNOTSUPP; /* add the conn now so that connection establishment has the dev */ rds_ib_add_conn(rds_ibdev, conn); if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1) rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1); if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1) rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1); /* Protection domain and memory range */ ic->i_pd = rds_ibdev->pd; ic->i_mr = rds_ibdev->mr; ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler, rds_ib_cq_event_handler, conn, ic->i_send_ring.w_nr + 1, 0); if (IS_ERR(ic->i_send_cq)) { ret = PTR_ERR(ic->i_send_cq); ic->i_send_cq = NULL; rdsdebug("ib_create_cq send failed: %d\n", ret); goto out; } ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler, rds_ib_cq_event_handler, conn, ic->i_recv_ring.w_nr, 0); if (IS_ERR(ic->i_recv_cq)) { ret = PTR_ERR(ic->i_recv_cq); ic->i_recv_cq = NULL; rdsdebug("ib_create_cq recv failed: %d\n", ret); goto out; } ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP); if (ret) { rdsdebug("ib_req_notify_cq send failed: %d\n", ret); goto out; } ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); if (ret) { rdsdebug("ib_req_notify_cq recv failed: %d\n", ret); goto out; } /* XXX negotiate max send/recv with remote? */ memset(&attr, 0, sizeof(attr)); attr.event_handler = rds_ib_qp_event_handler; attr.qp_context = conn; /* + 1 to allow for the single ack message */ attr.cap.max_send_wr = ic->i_send_ring.w_nr + 1; attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1; attr.cap.max_send_sge = rds_ibdev->max_sge; attr.cap.max_recv_sge = RDS_IB_RECV_SGE; attr.sq_sig_type = IB_SIGNAL_REQ_WR; attr.qp_type = IB_QPT_RC; attr.send_cq = ic->i_send_cq; attr.recv_cq = ic->i_recv_cq; /* * XXX this can fail if max_*_wr is too large? Are we supposed * to back off until we get a value that the hardware can support? 
*/ ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr); if (ret) { rdsdebug("rdma_create_qp failed: %d\n", ret); goto out; } ic->i_send_hdrs = ib_dma_alloc_coherent(dev, ic->i_send_ring.w_nr * sizeof(struct rds_header), &ic->i_send_hdrs_dma, GFP_KERNEL); if (!ic->i_send_hdrs) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent send failed\n"); goto out; } ic->i_recv_hdrs = ib_dma_alloc_coherent(dev, ic->i_recv_ring.w_nr * sizeof(struct rds_header), &ic->i_recv_hdrs_dma, GFP_KERNEL); if (!ic->i_recv_hdrs) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent recv failed\n"); goto out; } ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header), &ic->i_ack_dma, GFP_KERNEL); if (!ic->i_ack) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent ack failed\n"); goto out; } ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work), ibdev_to_node(dev)); if (!ic->i_sends) { ret = -ENOMEM; rdsdebug("send allocation failed\n"); goto out; } ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work), ibdev_to_node(dev)); if (!ic->i_recvs) { ret = -ENOMEM; rdsdebug("recv allocation failed\n"); goto out; } rds_ib_recv_init_ack(ic); rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr, ic->i_send_cq, ic->i_recv_cq); out: rds_ib_dev_put(rds_ibdev); return ret; } static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event) { const struct rds_ib_connect_private *dp = event->param.conn.private_data; u16 common; u32 version = 0; /* * rdma_cm private data is odd - when there is any private data in the * request, we will be given a pretty large buffer without telling us the * original size. The only way to tell the difference is by looking at * the contents, which are initialized to zero. * If the protocol version fields aren't set, this is a connection attempt * from an older version. This could could be 3.0 or 2.0 - we can't tell. * We really should have changed this for OFED 1.3 :-( */ /* Be paranoid. RDS always has privdata */ if (!event->param.conn.private_data_len) { printk(KERN_NOTICE "RDS incoming connection has no private data, " "rejecting\n"); return 0; } /* Even if len is crap *now* I still want to check it. -ASG */ if (event->param.conn.private_data_len < sizeof (*dp) || dp->dp_protocol_major == 0) return RDS_PROTOCOL_3_0; common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS; if (dp->dp_protocol_major == 3 && common) { version = RDS_PROTOCOL_3_0; while ((common >>= 1) != 0) version++; } else printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n", &dp->dp_saddr, dp->dp_protocol_major, dp->dp_protocol_minor); return version; } int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { __be64 lguid = cm_id->route.path_rec->sgid.global.interface_id; __be64 fguid = cm_id->route.path_rec->dgid.global.interface_id; const struct rds_ib_connect_private *dp = event->param.conn.private_data; struct rds_ib_connect_private dp_rep; struct rds_connection *conn = NULL; struct rds_ib_connection *ic = NULL; struct rdma_conn_param conn_param; u32 version; int err = 1, destroy = 1; /* Check whether the remote protocol version matches ours. 
*/ version = rds_ib_protocol_compatible(event); if (!version) goto out; rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid " "0x%llx\n", &dp->dp_saddr, &dp->dp_daddr, RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version), (unsigned long long)be64_to_cpu(lguid), (unsigned long long)be64_to_cpu(fguid)); conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_ib_transport, GFP_KERNEL); if (IS_ERR(conn)) { rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn)); conn = NULL; goto out; } /* * The connection request may occur while the * previous connection exist, e.g. in case of failover. * But as connections may be initiated simultaneously * by both hosts, we have a random backoff mechanism - * see the comment above rds_queue_reconnect() */ mutex_lock(&conn->c_cm_lock); if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) { if (rds_conn_state(conn) == RDS_CONN_UP) { rdsdebug("incoming connect while connecting\n"); rds_conn_drop(conn); rds_ib_stats_inc(s_ib_listen_closed_stale); } else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) { /* Wait and see - our connect may still be succeeding */ rds_ib_stats_inc(s_ib_connect_raced); } goto out; } ic = conn->c_transport_data; rds_ib_set_protocol(conn, version); rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit)); /* If the peer gave us the last packet it saw, process this as if * we had received a regular ACK. */ if (dp->dp_ack_seq) rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL); BUG_ON(cm_id->context); BUG_ON(ic->i_cm_id); ic->i_cm_id = cm_id; cm_id->context = conn; /* We got halfway through setting up the ib_connection, if we * fail now, we have to take the long route out of this mess. */ destroy = 0; err = rds_ib_setup_qp(conn); if (err) { rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err); goto out; } rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version, event->param.conn.responder_resources, event->param.conn.initiator_depth); /* rdma_accept() calls rdma_reject() internally if it fails */ err = rdma_accept(cm_id, &conn_param); if (err) rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err); out: if (conn) mutex_unlock(&conn->c_cm_lock); if (err) rdma_reject(cm_id, NULL, 0); return destroy; } int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id) { struct rds_connection *conn = cm_id->context; struct rds_ib_connection *ic = conn->c_transport_data; struct rdma_conn_param conn_param; struct rds_ib_connect_private dp; int ret; /* If the peer doesn't do protocol negotiation, we must * default to RDSv3.0 */ rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0); ic->i_flowctl = rds_ib_sysctl_flow_control; /* advertise flow control */ ret = rds_ib_setup_qp(conn); if (ret) { rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret); goto out; } rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION, UINT_MAX, UINT_MAX); ret = rdma_connect(cm_id, &conn_param); if (ret) rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret); out: /* Beware - returning non-zero tells the rdma_cm to destroy * the cm_id. We should certainly not do it as long as we still * "own" the cm_id. 
*/ if (ret) { if (ic->i_cm_id == cm_id) ret = 0; } return ret; } int rds_ib_conn_connect(struct rds_connection *conn) { struct rds_ib_connection *ic = conn->c_transport_data; struct sockaddr_in src, dest; int ret; /* XXX I wonder what affect the port space has */ /* delegate cm event handler to rdma_transport */ ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(ic->i_cm_id)) { ret = PTR_ERR(ic->i_cm_id); ic->i_cm_id = NULL; rdsdebug("rdma_create_id() failed: %d\n", ret); goto out; } rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn); src.sin_family = AF_INET; src.sin_addr.s_addr = (__force u32)conn->c_laddr; src.sin_port = (__force u16)htons(0); dest.sin_family = AF_INET; dest.sin_addr.s_addr = (__force u32)conn->c_faddr; dest.sin_port = (__force u16)htons(RDS_PORT); ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src, (struct sockaddr *)&dest, RDS_RDMA_RESOLVE_TIMEOUT_MS); if (ret) { rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id, ret); rdma_destroy_id(ic->i_cm_id); ic->i_cm_id = NULL; } out: return ret; } /* * This is so careful about only cleaning up resources that were built up * so that it can be called at any point during startup. In fact it * can be called multiple times for a given connection. */ void rds_ib_conn_shutdown(struct rds_connection *conn) { struct rds_ib_connection *ic = conn->c_transport_data; int err = 0; rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id, ic->i_pd, ic->i_send_cq, ic->i_recv_cq, ic->i_cm_id ? ic->i_cm_id->qp : NULL); if (ic->i_cm_id) { struct ib_device *dev = ic->i_cm_id->device; rdsdebug("disconnecting cm %p\n", ic->i_cm_id); err = rdma_disconnect(ic->i_cm_id); if (err) { /* Actually this may happen quite frequently, when * an outgoing connect raced with an incoming connect. */ rdsdebug("failed to disconnect, cm: %p err %d\n", ic->i_cm_id, err); } /* * We want to wait for tx and rx completion to finish * before we tear down the connection, but we have to be * careful not to get stuck waiting on a send ring that * only has unsignaled sends in it. We've shutdown new * sends before getting here so by waiting for signaled * sends to complete we're ensured that there will be no * more tx processing. */ wait_event(rds_ib_ring_empty_wait, rds_ib_ring_empty(&ic->i_recv_ring) && (atomic_read(&ic->i_signaled_sends) == 0)); tasklet_kill(&ic->i_recv_tasklet); if (ic->i_send_hdrs) ib_dma_free_coherent(dev, ic->i_send_ring.w_nr * sizeof(struct rds_header), ic->i_send_hdrs, ic->i_send_hdrs_dma); if (ic->i_recv_hdrs) ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr * sizeof(struct rds_header), ic->i_recv_hdrs, ic->i_recv_hdrs_dma); if (ic->i_ack) ib_dma_free_coherent(dev, sizeof(struct rds_header), ic->i_ack, ic->i_ack_dma); if (ic->i_sends) rds_ib_send_clear_ring(ic); if (ic->i_recvs) rds_ib_recv_clear_ring(ic); if (ic->i_cm_id->qp) rdma_destroy_qp(ic->i_cm_id); if (ic->i_send_cq) ib_destroy_cq(ic->i_send_cq); if (ic->i_recv_cq) ib_destroy_cq(ic->i_recv_cq); rdma_destroy_id(ic->i_cm_id); /* * Move connection back to the nodev list. 
*/ if (ic->rds_ibdev) rds_ib_remove_conn(ic->rds_ibdev, conn); ic->i_cm_id = NULL; ic->i_pd = NULL; ic->i_mr = NULL; ic->i_send_cq = NULL; ic->i_recv_cq = NULL; ic->i_send_hdrs = NULL; ic->i_recv_hdrs = NULL; ic->i_ack = NULL; } BUG_ON(ic->rds_ibdev); /* Clear pending transmit */ if (ic->i_data_op) { struct rds_message *rm; rm = container_of(ic->i_data_op, struct rds_message, data); rds_message_put(rm); ic->i_data_op = NULL; } /* Clear the ACK state */ clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); #ifdef KERNEL_HAS_ATOMIC64 atomic64_set(&ic->i_ack_next, 0); #else ic->i_ack_next = 0; #endif ic->i_ack_recv = 0; /* Clear flow control state */ ic->i_flowctl = 0; atomic_set(&ic->i_credits, 0); rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr); rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr); if (ic->i_ibinc) { rds_inc_put(&ic->i_ibinc->ii_inc); ic->i_ibinc = NULL; } vfree(ic->i_sends); ic->i_sends = NULL; vfree(ic->i_recvs); ic->i_recvs = NULL; } int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp) { struct rds_ib_connection *ic; unsigned long flags; int ret; /* XXX too lazy? */ ic = kzalloc(sizeof(struct rds_ib_connection), gfp); if (!ic) return -ENOMEM; ret = rds_ib_recv_alloc_caches(ic); if (ret) { kfree(ic); return ret; } INIT_LIST_HEAD(&ic->ib_node); tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn, (unsigned long) ic); mutex_init(&ic->i_recv_mutex); #ifndef KERNEL_HAS_ATOMIC64 spin_lock_init(&ic->i_ack_lock); #endif atomic_set(&ic->i_signaled_sends, 0); /* * rds_ib_conn_shutdown() waits for these to be emptied so they * must be initialized before it can be called. */ rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr); rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr); ic->conn = conn; conn->c_transport_data = ic; spin_lock_irqsave(&ib_nodev_conns_lock, flags); list_add_tail(&ic->ib_node, &ib_nodev_conns); spin_unlock_irqrestore(&ib_nodev_conns_lock, flags); rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data); return 0; } /* * Free a connection. Connection must be shut down and not set for reconnect. */ void rds_ib_conn_free(void *arg) { struct rds_ib_connection *ic = arg; spinlock_t *lock_ptr; rdsdebug("ic %p\n", ic); /* * Conn is either on a dev's list or on the nodev list. * A race with shutdown() or connect() would cause problems * (since rds_ibdev would change) but that should never happen. */ lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock; spin_lock_irq(lock_ptr); list_del(&ic->ib_node); spin_unlock_irq(lock_ptr); rds_ib_recv_free_caches(ic); kfree(ic); } /* * An error occurred on the connection */ void __rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...) { va_list ap; rds_conn_drop(conn); va_start(ap, fmt); vprintk(fmt, ap); va_end(ap); }
gpl-2.0
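One detail in the record above that is easy to misread is the version negotiation in rds_ib_protocol_compatible(): the peer advertises a bitmask of supported minor versions and the highest common bit wins. Below is a standalone, hedged restatement of that idiom in plain userspace C; the RDS constants themselves are not reproduced, and the helper name is mine.

/* Sketch only: the highest-common-minor-version idiom used by
 * rds_ib_protocol_compatible() above, with bit n of the mask meaning
 * "minor version n is supported".  A zero mask is handled separately
 * in the real code. */
#include <stdio.h>

static unsigned int highest_common_minor(unsigned short common_mask)
{
	unsigned int minor = 0;

	while ((common_mask >>= 1) != 0)
		minor++;
	return minor;
}

int main(void)
{
	/* both peers support minors 0 and 1 -> negotiate minor 1 */
	printf("%u\n", highest_common_minor(0x0003));
	return 0;
}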
kgp700/Neok-GNexroid-Kernel-JB
tools/perf/util/trace-event-read.c
4219
9892
/* * Copyright (C) 2009, Steven Rostedt <srostedt@redhat.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License (not later!) * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #define _FILE_OFFSET_BITS 64 #include <dirent.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <getopt.h> #include <stdarg.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/wait.h> #include <sys/mman.h> #include <pthread.h> #include <fcntl.h> #include <unistd.h> #include <ctype.h> #include <errno.h> #include "../perf.h" #include "util.h" #include "trace-event.h" static int input_fd; static int read_page; int file_bigendian; int host_bigendian; static int long_size; static unsigned long page_size; static ssize_t calc_data_size; static bool repipe; static int do_read(int fd, void *buf, int size) { int rsize = size; while (size) { int ret = read(fd, buf, size); if (ret <= 0) return -1; if (repipe) { int retw = write(STDOUT_FILENO, buf, ret); if (retw <= 0 || retw != ret) die("repiping input file"); } size -= ret; buf += ret; } return rsize; } static int read_or_die(void *data, int size) { int r; r = do_read(input_fd, data, size); if (r <= 0) die("reading input file (size expected=%d received=%d)", size, r); if (calc_data_size) calc_data_size += r; return r; } /* If it fails, the next read will report it */ static void skip(int size) { char buf[BUFSIZ]; int r; while (size) { r = size > BUFSIZ ? 
BUFSIZ : size; read_or_die(buf, r); size -= r; }; } static unsigned int read4(void) { unsigned int data; read_or_die(&data, 4); return __data2host4(data); } static unsigned long long read8(void) { unsigned long long data; read_or_die(&data, 8); return __data2host8(data); } static char *read_string(void) { char buf[BUFSIZ]; char *str = NULL; int size = 0; off_t r; char c; for (;;) { r = read(input_fd, &c, 1); if (r < 0) die("reading input file"); if (!r) die("no data"); if (repipe) { int retw = write(STDOUT_FILENO, &c, 1); if (retw <= 0 || retw != r) die("repiping input file string"); } buf[size++] = c; if (!c) break; } if (calc_data_size) calc_data_size += size; str = malloc_or_die(size); memcpy(str, buf, size); return str; } static void read_proc_kallsyms(void) { unsigned int size; char *buf; size = read4(); if (!size) return; buf = malloc_or_die(size + 1); read_or_die(buf, size); buf[size] = '\0'; parse_proc_kallsyms(buf, size); free(buf); } static void read_ftrace_printk(void) { unsigned int size; char *buf; size = read4(); if (!size) return; buf = malloc_or_die(size); read_or_die(buf, size); parse_ftrace_printk(buf, size); free(buf); } static void read_header_files(void) { unsigned long long size; char *header_event; char buf[BUFSIZ]; read_or_die(buf, 12); if (memcmp(buf, "header_page", 12) != 0) die("did not read header page"); size = read8(); skip(size); /* * The size field in the page is of type long, * use that instead, since it represents the kernel. */ long_size = header_page_size_size; read_or_die(buf, 13); if (memcmp(buf, "header_event", 13) != 0) die("did not read header event"); size = read8(); header_event = malloc_or_die(size); read_or_die(header_event, size); free(header_event); } static void read_ftrace_file(unsigned long long size) { char *buf; buf = malloc_or_die(size); read_or_die(buf, size); parse_ftrace_file(buf, size); free(buf); } static void read_event_file(char *sys, unsigned long long size) { char *buf; buf = malloc_or_die(size); read_or_die(buf, size); parse_event_file(buf, size, sys); free(buf); } static void read_ftrace_files(void) { unsigned long long size; int count; int i; count = read4(); for (i = 0; i < count; i++) { size = read8(); read_ftrace_file(size); } } static void read_event_files(void) { unsigned long long size; char *sys; int systems; int count; int i,x; systems = read4(); for (i = 0; i < systems; i++) { sys = read_string(); count = read4(); for (x=0; x < count; x++) { size = read8(); read_event_file(sys, size); } } } struct cpu_data { unsigned long long offset; unsigned long long size; unsigned long long timestamp; struct record *next; char *page; int cpu; int index; int page_size; }; static struct cpu_data *cpu_data; static void update_cpu_data_index(int cpu) { cpu_data[cpu].offset += page_size; cpu_data[cpu].size -= page_size; cpu_data[cpu].index = 0; } static void get_next_page(int cpu) { off_t save_seek; off_t ret; if (!cpu_data[cpu].page) return; if (read_page) { if (cpu_data[cpu].size <= page_size) { free(cpu_data[cpu].page); cpu_data[cpu].page = NULL; return; } update_cpu_data_index(cpu); /* other parts of the code may expect the pointer to not move */ save_seek = lseek(input_fd, 0, SEEK_CUR); ret = lseek(input_fd, cpu_data[cpu].offset, SEEK_SET); if (ret == (off_t)-1) die("failed to lseek"); ret = read(input_fd, cpu_data[cpu].page, page_size); if (ret < 0) die("failed to read page"); /* reset the file pointer back */ lseek(input_fd, save_seek, SEEK_SET); return; } munmap(cpu_data[cpu].page, page_size); cpu_data[cpu].page = NULL; if 
(cpu_data[cpu].size <= page_size) return; update_cpu_data_index(cpu); cpu_data[cpu].page = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE, input_fd, cpu_data[cpu].offset); if (cpu_data[cpu].page == MAP_FAILED) die("failed to mmap cpu %d at offset 0x%llx", cpu, cpu_data[cpu].offset); } static unsigned int type_len4host(unsigned int type_len_ts) { if (file_bigendian) return (type_len_ts >> 27) & ((1 << 5) - 1); else return type_len_ts & ((1 << 5) - 1); } static unsigned int ts4host(unsigned int type_len_ts) { if (file_bigendian) return type_len_ts & ((1 << 27) - 1); else return type_len_ts >> 5; } static int calc_index(void *ptr, int cpu) { return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page; } struct record *trace_peek_data(int cpu) { struct record *data; void *page = cpu_data[cpu].page; int idx = cpu_data[cpu].index; void *ptr = page + idx; unsigned long long extend; unsigned int type_len_ts; unsigned int type_len; unsigned int delta; unsigned int length = 0; if (cpu_data[cpu].next) return cpu_data[cpu].next; if (!page) return NULL; if (!idx) { /* FIXME: handle header page */ if (header_page_ts_size != 8) die("expected a long long type for timestamp"); cpu_data[cpu].timestamp = data2host8(ptr); ptr += 8; switch (header_page_size_size) { case 4: cpu_data[cpu].page_size = data2host4(ptr); ptr += 4; break; case 8: cpu_data[cpu].page_size = data2host8(ptr); ptr += 8; break; default: die("bad long size"); } ptr = cpu_data[cpu].page + header_page_data_offset; } read_again: idx = calc_index(ptr, cpu); if (idx >= cpu_data[cpu].page_size) { get_next_page(cpu); return trace_peek_data(cpu); } type_len_ts = data2host4(ptr); ptr += 4; type_len = type_len4host(type_len_ts); delta = ts4host(type_len_ts); switch (type_len) { case RINGBUF_TYPE_PADDING: if (!delta) die("error, hit unexpected end of page"); length = data2host4(ptr); ptr += 4; length *= 4; ptr += length; goto read_again; case RINGBUF_TYPE_TIME_EXTEND: extend = data2host4(ptr); ptr += 4; extend <<= TS_SHIFT; extend += delta; cpu_data[cpu].timestamp += extend; goto read_again; case RINGBUF_TYPE_TIME_STAMP: ptr += 12; break; case 0: length = data2host4(ptr); ptr += 4; die("here! 
length=%d", length); break; default: length = type_len * 4; break; } cpu_data[cpu].timestamp += delta; data = malloc_or_die(sizeof(*data)); memset(data, 0, sizeof(*data)); data->ts = cpu_data[cpu].timestamp; data->size = length; data->data = ptr; ptr += length; cpu_data[cpu].index = calc_index(ptr, cpu); cpu_data[cpu].next = data; return data; } struct record *trace_read_data(int cpu) { struct record *data; data = trace_peek_data(cpu); cpu_data[cpu].next = NULL; return data; } ssize_t trace_report(int fd, bool __repipe) { char buf[BUFSIZ]; char test[] = { 23, 8, 68 }; char *version; int show_version = 0; int show_funcs = 0; int show_printk = 0; ssize_t size; calc_data_size = 1; repipe = __repipe; input_fd = fd; read_or_die(buf, 3); if (memcmp(buf, test, 3) != 0) die("no trace data in the file"); read_or_die(buf, 7); if (memcmp(buf, "tracing", 7) != 0) die("not a trace file (missing 'tracing' tag)"); version = read_string(); if (show_version) printf("version = %s\n", version); free(version); read_or_die(buf, 1); file_bigendian = buf[0]; host_bigendian = bigendian(); read_or_die(buf, 1); long_size = buf[0]; page_size = read4(); read_header_files(); read_ftrace_files(); read_event_files(); read_proc_kallsyms(); read_ftrace_printk(); size = calc_data_size - 1; calc_data_size = 0; repipe = false; if (show_funcs) { print_funcs(); return size; } if (show_printk) { print_printk(); return size; } return size; }
gpl-2.0
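For the ring-buffer parsing in the record above, the 32-bit event header that trace_peek_data() decodes packs a 5-bit type_len field next to a 27-bit timestamp delta. A small hedged illustration of the little-endian split follows (variable names are mine, matching what type_len4host()/ts4host() compute):

/* Sketch only: decoding the 32-bit ring-buffer event header for a
 * little-endian trace file, as type_len4host()/ts4host() above do:
 * low 5 bits = type_len, upper 27 bits = timestamp delta. */
#include <stdio.h>

int main(void)
{
	unsigned int type_len_ts = (123u << 5) | 4u;	/* delta 123, type_len 4 */
	unsigned int type_len = type_len_ts & ((1u << 5) - 1);
	unsigned int delta = type_len_ts >> 5;

	printf("type_len=%u delta=%u\n", type_len, delta);
	return 0;
}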
Droid-Concepts/kernel_samsung_jf
net/ipv6/xfrm6_state.c
4731
4754
/* * xfrm6_state.c: based on xfrm4_state.c * * Authors: * Mitsuru KANDA @USAGI * Kazunori MIYAZAWA @USAGI * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * IPv6 support * YOSHIFUJI Hideaki @USAGI * Split up af-specific portion * */ #include <net/xfrm.h> #include <linux/pfkeyv2.h> #include <linux/ipsec.h> #include <linux/netfilter_ipv6.h> #include <linux/export.h> #include <net/dsfield.h> #include <net/ipv6.h> #include <net/addrconf.h> static void __xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl) { const struct flowi6 *fl6 = &fl->u.ip6; /* Initialize temporary selector matching only * to current session. */ *(struct in6_addr *)&sel->daddr = fl6->daddr; *(struct in6_addr *)&sel->saddr = fl6->saddr; sel->dport = xfrm_flowi_dport(fl, &fl6->uli); sel->dport_mask = htons(0xffff); sel->sport = xfrm_flowi_sport(fl, &fl6->uli); sel->sport_mask = htons(0xffff); sel->family = AF_INET6; sel->prefixlen_d = 128; sel->prefixlen_s = 128; sel->proto = fl6->flowi6_proto; sel->ifindex = fl6->flowi6_oif; } static void xfrm6_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl, const xfrm_address_t *daddr, const xfrm_address_t *saddr) { x->id = tmpl->id; if (ipv6_addr_any((struct in6_addr*)&x->id.daddr)) memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr)); if (ipv6_addr_any((struct in6_addr*)&x->props.saddr)) memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr)); x->props.mode = tmpl->mode; x->props.reqid = tmpl->reqid; x->props.family = AF_INET6; } /* distribution counting sort function for xfrm_state and xfrm_tmpl */ static int __xfrm6_sort(void **dst, void **src, int n, int (*cmp)(void *p), int maxclass) { int i; int class[XFRM_MAX_DEPTH]; int count[maxclass]; memset(count, 0, sizeof(count)); for (i = 0; i < n; i++) { int c; class[i] = c = cmp(src[i]); count[c]++; } for (i = 2; i < maxclass; i++) count[i] += count[i - 1]; for (i = 0; i < n; i++) { dst[count[class[i] - 1]++] = src[i]; src[i] = NULL; } return 0; } /* * Rule for xfrm_state: * * rule 1: select IPsec transport except AH * rule 2: select MIPv6 RO or inbound trigger * rule 3: select IPsec transport AH * rule 4: select IPsec tunnel * rule 5: others */ static int __xfrm6_state_sort_cmp(void *p) { struct xfrm_state *v = p; switch (v->props.mode) { case XFRM_MODE_TRANSPORT: if (v->id.proto != IPPROTO_AH) return 1; else return 3; #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) case XFRM_MODE_ROUTEOPTIMIZATION: case XFRM_MODE_IN_TRIGGER: return 2; #endif case XFRM_MODE_TUNNEL: case XFRM_MODE_BEET: return 4; } return 5; } static int __xfrm6_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n) { return __xfrm6_sort((void **)dst, (void **)src, n, __xfrm6_state_sort_cmp, 6); } /* * Rule for xfrm_tmpl: * * rule 1: select IPsec transport * rule 2: select MIPv6 RO or inbound trigger * rule 3: select IPsec tunnel * rule 4: others */ static int __xfrm6_tmpl_sort_cmp(void *p) { struct xfrm_tmpl *v = p; switch (v->mode) { case XFRM_MODE_TRANSPORT: return 1; #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) case XFRM_MODE_ROUTEOPTIMIZATION: case XFRM_MODE_IN_TRIGGER: return 2; #endif case XFRM_MODE_TUNNEL: case XFRM_MODE_BEET: return 3; } return 4; } static int __xfrm6_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n) { return __xfrm6_sort((void **)dst, (void **)src, n, __xfrm6_tmpl_sort_cmp, 5); } int xfrm6_extract_header(struct sk_buff *skb) { struct ipv6hdr *iph = ipv6_hdr(skb); XFRM_MODE_SKB_CB(skb)->ihl = 
sizeof(*iph); XFRM_MODE_SKB_CB(skb)->id = 0; XFRM_MODE_SKB_CB(skb)->frag_off = htons(IP_DF); XFRM_MODE_SKB_CB(skb)->tos = ipv6_get_dsfield(iph); XFRM_MODE_SKB_CB(skb)->ttl = iph->hop_limit; XFRM_MODE_SKB_CB(skb)->optlen = 0; memcpy(XFRM_MODE_SKB_CB(skb)->flow_lbl, iph->flow_lbl, sizeof(XFRM_MODE_SKB_CB(skb)->flow_lbl)); return 0; } static struct xfrm_state_afinfo xfrm6_state_afinfo = { .family = AF_INET6, .proto = IPPROTO_IPV6, .eth_proto = htons(ETH_P_IPV6), .owner = THIS_MODULE, .init_tempsel = __xfrm6_init_tempsel, .init_temprop = xfrm6_init_temprop, .tmpl_sort = __xfrm6_tmpl_sort, .state_sort = __xfrm6_state_sort, .output = xfrm6_output, .output_finish = xfrm6_output_finish, .extract_input = xfrm6_extract_input, .extract_output = xfrm6_extract_output, .transport_finish = xfrm6_transport_finish, }; int __init xfrm6_state_init(void) { return xfrm_state_register_afinfo(&xfrm6_state_afinfo); } void xfrm6_state_fini(void) { xfrm_state_unregister_afinfo(&xfrm6_state_afinfo); }
gpl-2.0
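The __xfrm6_sort() helper in the record above is a stable distribution (counting) sort keyed on the class returned by cmp(), with classes starting at 1. Here is a standalone hedged sketch of the same idiom applied to plain integers; classify(), the array sizes and the sample data are made up for the example.

/* Sketch only: the counting-sort-by-class idiom of __xfrm6_sort() above.
 * Order inside each class is preserved; classify() is an arbitrary
 * stand-in for cmp(). */
#include <stdio.h>

#define MAXCLASS 6
#define NITEMS   8

static int classify(int v)
{
	return (v % (MAXCLASS - 1)) + 1;	/* maps into 1..MAXCLASS-1 */
}

static void sort_by_class(int *dst, const int *src, int n)
{
	int count[MAXCLASS] = { 0 };
	int class[NITEMS];
	int i;

	for (i = 0; i < n; i++) {
		class[i] = classify(src[i]);
		count[class[i]]++;
	}
	for (i = 2; i < MAXCLASS; i++)
		count[i] += count[i - 1];	/* running start offset per class */
	for (i = 0; i < n; i++)
		dst[count[class[i] - 1]++] = src[i];
}

int main(void)
{
	int src[NITEMS] = { 7, 3, 12, 5, 9, 1, 14, 2 };
	int dst[NITEMS];
	int i;

	sort_by_class(dst, src, NITEMS);
	for (i = 0; i < NITEMS; i++)
		printf("%d ", dst[i]);	/* 5 1 7 12 2 3 9 14 */
	printf("\n");
	return 0;
}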
sakuraba001/android_kernel_samsung_klteactive
drivers/net/ethernet/broadcom/sb1250-mac.c
4987
66470
/* * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation * Copyright (c) 2006, 2007 Maciej W. Rozycki * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * * This driver is designed for the Broadcom SiByte SOC built-in * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp. * * Updated to the driver model and the PHY abstraction layer * by Maciej W. Rozycki. */ #include <linux/bug.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/err.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/prefetch.h> #include <asm/cache.h> #include <asm/io.h> #include <asm/processor.h> /* Processor type for cache alignment. */ /* Operational parameters that usually are not changed. */ #define CONFIG_SBMAC_COALESCE /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (2*HZ) MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)"); MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver"); /* A few user-configurable values which may be modified when a driver module is loaded. */ /* 1 normal messages, 0 quiet .. 7 verbose. 
*/ static int debug = 1; module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, "Debug messages"); #ifdef CONFIG_SBMAC_COALESCE static int int_pktcnt_tx = 255; module_param(int_pktcnt_tx, int, S_IRUGO); MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count"); static int int_timeout_tx = 255; module_param(int_timeout_tx, int, S_IRUGO); MODULE_PARM_DESC(int_timeout_tx, "TX timeout value"); static int int_pktcnt_rx = 64; module_param(int_pktcnt_rx, int, S_IRUGO); MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count"); static int int_timeout_rx = 64; module_param(int_timeout_rx, int, S_IRUGO); MODULE_PARM_DESC(int_timeout_rx, "RX timeout value"); #endif #include <asm/sibyte/board.h> #include <asm/sibyte/sb1250.h> #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) #include <asm/sibyte/bcm1480_regs.h> #include <asm/sibyte/bcm1480_int.h> #define R_MAC_DMA_OODPKTLOST_RX R_MAC_DMA_OODPKTLOST #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_int.h> #else #error invalid SiByte MAC configuration #endif #include <asm/sibyte/sb1250_scd.h> #include <asm/sibyte/sb1250_mac.h> #include <asm/sibyte/sb1250_dma.h> #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) #define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2)) #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) #define UNIT_INT(n) (K_INT_MAC_0 + (n)) #else #error invalid SiByte MAC configuration #endif #ifdef K_INT_PHY #define SBMAC_PHY_INT K_INT_PHY #else #define SBMAC_PHY_INT PHY_POLL #endif /********************************************************************** * Simple types ********************************************************************* */ enum sbmac_speed { sbmac_speed_none = 0, sbmac_speed_10 = SPEED_10, sbmac_speed_100 = SPEED_100, sbmac_speed_1000 = SPEED_1000, }; enum sbmac_duplex { sbmac_duplex_none = -1, sbmac_duplex_half = DUPLEX_HALF, sbmac_duplex_full = DUPLEX_FULL, }; enum sbmac_fc { sbmac_fc_none, sbmac_fc_disabled, sbmac_fc_frame, sbmac_fc_collision, sbmac_fc_carrier, }; enum sbmac_state { sbmac_state_uninit, sbmac_state_off, sbmac_state_on, sbmac_state_broken, }; /********************************************************************** * Macros ********************************************************************* */ #define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \ (d)->sbdma_dscrtable : (d)->f+1) #define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES) #define SBMAC_MAX_TXDESCR 256 #define SBMAC_MAX_RXDESCR 256 #define ENET_PACKET_SIZE 1518 /*#define ENET_PACKET_SIZE 9216 */ /********************************************************************** * DMA Descriptor structure ********************************************************************* */ struct sbdmadscr { uint64_t dscr_a; uint64_t dscr_b; }; /********************************************************************** * DMA Controller structure ********************************************************************* */ struct sbmacdma { /* * This stuff is used to identify the channel and the registers * associated with it. 
*/ struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */ int sbdma_channel; /* channel number */ int sbdma_txdir; /* direction (1=transmit) */ int sbdma_maxdescr; /* total # of descriptors in ring */ #ifdef CONFIG_SBMAC_COALESCE int sbdma_int_pktcnt; /* # descriptors rx/tx before interrupt */ int sbdma_int_timeout; /* # usec rx/tx interrupt */ #endif void __iomem *sbdma_config0; /* DMA config register 0 */ void __iomem *sbdma_config1; /* DMA config register 1 */ void __iomem *sbdma_dscrbase; /* descriptor base address */ void __iomem *sbdma_dscrcnt; /* descriptor count register */ void __iomem *sbdma_curdscr; /* current descriptor address */ void __iomem *sbdma_oodpktlost; /* pkt drop (rx only) */ /* * This stuff is for maintenance of the ring */ void *sbdma_dscrtable_unaligned; struct sbdmadscr *sbdma_dscrtable; /* base of descriptor table */ struct sbdmadscr *sbdma_dscrtable_end; /* end of descriptor table */ struct sk_buff **sbdma_ctxtable; /* context table, one per descr */ dma_addr_t sbdma_dscrtable_phys; /* and also the phys addr */ struct sbdmadscr *sbdma_addptr; /* next dscr for sw to add */ struct sbdmadscr *sbdma_remptr; /* next dscr for sw to remove */ }; /********************************************************************** * Ethernet softc structure ********************************************************************* */ struct sbmac_softc { /* * Linux-specific things */ struct net_device *sbm_dev; /* pointer to linux device */ struct napi_struct napi; struct phy_device *phy_dev; /* the associated PHY device */ struct mii_bus *mii_bus; /* the MII bus */ int phy_irq[PHY_MAX_ADDR]; spinlock_t sbm_lock; /* spin lock */ int sbm_devflags; /* current device flags */ /* * Controller-specific things */ void __iomem *sbm_base; /* MAC's base address */ enum sbmac_state sbm_state; /* current state */ void __iomem *sbm_macenable; /* MAC Enable Register */ void __iomem *sbm_maccfg; /* MAC Config Register */ void __iomem *sbm_fifocfg; /* FIFO Config Register */ void __iomem *sbm_framecfg; /* Frame Config Register */ void __iomem *sbm_rxfilter; /* Receive Filter Register */ void __iomem *sbm_isr; /* Interrupt Status Register */ void __iomem *sbm_imr; /* Interrupt Mask Register */ void __iomem *sbm_mdio; /* MDIO Register */ enum sbmac_speed sbm_speed; /* current speed */ enum sbmac_duplex sbm_duplex; /* current duplex */ enum sbmac_fc sbm_fc; /* cur. 
flow control setting */ int sbm_pause; /* current pause setting */ int sbm_link; /* current link state */ unsigned char sbm_hwaddr[ETH_ALEN]; struct sbmacdma sbm_txdma; /* only channel 0 for now */ struct sbmacdma sbm_rxdma; int rx_hw_checksum; int sbe_idx; }; /********************************************************************** * Externs ********************************************************************* */ /********************************************************************** * Prototypes ********************************************************************* */ static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, int txrx, int maxdescr); static void sbdma_channel_start(struct sbmacdma *d, int rxtx); static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, struct sk_buff *m); static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); static void sbdma_emptyring(struct sbmacdma *d); static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d); static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, int work_to_do, int poll); static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, int poll); static int sbmac_initctx(struct sbmac_softc *s); static void sbmac_channel_start(struct sbmac_softc *s); static void sbmac_channel_stop(struct sbmac_softc *s); static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *, enum sbmac_state); static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff); static uint64_t sbmac_addr2reg(unsigned char *ptr); static irqreturn_t sbmac_intr(int irq, void *dev_instance); static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); static void sbmac_setmulti(struct sbmac_softc *sc); static int sbmac_init(struct platform_device *pldev, long long base); static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed); static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, enum sbmac_fc fc); static int sbmac_open(struct net_device *dev); static void sbmac_tx_timeout (struct net_device *dev); static void sbmac_set_rx_mode(struct net_device *dev); static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int sbmac_close(struct net_device *dev); static int sbmac_poll(struct napi_struct *napi, int budget); static void sbmac_mii_poll(struct net_device *dev); static int sbmac_mii_probe(struct net_device *dev); static void sbmac_mii_sync(void __iomem *sbm_mdio); static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data, int bitcnt); static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx); static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, u16 val); /********************************************************************** * Globals ********************************************************************* */ static char sbmac_string[] = "sb1250-mac"; static char sbmac_mdio_string[] = "sb1250-mac-mdio"; /********************************************************************** * MDIO constants ********************************************************************* */ #define MII_COMMAND_START 0x01 #define MII_COMMAND_READ 0x02 #define MII_COMMAND_WRITE 0x01 #define MII_COMMAND_ACK 0x02 #define M_MAC_MDIO_DIR_OUTPUT 0 /* for clarity */ #define ENABLE 1 #define DISABLE 0 /********************************************************************** * SBMAC_MII_SYNC(sbm_mdio) * * Synchronize with the MII - send a pattern of bits to the MII * that will 
guarantee that it is ready to accept a command. * * Input parameters: * sbm_mdio - address of the MAC's MDIO register * * Return value: * nothing ********************************************************************* */ static void sbmac_mii_sync(void __iomem *sbm_mdio) { int cnt; uint64_t bits; int mac_mdio_genc; mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT; __raw_writeq(bits | mac_mdio_genc, sbm_mdio); for (cnt = 0; cnt < 32; cnt++) { __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio); __raw_writeq(bits | mac_mdio_genc, sbm_mdio); } } /********************************************************************** * SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt) * * Send some bits to the MII. The bits to be sent are right- * justified in the 'data' parameter. * * Input parameters: * sbm_mdio - address of the MAC's MDIO register * data - data to send * bitcnt - number of bits to send ********************************************************************* */ static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data, int bitcnt) { int i; uint64_t bits; unsigned int curmask; int mac_mdio_genc; mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; bits = M_MAC_MDIO_DIR_OUTPUT; __raw_writeq(bits | mac_mdio_genc, sbm_mdio); curmask = 1 << (bitcnt - 1); for (i = 0; i < bitcnt; i++) { if (data & curmask) bits |= M_MAC_MDIO_OUT; else bits &= ~M_MAC_MDIO_OUT; __raw_writeq(bits | mac_mdio_genc, sbm_mdio); __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio); __raw_writeq(bits | mac_mdio_genc, sbm_mdio); curmask >>= 1; } } /********************************************************************** * SBMAC_MII_READ(bus, phyaddr, regidx) * Read a PHY register. * * Input parameters: * bus - MDIO bus handle * phyaddr - PHY's address * regnum - index of register to read * * Return value: * value read, or 0xffff if an error occurred. ********************************************************************* */ static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx) { struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; void __iomem *sbm_mdio = sc->sbm_mdio; int idx; int error; int regval; int mac_mdio_genc; /* * Synchronize ourselves so that the PHY knows the next * thing coming down is a command */ sbmac_mii_sync(sbm_mdio); /* * Send the data to the PHY. The sequence is * a "start" command (2 bits) * a "read" command (2 bits) * the PHY addr (5 bits) * the register index (5 bits) */ sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2); sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2); sbmac_mii_senddata(sbm_mdio, phyaddr, 5); sbmac_mii_senddata(sbm_mdio, regidx, 5); mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; /* * Switch the port around without a clock transition. */ __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); /* * Send out a clock pulse to signal we want the status */ __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, sbm_mdio); __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); /* * If an error occurred, the PHY will signal '1' back */ error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN; /* * Issue an 'idle' clock pulse, but keep the direction * the same. 
*/ __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, sbm_mdio); __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); regval = 0; for (idx = 0; idx < 16; idx++) { regval <<= 1; if (error == 0) { if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN) regval |= 1; } __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, sbm_mdio); __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); } /* Switch back to output */ __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); if (error == 0) return regval; return 0xffff; } /********************************************************************** * SBMAC_MII_WRITE(bus, phyaddr, regidx, regval) * * Write a value to a PHY register. * * Input parameters: * bus - MDIO bus handle * phyaddr - PHY to use * regidx - register within the PHY * regval - data to write to register * * Return value: * 0 for success ********************************************************************* */ static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, u16 regval) { struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; void __iomem *sbm_mdio = sc->sbm_mdio; int mac_mdio_genc; sbmac_mii_sync(sbm_mdio); sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2); sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2); sbmac_mii_senddata(sbm_mdio, phyaddr, 5); sbmac_mii_senddata(sbm_mdio, regidx, 5); sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2); sbmac_mii_senddata(sbm_mdio, regval, 16); mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); return 0; } /********************************************************************** * SBDMA_INITCTX(d,s,chan,txrx,maxdescr) * * Initialize a DMA channel context. Since there are potentially * eight DMA channels per MAC, it's nice to do this in a standard * way. 
* * Input parameters: * d - struct sbmacdma (DMA channel context) * s - struct sbmac_softc (pointer to a MAC) * chan - channel number (0..1 right now) * txrx - Identifies DMA_TX or DMA_RX for channel direction * maxdescr - number of descriptors * * Return value: * nothing ********************************************************************* */ static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, int txrx, int maxdescr) { #ifdef CONFIG_SBMAC_COALESCE int int_pktcnt, int_timeout; #endif /* * Save away interesting stuff in the structure */ d->sbdma_eth = s; d->sbdma_channel = chan; d->sbdma_txdir = txrx; #if 0 /* RMON clearing */ s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING; #endif __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES); __raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS); __raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL); __raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL); __raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR); __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT); __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD); __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD); __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT); __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR); /* * initialize register pointers */ d->sbdma_config0 = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0); d->sbdma_config1 = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1); d->sbdma_dscrbase = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE); d->sbdma_dscrcnt = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT); d->sbdma_curdscr = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR); if (d->sbdma_txdir) d->sbdma_oodpktlost = NULL; else d->sbdma_oodpktlost = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX); /* * Allocate memory for the ring */ d->sbdma_maxdescr = maxdescr; d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1, sizeof(*d->sbdma_dscrtable), GFP_KERNEL); /* * The descriptor table must be aligned to at least 16 bytes or the * MAC will corrupt it. */ d->sbdma_dscrtable = (struct sbdmadscr *) ALIGN((unsigned long)d->sbdma_dscrtable_unaligned, sizeof(*d->sbdma_dscrtable)); d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr; d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable); /* * And context table */ d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr, sizeof(*d->sbdma_ctxtable), GFP_KERNEL); #ifdef CONFIG_SBMAC_COALESCE /* * Setup Rx/Tx DMA coalescing defaults */ int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx; if ( int_pktcnt ) { d->sbdma_int_pktcnt = int_pktcnt; } else { d->sbdma_int_pktcnt = 1; } int_timeout = (txrx == DMA_TX) ? 
int_timeout_tx : int_timeout_rx; if ( int_timeout ) { d->sbdma_int_timeout = int_timeout; } else { d->sbdma_int_timeout = 0; } #endif } /********************************************************************** * SBDMA_CHANNEL_START(d) * * Initialize the hardware registers for a DMA channel. * * Input parameters: * d - DMA channel to init (context must be previously init'd * rxtx - DMA_RX or DMA_TX depending on what type of channel * * Return value: * nothing ********************************************************************* */ static void sbdma_channel_start(struct sbmacdma *d, int rxtx) { /* * Turn on the DMA channel */ #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) | 0, d->sbdma_config1); __raw_writeq(M_DMA_EOP_INT_EN | V_DMA_RINGSZ(d->sbdma_maxdescr) | V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) | 0, d->sbdma_config0); #else __raw_writeq(0, d->sbdma_config1); __raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) | 0, d->sbdma_config0); #endif __raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase); /* * Initialize ring pointers */ d->sbdma_addptr = d->sbdma_dscrtable; d->sbdma_remptr = d->sbdma_dscrtable; } /********************************************************************** * SBDMA_CHANNEL_STOP(d) * * Initialize the hardware registers for a DMA channel. * * Input parameters: * d - DMA channel to init (context must be previously init'd * * Return value: * nothing ********************************************************************* */ static void sbdma_channel_stop(struct sbmacdma *d) { /* * Turn off the DMA channel */ __raw_writeq(0, d->sbdma_config1); __raw_writeq(0, d->sbdma_dscrbase); __raw_writeq(0, d->sbdma_config0); /* * Zero ring pointers */ d->sbdma_addptr = NULL; d->sbdma_remptr = NULL; } static inline void sbdma_align_skb(struct sk_buff *skb, unsigned int power2, unsigned int offset) { unsigned char *addr = skb->data; unsigned char *newaddr = PTR_ALIGN(addr, power2); skb_reserve(skb, newaddr - addr + offset); } /********************************************************************** * SBDMA_ADD_RCVBUFFER(d,sb) * * Add a buffer to the specified DMA channel. For receive channels, * this queues a buffer for inbound packets. * * Input parameters: * sc - softc structure * d - DMA channel descriptor * sb - sk_buff to add, or NULL if we should allocate one * * Return value: * 0 if buffer could not be added (ring is full) * 1 if buffer added successfully ********************************************************************* */ static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, struct sk_buff *sb) { struct net_device *dev = sc->sbm_dev; struct sbdmadscr *dsc; struct sbdmadscr *nextdsc; struct sk_buff *sb_new = NULL; int pktsize = ENET_PACKET_SIZE; /* get pointer to our current place in the ring */ dsc = d->sbdma_addptr; nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr); /* * figure out if the ring is full - if the next descriptor * is the same as the one that we're going to remove from * the ring, the ring is full */ if (nextdsc == d->sbdma_remptr) { return -ENOSPC; } /* * Allocate a sk_buff if we don't already have one. * If we do have an sk_buff, reset it so that it's empty. * * Note: sk_buffs don't seem to be guaranteed to have any sort * of alignment when they are allocated. Therefore, allocate enough * extra space to make sure that: * * 1. the data does not start in the middle of a cache line. * 2. The data does not end in the middle of a cache line * 3. The buffer can be aligned such that the IP addresses are * naturally aligned. 
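 * (That is why the allocation below asks for ENET_PACKET_SIZE plus two
 * extra cache lines plus NET_IP_ALIGN and then calls sbdma_align_skb().)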
* * Remember, the SOCs MAC writes whole cache lines at a time, * without reading the old contents first. So, if the sk_buff's * data portion starts in the middle of a cache line, the SOC * DMA will trash the beginning (and ending) portions. */ if (sb == NULL) { sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + NET_IP_ALIGN); if (sb_new == NULL) { pr_info("%s: sk_buff allocation failed\n", d->sbdma_eth->sbm_dev->name); return -ENOBUFS; } sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); } else { sb_new = sb; /* * nothing special to reinit buffer, it's already aligned * and sb->data already points to a good place. */ } /* * fill in the descriptor */ #ifdef CONFIG_SBMAC_COALESCE /* * Do not interrupt per DMA transfer. */ dsc->dscr_a = virt_to_phys(sb_new->data) | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0; #else dsc->dscr_a = virt_to_phys(sb_new->data) | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | M_DMA_DSCRA_INTERRUPT; #endif /* receiving: no options */ dsc->dscr_b = 0; /* * fill in the context */ d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new; /* * point at next packet */ d->sbdma_addptr = nextdsc; /* * Give the buffer to the DMA engine. */ __raw_writeq(1, d->sbdma_dscrcnt); return 0; /* we did it */ } /********************************************************************** * SBDMA_ADD_TXBUFFER(d,sb) * * Add a transmit buffer to the specified DMA channel, causing a * transmit to start. * * Input parameters: * d - DMA channel descriptor * sb - sk_buff to add * * Return value: * 0 transmit queued successfully * otherwise error code ********************************************************************* */ static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb) { struct sbdmadscr *dsc; struct sbdmadscr *nextdsc; uint64_t phys; uint64_t ncb; int length; /* get pointer to our current place in the ring */ dsc = d->sbdma_addptr; nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr); /* * figure out if the ring is full - if the next descriptor * is the same as the one that we're going to remove from * the ring, the ring is full */ if (nextdsc == d->sbdma_remptr) { return -ENOSPC; } /* * Under Linux, it's not necessary to copy/coalesce buffers * like it is on NetBSD. We think they're all contiguous, * but that may not be true for GBE. */ length = sb->len; /* * fill in the descriptor. Note that the number of cache * blocks in the descriptor is the number of blocks * *spanned*, so we need to add in the offset (if any) * while doing the calculation. */ phys = virt_to_phys(sb->data); ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1))); dsc->dscr_a = phys | V_DMA_DSCRA_A_SIZE(ncb) | #ifndef CONFIG_SBMAC_COALESCE M_DMA_DSCRA_INTERRUPT | #endif M_DMA_ETHTX_SOP; /* transmitting: set outbound options and length */ dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) | V_DMA_DSCRB_PKT_SIZE(length); /* * fill in the context */ d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb; /* * point at next packet */ d->sbdma_addptr = nextdsc; /* * Give the buffer to the DMA engine. 
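 * Writing 1 to the descriptor count register tells the engine that one
 * more descriptor is ready to be fetched.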
*/ __raw_writeq(1, d->sbdma_dscrcnt); return 0; /* we did it */ } /********************************************************************** * SBDMA_EMPTYRING(d) * * Free all allocated sk_buffs on the specified DMA channel; * * Input parameters: * d - DMA channel * * Return value: * nothing ********************************************************************* */ static void sbdma_emptyring(struct sbmacdma *d) { int idx; struct sk_buff *sb; for (idx = 0; idx < d->sbdma_maxdescr; idx++) { sb = d->sbdma_ctxtable[idx]; if (sb) { dev_kfree_skb(sb); d->sbdma_ctxtable[idx] = NULL; } } } /********************************************************************** * SBDMA_FILLRING(d) * * Fill the specified DMA channel (must be receive channel) * with sk_buffs * * Input parameters: * sc - softc structure * d - DMA channel * * Return value: * nothing ********************************************************************* */ static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d) { int idx; for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) { if (sbdma_add_rcvbuffer(sc, d, NULL) != 0) break; } } #ifdef CONFIG_NET_POLL_CONTROLLER static void sbmac_netpoll(struct net_device *netdev) { struct sbmac_softc *sc = netdev_priv(netdev); int irq = sc->sbm_dev->irq; __raw_writeq(0, sc->sbm_imr); sbmac_intr(irq, netdev); #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), sc->sbm_imr); #else __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr); #endif } #endif /********************************************************************** * SBDMA_RX_PROCESS(sc,d,work_to_do,poll) * * Process "completed" receive buffers on the specified DMA channel. * * Input parameters: * sc - softc structure * d - DMA channel context * work_to_do - no. of packets to process before enabling interrupt * again (for NAPI) * poll - 1: using polling (for NAPI) * * Return value: * nothing ********************************************************************* */ static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, int work_to_do, int poll) { struct net_device *dev = sc->sbm_dev; int curidx; int hwidx; struct sbdmadscr *dsc; struct sk_buff *sb; int len; int work_done = 0; int dropped = 0; prefetch(d); again: /* Check if the HW dropped any frames */ dev->stats.rx_fifo_errors += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff; __raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost); while (work_to_do-- > 0) { /* * figure out where we are (as an index) and where * the hardware is (also as an index) * * This could be done faster if (for example) the * descriptor table was page-aligned and contiguous in * both virtual and physical memory -- you could then * just compare the low-order bits of the virtual address * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) */ dsc = d->sbdma_remptr; curidx = dsc - d->sbdma_dscrtable; prefetch(dsc); prefetch(&d->sbdma_ctxtable[curidx]); hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable); /* * If they're the same, that means we've processed all * of the descriptors up to (but not including) the one that * the hardware is working on right now. 
*/ if (curidx == hwidx) goto done; /* * Otherwise, get the packet's sk_buff ptr back */ sb = d->sbdma_ctxtable[curidx]; d->sbdma_ctxtable[curidx] = NULL; len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4; /* * Check packet status. If good, process it. * If not, silently drop it and put it back on the * receive ring. */ if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) { /* * Add a new buffer to replace the old one. If we fail * to allocate a buffer, we're going to drop this * packet and put it right back on the receive ring. */ if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) == -ENOBUFS)) { dev->stats.rx_dropped++; /* Re-add old buffer */ sbdma_add_rcvbuffer(sc, d, sb); /* No point in continuing at the moment */ printk(KERN_ERR "dropped packet (1)\n"); d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); goto done; } else { /* * Set length into the packet */ skb_put(sb,len); /* * Buffer has been replaced on the * receive ring. Pass the buffer to * the kernel */ sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev); /* Check hw IPv4/TCP checksum if supported */ if (sc->rx_hw_checksum == ENABLE) { if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) && !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) { sb->ip_summed = CHECKSUM_UNNECESSARY; /* don't need to set sb->csum */ } else { skb_checksum_none_assert(sb); } } prefetch(sb->data); prefetch((const void *)(((char *)sb->data)+32)); if (poll) dropped = netif_receive_skb(sb); else dropped = netif_rx(sb); if (dropped == NET_RX_DROP) { dev->stats.rx_dropped++; d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); goto done; } else { dev->stats.rx_bytes += len; dev->stats.rx_packets++; } } } else { /* * Packet was mangled somehow. Just drop it and * put it back on the receive ring. */ dev->stats.rx_errors++; sbdma_add_rcvbuffer(sc, d, sb); } /* * .. and advance to the next buffer. */ d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); work_done++; } if (!poll) { work_to_do = 32; goto again; /* collect fifo drop statistics again */ } done: return work_done; } /********************************************************************** * SBDMA_TX_PROCESS(sc,d) * * Process "completed" transmit buffers on the specified DMA channel. * This is normally called within the interrupt service routine. * Note that this isn't really ideal for priority channels, since * it processes all of the packets on a given channel before * returning. 
* * Input parameters: * sc - softc structure * d - DMA channel context * poll - 1: using polling (for NAPI) * * Return value: * nothing ********************************************************************* */ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, int poll) { struct net_device *dev = sc->sbm_dev; int curidx; int hwidx; struct sbdmadscr *dsc; struct sk_buff *sb; unsigned long flags; int packets_handled = 0; spin_lock_irqsave(&(sc->sbm_lock), flags); if (d->sbdma_remptr == d->sbdma_addptr) goto end_unlock; hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable); for (;;) { /* * figure out where we are (as an index) and where * the hardware is (also as an index) * * This could be done faster if (for example) the * descriptor table was page-aligned and contiguous in * both virtual and physical memory -- you could then * just compare the low-order bits of the virtual address * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) */ curidx = d->sbdma_remptr - d->sbdma_dscrtable; /* * If they're the same, that means we've processed all * of the descriptors up to (but not including) the one that * the hardware is working on right now. */ if (curidx == hwidx) break; /* * Otherwise, get the packet's sk_buff ptr back */ dsc = &(d->sbdma_dscrtable[curidx]); sb = d->sbdma_ctxtable[curidx]; d->sbdma_ctxtable[curidx] = NULL; /* * Stats */ dev->stats.tx_bytes += sb->len; dev->stats.tx_packets++; /* * for transmits, we just free buffers. */ dev_kfree_skb_irq(sb); /* * .. and advance to the next buffer. */ d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); packets_handled++; } /* * Decide if we should wake up the protocol or not. * Other drivers seem to do this when we reach a low * watermark on the transmit queue. */ if (packets_handled) netif_wake_queue(d->sbdma_eth->sbm_dev); end_unlock: spin_unlock_irqrestore(&(sc->sbm_lock), flags); } /********************************************************************** * SBMAC_INITCTX(s) * * Initialize an Ethernet context structure - this is called * once per MAC on the 1250. Memory is allocated here, so don't * call it again from inside the ioctl routines that bring the * interface up/down * * Input parameters: * s - sbmac context structure * * Return value: * 0 ********************************************************************* */ static int sbmac_initctx(struct sbmac_softc *s) { /* * figure out the addresses of some ports */ s->sbm_macenable = s->sbm_base + R_MAC_ENABLE; s->sbm_maccfg = s->sbm_base + R_MAC_CFG; s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG; s->sbm_framecfg = s->sbm_base + R_MAC_FRAMECFG; s->sbm_rxfilter = s->sbm_base + R_MAC_ADFILTER_CFG; s->sbm_isr = s->sbm_base + R_MAC_STATUS; s->sbm_imr = s->sbm_base + R_MAC_INT_MASK; s->sbm_mdio = s->sbm_base + R_MAC_MDIO; /* * Initialize the DMA channels. Right now, only one per MAC is used * Note: Only do this _once_, as it allocates memory from the kernel! 
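 * (The matching sbmac_uninitctx() below frees those tables again on
 * teardown.)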
*/ sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR); sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR); /* * initial state is OFF */ s->sbm_state = sbmac_state_off; return 0; } static void sbdma_uninitctx(struct sbmacdma *d) { if (d->sbdma_dscrtable_unaligned) { kfree(d->sbdma_dscrtable_unaligned); d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL; } if (d->sbdma_ctxtable) { kfree(d->sbdma_ctxtable); d->sbdma_ctxtable = NULL; } } static void sbmac_uninitctx(struct sbmac_softc *sc) { sbdma_uninitctx(&(sc->sbm_txdma)); sbdma_uninitctx(&(sc->sbm_rxdma)); } /********************************************************************** * SBMAC_CHANNEL_START(s) * * Start packet processing on this MAC. * * Input parameters: * s - sbmac structure * * Return value: * nothing ********************************************************************* */ static void sbmac_channel_start(struct sbmac_softc *s) { uint64_t reg; void __iomem *port; uint64_t cfg,fifo,framecfg; int idx, th_value; /* * Don't do this if running */ if (s->sbm_state == sbmac_state_on) return; /* * Bring the controller out of reset, but leave it off. */ __raw_writeq(0, s->sbm_macenable); /* * Ignore all received packets */ __raw_writeq(0, s->sbm_rxfilter); /* * Calculate values for various control registers. */ cfg = M_MAC_RETRY_EN | M_MAC_TX_HOLD_SOP_EN | V_MAC_TX_PAUSE_CNT_16K | M_MAC_AP_STAT_EN | M_MAC_FAST_SYNC | M_MAC_SS_EN | 0; /* * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above * Use a larger RD_THRSH for gigabit */ if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) th_value = 28; else th_value = 64; fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */ ((s->sbm_speed == sbmac_speed_1000) ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) | V_MAC_TX_RL_THRSH(4) | V_MAC_RX_PL_THRSH(4) | V_MAC_RX_RD_THRSH(4) | /* Must be '4' */ V_MAC_RX_RL_THRSH(8) | 0; framecfg = V_MAC_MIN_FRAMESZ_DEFAULT | V_MAC_MAX_FRAMESZ_DEFAULT | V_MAC_BACKOFF_SEL(1); /* * Clear out the hash address map */ port = s->sbm_base + R_MAC_HASH_BASE; for (idx = 0; idx < MAC_HASH_COUNT; idx++) { __raw_writeq(0, port); port += sizeof(uint64_t); } /* * Clear out the exact-match table */ port = s->sbm_base + R_MAC_ADDR_BASE; for (idx = 0; idx < MAC_ADDR_COUNT; idx++) { __raw_writeq(0, port); port += sizeof(uint64_t); } /* * Clear out the DMA Channel mapping table registers */ port = s->sbm_base + R_MAC_CHUP0_BASE; for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { __raw_writeq(0, port); port += sizeof(uint64_t); } port = s->sbm_base + R_MAC_CHLO0_BASE; for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { __raw_writeq(0, port); port += sizeof(uint64_t); } /* * Program the hardware address. It goes into the hardware-address * register as well as the first filter register. */ reg = sbmac_addr2reg(s->sbm_hwaddr); port = s->sbm_base + R_MAC_ADDR_BASE; __raw_writeq(reg, port); port = s->sbm_base + R_MAC_ETHERNET_ADDR; #ifdef CONFIG_SB1_PASS_1_WORKAROUNDS /* * Pass1 SOCs do not receive packets addressed to the * destination address in the R_MAC_ETHERNET_ADDR register. * Set the value to zero. 
*/ __raw_writeq(0, port); #else __raw_writeq(reg, port); #endif /* * Set the receive filter for no packets, and write values * to the various config registers */ __raw_writeq(0, s->sbm_rxfilter); __raw_writeq(0, s->sbm_imr); __raw_writeq(framecfg, s->sbm_framecfg); __raw_writeq(fifo, s->sbm_fifocfg); __raw_writeq(cfg, s->sbm_maccfg); /* * Initialize DMA channels (rings should be ok now) */ sbdma_channel_start(&(s->sbm_rxdma), DMA_RX); sbdma_channel_start(&(s->sbm_txdma), DMA_TX); /* * Configure the speed, duplex, and flow control */ sbmac_set_speed(s,s->sbm_speed); sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc); /* * Fill the receive ring */ sbdma_fillring(s, &(s->sbm_rxdma)); /* * Turn on the rest of the bits in the enable register */ #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) __raw_writeq(M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0, s->sbm_macenable); #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) __raw_writeq(M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0 | M_MAC_RX_ENABLE | M_MAC_TX_ENABLE, s->sbm_macenable); #else #error invalid SiByte MAC configuration #endif #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr); #else __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr); #endif /* * Enable receiving unicasts and broadcasts */ __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter); /* * we're running now. */ s->sbm_state = sbmac_state_on; /* * Program multicast addresses */ sbmac_setmulti(s); /* * If channel was in promiscuous mode before, turn that on */ if (s->sbm_devflags & IFF_PROMISC) { sbmac_promiscuous_mode(s,1); } } /********************************************************************** * SBMAC_CHANNEL_STOP(s) * * Stop packet processing on this MAC. * * Input parameters: * s - sbmac structure * * Return value: * nothing ********************************************************************* */ static void sbmac_channel_stop(struct sbmac_softc *s) { /* don't do this if already stopped */ if (s->sbm_state == sbmac_state_off) return; /* don't accept any packets, disable all interrupts */ __raw_writeq(0, s->sbm_rxfilter); __raw_writeq(0, s->sbm_imr); /* Turn off ticker */ /* XXX */ /* turn off receiver and transmitter */ __raw_writeq(0, s->sbm_macenable); /* We're stopped now. 
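 * (The DMA channels are halted and both rings are drained just below.)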
*/ s->sbm_state = sbmac_state_off; /* * Stop DMA channels (rings should be ok now) */ sbdma_channel_stop(&(s->sbm_rxdma)); sbdma_channel_stop(&(s->sbm_txdma)); /* Empty the receive and transmit rings */ sbdma_emptyring(&(s->sbm_rxdma)); sbdma_emptyring(&(s->sbm_txdma)); } /********************************************************************** * SBMAC_SET_CHANNEL_STATE(state) * * Set the channel's state ON or OFF * * Input parameters: * state - new state * * Return value: * old state ********************************************************************* */ static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc, enum sbmac_state state) { enum sbmac_state oldstate = sc->sbm_state; /* * If same as previous state, return */ if (state == oldstate) { return oldstate; } /* * If new state is ON, turn channel on */ if (state == sbmac_state_on) { sbmac_channel_start(sc); } else { sbmac_channel_stop(sc); } /* * Return previous state */ return oldstate; } /********************************************************************** * SBMAC_PROMISCUOUS_MODE(sc,onoff) * * Turn on or off promiscuous mode * * Input parameters: * sc - softc * onoff - 1 to turn on, 0 to turn off * * Return value: * nothing ********************************************************************* */ static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff) { uint64_t reg; if (sc->sbm_state != sbmac_state_on) return; if (onoff) { reg = __raw_readq(sc->sbm_rxfilter); reg |= M_MAC_ALLPKT_EN; __raw_writeq(reg, sc->sbm_rxfilter); } else { reg = __raw_readq(sc->sbm_rxfilter); reg &= ~M_MAC_ALLPKT_EN; __raw_writeq(reg, sc->sbm_rxfilter); } } /********************************************************************** * SBMAC_SETIPHDR_OFFSET(sc,onoff) * * Set the iphdr offset as 15 assuming ethernet encapsulation * * Input parameters: * sc - softc * * Return value: * nothing ********************************************************************* */ static void sbmac_set_iphdr_offset(struct sbmac_softc *sc) { uint64_t reg; /* Hard code the off set to 15 for now */ reg = __raw_readq(sc->sbm_rxfilter); reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15); __raw_writeq(reg, sc->sbm_rxfilter); /* BCM1250 pass1 didn't have hardware checksum. Everything later does. */ if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) { sc->rx_hw_checksum = DISABLE; } else { sc->rx_hw_checksum = ENABLE; } } /********************************************************************** * SBMAC_ADDR2REG(ptr) * * Convert six bytes into the 64-bit register value that * we typically write into the SBMAC's address/mcast registers * * Input parameters: * ptr - pointer to 6 bytes * * Return value: * register value ********************************************************************* */ static uint64_t sbmac_addr2reg(unsigned char *ptr) { uint64_t reg = 0; ptr += 6; reg |= (uint64_t) *(--ptr); reg <<= 8; reg |= (uint64_t) *(--ptr); reg <<= 8; reg |= (uint64_t) *(--ptr); reg <<= 8; reg |= (uint64_t) *(--ptr); reg <<= 8; reg |= (uint64_t) *(--ptr); reg <<= 8; reg |= (uint64_t) *(--ptr); return reg; } /********************************************************************** * SBMAC_SET_SPEED(s,speed) * * Configure LAN speed for the specified MAC. * Warning: must be called when MAC is off! 
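 * (If the MAC is currently running, the new speed is only recorded and
 * takes effect on the next channel restart.)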
* * Input parameters: * s - sbmac structure * speed - speed to set MAC to (see enum sbmac_speed) * * Return value: * 1 if successful * 0 indicates invalid parameters ********************************************************************* */ static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed) { uint64_t cfg; uint64_t framecfg; /* * Save new current values */ s->sbm_speed = speed; if (s->sbm_state == sbmac_state_on) return 0; /* save for next restart */ /* * Read current register values */ cfg = __raw_readq(s->sbm_maccfg); framecfg = __raw_readq(s->sbm_framecfg); /* * Mask out the stuff we want to change */ cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL); framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH | M_MAC_SLOT_SIZE); /* * Now add in the new bits */ switch (speed) { case sbmac_speed_10: framecfg |= V_MAC_IFG_RX_10 | V_MAC_IFG_TX_10 | K_MAC_IFG_THRSH_10 | V_MAC_SLOT_SIZE_10; cfg |= V_MAC_SPEED_SEL_10MBPS; break; case sbmac_speed_100: framecfg |= V_MAC_IFG_RX_100 | V_MAC_IFG_TX_100 | V_MAC_IFG_THRSH_100 | V_MAC_SLOT_SIZE_100; cfg |= V_MAC_SPEED_SEL_100MBPS ; break; case sbmac_speed_1000: framecfg |= V_MAC_IFG_RX_1000 | V_MAC_IFG_TX_1000 | V_MAC_IFG_THRSH_1000 | V_MAC_SLOT_SIZE_1000; cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN; break; default: return 0; } /* * Send the bits back to the hardware */ __raw_writeq(framecfg, s->sbm_framecfg); __raw_writeq(cfg, s->sbm_maccfg); return 1; } /********************************************************************** * SBMAC_SET_DUPLEX(s,duplex,fc) * * Set Ethernet duplex and flow control options for this MAC * Warning: must be called when MAC is off! * * Input parameters: * s - sbmac structure * duplex - duplex setting (see enum sbmac_duplex) * fc - flow control setting (see enum sbmac_fc) * * Return value: * 1 if ok * 0 if an invalid parameter combination was specified ********************************************************************* */ static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, enum sbmac_fc fc) { uint64_t cfg; /* * Save new current values */ s->sbm_duplex = duplex; s->sbm_fc = fc; if (s->sbm_state == sbmac_state_on) return 0; /* save for next restart */ /* * Read current register values */ cfg = __raw_readq(s->sbm_maccfg); /* * Mask off the stuff we're about to change */ cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN); switch (duplex) { case sbmac_duplex_half: switch (fc) { case sbmac_fc_disabled: cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED; break; case sbmac_fc_collision: cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED; break; case sbmac_fc_carrier: cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR; break; case sbmac_fc_frame: /* not valid in half duplex */ default: /* invalid selection */ return 0; } break; case sbmac_duplex_full: switch (fc) { case sbmac_fc_disabled: cfg |= V_MAC_FC_CMD_DISABLED; break; case sbmac_fc_frame: cfg |= V_MAC_FC_CMD_ENABLED; break; case sbmac_fc_collision: /* not valid in full duplex */ case sbmac_fc_carrier: /* not valid in full duplex */ default: return 0; } break; default: return 0; } /* * Send the bits back to the hardware */ __raw_writeq(cfg, s->sbm_maccfg); return 1; } /********************************************************************** * SBMAC_INTR() * * Interrupt handler for MAC interrupts * * Input parameters: * MAC structure * * Return value: * nothing ********************************************************************* */ static irqreturn_t sbmac_intr(int irq,void *dev_instance) { struct net_device *dev = (struct net_device *) 
dev_instance; struct sbmac_softc *sc = netdev_priv(dev); uint64_t isr; int handled = 0; /* * Read the ISR (this clears the bits in the real * register, except for counter addr) */ isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR; if (isr == 0) return IRQ_RETVAL(0); handled = 1; /* * Transmits on channel 0 */ if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) sbdma_tx_process(sc,&(sc->sbm_txdma), 0); if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { if (napi_schedule_prep(&sc->napi)) { __raw_writeq(0, sc->sbm_imr); __napi_schedule(&sc->napi); /* Depend on the exit from poll to reenable intr */ } else { /* may leave some packets behind */ sbdma_rx_process(sc,&(sc->sbm_rxdma), SBMAC_MAX_RXDESCR * 2, 0); } } return IRQ_RETVAL(handled); } /********************************************************************** * SBMAC_START_TX(skb,dev) * * Start output on the specified interface. Basically, we * queue as many buffers as we can until the ring fills up, or * we run off the end of the queue, whichever comes first. * * Input parameters: * * * Return value: * nothing ********************************************************************* */ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); unsigned long flags; /* lock eth irq */ spin_lock_irqsave(&sc->sbm_lock, flags); /* * Put the buffer on the transmit ring. If we * don't have room, stop the queue. */ if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) { /* XXX save skb that we could not send */ netif_stop_queue(dev); spin_unlock_irqrestore(&sc->sbm_lock, flags); return NETDEV_TX_BUSY; } spin_unlock_irqrestore(&sc->sbm_lock, flags); return NETDEV_TX_OK; } /********************************************************************** * SBMAC_SETMULTI(sc) * * Reprogram the multicast table into the hardware, given * the list of multicasts associated with the interface * structure. * * Input parameters: * sc - softc * * Return value: * nothing ********************************************************************* */ static void sbmac_setmulti(struct sbmac_softc *sc) { uint64_t reg; void __iomem *port; int idx; struct netdev_hw_addr *ha; struct net_device *dev = sc->sbm_dev; /* * Clear out entire multicast table. We do this by nuking * the entire hash table and all the direct matches except * the first one, which is used for our station address */ for (idx = 1; idx < MAC_ADDR_COUNT; idx++) { port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)); __raw_writeq(0, port); } for (idx = 0; idx < MAC_HASH_COUNT; idx++) { port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t)); __raw_writeq(0, port); } /* * Clear the filter to say we don't want any multicasts. */ reg = __raw_readq(sc->sbm_rxfilter); reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN); __raw_writeq(reg, sc->sbm_rxfilter); if (dev->flags & IFF_ALLMULTI) { /* * Enable ALL multicasts. Do this by inverting the * multicast enable bit. */ reg = __raw_readq(sc->sbm_rxfilter); reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN); __raw_writeq(reg, sc->sbm_rxfilter); return; } /* * Progam new multicast entries. For now, only use the * perfect filter. 
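 * (Slot 0 of the exact-match table holds the station address, so at most
 * MAC_ADDR_COUNT - 1 multicast entries fit here.)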
In the future we'll need to use the * hash filter if the perfect filter overflows */ /* XXX only using perfect filter for now, need to use hash * XXX if the table overflows */ idx = 1; /* skip station address */ netdev_for_each_mc_addr(ha, dev) { if (idx == MAC_ADDR_COUNT) break; reg = sbmac_addr2reg(ha->addr); port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t)); __raw_writeq(reg, port); idx++; } /* * Enable the "accept multicast bits" if we programmed at least one * multicast. */ if (idx > 1) { reg = __raw_readq(sc->sbm_rxfilter); reg |= M_MAC_MCAST_EN; __raw_writeq(reg, sc->sbm_rxfilter); } } static int sb1250_change_mtu(struct net_device *_dev, int new_mtu) { if (new_mtu > ENET_PACKET_SIZE) return -EINVAL; _dev->mtu = new_mtu; pr_info("changing the mtu to %d\n", new_mtu); return 0; } static const struct net_device_ops sbmac_netdev_ops = { .ndo_open = sbmac_open, .ndo_stop = sbmac_close, .ndo_start_xmit = sbmac_start_tx, .ndo_set_rx_mode = sbmac_set_rx_mode, .ndo_tx_timeout = sbmac_tx_timeout, .ndo_do_ioctl = sbmac_mii_ioctl, .ndo_change_mtu = sb1250_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = sbmac_netpoll, #endif }; /********************************************************************** * SBMAC_INIT(dev) * * Attach routine - init hardware and hook ourselves into linux * * Input parameters: * dev - net_device structure * * Return value: * status ********************************************************************* */ static int sbmac_init(struct platform_device *pldev, long long base) { struct net_device *dev = dev_get_drvdata(&pldev->dev); int idx = pldev->id; struct sbmac_softc *sc = netdev_priv(dev); unsigned char *eaddr; uint64_t ea_reg; int i; int err; sc->sbm_dev = dev; sc->sbe_idx = idx; eaddr = sc->sbm_hwaddr; /* * Read the ethernet address. The firmware left this programmed * for us in the ethernet address register for each mac. */ ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR); __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR); for (i = 0; i < 6; i++) { eaddr[i] = (uint8_t) (ea_reg & 0xFF); ea_reg >>= 8; } for (i = 0; i < 6; i++) { dev->dev_addr[i] = eaddr[i]; } /* * Initialize context (get pointers to registers and stuff), then * allocate the memory for the descriptor tables. 
*/ sbmac_initctx(sc); /* * Set up Linux device callins */ spin_lock_init(&(sc->sbm_lock)); dev->netdev_ops = &sbmac_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; netif_napi_add(dev, &sc->napi, sbmac_poll, 16); dev->irq = UNIT_INT(idx); /* This is needed for PASS2 for Rx H/W checksum feature */ sbmac_set_iphdr_offset(sc); sc->mii_bus = mdiobus_alloc(); if (sc->mii_bus == NULL) { err = -ENOMEM; goto uninit_ctx; } sc->mii_bus->name = sbmac_mdio_string; snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pldev->name, idx); sc->mii_bus->priv = sc; sc->mii_bus->read = sbmac_mii_read; sc->mii_bus->write = sbmac_mii_write; sc->mii_bus->irq = sc->phy_irq; for (i = 0; i < PHY_MAX_ADDR; ++i) sc->mii_bus->irq[i] = SBMAC_PHY_INT; sc->mii_bus->parent = &pldev->dev; /* * Probe PHY address */ err = mdiobus_register(sc->mii_bus); if (err) { printk(KERN_ERR "%s: unable to register MDIO bus\n", dev->name); goto free_mdio; } dev_set_drvdata(&pldev->dev, sc->mii_bus); err = register_netdev(dev); if (err) { printk(KERN_ERR "%s.%d: unable to register netdev\n", sbmac_string, idx); goto unreg_mdio; } pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name); if (sc->rx_hw_checksum == ENABLE) pr_info("%s: enabling TCP rcv checksum\n", dev->name); /* * Display Ethernet address (this is called during the config * process so we need to finish off the config message that * was being displayed) */ pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n", dev->name, base, eaddr); return 0; unreg_mdio: mdiobus_unregister(sc->mii_bus); dev_set_drvdata(&pldev->dev, NULL); free_mdio: mdiobus_free(sc->mii_bus); uninit_ctx: sbmac_uninitctx(sc); return err; } static int sbmac_open(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); int err; if (debug > 1) pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq); /* * map/route interrupt (clear status first, in case something * weird is pending; we haven't initialized the mac registers * yet) */ __raw_readq(sc->sbm_isr); err = request_irq(dev->irq, sbmac_intr, IRQF_SHARED, dev->name, dev); if (err) { printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq); goto out_err; } sc->sbm_speed = sbmac_speed_none; sc->sbm_duplex = sbmac_duplex_none; sc->sbm_fc = sbmac_fc_none; sc->sbm_pause = -1; sc->sbm_link = 0; /* * Attach to the PHY */ err = sbmac_mii_probe(dev); if (err) goto out_unregister; /* * Turn on the channel */ sbmac_set_channel_state(sc,sbmac_state_on); netif_start_queue(dev); sbmac_set_rx_mode(dev); phy_start(sc->phy_dev); napi_enable(&sc->napi); return 0; out_unregister: free_irq(dev->irq, dev); out_err: return err; } static int sbmac_mii_probe(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); struct phy_device *phy_dev; int i; for (i = 0; i < PHY_MAX_ADDR; i++) { phy_dev = sc->mii_bus->phy_map[i]; if (phy_dev) break; } if (!phy_dev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENXIO; } phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phy_dev)) { printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); return PTR_ERR(phy_dev); } /* Remove any features not supported by the controller */ phy_dev->supported &= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause; phy_dev->advertising = phy_dev->supported; pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, 
irq=%d)\n", dev->name, phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq); sc->phy_dev = phy_dev; return 0; } static void sbmac_mii_poll(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); struct phy_device *phy_dev = sc->phy_dev; unsigned long flags; enum sbmac_fc fc; int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg; link_chg = (sc->sbm_link != phy_dev->link); speed_chg = (sc->sbm_speed != phy_dev->speed); duplex_chg = (sc->sbm_duplex != phy_dev->duplex); pause_chg = (sc->sbm_pause != phy_dev->pause); if (!link_chg && !speed_chg && !duplex_chg && !pause_chg) return; /* Hmmm... */ if (!phy_dev->link) { if (link_chg) { sc->sbm_link = phy_dev->link; sc->sbm_speed = sbmac_speed_none; sc->sbm_duplex = sbmac_duplex_none; sc->sbm_fc = sbmac_fc_disabled; sc->sbm_pause = -1; pr_info("%s: link unavailable\n", dev->name); } return; } if (phy_dev->duplex == DUPLEX_FULL) { if (phy_dev->pause) fc = sbmac_fc_frame; else fc = sbmac_fc_disabled; } else fc = sbmac_fc_collision; fc_chg = (sc->sbm_fc != fc); pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed, phy_dev->duplex == DUPLEX_FULL ? 'F' : 'H'); spin_lock_irqsave(&sc->sbm_lock, flags); sc->sbm_speed = phy_dev->speed; sc->sbm_duplex = phy_dev->duplex; sc->sbm_fc = fc; sc->sbm_pause = phy_dev->pause; sc->sbm_link = phy_dev->link; if ((speed_chg || duplex_chg || fc_chg) && sc->sbm_state != sbmac_state_off) { /* * something changed, restart the channel */ if (debug > 1) pr_debug("%s: restarting channel " "because PHY state changed\n", dev->name); sbmac_channel_stop(sc); sbmac_channel_start(sc); } spin_unlock_irqrestore(&sc->sbm_lock, flags); } static void sbmac_tx_timeout (struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&sc->sbm_lock, flags); dev->trans_start = jiffies; /* prevent tx timeout */ dev->stats.tx_errors++; spin_unlock_irqrestore(&sc->sbm_lock, flags); printk (KERN_WARNING "%s: Transmit timed out\n",dev->name); } static void sbmac_set_rx_mode(struct net_device *dev) { unsigned long flags; struct sbmac_softc *sc = netdev_priv(dev); spin_lock_irqsave(&sc->sbm_lock, flags); if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) { /* * Promiscuous changed. */ if (dev->flags & IFF_PROMISC) { sbmac_promiscuous_mode(sc,1); } else { sbmac_promiscuous_mode(sc,0); } } spin_unlock_irqrestore(&sc->sbm_lock, flags); /* * Program the multicasts. Do this every time. 
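 * (sbmac_setmulti() clears the hash and exact-match tables and then
 * reloads the exact-match slots from the current multicast list.)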
*/ sbmac_setmulti(sc); } static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct sbmac_softc *sc = netdev_priv(dev); if (!netif_running(dev) || !sc->phy_dev) return -EINVAL; return phy_mii_ioctl(sc->phy_dev, rq, cmd); } static int sbmac_close(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); napi_disable(&sc->napi); phy_stop(sc->phy_dev); sbmac_set_channel_state(sc, sbmac_state_off); netif_stop_queue(dev); if (debug > 1) pr_debug("%s: Shutting down ethercard\n", dev->name); phy_disconnect(sc->phy_dev); sc->phy_dev = NULL; free_irq(dev->irq, dev); sbdma_emptyring(&(sc->sbm_txdma)); sbdma_emptyring(&(sc->sbm_rxdma)); return 0; } static int sbmac_poll(struct napi_struct *napi, int budget) { struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi); int work_done; work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1); sbdma_tx_process(sc, &(sc->sbm_txdma), 1); if (work_done < budget) { napi_complete(napi); #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), sc->sbm_imr); #else __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr); #endif } return work_done; } static int __devinit sbmac_probe(struct platform_device *pldev) { struct net_device *dev; struct sbmac_softc *sc; void __iomem *sbm_base; struct resource *res; u64 sbmac_orig_hwaddr; int err; res = platform_get_resource(pldev, IORESOURCE_MEM, 0); BUG_ON(!res); sbm_base = ioremap_nocache(res->start, resource_size(res)); if (!sbm_base) { printk(KERN_ERR "%s: unable to map device registers\n", dev_name(&pldev->dev)); err = -ENOMEM; goto out_out; } /* * The R_MAC_ETHERNET_ADDR register will be set to some nonzero * value for us by the firmware if we're going to use this MAC. * If we find a zero, skip this MAC. */ sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev), sbmac_orig_hwaddr ? "" : "not ", (long long)res->start); if (sbmac_orig_hwaddr == 0) { err = 0; goto out_unmap; } /* * Okay, cool. Initialize this MAC. */ dev = alloc_etherdev(sizeof(struct sbmac_softc)); if (!dev) { err = -ENOMEM; goto out_unmap; } dev_set_drvdata(&pldev->dev, dev); SET_NETDEV_DEV(dev, &pldev->dev); sc = netdev_priv(dev); sc->sbm_base = sbm_base; err = sbmac_init(pldev, res->start); if (err) goto out_kfree; return 0; out_kfree: free_netdev(dev); __raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR); out_unmap: iounmap(sbm_base); out_out: return err; } static int __exit sbmac_remove(struct platform_device *pldev) { struct net_device *dev = dev_get_drvdata(&pldev->dev); struct sbmac_softc *sc = netdev_priv(dev); unregister_netdev(dev); sbmac_uninitctx(sc); mdiobus_unregister(sc->mii_bus); mdiobus_free(sc->mii_bus); iounmap(sc->sbm_base); free_netdev(dev); return 0; } static struct platform_driver sbmac_driver = { .probe = sbmac_probe, .remove = __exit_p(sbmac_remove), .driver = { .name = sbmac_string, .owner = THIS_MODULE, }, }; module_platform_driver(sbmac_driver);
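/*
 * Illustrative note (not part of the original driver): sbmac_addr2reg()
 * packs the first MAC byte into bits 7:0 and the last byte into bits
 * 47:40, so the address 00:11:22:33:44:55 becomes 0x0000554433221100.
 */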
gpl-2.0
Hardslog/android_kernel_asus_flo
sound/soc/codecs/cq93vc.c
4987
5402
/* * ALSA SoC CQ0093 Voice Codec Driver for DaVinci platforms * * Copyright (C) 2010 Texas Instruments, Inc * * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/mfd/davinci_voicecodec.h> #include <linux/spi/spi.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> static inline unsigned int cq93vc_read(struct snd_soc_codec *codec, unsigned int reg) { struct davinci_vc *davinci_vc = codec->control_data; return readl(davinci_vc->base + reg); } static inline int cq93vc_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { struct davinci_vc *davinci_vc = codec->control_data; writel(value, davinci_vc->base + reg); return 0; } static const struct snd_kcontrol_new cq93vc_snd_controls[] = { SOC_SINGLE("PGA Capture Volume", DAVINCI_VC_REG05, 0, 0x03, 0), SOC_SINGLE("Mono DAC Playback Volume", DAVINCI_VC_REG09, 0, 0x3f, 0), }; static int cq93vc_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u8 reg = cq93vc_read(codec, DAVINCI_VC_REG09) & ~DAVINCI_VC_REG09_MUTE; if (mute) cq93vc_write(codec, DAVINCI_VC_REG09, reg | DAVINCI_VC_REG09_MUTE); else cq93vc_write(codec, DAVINCI_VC_REG09, reg); return 0; } static int cq93vc_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct davinci_vc *davinci_vc = codec->control_data; switch (freq) { case 22579200: case 27000000: case 33868800: davinci_vc->cq93vc.sysclk = freq; return 0; } return -EINVAL; } static int cq93vc_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { switch (level) { case SND_SOC_BIAS_ON: cq93vc_write(codec, DAVINCI_VC_REG12, DAVINCI_VC_REG12_POWER_ALL_ON); break; case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: cq93vc_write(codec, DAVINCI_VC_REG12, DAVINCI_VC_REG12_POWER_ALL_OFF); break; case SND_SOC_BIAS_OFF: /* force all power off */ cq93vc_write(codec, DAVINCI_VC_REG12, DAVINCI_VC_REG12_POWER_ALL_OFF); break; } codec->dapm.bias_level = level; return 0; } #define CQ93VC_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000) #define CQ93VC_FORMATS (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE) static const struct snd_soc_dai_ops cq93vc_dai_ops = { .digital_mute = cq93vc_mute, .set_sysclk = cq93vc_set_dai_sysclk, }; static struct snd_soc_dai_driver cq93vc_dai = { .name = "cq93vc-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = CQ93VC_RATES, .formats = CQ93VC_FORMATS,}, .capture = { 
.stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = CQ93VC_RATES, .formats = CQ93VC_FORMATS,}, .ops = &cq93vc_dai_ops, }; static int cq93vc_resume(struct snd_soc_codec *codec) { cq93vc_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static int cq93vc_probe(struct snd_soc_codec *codec) { struct davinci_vc *davinci_vc = codec->dev->platform_data; davinci_vc->cq93vc.codec = codec; codec->control_data = davinci_vc; /* Set controls */ snd_soc_add_codec_controls(codec, cq93vc_snd_controls, ARRAY_SIZE(cq93vc_snd_controls)); /* Off, with power on */ cq93vc_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static int cq93vc_remove(struct snd_soc_codec *codec) { cq93vc_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static struct snd_soc_codec_driver soc_codec_dev_cq93vc = { .read = cq93vc_read, .write = cq93vc_write, .set_bias_level = cq93vc_set_bias_level, .probe = cq93vc_probe, .remove = cq93vc_remove, .resume = cq93vc_resume, }; static int cq93vc_platform_probe(struct platform_device *pdev) { return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_cq93vc, &cq93vc_dai, 1); } static int cq93vc_platform_remove(struct platform_device *pdev) { snd_soc_unregister_codec(&pdev->dev); return 0; } static struct platform_driver cq93vc_codec_driver = { .driver = { .name = "cq93vc-codec", .owner = THIS_MODULE, }, .probe = cq93vc_platform_probe, .remove = __devexit_p(cq93vc_platform_remove), }; module_platform_driver(cq93vc_codec_driver); MODULE_DESCRIPTION("Texas Instruments DaVinci ASoC CQ0093 Voice Codec Driver"); MODULE_AUTHOR("Miguel Aguilar"); MODULE_LICENSE("GPL");
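/*
 * Illustrative note (not part of the original codec driver): cq93vc_mute()
 * does a read-modify-write of DAVINCI_VC_REG09, masking off the MUTE bit
 * first so that unmuting simply restores the preserved volume bits.
 */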
gpl-2.0
alecuba16/android_kernel_iuni_msm8974
drivers/net/ethernet/broadcom/sb1250-mac.c
4987
66470
/* * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation * Copyright (c) 2006, 2007 Maciej W. Rozycki * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * * This driver is designed for the Broadcom SiByte SOC built-in * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp. * * Updated to the driver model and the PHY abstraction layer * by Maciej W. Rozycki. */ #include <linux/bug.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/err.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/prefetch.h> #include <asm/cache.h> #include <asm/io.h> #include <asm/processor.h> /* Processor type for cache alignment. */ /* Operational parameters that usually are not changed. */ #define CONFIG_SBMAC_COALESCE /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (2*HZ) MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)"); MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver"); /* A few user-configurable values which may be modified when a driver module is loaded. */ /* 1 normal messages, 0 quiet .. 7 verbose. 
*/ static int debug = 1; module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, "Debug messages"); #ifdef CONFIG_SBMAC_COALESCE static int int_pktcnt_tx = 255; module_param(int_pktcnt_tx, int, S_IRUGO); MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count"); static int int_timeout_tx = 255; module_param(int_timeout_tx, int, S_IRUGO); MODULE_PARM_DESC(int_timeout_tx, "TX timeout value"); static int int_pktcnt_rx = 64; module_param(int_pktcnt_rx, int, S_IRUGO); MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count"); static int int_timeout_rx = 64; module_param(int_timeout_rx, int, S_IRUGO); MODULE_PARM_DESC(int_timeout_rx, "RX timeout value"); #endif #include <asm/sibyte/board.h> #include <asm/sibyte/sb1250.h> #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) #include <asm/sibyte/bcm1480_regs.h> #include <asm/sibyte/bcm1480_int.h> #define R_MAC_DMA_OODPKTLOST_RX R_MAC_DMA_OODPKTLOST #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_int.h> #else #error invalid SiByte MAC configuration #endif #include <asm/sibyte/sb1250_scd.h> #include <asm/sibyte/sb1250_mac.h> #include <asm/sibyte/sb1250_dma.h> #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) #define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2)) #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) #define UNIT_INT(n) (K_INT_MAC_0 + (n)) #else #error invalid SiByte MAC configuration #endif #ifdef K_INT_PHY #define SBMAC_PHY_INT K_INT_PHY #else #define SBMAC_PHY_INT PHY_POLL #endif /********************************************************************** * Simple types ********************************************************************* */ enum sbmac_speed { sbmac_speed_none = 0, sbmac_speed_10 = SPEED_10, sbmac_speed_100 = SPEED_100, sbmac_speed_1000 = SPEED_1000, }; enum sbmac_duplex { sbmac_duplex_none = -1, sbmac_duplex_half = DUPLEX_HALF, sbmac_duplex_full = DUPLEX_FULL, }; enum sbmac_fc { sbmac_fc_none, sbmac_fc_disabled, sbmac_fc_frame, sbmac_fc_collision, sbmac_fc_carrier, }; enum sbmac_state { sbmac_state_uninit, sbmac_state_off, sbmac_state_on, sbmac_state_broken, }; /********************************************************************** * Macros ********************************************************************* */ #define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \ (d)->sbdma_dscrtable : (d)->f+1) #define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES) #define SBMAC_MAX_TXDESCR 256 #define SBMAC_MAX_RXDESCR 256 #define ENET_PACKET_SIZE 1518 /*#define ENET_PACKET_SIZE 9216 */ /********************************************************************** * DMA Descriptor structure ********************************************************************* */ struct sbdmadscr { uint64_t dscr_a; uint64_t dscr_b; }; /********************************************************************** * DMA Controller structure ********************************************************************* */ struct sbmacdma { /* * This stuff is used to identify the channel and the registers * associated with it. 
*/ struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */ int sbdma_channel; /* channel number */ int sbdma_txdir; /* direction (1=transmit) */ int sbdma_maxdescr; /* total # of descriptors in ring */ #ifdef CONFIG_SBMAC_COALESCE int sbdma_int_pktcnt; /* # descriptors rx/tx before interrupt */ int sbdma_int_timeout; /* # usec rx/tx interrupt */ #endif void __iomem *sbdma_config0; /* DMA config register 0 */ void __iomem *sbdma_config1; /* DMA config register 1 */ void __iomem *sbdma_dscrbase; /* descriptor base address */ void __iomem *sbdma_dscrcnt; /* descriptor count register */ void __iomem *sbdma_curdscr; /* current descriptor address */ void __iomem *sbdma_oodpktlost; /* pkt drop (rx only) */ /* * This stuff is for maintenance of the ring */ void *sbdma_dscrtable_unaligned; struct sbdmadscr *sbdma_dscrtable; /* base of descriptor table */ struct sbdmadscr *sbdma_dscrtable_end; /* end of descriptor table */ struct sk_buff **sbdma_ctxtable; /* context table, one per descr */ dma_addr_t sbdma_dscrtable_phys; /* and also the phys addr */ struct sbdmadscr *sbdma_addptr; /* next dscr for sw to add */ struct sbdmadscr *sbdma_remptr; /* next dscr for sw to remove */ }; /********************************************************************** * Ethernet softc structure ********************************************************************* */ struct sbmac_softc { /* * Linux-specific things */ struct net_device *sbm_dev; /* pointer to linux device */ struct napi_struct napi; struct phy_device *phy_dev; /* the associated PHY device */ struct mii_bus *mii_bus; /* the MII bus */ int phy_irq[PHY_MAX_ADDR]; spinlock_t sbm_lock; /* spin lock */ int sbm_devflags; /* current device flags */ /* * Controller-specific things */ void __iomem *sbm_base; /* MAC's base address */ enum sbmac_state sbm_state; /* current state */ void __iomem *sbm_macenable; /* MAC Enable Register */ void __iomem *sbm_maccfg; /* MAC Config Register */ void __iomem *sbm_fifocfg; /* FIFO Config Register */ void __iomem *sbm_framecfg; /* Frame Config Register */ void __iomem *sbm_rxfilter; /* Receive Filter Register */ void __iomem *sbm_isr; /* Interrupt Status Register */ void __iomem *sbm_imr; /* Interrupt Mask Register */ void __iomem *sbm_mdio; /* MDIO Register */ enum sbmac_speed sbm_speed; /* current speed */ enum sbmac_duplex sbm_duplex; /* current duplex */ enum sbmac_fc sbm_fc; /* cur. 
flow control setting */ int sbm_pause; /* current pause setting */ int sbm_link; /* current link state */ unsigned char sbm_hwaddr[ETH_ALEN]; struct sbmacdma sbm_txdma; /* only channel 0 for now */ struct sbmacdma sbm_rxdma; int rx_hw_checksum; int sbe_idx; }; /********************************************************************** * Externs ********************************************************************* */ /********************************************************************** * Prototypes ********************************************************************* */ static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, int txrx, int maxdescr); static void sbdma_channel_start(struct sbmacdma *d, int rxtx); static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, struct sk_buff *m); static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); static void sbdma_emptyring(struct sbmacdma *d); static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d); static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, int work_to_do, int poll); static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, int poll); static int sbmac_initctx(struct sbmac_softc *s); static void sbmac_channel_start(struct sbmac_softc *s); static void sbmac_channel_stop(struct sbmac_softc *s); static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *, enum sbmac_state); static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff); static uint64_t sbmac_addr2reg(unsigned char *ptr); static irqreturn_t sbmac_intr(int irq, void *dev_instance); static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); static void sbmac_setmulti(struct sbmac_softc *sc); static int sbmac_init(struct platform_device *pldev, long long base); static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed); static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, enum sbmac_fc fc); static int sbmac_open(struct net_device *dev); static void sbmac_tx_timeout (struct net_device *dev); static void sbmac_set_rx_mode(struct net_device *dev); static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int sbmac_close(struct net_device *dev); static int sbmac_poll(struct napi_struct *napi, int budget); static void sbmac_mii_poll(struct net_device *dev); static int sbmac_mii_probe(struct net_device *dev); static void sbmac_mii_sync(void __iomem *sbm_mdio); static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data, int bitcnt); static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx); static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, u16 val); /********************************************************************** * Globals ********************************************************************* */ static char sbmac_string[] = "sb1250-mac"; static char sbmac_mdio_string[] = "sb1250-mac-mdio"; /********************************************************************** * MDIO constants ********************************************************************* */ #define MII_COMMAND_START 0x01 #define MII_COMMAND_READ 0x02 #define MII_COMMAND_WRITE 0x01 #define MII_COMMAND_ACK 0x02 #define M_MAC_MDIO_DIR_OUTPUT 0 /* for clarity */ #define ENABLE 1 #define DISABLE 0 /********************************************************************** * SBMAC_MII_SYNC(sbm_mdio) * * Synchronize with the MII - send a pattern of bits to the MII * that will 
guarantee that it is ready to accept a command. * * Input parameters: * sbm_mdio - address of the MAC's MDIO register * * Return value: * nothing ********************************************************************* */ static void sbmac_mii_sync(void __iomem *sbm_mdio) { int cnt; uint64_t bits; int mac_mdio_genc; mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT; __raw_writeq(bits | mac_mdio_genc, sbm_mdio); for (cnt = 0; cnt < 32; cnt++) { __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio); __raw_writeq(bits | mac_mdio_genc, sbm_mdio); } } /********************************************************************** * SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt) * * Send some bits to the MII. The bits to be sent are right- * justified in the 'data' parameter. * * Input parameters: * sbm_mdio - address of the MAC's MDIO register * data - data to send * bitcnt - number of bits to send ********************************************************************* */ static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data, int bitcnt) { int i; uint64_t bits; unsigned int curmask; int mac_mdio_genc; mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; bits = M_MAC_MDIO_DIR_OUTPUT; __raw_writeq(bits | mac_mdio_genc, sbm_mdio); curmask = 1 << (bitcnt - 1); for (i = 0; i < bitcnt; i++) { if (data & curmask) bits |= M_MAC_MDIO_OUT; else bits &= ~M_MAC_MDIO_OUT; __raw_writeq(bits | mac_mdio_genc, sbm_mdio); __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio); __raw_writeq(bits | mac_mdio_genc, sbm_mdio); curmask >>= 1; } } /********************************************************************** * SBMAC_MII_READ(bus, phyaddr, regidx) * Read a PHY register. * * Input parameters: * bus - MDIO bus handle * phyaddr - PHY's address * regnum - index of register to read * * Return value: * value read, or 0xffff if an error occurred. ********************************************************************* */ static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx) { struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; void __iomem *sbm_mdio = sc->sbm_mdio; int idx; int error; int regval; int mac_mdio_genc; /* * Synchronize ourselves so that the PHY knows the next * thing coming down is a command */ sbmac_mii_sync(sbm_mdio); /* * Send the data to the PHY. The sequence is * a "start" command (2 bits) * a "read" command (2 bits) * the PHY addr (5 bits) * the register index (5 bits) */ sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2); sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2); sbmac_mii_senddata(sbm_mdio, phyaddr, 5); sbmac_mii_senddata(sbm_mdio, regidx, 5); mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; /* * Switch the port around without a clock transition. */ __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); /* * Send out a clock pulse to signal we want the status */ __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, sbm_mdio); __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); /* * If an error occurred, the PHY will signal '1' back */ error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN; /* * Issue an 'idle' clock pulse, but keep the direction * the same. 
*/ __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, sbm_mdio); __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); regval = 0; for (idx = 0; idx < 16; idx++) { regval <<= 1; if (error == 0) { if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN) regval |= 1; } __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, sbm_mdio); __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); } /* Switch back to output */ __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); if (error == 0) return regval; return 0xffff; } /********************************************************************** * SBMAC_MII_WRITE(bus, phyaddr, regidx, regval) * * Write a value to a PHY register. * * Input parameters: * bus - MDIO bus handle * phyaddr - PHY to use * regidx - register within the PHY * regval - data to write to register * * Return value: * 0 for success ********************************************************************* */ static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, u16 regval) { struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; void __iomem *sbm_mdio = sc->sbm_mdio; int mac_mdio_genc; sbmac_mii_sync(sbm_mdio); sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2); sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2); sbmac_mii_senddata(sbm_mdio, phyaddr, 5); sbmac_mii_senddata(sbm_mdio, regidx, 5); sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2); sbmac_mii_senddata(sbm_mdio, regval, 16); mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); return 0; } /********************************************************************** * SBDMA_INITCTX(d,s,chan,txrx,maxdescr) * * Initialize a DMA channel context. Since there are potentially * eight DMA channels per MAC, it's nice to do this in a standard * way. 
* * Input parameters: * d - struct sbmacdma (DMA channel context) * s - struct sbmac_softc (pointer to a MAC) * chan - channel number (0..1 right now) * txrx - Identifies DMA_TX or DMA_RX for channel direction * maxdescr - number of descriptors * * Return value: * nothing ********************************************************************* */ static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, int txrx, int maxdescr) { #ifdef CONFIG_SBMAC_COALESCE int int_pktcnt, int_timeout; #endif /* * Save away interesting stuff in the structure */ d->sbdma_eth = s; d->sbdma_channel = chan; d->sbdma_txdir = txrx; #if 0 /* RMON clearing */ s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING; #endif __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES); __raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS); __raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL); __raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL); __raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR); __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT); __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD); __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD); __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT); __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR); __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR); /* * initialize register pointers */ d->sbdma_config0 = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0); d->sbdma_config1 = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1); d->sbdma_dscrbase = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE); d->sbdma_dscrcnt = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT); d->sbdma_curdscr = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR); if (d->sbdma_txdir) d->sbdma_oodpktlost = NULL; else d->sbdma_oodpktlost = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX); /* * Allocate memory for the ring */ d->sbdma_maxdescr = maxdescr; d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1, sizeof(*d->sbdma_dscrtable), GFP_KERNEL); /* * The descriptor table must be aligned to at least 16 bytes or the * MAC will corrupt it. */ d->sbdma_dscrtable = (struct sbdmadscr *) ALIGN((unsigned long)d->sbdma_dscrtable_unaligned, sizeof(*d->sbdma_dscrtable)); d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr; d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable); /* * And context table */ d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr, sizeof(*d->sbdma_ctxtable), GFP_KERNEL); #ifdef CONFIG_SBMAC_COALESCE /* * Setup Rx/Tx DMA coalescing defaults */ int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx; if ( int_pktcnt ) { d->sbdma_int_pktcnt = int_pktcnt; } else { d->sbdma_int_pktcnt = 1; } int_timeout = (txrx == DMA_TX) ? 
int_timeout_tx : int_timeout_rx; if ( int_timeout ) { d->sbdma_int_timeout = int_timeout; } else { d->sbdma_int_timeout = 0; } #endif } /********************************************************************** * SBDMA_CHANNEL_START(d) * * Initialize the hardware registers for a DMA channel. * * Input parameters: * d - DMA channel to init (context must be previously init'd * rxtx - DMA_RX or DMA_TX depending on what type of channel * * Return value: * nothing ********************************************************************* */ static void sbdma_channel_start(struct sbmacdma *d, int rxtx) { /* * Turn on the DMA channel */ #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) | 0, d->sbdma_config1); __raw_writeq(M_DMA_EOP_INT_EN | V_DMA_RINGSZ(d->sbdma_maxdescr) | V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) | 0, d->sbdma_config0); #else __raw_writeq(0, d->sbdma_config1); __raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) | 0, d->sbdma_config0); #endif __raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase); /* * Initialize ring pointers */ d->sbdma_addptr = d->sbdma_dscrtable; d->sbdma_remptr = d->sbdma_dscrtable; } /********************************************************************** * SBDMA_CHANNEL_STOP(d) * * Initialize the hardware registers for a DMA channel. * * Input parameters: * d - DMA channel to init (context must be previously init'd * * Return value: * nothing ********************************************************************* */ static void sbdma_channel_stop(struct sbmacdma *d) { /* * Turn off the DMA channel */ __raw_writeq(0, d->sbdma_config1); __raw_writeq(0, d->sbdma_dscrbase); __raw_writeq(0, d->sbdma_config0); /* * Zero ring pointers */ d->sbdma_addptr = NULL; d->sbdma_remptr = NULL; } static inline void sbdma_align_skb(struct sk_buff *skb, unsigned int power2, unsigned int offset) { unsigned char *addr = skb->data; unsigned char *newaddr = PTR_ALIGN(addr, power2); skb_reserve(skb, newaddr - addr + offset); } /********************************************************************** * SBDMA_ADD_RCVBUFFER(d,sb) * * Add a buffer to the specified DMA channel. For receive channels, * this queues a buffer for inbound packets. * * Input parameters: * sc - softc structure * d - DMA channel descriptor * sb - sk_buff to add, or NULL if we should allocate one * * Return value: * 0 if buffer could not be added (ring is full) * 1 if buffer added successfully ********************************************************************* */ static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, struct sk_buff *sb) { struct net_device *dev = sc->sbm_dev; struct sbdmadscr *dsc; struct sbdmadscr *nextdsc; struct sk_buff *sb_new = NULL; int pktsize = ENET_PACKET_SIZE; /* get pointer to our current place in the ring */ dsc = d->sbdma_addptr; nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr); /* * figure out if the ring is full - if the next descriptor * is the same as the one that we're going to remove from * the ring, the ring is full */ if (nextdsc == d->sbdma_remptr) { return -ENOSPC; } /* * Allocate a sk_buff if we don't already have one. * If we do have an sk_buff, reset it so that it's empty. * * Note: sk_buffs don't seem to be guaranteed to have any sort * of alignment when they are allocated. Therefore, allocate enough * extra space to make sure that: * * 1. the data does not start in the middle of a cache line. * 2. The data does not end in the middle of a cache line * 3. The buffer can be aligned such that the IP addresses are * naturally aligned. 
* * Remember, the SOCs MAC writes whole cache lines at a time, * without reading the old contents first. So, if the sk_buff's * data portion starts in the middle of a cache line, the SOC * DMA will trash the beginning (and ending) portions. */ if (sb == NULL) { sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + NET_IP_ALIGN); if (sb_new == NULL) { pr_info("%s: sk_buff allocation failed\n", d->sbdma_eth->sbm_dev->name); return -ENOBUFS; } sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); } else { sb_new = sb; /* * nothing special to reinit buffer, it's already aligned * and sb->data already points to a good place. */ } /* * fill in the descriptor */ #ifdef CONFIG_SBMAC_COALESCE /* * Do not interrupt per DMA transfer. */ dsc->dscr_a = virt_to_phys(sb_new->data) | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0; #else dsc->dscr_a = virt_to_phys(sb_new->data) | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | M_DMA_DSCRA_INTERRUPT; #endif /* receiving: no options */ dsc->dscr_b = 0; /* * fill in the context */ d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new; /* * point at next packet */ d->sbdma_addptr = nextdsc; /* * Give the buffer to the DMA engine. */ __raw_writeq(1, d->sbdma_dscrcnt); return 0; /* we did it */ } /********************************************************************** * SBDMA_ADD_TXBUFFER(d,sb) * * Add a transmit buffer to the specified DMA channel, causing a * transmit to start. * * Input parameters: * d - DMA channel descriptor * sb - sk_buff to add * * Return value: * 0 transmit queued successfully * otherwise error code ********************************************************************* */ static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb) { struct sbdmadscr *dsc; struct sbdmadscr *nextdsc; uint64_t phys; uint64_t ncb; int length; /* get pointer to our current place in the ring */ dsc = d->sbdma_addptr; nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr); /* * figure out if the ring is full - if the next descriptor * is the same as the one that we're going to remove from * the ring, the ring is full */ if (nextdsc == d->sbdma_remptr) { return -ENOSPC; } /* * Under Linux, it's not necessary to copy/coalesce buffers * like it is on NetBSD. We think they're all contiguous, * but that may not be true for GBE. */ length = sb->len; /* * fill in the descriptor. Note that the number of cache * blocks in the descriptor is the number of blocks * *spanned*, so we need to add in the offset (if any) * while doing the calculation. */ phys = virt_to_phys(sb->data); ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1))); dsc->dscr_a = phys | V_DMA_DSCRA_A_SIZE(ncb) | #ifndef CONFIG_SBMAC_COALESCE M_DMA_DSCRA_INTERRUPT | #endif M_DMA_ETHTX_SOP; /* transmitting: set outbound options and length */ dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) | V_DMA_DSCRB_PKT_SIZE(length); /* * fill in the context */ d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb; /* * point at next packet */ d->sbdma_addptr = nextdsc; /* * Give the buffer to the DMA engine. 
*/ __raw_writeq(1, d->sbdma_dscrcnt); return 0; /* we did it */ } /********************************************************************** * SBDMA_EMPTYRING(d) * * Free all allocated sk_buffs on the specified DMA channel; * * Input parameters: * d - DMA channel * * Return value: * nothing ********************************************************************* */ static void sbdma_emptyring(struct sbmacdma *d) { int idx; struct sk_buff *sb; for (idx = 0; idx < d->sbdma_maxdescr; idx++) { sb = d->sbdma_ctxtable[idx]; if (sb) { dev_kfree_skb(sb); d->sbdma_ctxtable[idx] = NULL; } } } /********************************************************************** * SBDMA_FILLRING(d) * * Fill the specified DMA channel (must be receive channel) * with sk_buffs * * Input parameters: * sc - softc structure * d - DMA channel * * Return value: * nothing ********************************************************************* */ static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d) { int idx; for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) { if (sbdma_add_rcvbuffer(sc, d, NULL) != 0) break; } } #ifdef CONFIG_NET_POLL_CONTROLLER static void sbmac_netpoll(struct net_device *netdev) { struct sbmac_softc *sc = netdev_priv(netdev); int irq = sc->sbm_dev->irq; __raw_writeq(0, sc->sbm_imr); sbmac_intr(irq, netdev); #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), sc->sbm_imr); #else __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr); #endif } #endif /********************************************************************** * SBDMA_RX_PROCESS(sc,d,work_to_do,poll) * * Process "completed" receive buffers on the specified DMA channel. * * Input parameters: * sc - softc structure * d - DMA channel context * work_to_do - no. of packets to process before enabling interrupt * again (for NAPI) * poll - 1: using polling (for NAPI) * * Return value: * nothing ********************************************************************* */ static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, int work_to_do, int poll) { struct net_device *dev = sc->sbm_dev; int curidx; int hwidx; struct sbdmadscr *dsc; struct sk_buff *sb; int len; int work_done = 0; int dropped = 0; prefetch(d); again: /* Check if the HW dropped any frames */ dev->stats.rx_fifo_errors += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff; __raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost); while (work_to_do-- > 0) { /* * figure out where we are (as an index) and where * the hardware is (also as an index) * * This could be done faster if (for example) the * descriptor table was page-aligned and contiguous in * both virtual and physical memory -- you could then * just compare the low-order bits of the virtual address * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) */ dsc = d->sbdma_remptr; curidx = dsc - d->sbdma_dscrtable; prefetch(dsc); prefetch(&d->sbdma_ctxtable[curidx]); hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable); /* * If they're the same, that means we've processed all * of the descriptors up to (but not including) the one that * the hardware is working on right now. 
*/ if (curidx == hwidx) goto done; /* * Otherwise, get the packet's sk_buff ptr back */ sb = d->sbdma_ctxtable[curidx]; d->sbdma_ctxtable[curidx] = NULL; len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4; /* * Check packet status. If good, process it. * If not, silently drop it and put it back on the * receive ring. */ if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) { /* * Add a new buffer to replace the old one. If we fail * to allocate a buffer, we're going to drop this * packet and put it right back on the receive ring. */ if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) == -ENOBUFS)) { dev->stats.rx_dropped++; /* Re-add old buffer */ sbdma_add_rcvbuffer(sc, d, sb); /* No point in continuing at the moment */ printk(KERN_ERR "dropped packet (1)\n"); d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); goto done; } else { /* * Set length into the packet */ skb_put(sb,len); /* * Buffer has been replaced on the * receive ring. Pass the buffer to * the kernel */ sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev); /* Check hw IPv4/TCP checksum if supported */ if (sc->rx_hw_checksum == ENABLE) { if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) && !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) { sb->ip_summed = CHECKSUM_UNNECESSARY; /* don't need to set sb->csum */ } else { skb_checksum_none_assert(sb); } } prefetch(sb->data); prefetch((const void *)(((char *)sb->data)+32)); if (poll) dropped = netif_receive_skb(sb); else dropped = netif_rx(sb); if (dropped == NET_RX_DROP) { dev->stats.rx_dropped++; d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); goto done; } else { dev->stats.rx_bytes += len; dev->stats.rx_packets++; } } } else { /* * Packet was mangled somehow. Just drop it and * put it back on the receive ring. */ dev->stats.rx_errors++; sbdma_add_rcvbuffer(sc, d, sb); } /* * .. and advance to the next buffer. */ d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); work_done++; } if (!poll) { work_to_do = 32; goto again; /* collect fifo drop statistics again */ } done: return work_done; } /********************************************************************** * SBDMA_TX_PROCESS(sc,d) * * Process "completed" transmit buffers on the specified DMA channel. * This is normally called within the interrupt service routine. * Note that this isn't really ideal for priority channels, since * it processes all of the packets on a given channel before * returning. 
* * Input parameters: * sc - softc structure * d - DMA channel context * poll - 1: using polling (for NAPI) * * Return value: * nothing ********************************************************************* */ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, int poll) { struct net_device *dev = sc->sbm_dev; int curidx; int hwidx; struct sbdmadscr *dsc; struct sk_buff *sb; unsigned long flags; int packets_handled = 0; spin_lock_irqsave(&(sc->sbm_lock), flags); if (d->sbdma_remptr == d->sbdma_addptr) goto end_unlock; hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable); for (;;) { /* * figure out where we are (as an index) and where * the hardware is (also as an index) * * This could be done faster if (for example) the * descriptor table was page-aligned and contiguous in * both virtual and physical memory -- you could then * just compare the low-order bits of the virtual address * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) */ curidx = d->sbdma_remptr - d->sbdma_dscrtable; /* * If they're the same, that means we've processed all * of the descriptors up to (but not including) the one that * the hardware is working on right now. */ if (curidx == hwidx) break; /* * Otherwise, get the packet's sk_buff ptr back */ dsc = &(d->sbdma_dscrtable[curidx]); sb = d->sbdma_ctxtable[curidx]; d->sbdma_ctxtable[curidx] = NULL; /* * Stats */ dev->stats.tx_bytes += sb->len; dev->stats.tx_packets++; /* * for transmits, we just free buffers. */ dev_kfree_skb_irq(sb); /* * .. and advance to the next buffer. */ d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); packets_handled++; } /* * Decide if we should wake up the protocol or not. * Other drivers seem to do this when we reach a low * watermark on the transmit queue. */ if (packets_handled) netif_wake_queue(d->sbdma_eth->sbm_dev); end_unlock: spin_unlock_irqrestore(&(sc->sbm_lock), flags); } /********************************************************************** * SBMAC_INITCTX(s) * * Initialize an Ethernet context structure - this is called * once per MAC on the 1250. Memory is allocated here, so don't * call it again from inside the ioctl routines that bring the * interface up/down * * Input parameters: * s - sbmac context structure * * Return value: * 0 ********************************************************************* */ static int sbmac_initctx(struct sbmac_softc *s) { /* * figure out the addresses of some ports */ s->sbm_macenable = s->sbm_base + R_MAC_ENABLE; s->sbm_maccfg = s->sbm_base + R_MAC_CFG; s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG; s->sbm_framecfg = s->sbm_base + R_MAC_FRAMECFG; s->sbm_rxfilter = s->sbm_base + R_MAC_ADFILTER_CFG; s->sbm_isr = s->sbm_base + R_MAC_STATUS; s->sbm_imr = s->sbm_base + R_MAC_INT_MASK; s->sbm_mdio = s->sbm_base + R_MAC_MDIO; /* * Initialize the DMA channels. Right now, only one per MAC is used * Note: Only do this _once_, as it allocates memory from the kernel! 
*/ sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR); sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR); /* * initial state is OFF */ s->sbm_state = sbmac_state_off; return 0; } static void sbdma_uninitctx(struct sbmacdma *d) { if (d->sbdma_dscrtable_unaligned) { kfree(d->sbdma_dscrtable_unaligned); d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL; } if (d->sbdma_ctxtable) { kfree(d->sbdma_ctxtable); d->sbdma_ctxtable = NULL; } } static void sbmac_uninitctx(struct sbmac_softc *sc) { sbdma_uninitctx(&(sc->sbm_txdma)); sbdma_uninitctx(&(sc->sbm_rxdma)); } /********************************************************************** * SBMAC_CHANNEL_START(s) * * Start packet processing on this MAC. * * Input parameters: * s - sbmac structure * * Return value: * nothing ********************************************************************* */ static void sbmac_channel_start(struct sbmac_softc *s) { uint64_t reg; void __iomem *port; uint64_t cfg,fifo,framecfg; int idx, th_value; /* * Don't do this if running */ if (s->sbm_state == sbmac_state_on) return; /* * Bring the controller out of reset, but leave it off. */ __raw_writeq(0, s->sbm_macenable); /* * Ignore all received packets */ __raw_writeq(0, s->sbm_rxfilter); /* * Calculate values for various control registers. */ cfg = M_MAC_RETRY_EN | M_MAC_TX_HOLD_SOP_EN | V_MAC_TX_PAUSE_CNT_16K | M_MAC_AP_STAT_EN | M_MAC_FAST_SYNC | M_MAC_SS_EN | 0; /* * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above * Use a larger RD_THRSH for gigabit */ if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) th_value = 28; else th_value = 64; fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */ ((s->sbm_speed == sbmac_speed_1000) ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) | V_MAC_TX_RL_THRSH(4) | V_MAC_RX_PL_THRSH(4) | V_MAC_RX_RD_THRSH(4) | /* Must be '4' */ V_MAC_RX_RL_THRSH(8) | 0; framecfg = V_MAC_MIN_FRAMESZ_DEFAULT | V_MAC_MAX_FRAMESZ_DEFAULT | V_MAC_BACKOFF_SEL(1); /* * Clear out the hash address map */ port = s->sbm_base + R_MAC_HASH_BASE; for (idx = 0; idx < MAC_HASH_COUNT; idx++) { __raw_writeq(0, port); port += sizeof(uint64_t); } /* * Clear out the exact-match table */ port = s->sbm_base + R_MAC_ADDR_BASE; for (idx = 0; idx < MAC_ADDR_COUNT; idx++) { __raw_writeq(0, port); port += sizeof(uint64_t); } /* * Clear out the DMA Channel mapping table registers */ port = s->sbm_base + R_MAC_CHUP0_BASE; for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { __raw_writeq(0, port); port += sizeof(uint64_t); } port = s->sbm_base + R_MAC_CHLO0_BASE; for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { __raw_writeq(0, port); port += sizeof(uint64_t); } /* * Program the hardware address. It goes into the hardware-address * register as well as the first filter register. */ reg = sbmac_addr2reg(s->sbm_hwaddr); port = s->sbm_base + R_MAC_ADDR_BASE; __raw_writeq(reg, port); port = s->sbm_base + R_MAC_ETHERNET_ADDR; #ifdef CONFIG_SB1_PASS_1_WORKAROUNDS /* * Pass1 SOCs do not receive packets addressed to the * destination address in the R_MAC_ETHERNET_ADDR register. * Set the value to zero. 
*/ __raw_writeq(0, port); #else __raw_writeq(reg, port); #endif /* * Set the receive filter for no packets, and write values * to the various config registers */ __raw_writeq(0, s->sbm_rxfilter); __raw_writeq(0, s->sbm_imr); __raw_writeq(framecfg, s->sbm_framecfg); __raw_writeq(fifo, s->sbm_fifocfg); __raw_writeq(cfg, s->sbm_maccfg); /* * Initialize DMA channels (rings should be ok now) */ sbdma_channel_start(&(s->sbm_rxdma), DMA_RX); sbdma_channel_start(&(s->sbm_txdma), DMA_TX); /* * Configure the speed, duplex, and flow control */ sbmac_set_speed(s,s->sbm_speed); sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc); /* * Fill the receive ring */ sbdma_fillring(s, &(s->sbm_rxdma)); /* * Turn on the rest of the bits in the enable register */ #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) __raw_writeq(M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0, s->sbm_macenable); #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) __raw_writeq(M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0 | M_MAC_RX_ENABLE | M_MAC_TX_ENABLE, s->sbm_macenable); #else #error invalid SiByte MAC configuration #endif #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr); #else __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr); #endif /* * Enable receiving unicasts and broadcasts */ __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter); /* * we're running now. */ s->sbm_state = sbmac_state_on; /* * Program multicast addresses */ sbmac_setmulti(s); /* * If channel was in promiscuous mode before, turn that on */ if (s->sbm_devflags & IFF_PROMISC) { sbmac_promiscuous_mode(s,1); } } /********************************************************************** * SBMAC_CHANNEL_STOP(s) * * Stop packet processing on this MAC. * * Input parameters: * s - sbmac structure * * Return value: * nothing ********************************************************************* */ static void sbmac_channel_stop(struct sbmac_softc *s) { /* don't do this if already stopped */ if (s->sbm_state == sbmac_state_off) return; /* don't accept any packets, disable all interrupts */ __raw_writeq(0, s->sbm_rxfilter); __raw_writeq(0, s->sbm_imr); /* Turn off ticker */ /* XXX */ /* turn off receiver and transmitter */ __raw_writeq(0, s->sbm_macenable); /* We're stopped now. 
*/ s->sbm_state = sbmac_state_off; /* * Stop DMA channels (rings should be ok now) */ sbdma_channel_stop(&(s->sbm_rxdma)); sbdma_channel_stop(&(s->sbm_txdma)); /* Empty the receive and transmit rings */ sbdma_emptyring(&(s->sbm_rxdma)); sbdma_emptyring(&(s->sbm_txdma)); } /********************************************************************** * SBMAC_SET_CHANNEL_STATE(state) * * Set the channel's state ON or OFF * * Input parameters: * state - new state * * Return value: * old state ********************************************************************* */ static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc, enum sbmac_state state) { enum sbmac_state oldstate = sc->sbm_state; /* * If same as previous state, return */ if (state == oldstate) { return oldstate; } /* * If new state is ON, turn channel on */ if (state == sbmac_state_on) { sbmac_channel_start(sc); } else { sbmac_channel_stop(sc); } /* * Return previous state */ return oldstate; } /********************************************************************** * SBMAC_PROMISCUOUS_MODE(sc,onoff) * * Turn on or off promiscuous mode * * Input parameters: * sc - softc * onoff - 1 to turn on, 0 to turn off * * Return value: * nothing ********************************************************************* */ static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff) { uint64_t reg; if (sc->sbm_state != sbmac_state_on) return; if (onoff) { reg = __raw_readq(sc->sbm_rxfilter); reg |= M_MAC_ALLPKT_EN; __raw_writeq(reg, sc->sbm_rxfilter); } else { reg = __raw_readq(sc->sbm_rxfilter); reg &= ~M_MAC_ALLPKT_EN; __raw_writeq(reg, sc->sbm_rxfilter); } } /********************************************************************** * SBMAC_SETIPHDR_OFFSET(sc,onoff) * * Set the iphdr offset as 15 assuming ethernet encapsulation * * Input parameters: * sc - softc * * Return value: * nothing ********************************************************************* */ static void sbmac_set_iphdr_offset(struct sbmac_softc *sc) { uint64_t reg; /* Hard code the off set to 15 for now */ reg = __raw_readq(sc->sbm_rxfilter); reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15); __raw_writeq(reg, sc->sbm_rxfilter); /* BCM1250 pass1 didn't have hardware checksum. Everything later does. */ if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) { sc->rx_hw_checksum = DISABLE; } else { sc->rx_hw_checksum = ENABLE; } } /********************************************************************** * SBMAC_ADDR2REG(ptr) * * Convert six bytes into the 64-bit register value that * we typically write into the SBMAC's address/mcast registers * * Input parameters: * ptr - pointer to 6 bytes * * Return value: * register value ********************************************************************* */ static uint64_t sbmac_addr2reg(unsigned char *ptr) { uint64_t reg = 0; ptr += 6; reg |= (uint64_t) *(--ptr); reg <<= 8; reg |= (uint64_t) *(--ptr); reg <<= 8; reg |= (uint64_t) *(--ptr); reg <<= 8; reg |= (uint64_t) *(--ptr); reg <<= 8; reg |= (uint64_t) *(--ptr); reg <<= 8; reg |= (uint64_t) *(--ptr); return reg; } /********************************************************************** * SBMAC_SET_SPEED(s,speed) * * Configure LAN speed for the specified MAC. * Warning: must be called when MAC is off! 
* * Input parameters: * s - sbmac structure * speed - speed to set MAC to (see enum sbmac_speed) * * Return value: * 1 if successful * 0 indicates invalid parameters ********************************************************************* */ static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed) { uint64_t cfg; uint64_t framecfg; /* * Save new current values */ s->sbm_speed = speed; if (s->sbm_state == sbmac_state_on) return 0; /* save for next restart */ /* * Read current register values */ cfg = __raw_readq(s->sbm_maccfg); framecfg = __raw_readq(s->sbm_framecfg); /* * Mask out the stuff we want to change */ cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL); framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH | M_MAC_SLOT_SIZE); /* * Now add in the new bits */ switch (speed) { case sbmac_speed_10: framecfg |= V_MAC_IFG_RX_10 | V_MAC_IFG_TX_10 | K_MAC_IFG_THRSH_10 | V_MAC_SLOT_SIZE_10; cfg |= V_MAC_SPEED_SEL_10MBPS; break; case sbmac_speed_100: framecfg |= V_MAC_IFG_RX_100 | V_MAC_IFG_TX_100 | V_MAC_IFG_THRSH_100 | V_MAC_SLOT_SIZE_100; cfg |= V_MAC_SPEED_SEL_100MBPS ; break; case sbmac_speed_1000: framecfg |= V_MAC_IFG_RX_1000 | V_MAC_IFG_TX_1000 | V_MAC_IFG_THRSH_1000 | V_MAC_SLOT_SIZE_1000; cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN; break; default: return 0; } /* * Send the bits back to the hardware */ __raw_writeq(framecfg, s->sbm_framecfg); __raw_writeq(cfg, s->sbm_maccfg); return 1; } /********************************************************************** * SBMAC_SET_DUPLEX(s,duplex,fc) * * Set Ethernet duplex and flow control options for this MAC * Warning: must be called when MAC is off! * * Input parameters: * s - sbmac structure * duplex - duplex setting (see enum sbmac_duplex) * fc - flow control setting (see enum sbmac_fc) * * Return value: * 1 if ok * 0 if an invalid parameter combination was specified ********************************************************************* */ static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, enum sbmac_fc fc) { uint64_t cfg; /* * Save new current values */ s->sbm_duplex = duplex; s->sbm_fc = fc; if (s->sbm_state == sbmac_state_on) return 0; /* save for next restart */ /* * Read current register values */ cfg = __raw_readq(s->sbm_maccfg); /* * Mask off the stuff we're about to change */ cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN); switch (duplex) { case sbmac_duplex_half: switch (fc) { case sbmac_fc_disabled: cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED; break; case sbmac_fc_collision: cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED; break; case sbmac_fc_carrier: cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR; break; case sbmac_fc_frame: /* not valid in half duplex */ default: /* invalid selection */ return 0; } break; case sbmac_duplex_full: switch (fc) { case sbmac_fc_disabled: cfg |= V_MAC_FC_CMD_DISABLED; break; case sbmac_fc_frame: cfg |= V_MAC_FC_CMD_ENABLED; break; case sbmac_fc_collision: /* not valid in full duplex */ case sbmac_fc_carrier: /* not valid in full duplex */ default: return 0; } break; default: return 0; } /* * Send the bits back to the hardware */ __raw_writeq(cfg, s->sbm_maccfg); return 1; } /********************************************************************** * SBMAC_INTR() * * Interrupt handler for MAC interrupts * * Input parameters: * MAC structure * * Return value: * nothing ********************************************************************* */ static irqreturn_t sbmac_intr(int irq,void *dev_instance) { struct net_device *dev = (struct net_device *) 
dev_instance; struct sbmac_softc *sc = netdev_priv(dev); uint64_t isr; int handled = 0; /* * Read the ISR (this clears the bits in the real * register, except for counter addr) */ isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR; if (isr == 0) return IRQ_RETVAL(0); handled = 1; /* * Transmits on channel 0 */ if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) sbdma_tx_process(sc,&(sc->sbm_txdma), 0); if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { if (napi_schedule_prep(&sc->napi)) { __raw_writeq(0, sc->sbm_imr); __napi_schedule(&sc->napi); /* Depend on the exit from poll to reenable intr */ } else { /* may leave some packets behind */ sbdma_rx_process(sc,&(sc->sbm_rxdma), SBMAC_MAX_RXDESCR * 2, 0); } } return IRQ_RETVAL(handled); } /********************************************************************** * SBMAC_START_TX(skb,dev) * * Start output on the specified interface. Basically, we * queue as many buffers as we can until the ring fills up, or * we run off the end of the queue, whichever comes first. * * Input parameters: * * * Return value: * nothing ********************************************************************* */ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); unsigned long flags; /* lock eth irq */ spin_lock_irqsave(&sc->sbm_lock, flags); /* * Put the buffer on the transmit ring. If we * don't have room, stop the queue. */ if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) { /* XXX save skb that we could not send */ netif_stop_queue(dev); spin_unlock_irqrestore(&sc->sbm_lock, flags); return NETDEV_TX_BUSY; } spin_unlock_irqrestore(&sc->sbm_lock, flags); return NETDEV_TX_OK; } /********************************************************************** * SBMAC_SETMULTI(sc) * * Reprogram the multicast table into the hardware, given * the list of multicasts associated with the interface * structure. * * Input parameters: * sc - softc * * Return value: * nothing ********************************************************************* */ static void sbmac_setmulti(struct sbmac_softc *sc) { uint64_t reg; void __iomem *port; int idx; struct netdev_hw_addr *ha; struct net_device *dev = sc->sbm_dev; /* * Clear out entire multicast table. We do this by nuking * the entire hash table and all the direct matches except * the first one, which is used for our station address */ for (idx = 1; idx < MAC_ADDR_COUNT; idx++) { port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)); __raw_writeq(0, port); } for (idx = 0; idx < MAC_HASH_COUNT; idx++) { port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t)); __raw_writeq(0, port); } /* * Clear the filter to say we don't want any multicasts. */ reg = __raw_readq(sc->sbm_rxfilter); reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN); __raw_writeq(reg, sc->sbm_rxfilter); if (dev->flags & IFF_ALLMULTI) { /* * Enable ALL multicasts. Do this by inverting the * multicast enable bit. */ reg = __raw_readq(sc->sbm_rxfilter); reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN); __raw_writeq(reg, sc->sbm_rxfilter); return; } /* * Progam new multicast entries. For now, only use the * perfect filter. 
In the future we'll need to use the * hash filter if the perfect filter overflows */ /* XXX only using perfect filter for now, need to use hash * XXX if the table overflows */ idx = 1; /* skip station address */ netdev_for_each_mc_addr(ha, dev) { if (idx == MAC_ADDR_COUNT) break; reg = sbmac_addr2reg(ha->addr); port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t)); __raw_writeq(reg, port); idx++; } /* * Enable the "accept multicast bits" if we programmed at least one * multicast. */ if (idx > 1) { reg = __raw_readq(sc->sbm_rxfilter); reg |= M_MAC_MCAST_EN; __raw_writeq(reg, sc->sbm_rxfilter); } } static int sb1250_change_mtu(struct net_device *_dev, int new_mtu) { if (new_mtu > ENET_PACKET_SIZE) return -EINVAL; _dev->mtu = new_mtu; pr_info("changing the mtu to %d\n", new_mtu); return 0; } static const struct net_device_ops sbmac_netdev_ops = { .ndo_open = sbmac_open, .ndo_stop = sbmac_close, .ndo_start_xmit = sbmac_start_tx, .ndo_set_rx_mode = sbmac_set_rx_mode, .ndo_tx_timeout = sbmac_tx_timeout, .ndo_do_ioctl = sbmac_mii_ioctl, .ndo_change_mtu = sb1250_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = sbmac_netpoll, #endif }; /********************************************************************** * SBMAC_INIT(dev) * * Attach routine - init hardware and hook ourselves into linux * * Input parameters: * dev - net_device structure * * Return value: * status ********************************************************************* */ static int sbmac_init(struct platform_device *pldev, long long base) { struct net_device *dev = dev_get_drvdata(&pldev->dev); int idx = pldev->id; struct sbmac_softc *sc = netdev_priv(dev); unsigned char *eaddr; uint64_t ea_reg; int i; int err; sc->sbm_dev = dev; sc->sbe_idx = idx; eaddr = sc->sbm_hwaddr; /* * Read the ethernet address. The firmware left this programmed * for us in the ethernet address register for each mac. */ ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR); __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR); for (i = 0; i < 6; i++) { eaddr[i] = (uint8_t) (ea_reg & 0xFF); ea_reg >>= 8; } for (i = 0; i < 6; i++) { dev->dev_addr[i] = eaddr[i]; } /* * Initialize context (get pointers to registers and stuff), then * allocate the memory for the descriptor tables. 
*/ sbmac_initctx(sc); /* * Set up Linux device callins */ spin_lock_init(&(sc->sbm_lock)); dev->netdev_ops = &sbmac_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; netif_napi_add(dev, &sc->napi, sbmac_poll, 16); dev->irq = UNIT_INT(idx); /* This is needed for PASS2 for Rx H/W checksum feature */ sbmac_set_iphdr_offset(sc); sc->mii_bus = mdiobus_alloc(); if (sc->mii_bus == NULL) { err = -ENOMEM; goto uninit_ctx; } sc->mii_bus->name = sbmac_mdio_string; snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pldev->name, idx); sc->mii_bus->priv = sc; sc->mii_bus->read = sbmac_mii_read; sc->mii_bus->write = sbmac_mii_write; sc->mii_bus->irq = sc->phy_irq; for (i = 0; i < PHY_MAX_ADDR; ++i) sc->mii_bus->irq[i] = SBMAC_PHY_INT; sc->mii_bus->parent = &pldev->dev; /* * Probe PHY address */ err = mdiobus_register(sc->mii_bus); if (err) { printk(KERN_ERR "%s: unable to register MDIO bus\n", dev->name); goto free_mdio; } dev_set_drvdata(&pldev->dev, sc->mii_bus); err = register_netdev(dev); if (err) { printk(KERN_ERR "%s.%d: unable to register netdev\n", sbmac_string, idx); goto unreg_mdio; } pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name); if (sc->rx_hw_checksum == ENABLE) pr_info("%s: enabling TCP rcv checksum\n", dev->name); /* * Display Ethernet address (this is called during the config * process so we need to finish off the config message that * was being displayed) */ pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n", dev->name, base, eaddr); return 0; unreg_mdio: mdiobus_unregister(sc->mii_bus); dev_set_drvdata(&pldev->dev, NULL); free_mdio: mdiobus_free(sc->mii_bus); uninit_ctx: sbmac_uninitctx(sc); return err; } static int sbmac_open(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); int err; if (debug > 1) pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq); /* * map/route interrupt (clear status first, in case something * weird is pending; we haven't initialized the mac registers * yet) */ __raw_readq(sc->sbm_isr); err = request_irq(dev->irq, sbmac_intr, IRQF_SHARED, dev->name, dev); if (err) { printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq); goto out_err; } sc->sbm_speed = sbmac_speed_none; sc->sbm_duplex = sbmac_duplex_none; sc->sbm_fc = sbmac_fc_none; sc->sbm_pause = -1; sc->sbm_link = 0; /* * Attach to the PHY */ err = sbmac_mii_probe(dev); if (err) goto out_unregister; /* * Turn on the channel */ sbmac_set_channel_state(sc,sbmac_state_on); netif_start_queue(dev); sbmac_set_rx_mode(dev); phy_start(sc->phy_dev); napi_enable(&sc->napi); return 0; out_unregister: free_irq(dev->irq, dev); out_err: return err; } static int sbmac_mii_probe(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); struct phy_device *phy_dev; int i; for (i = 0; i < PHY_MAX_ADDR; i++) { phy_dev = sc->mii_bus->phy_map[i]; if (phy_dev) break; } if (!phy_dev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENXIO; } phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phy_dev)) { printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); return PTR_ERR(phy_dev); } /* Remove any features not supported by the controller */ phy_dev->supported &= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause; phy_dev->advertising = phy_dev->supported; pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, 
irq=%d)\n", dev->name, phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq); sc->phy_dev = phy_dev; return 0; } static void sbmac_mii_poll(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); struct phy_device *phy_dev = sc->phy_dev; unsigned long flags; enum sbmac_fc fc; int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg; link_chg = (sc->sbm_link != phy_dev->link); speed_chg = (sc->sbm_speed != phy_dev->speed); duplex_chg = (sc->sbm_duplex != phy_dev->duplex); pause_chg = (sc->sbm_pause != phy_dev->pause); if (!link_chg && !speed_chg && !duplex_chg && !pause_chg) return; /* Hmmm... */ if (!phy_dev->link) { if (link_chg) { sc->sbm_link = phy_dev->link; sc->sbm_speed = sbmac_speed_none; sc->sbm_duplex = sbmac_duplex_none; sc->sbm_fc = sbmac_fc_disabled; sc->sbm_pause = -1; pr_info("%s: link unavailable\n", dev->name); } return; } if (phy_dev->duplex == DUPLEX_FULL) { if (phy_dev->pause) fc = sbmac_fc_frame; else fc = sbmac_fc_disabled; } else fc = sbmac_fc_collision; fc_chg = (sc->sbm_fc != fc); pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed, phy_dev->duplex == DUPLEX_FULL ? 'F' : 'H'); spin_lock_irqsave(&sc->sbm_lock, flags); sc->sbm_speed = phy_dev->speed; sc->sbm_duplex = phy_dev->duplex; sc->sbm_fc = fc; sc->sbm_pause = phy_dev->pause; sc->sbm_link = phy_dev->link; if ((speed_chg || duplex_chg || fc_chg) && sc->sbm_state != sbmac_state_off) { /* * something changed, restart the channel */ if (debug > 1) pr_debug("%s: restarting channel " "because PHY state changed\n", dev->name); sbmac_channel_stop(sc); sbmac_channel_start(sc); } spin_unlock_irqrestore(&sc->sbm_lock, flags); } static void sbmac_tx_timeout (struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&sc->sbm_lock, flags); dev->trans_start = jiffies; /* prevent tx timeout */ dev->stats.tx_errors++; spin_unlock_irqrestore(&sc->sbm_lock, flags); printk (KERN_WARNING "%s: Transmit timed out\n",dev->name); } static void sbmac_set_rx_mode(struct net_device *dev) { unsigned long flags; struct sbmac_softc *sc = netdev_priv(dev); spin_lock_irqsave(&sc->sbm_lock, flags); if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) { /* * Promiscuous changed. */ if (dev->flags & IFF_PROMISC) { sbmac_promiscuous_mode(sc,1); } else { sbmac_promiscuous_mode(sc,0); } } spin_unlock_irqrestore(&sc->sbm_lock, flags); /* * Program the multicasts. Do this every time. 
*/ sbmac_setmulti(sc); } static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct sbmac_softc *sc = netdev_priv(dev); if (!netif_running(dev) || !sc->phy_dev) return -EINVAL; return phy_mii_ioctl(sc->phy_dev, rq, cmd); } static int sbmac_close(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); napi_disable(&sc->napi); phy_stop(sc->phy_dev); sbmac_set_channel_state(sc, sbmac_state_off); netif_stop_queue(dev); if (debug > 1) pr_debug("%s: Shutting down ethercard\n", dev->name); phy_disconnect(sc->phy_dev); sc->phy_dev = NULL; free_irq(dev->irq, dev); sbdma_emptyring(&(sc->sbm_txdma)); sbdma_emptyring(&(sc->sbm_rxdma)); return 0; } static int sbmac_poll(struct napi_struct *napi, int budget) { struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi); int work_done; work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1); sbdma_tx_process(sc, &(sc->sbm_txdma), 1); if (work_done < budget) { napi_complete(napi); #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), sc->sbm_imr); #else __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr); #endif } return work_done; } static int __devinit sbmac_probe(struct platform_device *pldev) { struct net_device *dev; struct sbmac_softc *sc; void __iomem *sbm_base; struct resource *res; u64 sbmac_orig_hwaddr; int err; res = platform_get_resource(pldev, IORESOURCE_MEM, 0); BUG_ON(!res); sbm_base = ioremap_nocache(res->start, resource_size(res)); if (!sbm_base) { printk(KERN_ERR "%s: unable to map device registers\n", dev_name(&pldev->dev)); err = -ENOMEM; goto out_out; } /* * The R_MAC_ETHERNET_ADDR register will be set to some nonzero * value for us by the firmware if we're going to use this MAC. * If we find a zero, skip this MAC. */ sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev), sbmac_orig_hwaddr ? "" : "not ", (long long)res->start); if (sbmac_orig_hwaddr == 0) { err = 0; goto out_unmap; } /* * Okay, cool. Initialize this MAC. */ dev = alloc_etherdev(sizeof(struct sbmac_softc)); if (!dev) { err = -ENOMEM; goto out_unmap; } dev_set_drvdata(&pldev->dev, dev); SET_NETDEV_DEV(dev, &pldev->dev); sc = netdev_priv(dev); sc->sbm_base = sbm_base; err = sbmac_init(pldev, res->start); if (err) goto out_kfree; return 0; out_kfree: free_netdev(dev); __raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR); out_unmap: iounmap(sbm_base); out_out: return err; } static int __exit sbmac_remove(struct platform_device *pldev) { struct net_device *dev = dev_get_drvdata(&pldev->dev); struct sbmac_softc *sc = netdev_priv(dev); unregister_netdev(dev); sbmac_uninitctx(sc); mdiobus_unregister(sc->mii_bus); mdiobus_free(sc->mii_bus); iounmap(sc->sbm_base); free_netdev(dev); return 0; } static struct platform_driver sbmac_driver = { .probe = sbmac_probe, .remove = __exit_p(sbmac_remove), .driver = { .name = sbmac_string, .owner = THIS_MODULE, }, }; module_platform_driver(sbmac_driver);
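/*
 * Editor's note: the stand-alone sketch below is NOT part of the original
 * sb1250-mac.c above.  It is added only to illustrate the descriptor-ring
 * bookkeeping the driver's comments describe: SBDMA_NEXTBUF() advances a
 * pointer with wraparound, and the ring is treated as full when the slot
 * after the software "add" pointer is the "remove" pointer.  All names here
 * (ring_next, ring_add, RING_SIZE) are invented for this user-space example
 * and do not exist in the driver.
 */
#include <stdio.h>

#define RING_SIZE 8

struct ring {
	int buf[RING_SIZE];
	int add;	/* index where software adds the next entry (sbdma_addptr) */
	int rem;	/* index where software removes the oldest entry (sbdma_remptr) */
};

/* Advance an index with wraparound, as SBDMA_NEXTBUF() does with pointers. */
static int ring_next(int idx)
{
	return (idx + 1 == RING_SIZE) ? 0 : idx + 1;
}

/* Mirror of the driver's full test: full when the slot after 'add' is 'rem'. */
static int ring_add(struct ring *r, int value)
{
	int next = ring_next(r->add);

	if (next == r->rem)
		return -1;	/* ring full; one slot is always kept unused */
	r->buf[r->add] = value;
	r->add = next;
	return 0;
}

int main(void)
{
	struct ring r = { .add = 0, .rem = 0 };
	int i;

	/* Only RING_SIZE - 1 entries fit before the ring reports full. */
	for (i = 0; i < RING_SIZE + 2; i++)
		printf("add %d -> %s\n", i,
		       ring_add(&r, i) ? "ring full" : "ok");
	return 0;
}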
gpl-2.0
sssangram14/android_kernel_samsung_arubaslim
net/sched/act_skbedit.c
5499
5843
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>

#define SKBEDIT_TAB_MASK 15
static struct tcf_common *tcf_skbedit_ht[SKBEDIT_TAB_MASK + 1];
static u32 skbedit_idx_gen;
static DEFINE_RWLOCK(skbedit_lock);

static struct tcf_hashinfo skbedit_hash_info = {
	.htab	= tcf_skbedit_ht,
	.hmask	= SKBEDIT_TAB_MASK,
	.lock	= &skbedit_lock,
};

static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	struct tcf_skbedit *d = a->priv;

	spin_lock(&d->tcf_lock);
	d->tcf_tm.lastuse = jiffies;
	bstats_update(&d->tcf_bstats, skb);

	if (d->flags & SKBEDIT_F_PRIORITY)
		skb->priority = d->priority;
	if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
	    skb->dev->real_num_tx_queues > d->queue_mapping)
		skb_set_queue_mapping(skb, d->queue_mapping);
	if (d->flags & SKBEDIT_F_MARK)
		skb->mark = d->mark;

	spin_unlock(&d->tcf_lock);
	return d->tcf_action;
}

static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MARK]		= { .len = sizeof(u32) },
};

static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
			    struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
	struct tc_skbedit *parm;
	struct tcf_skbedit *d;
	struct tcf_common *pc;
	u32 flags = 0, *priority = NULL, *mark = NULL;
	u16 *queue_mapping = NULL;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy);
	if (err < 0)
		return err;

	if (tb[TCA_SKBEDIT_PARMS] == NULL)
		return -EINVAL;

	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
		flags |= SKBEDIT_F_PRIORITY;
		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
	}

	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
		flags |= SKBEDIT_F_QUEUE_MAPPING;
		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
	}

	if (tb[TCA_SKBEDIT_MARK] != NULL) {
		flags |= SKBEDIT_F_MARK;
		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
	}

	if (!flags)
		return -EINVAL;

	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);

	pc = tcf_hash_check(parm->index, a, bind, &skbedit_hash_info);
	if (!pc) {
		pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
				     &skbedit_idx_gen, &skbedit_hash_info);
		if (IS_ERR(pc))
			return PTR_ERR(pc);

		d = to_skbedit(pc);
		ret = ACT_P_CREATED;
	} else {
		d = to_skbedit(pc);
		if (!ovr) {
			tcf_hash_release(pc, bind, &skbedit_hash_info);
			return -EEXIST;
		}
	}

	spin_lock_bh(&d->tcf_lock);

	d->flags = flags;
	if (flags & SKBEDIT_F_PRIORITY)
		d->priority = *priority;
	if (flags & SKBEDIT_F_QUEUE_MAPPING)
		d->queue_mapping = *queue_mapping;
	if (flags & SKBEDIT_F_MARK)
		d->mark = *mark;

	d->tcf_action = parm->action;

	spin_unlock_bh(&d->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &skbedit_hash_info);
	return ret;
}

static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
{
	struct tcf_skbedit *d = a->priv;

	if (d)
		return tcf_hash_release(&d->common, bind, &skbedit_hash_info);
	return 0;
}

static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = a->priv;
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = d->tcf_refcnt - ref,
		.bindcnt = d->tcf_bindcnt - bind,
		.action  = d->tcf_action,
	};
	struct tcf_t t;

	NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
	if (d->flags & SKBEDIT_F_PRIORITY)
		NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
			&d->priority);
	if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
		NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING,
			sizeof(d->queue_mapping), &d->queue_mapping);
	if (d->flags & SKBEDIT_F_MARK)
		NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark), &d->mark);
	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
	NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_skbedit_ops = {
	.kind		= "skbedit",
	.hinfo		= &skbedit_hash_info,
	.type		= TCA_ACT_SKBEDIT,
	.capab		= TCA_CAP_NONE,
	.owner		= THIS_MODULE,
	.act		= tcf_skbedit,
	.dump		= tcf_skbedit_dump,
	.cleanup	= tcf_skbedit_cleanup,
	.init		= tcf_skbedit_init,
	.walk		= tcf_generic_walker,
};

MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops);
}

static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);
gpl-2.0
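The skbedit action in the file above applies each rewrite only when the corresponding SKBEDIT_F_* bit is set in the action's flags word. A minimal stand-alone sketch of that flag-dispatch pattern follows; the struct and field names here are illustrative stand-ins, not the kernel's own types.

/*
 * User-space sketch of the SKBEDIT_F_* dispatch used by tcf_skbedit():
 * each flag bit enables exactly one field overwrite on the packet.
 */
#include <stdint.h>
#include <stdio.h>

#define SKBEDIT_F_PRIORITY      0x1
#define SKBEDIT_F_QUEUE_MAPPING 0x2
#define SKBEDIT_F_MARK          0x4

struct fake_skb {			/* illustrative, not struct sk_buff */
	uint32_t priority;
	uint16_t queue_mapping;
	uint32_t mark;
};

struct fake_skbedit {			/* illustrative, not struct tcf_skbedit */
	uint32_t flags;
	uint32_t priority;
	uint16_t queue_mapping;
	uint32_t mark;
};

static void apply_skbedit(struct fake_skb *skb, const struct fake_skbedit *d)
{
	if (d->flags & SKBEDIT_F_PRIORITY)
		skb->priority = d->priority;
	if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
		skb->queue_mapping = d->queue_mapping;
	if (d->flags & SKBEDIT_F_MARK)
		skb->mark = d->mark;
}

int main(void)
{
	struct fake_skb skb = { 0 };
	struct fake_skbedit act = {
		.flags    = SKBEDIT_F_PRIORITY | SKBEDIT_F_MARK,
		.priority = 7,
		.mark     = 0x42,
	};

	apply_skbedit(&skb, &act);
	printf("priority=%u queue=%u mark=0x%x\n",
	       skb.priority, skb.queue_mapping, skb.mark);
	return 0;
}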
titanxxh/xengt-ha-kernel
arch/tile/lib/delay.c
12155
1184
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/thread_info.h>
#include <asm/timex.h>

void __udelay(unsigned long usecs)
{
	if (usecs > ULONG_MAX / 1000) {
		WARN_ON_ONCE(usecs > ULONG_MAX / 1000);
		usecs = ULONG_MAX / 1000;
	}
	__ndelay(usecs * 1000);
}
EXPORT_SYMBOL(__udelay);

void __ndelay(unsigned long nsecs)
{
	cycles_t target = get_cycles();
	target += ns2cycles(nsecs);
	while (get_cycles() < target)
		cpu_relax();
}
EXPORT_SYMBOL(__ndelay);

void __delay(unsigned long cycles)
{
	cycles_t target = get_cycles() + cycles;
	while (get_cycles() < target)
		cpu_relax();
}
EXPORT_SYMBOL(__delay);
gpl-2.0
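The tile delay routines above busy-wait by computing an absolute cycle deadline once and then spinning until get_cycles() passes it. Below is a user-space sketch of the same pattern, assuming clock_gettime(CLOCK_MONOTONIC) as a stand-in timebase for get_cycles(); it is an illustration of the technique, not the kernel implementation.

/*
 * Busy-wait sketch: compute the deadline once, then spin until the
 * monotonic clock reaches it (the loop body is where cpu_relax()
 * would sit in the kernel version).
 */
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

static void spin_ndelay(unsigned long nsecs)
{
	uint64_t target = now_ns() + nsecs;

	while (now_ns() < target)
		;	/* spin; a cpu_relax() equivalent would go here */
}

int main(void)
{
	spin_ndelay(1000000);	/* burn roughly one millisecond */
	return 0;
}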
hallor/linux
arch/x86/kvm/emulate.c
124
145251
/****************************************************************************** * emulate.c * * Generic x86 (32-bit and 64-bit) instruction decoder and emulator. * * Copyright (c) 2005 Keir Fraser * * Linux coding style, mod r/m decoder, segment base fixes, real-mode * privileged instructions: * * Copyright (C) 2006 Qumranet * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 */ #include <linux/kvm_host.h> #include "kvm_cache_regs.h" #include <linux/module.h> #include <asm/kvm_emulate.h> #include <linux/stringify.h> #include <asm/debugreg.h> #include "x86.h" #include "tss.h" /* * Operand types */ #define OpNone 0ull #define OpImplicit 1ull /* No generic decode */ #define OpReg 2ull /* Register */ #define OpMem 3ull /* Memory */ #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */ #define OpDI 5ull /* ES:DI/EDI/RDI */ #define OpMem64 6ull /* Memory, 64-bit */ #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */ #define OpDX 8ull /* DX register */ #define OpCL 9ull /* CL register (for shifts) */ #define OpImmByte 10ull /* 8-bit sign extended immediate */ #define OpOne 11ull /* Implied 1 */ #define OpImm 12ull /* Sign extended up to 32-bit immediate */ #define OpMem16 13ull /* Memory operand (16-bit). */ #define OpMem32 14ull /* Memory operand (32-bit). */ #define OpImmU 15ull /* Immediate operand, zero extended */ #define OpSI 16ull /* SI/ESI/RSI */ #define OpImmFAddr 17ull /* Immediate far address */ #define OpMemFAddr 18ull /* Far address in memory */ #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */ #define OpES 20ull /* ES */ #define OpCS 21ull /* CS */ #define OpSS 22ull /* SS */ #define OpDS 23ull /* DS */ #define OpFS 24ull /* FS */ #define OpGS 25ull /* GS */ #define OpMem8 26ull /* 8-bit zero extended memory operand */ #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */ #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */ #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */ #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */ #define OpBits 5 /* Width of operand field */ #define OpMask ((1ull << OpBits) - 1) /* * Opcode effective-address decode tables. * Note that we only emulate instructions that have at least one memory * operand (excluding implicit stack references). We assume that stack * references and instruction fetches will never occur in special memory * areas that require emulation. So, for example, 'mov <imm>,<reg>' need * not be handled. */ /* Operand sizes: 8-bit operands or specified/overridden size. */ #define ByteOp (1<<0) /* 8-bit operands. */ /* Destination operand type. */ #define DstShift 1 #define ImplicitOps (OpImplicit << DstShift) #define DstReg (OpReg << DstShift) #define DstMem (OpMem << DstShift) #define DstAcc (OpAcc << DstShift) #define DstDI (OpDI << DstShift) #define DstMem64 (OpMem64 << DstShift) #define DstMem16 (OpMem16 << DstShift) #define DstImmUByte (OpImmUByte << DstShift) #define DstDX (OpDX << DstShift) #define DstAccLo (OpAccLo << DstShift) #define DstMask (OpMask << DstShift) /* Source operand type. 
*/ #define SrcShift 6 #define SrcNone (OpNone << SrcShift) #define SrcReg (OpReg << SrcShift) #define SrcMem (OpMem << SrcShift) #define SrcMem16 (OpMem16 << SrcShift) #define SrcMem32 (OpMem32 << SrcShift) #define SrcImm (OpImm << SrcShift) #define SrcImmByte (OpImmByte << SrcShift) #define SrcOne (OpOne << SrcShift) #define SrcImmUByte (OpImmUByte << SrcShift) #define SrcImmU (OpImmU << SrcShift) #define SrcSI (OpSI << SrcShift) #define SrcXLat (OpXLat << SrcShift) #define SrcImmFAddr (OpImmFAddr << SrcShift) #define SrcMemFAddr (OpMemFAddr << SrcShift) #define SrcAcc (OpAcc << SrcShift) #define SrcImmU16 (OpImmU16 << SrcShift) #define SrcImm64 (OpImm64 << SrcShift) #define SrcDX (OpDX << SrcShift) #define SrcMem8 (OpMem8 << SrcShift) #define SrcAccHi (OpAccHi << SrcShift) #define SrcMask (OpMask << SrcShift) #define BitOp (1<<11) #define MemAbs (1<<12) /* Memory operand is absolute displacement */ #define String (1<<13) /* String instruction (rep capable) */ #define Stack (1<<14) /* Stack instruction (push/pop) */ #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */ #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */ #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */ #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */ #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */ #define Escape (5<<15) /* Escape to coprocessor instruction */ #define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */ #define ModeDual (7<<15) /* Different instruction for 32/64 bit */ #define Sse (1<<18) /* SSE Vector instruction */ /* Generic ModRM decode. */ #define ModRM (1<<19) /* Destination is only written; never read. */ #define Mov (1<<20) /* Misc flags */ #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */ #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */ #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */ #define Undefined (1<<25) /* No Such Instruction */ #define Lock (1<<26) /* lock prefix is allowed for the instruction */ #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */ #define No64 (1<<28) #define PageTable (1 << 29) /* instruction used to write page table */ #define NotImpl (1 << 30) /* instruction is not implemented */ /* Source 2 operand type */ #define Src2Shift (31) #define Src2None (OpNone << Src2Shift) #define Src2Mem (OpMem << Src2Shift) #define Src2CL (OpCL << Src2Shift) #define Src2ImmByte (OpImmByte << Src2Shift) #define Src2One (OpOne << Src2Shift) #define Src2Imm (OpImm << Src2Shift) #define Src2ES (OpES << Src2Shift) #define Src2CS (OpCS << Src2Shift) #define Src2SS (OpSS << Src2Shift) #define Src2DS (OpDS << Src2Shift) #define Src2FS (OpFS << Src2Shift) #define Src2GS (OpGS << Src2Shift) #define Src2Mask (OpMask << Src2Shift) #define Mmx ((u64)1 << 40) /* MMX Vector instruction */ #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */ #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. 
MOVDQU) */ #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */ #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */ #define NoWrite ((u64)1 << 45) /* No writeback */ #define SrcWrite ((u64)1 << 46) /* Write back src operand */ #define NoMod ((u64)1 << 47) /* Mod field is ignored */ #define Intercept ((u64)1 << 48) /* Has valid intercept field */ #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */ #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */ #define NearBranch ((u64)1 << 52) /* Near branches */ #define No16 ((u64)1 << 53) /* No 16 bit operand */ #define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */ #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) #define X2(x...) x, x #define X3(x...) X2(x), x #define X4(x...) X2(x), X2(x) #define X5(x...) X4(x), x #define X6(x...) X4(x), X2(x) #define X7(x...) X4(x), X3(x) #define X8(x...) X4(x), X4(x) #define X16(x...) X8(x), X8(x) #define NR_FASTOP (ilog2(sizeof(ulong)) + 1) #define FASTOP_SIZE 8 /* * fastop functions have a special calling convention: * * dst: rax (in/out) * src: rdx (in/out) * src2: rcx (in) * flags: rflags (in/out) * ex: rsi (in:fastop pointer, out:zero if exception) * * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for * different operand sizes can be reached by calculation, rather than a jump * table (which would be bigger than the code). * * fastop functions are declared as taking a never-defined fastop parameter, * so they can't be called from C directly. */ struct fastop; struct opcode { u64 flags : 56; u64 intercept : 8; union { int (*execute)(struct x86_emulate_ctxt *ctxt); const struct opcode *group; const struct group_dual *gdual; const struct gprefix *gprefix; const struct escape *esc; const struct instr_dual *idual; const struct mode_dual *mdual; void (*fastop)(struct fastop *fake); } u; int (*check_perm)(struct x86_emulate_ctxt *ctxt); }; struct group_dual { struct opcode mod012[8]; struct opcode mod3[8]; }; struct gprefix { struct opcode pfx_no; struct opcode pfx_66; struct opcode pfx_f2; struct opcode pfx_f3; }; struct escape { struct opcode op[8]; struct opcode high[64]; }; struct instr_dual { struct opcode mod012; struct opcode mod3; }; struct mode_dual { struct opcode mode32; struct opcode mode64; }; #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a enum x86_transfer_type { X86_TRANSFER_NONE, X86_TRANSFER_CALL_JMP, X86_TRANSFER_RET, X86_TRANSFER_TASK_SWITCH, }; static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr) { if (!(ctxt->regs_valid & (1 << nr))) { ctxt->regs_valid |= 1 << nr; ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr); } return ctxt->_regs[nr]; } static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr) { ctxt->regs_valid |= 1 << nr; ctxt->regs_dirty |= 1 << nr; return &ctxt->_regs[nr]; } static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr) { reg_read(ctxt, nr); return reg_write(ctxt, nr); } static void writeback_registers(struct x86_emulate_ctxt *ctxt) { unsigned reg; for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16) ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]); } static void invalidate_registers(struct x86_emulate_ctxt *ctxt) { ctxt->regs_dirty = 0; ctxt->regs_valid = 0; } /* * These EFLAGS bits are restored from saved value during emulation, and * any changes are written back to the saved value after emulation. 
*/ #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\ X86_EFLAGS_PF|X86_EFLAGS_CF) #ifdef CONFIG_X86_64 #define ON64(x) x #else #define ON64(x) #endif static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)); #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t" #define FOP_RET "ret \n\t" #define FOP_START(op) \ extern void em_##op(struct fastop *fake); \ asm(".pushsection .text, \"ax\" \n\t" \ ".global em_" #op " \n\t" \ FOP_ALIGN \ "em_" #op ": \n\t" #define FOP_END \ ".popsection") #define FOPNOP() FOP_ALIGN FOP_RET #define FOP1E(op, dst) \ FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET #define FOP1EEX(op, dst) \ FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception) #define FASTOP1(op) \ FOP_START(op) \ FOP1E(op##b, al) \ FOP1E(op##w, ax) \ FOP1E(op##l, eax) \ ON64(FOP1E(op##q, rax)) \ FOP_END /* 1-operand, using src2 (for MUL/DIV r/m) */ #define FASTOP1SRC2(op, name) \ FOP_START(name) \ FOP1E(op, cl) \ FOP1E(op, cx) \ FOP1E(op, ecx) \ ON64(FOP1E(op, rcx)) \ FOP_END /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */ #define FASTOP1SRC2EX(op, name) \ FOP_START(name) \ FOP1EEX(op, cl) \ FOP1EEX(op, cx) \ FOP1EEX(op, ecx) \ ON64(FOP1EEX(op, rcx)) \ FOP_END #define FOP2E(op, dst, src) \ FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET #define FASTOP2(op) \ FOP_START(op) \ FOP2E(op##b, al, dl) \ FOP2E(op##w, ax, dx) \ FOP2E(op##l, eax, edx) \ ON64(FOP2E(op##q, rax, rdx)) \ FOP_END /* 2 operand, word only */ #define FASTOP2W(op) \ FOP_START(op) \ FOPNOP() \ FOP2E(op##w, ax, dx) \ FOP2E(op##l, eax, edx) \ ON64(FOP2E(op##q, rax, rdx)) \ FOP_END /* 2 operand, src is CL */ #define FASTOP2CL(op) \ FOP_START(op) \ FOP2E(op##b, al, cl) \ FOP2E(op##w, ax, cl) \ FOP2E(op##l, eax, cl) \ ON64(FOP2E(op##q, rax, cl)) \ FOP_END /* 2 operand, src and dest are reversed */ #define FASTOP2R(op, name) \ FOP_START(name) \ FOP2E(op##b, dl, al) \ FOP2E(op##w, dx, ax) \ FOP2E(op##l, edx, eax) \ ON64(FOP2E(op##q, rdx, rax)) \ FOP_END #define FOP3E(op, dst, src, src2) \ FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET /* 3-operand, word-only, src2=cl */ #define FASTOP3WCL(op) \ FOP_START(op) \ FOPNOP() \ FOP3E(op##w, ax, dx, cl) \ FOP3E(op##l, eax, edx, cl) \ ON64(FOP3E(op##q, rax, rdx, cl)) \ FOP_END /* Special case for SETcc - 1 instruction per cc */ #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t" asm(".global kvm_fastop_exception \n" "kvm_fastop_exception: xor %esi, %esi; ret"); FOP_START(setcc) FOP_SETCC(seto) FOP_SETCC(setno) FOP_SETCC(setc) FOP_SETCC(setnc) FOP_SETCC(setz) FOP_SETCC(setnz) FOP_SETCC(setbe) FOP_SETCC(setnbe) FOP_SETCC(sets) FOP_SETCC(setns) FOP_SETCC(setp) FOP_SETCC(setnp) FOP_SETCC(setl) FOP_SETCC(setnl) FOP_SETCC(setle) FOP_SETCC(setnle) FOP_END; FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET FOP_END; static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, enum x86_intercept intercept, enum x86_intercept_stage stage) { struct x86_instruction_info info = { .intercept = intercept, .rep_prefix = ctxt->rep_prefix, .modrm_mod = ctxt->modrm_mod, .modrm_reg = ctxt->modrm_reg, .modrm_rm = ctxt->modrm_rm, .src_val = ctxt->src.val64, .dst_val = ctxt->dst.val64, .src_bytes = ctxt->src.bytes, .dst_bytes = ctxt->dst.bytes, .ad_bytes = ctxt->ad_bytes, .next_rip = ctxt->eip, }; return ctxt->ops->intercept(ctxt, &info, stage); } static void assign_masked(ulong *dest, ulong src, ulong mask) { *dest = (*dest & ~mask) | (src & mask); } static void assign_register(unsigned long *reg, u64 val, int 
bytes) { /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */ switch (bytes) { case 1: *(u8 *)reg = (u8)val; break; case 2: *(u16 *)reg = (u16)val; break; case 4: *reg = (u32)val; break; /* 64b: zero-extend */ case 8: *reg = val; break; } } static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) { return (1UL << (ctxt->ad_bytes << 3)) - 1; } static ulong stack_mask(struct x86_emulate_ctxt *ctxt) { u16 sel; struct desc_struct ss; if (ctxt->mode == X86EMUL_MODE_PROT64) return ~0UL; ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS); return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */ } static int stack_size(struct x86_emulate_ctxt *ctxt) { return (__fls(stack_mask(ctxt)) + 1) >> 3; } /* Access/update address held in a register, based on addressing mode. */ static inline unsigned long address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg) { if (ctxt->ad_bytes == sizeof(unsigned long)) return reg; else return reg & ad_mask(ctxt); } static inline unsigned long register_address(struct x86_emulate_ctxt *ctxt, int reg) { return address_mask(ctxt, reg_read(ctxt, reg)); } static void masked_increment(ulong *reg, ulong mask, int inc) { assign_masked(reg, *reg + inc, mask); } static inline void register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc) { ulong *preg = reg_rmw(ctxt, reg); assign_register(preg, *preg + inc, ctxt->ad_bytes); } static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc) { masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc); } static u32 desc_limit_scaled(struct desc_struct *desc) { u32 limit = get_desc_limit(desc); return desc->g ? (limit << 12) | 0xfff : limit; } static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) { if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) return 0; return ctxt->ops->get_cached_segment_base(ctxt, seg); } static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, u32 error, bool valid) { WARN_ON(vec > 0x1f); ctxt->exception.vector = vec; ctxt->exception.error_code = error; ctxt->exception.error_code_valid = valid; return X86EMUL_PROPAGATE_FAULT; } static int emulate_db(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, DB_VECTOR, 0, false); } static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, GP_VECTOR, err, true); } static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, SS_VECTOR, err, true); } static int emulate_ud(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, UD_VECTOR, 0, false); } static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, TS_VECTOR, err, true); } static int emulate_de(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, DE_VECTOR, 0, false); } static int emulate_nm(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, NM_VECTOR, 0, false); } static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) { u16 selector; struct desc_struct desc; ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg); return selector; } static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector, unsigned seg) { u16 dummy; u32 base3; struct desc_struct desc; ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg); ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg); } /* * x86 defines three classes of vector instructions: explicitly * aligned, explicitly unaligned, and the rest, which change 
behaviour * depending on whether they're AVX encoded or not. * * Also included is CMPXCHG16B which is not a vector instruction, yet it is * subject to the same check. */ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size) { if (likely(size < 16)) return false; if (ctxt->d & Aligned) return true; else if (ctxt->d & Unaligned) return false; else if (ctxt->d & Avx) return false; else return true; } static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned *max_size, unsigned size, bool write, bool fetch, enum x86emul_mode mode, ulong *linear) { struct desc_struct desc; bool usable; ulong la; u32 lim; u16 sel; la = seg_base(ctxt, addr.seg) + addr.ea; *linear = la; *max_size = 0; switch (mode) { case X86EMUL_MODE_PROT64: if (is_noncanonical_address(la)) goto bad; *max_size = min_t(u64, ~0u, (1ull << 48) - la); if (size > *max_size) goto bad; break; default: usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, addr.seg); if (!usable) goto bad; /* code segment in protected mode or read-only data segment */ if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) && write) goto bad; /* unreadable code segment */ if (!fetch && (desc.type & 8) && !(desc.type & 2)) goto bad; lim = desc_limit_scaled(&desc); if (!(desc.type & 8) && (desc.type & 4)) { /* expand-down segment */ if (addr.ea <= lim) goto bad; lim = desc.d ? 0xffffffff : 0xffff; } if (addr.ea > lim) goto bad; if (lim == 0xffffffff) *max_size = ~0u; else { *max_size = (u64)lim + 1 - addr.ea; if (size > *max_size) goto bad; } la &= (u32)-1; break; } if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; bad: if (addr.seg == VCPU_SREG_SS) return emulate_ss(ctxt, 0); else return emulate_gp(ctxt, 0); } static int linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned size, bool write, ulong *linear) { unsigned max_size; return __linearize(ctxt, addr, &max_size, size, write, false, ctxt->mode, linear); } static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst, enum x86emul_mode mode) { ulong linear; int rc; unsigned max_size; struct segmented_address addr = { .seg = VCPU_SREG_CS, .ea = dst }; if (ctxt->op_bytes != sizeof(unsigned long)) addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1); rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear); if (rc == X86EMUL_CONTINUE) ctxt->_eip = addr.ea; return rc; } static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) { return assign_eip(ctxt, dst, ctxt->mode); } static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, const struct desc_struct *cs_desc) { enum x86emul_mode mode = ctxt->mode; int rc; #ifdef CONFIG_X86_64 if (ctxt->mode >= X86EMUL_MODE_PROT16) { if (cs_desc->l) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) mode = X86EMUL_MODE_PROT64; } else mode = X86EMUL_MODE_PROT32; /* temporary value */ } #endif if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32) mode = cs_desc->d ? 
X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; rc = assign_eip(ctxt, dst, mode); if (rc == X86EMUL_CONTINUE) ctxt->mode = mode; return rc; } static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) { return assign_eip_near(ctxt, ctxt->_eip + rel); } static int segmented_read_std(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); } /* * Prefetch the remaining bytes of the instruction without crossing page * boundary if they are not in fetch_cache yet. */ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) { int rc; unsigned size, max_size; unsigned long linear; int cur_size = ctxt->fetch.end - ctxt->fetch.data; struct segmented_address addr = { .seg = VCPU_SREG_CS, .ea = ctxt->eip + cur_size }; /* * We do not know exactly how many bytes will be needed, and * __linearize is expensive, so fetch as much as possible. We * just have to avoid going beyond the 15 byte limit, the end * of the segment, or the end of the page. * * __linearize is called with size 0 so that it does not do any * boundary check itself. Instead, we use max_size to check * against op_size. */ rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode, &linear); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; size = min_t(unsigned, 15UL ^ cur_size, max_size); size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); /* * One instruction can only straddle two pages, * and one has been loaded at the beginning of * x86_decode_insn. So, if not enough bytes * still, we must have hit the 15-byte boundary. */ if (unlikely(size < op_size)) return emulate_gp(ctxt, 0); rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, size, &ctxt->exception); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; ctxt->fetch.end += size; return X86EMUL_CONTINUE; } static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, unsigned size) { unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr; if (unlikely(done_size < size)) return __do_insn_fetch_bytes(ctxt, size - done_size); else return X86EMUL_CONTINUE; } /* Fetch next part of the instruction being emulated. */ #define insn_fetch(_type, _ctxt) \ ({ _type _x; \ \ rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \ if (rc != X86EMUL_CONTINUE) \ goto done; \ ctxt->_eip += sizeof(_type); \ _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \ ctxt->fetch.ptr += sizeof(_type); \ _x; \ }) #define insn_fetch_arr(_arr, _size, _ctxt) \ ({ \ rc = do_insn_fetch_bytes(_ctxt, _size); \ if (rc != X86EMUL_CONTINUE) \ goto done; \ ctxt->_eip += (_size); \ memcpy(_arr, ctxt->fetch.ptr, _size); \ ctxt->fetch.ptr += (_size); \ }) /* * Given the 'reg' portion of a ModRM byte, and a register block, return a * pointer into the block that addresses the relevant register. * @highbyte_regs specifies whether to decode AH,CH,DH,BH. 
*/ static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg, int byteop) { void *p; int highbyte_regs = (ctxt->rex_prefix == 0) && byteop; if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8) p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1; else p = reg_rmw(ctxt, modrm_reg); return p; } static int read_descriptor(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, u16 *size, unsigned long *address, int op_bytes) { int rc; if (op_bytes == 2) op_bytes = 3; *address = 0; rc = segmented_read_std(ctxt, addr, size, 2); if (rc != X86EMUL_CONTINUE) return rc; addr.ea += 2; rc = segmented_read_std(ctxt, addr, address, op_bytes); return rc; } FASTOP2(add); FASTOP2(or); FASTOP2(adc); FASTOP2(sbb); FASTOP2(and); FASTOP2(sub); FASTOP2(xor); FASTOP2(cmp); FASTOP2(test); FASTOP1SRC2(mul, mul_ex); FASTOP1SRC2(imul, imul_ex); FASTOP1SRC2EX(div, div_ex); FASTOP1SRC2EX(idiv, idiv_ex); FASTOP3WCL(shld); FASTOP3WCL(shrd); FASTOP2W(imul); FASTOP1(not); FASTOP1(neg); FASTOP1(inc); FASTOP1(dec); FASTOP2CL(rol); FASTOP2CL(ror); FASTOP2CL(rcl); FASTOP2CL(rcr); FASTOP2CL(shl); FASTOP2CL(shr); FASTOP2CL(sar); FASTOP2W(bsf); FASTOP2W(bsr); FASTOP2W(bt); FASTOP2W(bts); FASTOP2W(btr); FASTOP2W(btc); FASTOP2(xadd); FASTOP2R(cmp, cmp_r); static int em_bsf_c(struct x86_emulate_ctxt *ctxt) { /* If src is zero, do not writeback, but update flags */ if (ctxt->src.val == 0) ctxt->dst.type = OP_NONE; return fastop(ctxt, em_bsf); } static int em_bsr_c(struct x86_emulate_ctxt *ctxt) { /* If src is zero, do not writeback, but update flags */ if (ctxt->src.val == 0) ctxt->dst.type = OP_NONE; return fastop(ctxt, em_bsr); } static u8 test_cc(unsigned int condition, unsigned long flags) { u8 rc; void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; asm("push %[flags]; popf; call *%[fastop]" : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); return rc; } static void fetch_register_operand(struct operand *op) { switch (op->bytes) { case 1: op->val = *(u8 *)op->addr.reg; break; case 2: op->val = *(u16 *)op->addr.reg; break; case 4: op->val = *(u32 *)op->addr.reg; break; case 8: op->val = *(u64 *)op->addr.reg; break; } } static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break; case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break; case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break; case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break; case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break; case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break; case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break; case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break; #ifdef CONFIG_X86_64 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break; case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break; case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break; case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break; case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break; case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break; case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break; case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break; #endif default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break; case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break; case 2: asm("movdqa %0, 
%%xmm2" : : "m"(*data)); break; case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break; case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break; case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break; case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break; case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break; #ifdef CONFIG_X86_64 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break; case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break; case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break; case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break; case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break; case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break; case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break; case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break; #endif default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movq %%mm0, %0" : "=m"(*data)); break; case 1: asm("movq %%mm1, %0" : "=m"(*data)); break; case 2: asm("movq %%mm2, %0" : "=m"(*data)); break; case 3: asm("movq %%mm3, %0" : "=m"(*data)); break; case 4: asm("movq %%mm4, %0" : "=m"(*data)); break; case 5: asm("movq %%mm5, %0" : "=m"(*data)); break; case 6: asm("movq %%mm6, %0" : "=m"(*data)); break; case 7: asm("movq %%mm7, %0" : "=m"(*data)); break; default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movq %0, %%mm0" : : "m"(*data)); break; case 1: asm("movq %0, %%mm1" : : "m"(*data)); break; case 2: asm("movq %0, %%mm2" : : "m"(*data)); break; case 3: asm("movq %0, %%mm3" : : "m"(*data)); break; case 4: asm("movq %0, %%mm4" : : "m"(*data)); break; case 5: asm("movq %0, %%mm5" : : "m"(*data)); break; case 6: asm("movq %0, %%mm6" : : "m"(*data)); break; case 7: asm("movq %0, %%mm7" : : "m"(*data)); break; default: BUG(); } ctxt->ops->put_fpu(ctxt); } static int em_fninit(struct x86_emulate_ctxt *ctxt) { if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fninit"); ctxt->ops->put_fpu(ctxt); return X86EMUL_CONTINUE; } static int em_fnstcw(struct x86_emulate_ctxt *ctxt) { u16 fcw; if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fnstcw %0": "+m"(fcw)); ctxt->ops->put_fpu(ctxt); ctxt->dst.val = fcw; return X86EMUL_CONTINUE; } static int em_fnstsw(struct x86_emulate_ctxt *ctxt) { u16 fsw; if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fnstsw %0": "+m"(fsw)); ctxt->ops->put_fpu(ctxt); ctxt->dst.val = fsw; return X86EMUL_CONTINUE; } static void decode_register_operand(struct x86_emulate_ctxt *ctxt, struct operand *op) { unsigned reg = ctxt->modrm_reg; if (!(ctxt->d & ModRM)) reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; op->addr.xmm = reg; read_sse_reg(ctxt, &op->vec_val, reg); return; } if (ctxt->d & Mmx) { reg &= 7; op->type = OP_MM; op->bytes = 8; op->addr.mm = reg; return; } op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp); fetch_register_operand(op); op->orig_val = op->val; } static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg) { if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP) ctxt->modrm_seg = VCPU_SREG_SS; } static int decode_modrm(struct x86_emulate_ctxt *ctxt, struct operand *op) { u8 sib; int index_reg, base_reg, scale; int rc = X86EMUL_CONTINUE; ulong modrm_ea = 0; ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */ index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */ base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */ ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6; ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07); ctxt->modrm_seg = VCPU_SREG_DS; if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) { op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; op->addr.xmm = ctxt->modrm_rm; read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm); return rc; } if (ctxt->d & Mmx) { op->type = OP_MM; op->bytes = 8; op->addr.mm = ctxt->modrm_rm & 7; return rc; } fetch_register_operand(op); return rc; } op->type = OP_MEM; if (ctxt->ad_bytes == 2) { unsigned bx = reg_read(ctxt, VCPU_REGS_RBX); unsigned bp = reg_read(ctxt, VCPU_REGS_RBP); unsigned si = reg_read(ctxt, VCPU_REGS_RSI); unsigned di = reg_read(ctxt, VCPU_REGS_RDI); /* 16-bit ModR/M decode. */ switch (ctxt->modrm_mod) { case 0: if (ctxt->modrm_rm == 6) modrm_ea += insn_fetch(u16, ctxt); break; case 1: modrm_ea += insn_fetch(s8, ctxt); break; case 2: modrm_ea += insn_fetch(u16, ctxt); break; } switch (ctxt->modrm_rm) { case 0: modrm_ea += bx + si; break; case 1: modrm_ea += bx + di; break; case 2: modrm_ea += bp + si; break; case 3: modrm_ea += bp + di; break; case 4: modrm_ea += si; break; case 5: modrm_ea += di; break; case 6: if (ctxt->modrm_mod != 0) modrm_ea += bp; break; case 7: modrm_ea += bx; break; } if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 || (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0)) ctxt->modrm_seg = VCPU_SREG_SS; modrm_ea = (u16)modrm_ea; } else { /* 32/64-bit ModR/M decode. 
*/ if ((ctxt->modrm_rm & 7) == 4) { sib = insn_fetch(u8, ctxt); index_reg |= (sib >> 3) & 7; base_reg |= sib & 7; scale = sib >> 6; if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0) modrm_ea += insn_fetch(s32, ctxt); else { modrm_ea += reg_read(ctxt, base_reg); adjust_modrm_seg(ctxt, base_reg); /* Increment ESP on POP [ESP] */ if ((ctxt->d & IncSP) && base_reg == VCPU_REGS_RSP) modrm_ea += ctxt->op_bytes; } if (index_reg != 4) modrm_ea += reg_read(ctxt, index_reg) << scale; } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) { modrm_ea += insn_fetch(s32, ctxt); if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->rip_relative = 1; } else { base_reg = ctxt->modrm_rm; modrm_ea += reg_read(ctxt, base_reg); adjust_modrm_seg(ctxt, base_reg); } switch (ctxt->modrm_mod) { case 1: modrm_ea += insn_fetch(s8, ctxt); break; case 2: modrm_ea += insn_fetch(s32, ctxt); break; } } op->addr.mem.ea = modrm_ea; if (ctxt->ad_bytes != 8) ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea; done: return rc; } static int decode_abs(struct x86_emulate_ctxt *ctxt, struct operand *op) { int rc = X86EMUL_CONTINUE; op->type = OP_MEM; switch (ctxt->ad_bytes) { case 2: op->addr.mem.ea = insn_fetch(u16, ctxt); break; case 4: op->addr.mem.ea = insn_fetch(u32, ctxt); break; case 8: op->addr.mem.ea = insn_fetch(u64, ctxt); break; } done: return rc; } static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt) { long sv = 0, mask; if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { mask = ~((long)ctxt->dst.bytes * 8 - 1); if (ctxt->src.bytes == 2) sv = (s16)ctxt->src.val & (s16)mask; else if (ctxt->src.bytes == 4) sv = (s32)ctxt->src.val & (s32)mask; else sv = (s64)ctxt->src.val & (s64)mask; ctxt->dst.addr.mem.ea = address_mask(ctxt, ctxt->dst.addr.mem.ea + (sv >> 3)); } /* only subword offset */ ctxt->src.val &= (ctxt->dst.bytes << 3) - 1; } static int read_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *dest, unsigned size) { int rc; struct read_cache *mc = &ctxt->mem_read; if (mc->pos < mc->end) goto read_cached; WARN_ON((mc->end + size) >= sizeof(mc->data)); rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; mc->end += size; read_cached: memcpy(dest, mc->data + mc->pos, size); mc->pos += size; return X86EMUL_CONTINUE; } static int segmented_read(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; return read_emulated(ctxt, linear, data, size); } static int segmented_write(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, const void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->write_emulated(ctxt, linear, data, size, &ctxt->exception); } static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, const void *orig_data, const void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data, size, &ctxt->exception); } static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, unsigned int size, unsigned short port, void *dest) { struct read_cache *rc = &ctxt->io_read; if (rc->pos == rc->end) { /* refill pio read ahead */ unsigned int in_page, n; unsigned int count = ctxt->rep_prefix ? 
address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1; in_page = (ctxt->eflags & X86_EFLAGS_DF) ? offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count); if (n == 0) n = 1; rc->pos = rc->end = 0; if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n)) return 0; rc->end = n * size; } if (ctxt->rep_prefix && (ctxt->d & String) && !(ctxt->eflags & X86_EFLAGS_DF)) { ctxt->dst.data = rc->data + rc->pos; ctxt->dst.type = OP_MEM_STR; ctxt->dst.count = (rc->end - rc->pos) / size; rc->pos = rc->end; } else { memcpy(dest, rc->data + rc->pos, size); rc->pos += size; } return 1; } static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, u16 index, struct desc_struct *desc) { struct desc_ptr dt; ulong addr; ctxt->ops->get_idt(ctxt, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, index << 3 | 0x2); addr = dt.address + index * 8; return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_ptr *dt) { const struct x86_emulate_ops *ops = ctxt->ops; u32 base3 = 0; if (selector & 1 << 2) { struct desc_struct desc; u16 sel; memset (dt, 0, sizeof *dt); if (!ops->get_segment(ctxt, &sel, &desc, &base3, VCPU_SREG_LDTR)) return; dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ dt->address = get_desc_base(&desc) | ((u64)base3 << 32); } else ops->get_gdt(ctxt, dt); } static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt, u16 selector, ulong *desc_addr_p) { struct desc_ptr dt; u16 index = selector >> 3; ulong addr; get_descriptor_table_ptr(ctxt, selector, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, selector & 0xfffc); addr = dt.address + index * 8; #ifdef CONFIG_X86_64 if (addr >> 32 != 0) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (!(efer & EFER_LMA)) addr &= (u32)-1; } #endif *desc_addr_p = addr; return X86EMUL_CONTINUE; } /* allowed just for 8 bytes segments */ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc, ulong *desc_addr_p) { int rc; rc = get_descriptor_ptr(ctxt, selector, desc_addr_p); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc), &ctxt->exception); } /* allowed just for 8 bytes segments */ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc) { int rc; ulong addr; rc = get_descriptor_ptr(ctxt, selector, &addr); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } /* Does not support long mode */ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg, u8 cpl, enum x86_transfer_type transfer, struct desc_struct *desc) { struct desc_struct seg_desc, old_desc; u8 dpl, rpl; unsigned err_vec = GP_VECTOR; u32 err_code = 0; bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */ ulong desc_addr; int ret; u16 dummy; u32 base3 = 0; memset(&seg_desc, 0, sizeof seg_desc); if (ctxt->mode == X86EMUL_MODE_REAL) { /* set real mode segment descriptor (keep limit etc. 
for * unreal mode) */ ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg); set_desc_base(&seg_desc, selector << 4); goto load; } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) { /* VM86 needs a clean new segment descriptor */ set_desc_base(&seg_desc, selector << 4); set_desc_limit(&seg_desc, 0xffff); seg_desc.type = 3; seg_desc.p = 1; seg_desc.s = 1; seg_desc.dpl = 3; goto load; } rpl = selector & 3; /* NULL selector is not valid for TR, CS and SS (except for long mode) */ if ((seg == VCPU_SREG_CS || (seg == VCPU_SREG_SS && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) || seg == VCPU_SREG_TR) && null_selector) goto exception; /* TR should be in GDT only */ if (seg == VCPU_SREG_TR && (selector & (1 << 2))) goto exception; if (null_selector) /* for NULL selector skip all following checks */ goto load; ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; err_code = selector & 0xfffc; err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR : GP_VECTOR; /* can't load system descriptor into segment selector */ if (seg <= VCPU_SREG_GS && !seg_desc.s) { if (transfer == X86_TRANSFER_CALL_JMP) return X86EMUL_UNHANDLEABLE; goto exception; } if (!seg_desc.p) { err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; goto exception; } dpl = seg_desc.dpl; switch (seg) { case VCPU_SREG_SS: /* * segment is not a writable data segment or segment * selector's RPL != CPL or segment selector's RPL != CPL */ if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl) goto exception; break; case VCPU_SREG_CS: if (!(seg_desc.type & 8)) goto exception; if (seg_desc.type & 4) { /* conforming */ if (dpl > cpl) goto exception; } else { /* nonconforming */ if (rpl > cpl || dpl != cpl) goto exception; } /* in long-mode d/b must be clear if l is set */ if (seg_desc.d && seg_desc.l) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) goto exception; } /* CS(RPL) <- CPL */ selector = (selector & 0xfffc) | cpl; break; case VCPU_SREG_TR: if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) goto exception; old_desc = seg_desc; seg_desc.type |= 2; /* busy */ ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, sizeof(seg_desc), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; break; case VCPU_SREG_LDTR: if (seg_desc.s || seg_desc.type != 2) goto exception; break; default: /* DS, ES, FS, or GS */ /* * segment is not a data or readable code segment or * ((segment is a data or nonconforming code segment) * and (both RPL and CPL > DPL)) */ if ((seg_desc.type & 0xa) == 0x8 || (((seg_desc.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl))) goto exception; break; } if (seg_desc.s) { /* mark segment as accessed */ if (!(seg_desc.type & 1)) { seg_desc.type |= 1; ret = write_segment_descriptor(ctxt, selector, &seg_desc); if (ret != X86EMUL_CONTINUE) return ret; } } else if (ctxt->mode == X86EMUL_MODE_PROT64) { ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, sizeof(base3), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; if (is_noncanonical_address(get_desc_base(&seg_desc) | ((u64)base3 << 32))) return emulate_gp(ctxt, 0); } load: ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); if (desc) *desc = seg_desc; return X86EMUL_CONTINUE; exception: return emulate_exception(ctxt, err_vec, err_code, true); } static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg) { u8 cpl = ctxt->ops->cpl(ctxt); return 
__load_segment_descriptor(ctxt, selector, seg, cpl, X86_TRANSFER_NONE, NULL); } static void write_register_operand(struct operand *op) { return assign_register(op->addr.reg, op->val, op->bytes); } static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) { switch (op->type) { case OP_REG: write_register_operand(op); break; case OP_MEM: if (ctxt->lock_prefix) return segmented_cmpxchg(ctxt, op->addr.mem, &op->orig_val, &op->val, op->bytes); else return segmented_write(ctxt, op->addr.mem, &op->val, op->bytes); break; case OP_MEM_STR: return segmented_write(ctxt, op->addr.mem, op->data, op->bytes * op->count); break; case OP_XMM: write_sse_reg(ctxt, &op->vec_val, op->addr.xmm); break; case OP_MM: write_mmx_reg(ctxt, &op->mm_val, op->addr.mm); break; case OP_NONE: /* no writeback */ break; default: break; } return X86EMUL_CONTINUE; } static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes) { struct segmented_address addr; rsp_increment(ctxt, -bytes); addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; return segmented_write(ctxt, addr, data, bytes); } static int em_push(struct x86_emulate_ctxt *ctxt) { /* Disable writeback. */ ctxt->dst.type = OP_NONE; return push(ctxt, &ctxt->src.val, ctxt->op_bytes); } static int emulate_pop(struct x86_emulate_ctxt *ctxt, void *dest, int len) { int rc; struct segmented_address addr; addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; rc = segmented_read(ctxt, addr, dest, len); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, len); return rc; } static int em_pop(struct x86_emulate_ctxt *ctxt) { return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); } static int emulate_popf(struct x86_emulate_ctxt *ctxt, void *dest, int len) { int rc; unsigned long val, change_mask; int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; int cpl = ctxt->ops->cpl(ctxt); rc = emulate_pop(ctxt, &val, len); if (rc != X86EMUL_CONTINUE) return rc; change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF | X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT | X86_EFLAGS_AC | X86_EFLAGS_ID; switch(ctxt->mode) { case X86EMUL_MODE_PROT64: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT16: if (cpl == 0) change_mask |= X86_EFLAGS_IOPL; if (cpl <= iopl) change_mask |= X86_EFLAGS_IF; break; case X86EMUL_MODE_VM86: if (iopl < 3) return emulate_gp(ctxt, 0); change_mask |= X86_EFLAGS_IF; break; default: /* real mode */ change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF); break; } *(unsigned long *)dest = (ctxt->eflags & ~change_mask) | (val & change_mask); return rc; } static int em_popf(struct x86_emulate_ctxt *ctxt) { ctxt->dst.type = OP_REG; ctxt->dst.addr.reg = &ctxt->eflags; ctxt->dst.bytes = ctxt->op_bytes; return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes); } static int em_enter(struct x86_emulate_ctxt *ctxt) { int rc; unsigned frame_size = ctxt->src.val; unsigned nesting_level = ctxt->src2.val & 31; ulong rbp; if (nesting_level) return X86EMUL_UNHANDLEABLE; rbp = reg_read(ctxt, VCPU_REGS_RBP); rc = push(ctxt, &rbp, stack_size(ctxt)); if (rc != X86EMUL_CONTINUE) return rc; assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP), stack_mask(ctxt)); assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RSP) - frame_size, stack_mask(ctxt)); return X86EMUL_CONTINUE; } static int em_leave(struct x86_emulate_ctxt *ctxt) { assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP), 
stack_mask(ctxt)); return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes); } static int em_push_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; ctxt->src.val = get_segment_selector(ctxt, seg); if (ctxt->op_bytes == 4) { rsp_increment(ctxt, -2); ctxt->op_bytes = 2; } return em_push(ctxt); } static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; unsigned long selector; int rc; rc = emulate_pop(ctxt, &selector, 2); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->modrm_reg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; if (ctxt->op_bytes > 2) rsp_increment(ctxt, ctxt->op_bytes - 2); rc = load_segment_descriptor(ctxt, (u16)selector, seg); return rc; } static int em_pusha(struct x86_emulate_ctxt *ctxt) { unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP); int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RAX; while (reg <= VCPU_REGS_RDI) { (reg == VCPU_REGS_RSP) ? (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg)); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ++reg; } return rc; } static int em_pushf(struct x86_emulate_ctxt *ctxt) { ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM; return em_push(ctxt); } static int em_popa(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RDI; u32 val; while (reg >= VCPU_REGS_RAX) { if (reg == VCPU_REGS_RSP) { rsp_increment(ctxt, ctxt->op_bytes); --reg; } rc = emulate_pop(ctxt, &val, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) break; assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes); --reg; } return rc; } static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) { const struct x86_emulate_ops *ops = ctxt->ops; int rc; struct desc_ptr dt; gva_t cs_addr; gva_t eip_addr; u16 cs, eip; /* TODO: Add limit checks */ ctxt->src.val = ctxt->eflags; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC); ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->src.val = ctxt->_eip; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ops->get_idt(ctxt, &dt); eip_addr = dt.address + (irq << 2); cs_addr = dt.address + (irq << 2) + 2; rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; ctxt->_eip = eip; return rc; } int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) { int rc; invalidate_registers(ctxt); rc = __emulate_int_real(ctxt, irq); if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return rc; } static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq) { switch(ctxt->mode) { case X86EMUL_MODE_REAL: return __emulate_int_real(ctxt, irq); case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT64: default: /* Protected mode interrupts unimplemented yet */ return X86EMUL_UNHANDLEABLE; } } static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; unsigned long temp_eip = 0; unsigned long temp_eflags = 0; unsigned long cs = 0; unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF | X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF | 
X86_EFLAGS_AC | X86_EFLAGS_ID | X86_EFLAGS_FIXED; unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF | X86_EFLAGS_VIP; /* TODO: Add stack limit check */ rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (temp_eip & ~0xffff) return emulate_gp(ctxt, 0); rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; ctxt->_eip = temp_eip; if (ctxt->op_bytes == 4) ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); else if (ctxt->op_bytes == 2) { ctxt->eflags &= ~0xffff; ctxt->eflags |= temp_eflags; } ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ ctxt->eflags |= X86_EFLAGS_FIXED; ctxt->ops->set_nmi_mask(ctxt, false); return rc; } static int em_iret(struct x86_emulate_ctxt *ctxt) { switch(ctxt->mode) { case X86EMUL_MODE_REAL: return emulate_iret_real(ctxt); case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT64: default: /* iret from protected mode unimplemented yet */ return X86EMUL_UNHANDLEABLE; } } static int em_jmp_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned short sel, old_sel; struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; u8 cpl = ctxt->ops->cpl(ctxt); /* Assignment of RIP may only fail in 64-bit mode */ if (ctxt->mode == X86EMUL_MODE_PROT64) ops->get_segment(ctxt, &old_sel, &old_desc, NULL, VCPU_SREG_CS); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, X86_TRANSFER_CALL_JMP, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); if (rc != X86EMUL_CONTINUE) { WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); /* assigning eip failed; restore the old cs */ ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); return rc; } return rc; } static int em_jmp_abs(struct x86_emulate_ctxt *ctxt) { return assign_eip_near(ctxt, ctxt->src.val); } static int em_call_near_abs(struct x86_emulate_ctxt *ctxt) { int rc; long int old_eip; old_eip = ctxt->_eip; rc = assign_eip_near(ctxt, ctxt->src.val); if (rc != X86EMUL_CONTINUE) return rc; ctxt->src.val = old_eip; rc = em_push(ctxt); return rc; } static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) { u64 old = ctxt->dst.orig_val64; if (ctxt->dst.bytes == 16) return X86EMUL_UNHANDLEABLE; if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); ctxt->eflags &= ~X86_EFLAGS_ZF; } else { ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | (u32) reg_read(ctxt, VCPU_REGS_RBX); ctxt->eflags |= X86_EFLAGS_ZF; } return X86EMUL_CONTINUE; } static int em_ret(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; return assign_eip_near(ctxt, eip); } static int em_ret_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip, cs; u16 old_cs; int cpl = ctxt->ops->cpl(ctxt); struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; if (ctxt->mode == X86EMUL_MODE_PROT64) ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); rc = emulate_pop(ctxt, &eip, 
ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; /* Outer-privilege level return is not implemented */ if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) return X86EMUL_UNHANDLEABLE; rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, X86_TRANSFER_RET, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, eip, &new_desc); if (rc != X86EMUL_CONTINUE) { WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); } return rc; } static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) { int rc; rc = em_ret_far(ctxt); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, ctxt->src.val); return X86EMUL_CONTINUE; } static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) { /* Save real source value, then compare EAX against destination. */ ctxt->dst.orig_val = ctxt->dst.val; ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); ctxt->src.orig_val = ctxt->src.val; ctxt->src.val = ctxt->dst.orig_val; fastop(ctxt, em_cmp); if (ctxt->eflags & X86_EFLAGS_ZF) { /* Success: write back to memory; no update of EAX */ ctxt->src.type = OP_NONE; ctxt->dst.val = ctxt->src.orig_val; } else { /* Failure: write the value we saw to EAX. */ ctxt->src.type = OP_REG; ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); ctxt->src.val = ctxt->dst.orig_val; /* Create write-cycle to dest by writing the same value */ ctxt->dst.val = ctxt->dst.orig_val; } return X86EMUL_CONTINUE; } static int em_lseg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; unsigned short sel; int rc; memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = load_segment_descriptor(ctxt, sel, seg); if (rc != X86EMUL_CONTINUE) return rc; ctxt->dst.val = ctxt->src.val; return rc; } static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt) { u32 eax, ebx, ecx, edx; eax = 0x80000001; ecx = 0; ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); return edx & bit(X86_FEATURE_LM); } #define GET_SMSTATE(type, smbase, offset) \ ({ \ type __val; \ int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \ sizeof(__val)); \ if (r != X86EMUL_CONTINUE) \ return X86EMUL_UNHANDLEABLE; \ __val; \ }) static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags) { desc->g = (flags >> 23) & 1; desc->d = (flags >> 22) & 1; desc->l = (flags >> 21) & 1; desc->avl = (flags >> 20) & 1; desc->p = (flags >> 15) & 1; desc->dpl = (flags >> 13) & 3; desc->s = (flags >> 12) & 1; desc->type = (flags >> 8) & 15; } static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) { struct desc_struct desc; int offset; u16 selector; selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4); if (n < 3) offset = 0x7f84 + n * 12; else offset = 0x7f2c + (n - 3) * 12; set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8)); set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4)); rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset)); ctxt->ops->set_segment(ctxt, selector, &desc, 0, n); return X86EMUL_CONTINUE; } static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) { struct desc_struct desc; int offset; u16 selector; u32 base3; offset = 0x7e00 + n * 16; selector = GET_SMSTATE(u16, smbase, offset); rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8); set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4)); set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8)); base3 = GET_SMSTATE(u32, smbase, offset + 12); 
ctxt->ops->set_segment(ctxt, selector, &desc, base3, n); return X86EMUL_CONTINUE; } static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, u64 cr0, u64 cr4) { int bad; /* * First enable PAE, long mode needs it before CR0.PG = 1 is set. * Then enable protected mode. However, PCID cannot be enabled * if EFER.LMA=0, so set it separately. */ bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); if (bad) return X86EMUL_UNHANDLEABLE; bad = ctxt->ops->set_cr(ctxt, 0, cr0); if (bad) return X86EMUL_UNHANDLEABLE; if (cr4 & X86_CR4_PCIDE) { bad = ctxt->ops->set_cr(ctxt, 4, cr4); if (bad) return X86EMUL_UNHANDLEABLE; } return X86EMUL_CONTINUE; } static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase) { struct desc_struct desc; struct desc_ptr dt; u16 selector; u32 val, cr0, cr4; int i; cr0 = GET_SMSTATE(u32, smbase, 0x7ffc); ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8)); ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED; ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0); for (i = 0; i < 8; i++) *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4); val = GET_SMSTATE(u32, smbase, 0x7fcc); ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); val = GET_SMSTATE(u32, smbase, 0x7fc8); ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); selector = GET_SMSTATE(u32, smbase, 0x7fc4); set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64)); set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60)); rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c)); ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR); selector = GET_SMSTATE(u32, smbase, 0x7fc0); set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80)); set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c)); rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78)); ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR); dt.address = GET_SMSTATE(u32, smbase, 0x7f74); dt.size = GET_SMSTATE(u32, smbase, 0x7f70); ctxt->ops->set_gdt(ctxt, &dt); dt.address = GET_SMSTATE(u32, smbase, 0x7f58); dt.size = GET_SMSTATE(u32, smbase, 0x7f54); ctxt->ops->set_idt(ctxt, &dt); for (i = 0; i < 6; i++) { int r = rsm_load_seg_32(ctxt, smbase, i); if (r != X86EMUL_CONTINUE) return r; } cr4 = GET_SMSTATE(u32, smbase, 0x7f14); ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8)); return rsm_enter_protected_mode(ctxt, cr0, cr4); } static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) { struct desc_struct desc; struct desc_ptr dt; u64 val, cr0, cr4; u32 base3; u16 selector; int i, r; for (i = 0; i < 16; i++) *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8); ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78); ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED; val = GET_SMSTATE(u32, smbase, 0x7f68); ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); val = GET_SMSTATE(u32, smbase, 0x7f60); ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); cr0 = GET_SMSTATE(u64, smbase, 0x7f58); ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50)); cr4 = GET_SMSTATE(u64, smbase, 0x7f48); ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00)); val = GET_SMSTATE(u64, smbase, 0x7ed0); ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA); selector = GET_SMSTATE(u32, smbase, 0x7e90); rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8); set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94)); set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98)); base3 = GET_SMSTATE(u32, smbase, 0x7e9c); 
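/* Restore the task register (TR) from its slot in the 64-bit SMRAM state save area before the IDT, LDTR, GDT and data segments are reloaded below. */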
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR); dt.size = GET_SMSTATE(u32, smbase, 0x7e84); dt.address = GET_SMSTATE(u64, smbase, 0x7e88); ctxt->ops->set_idt(ctxt, &dt); selector = GET_SMSTATE(u32, smbase, 0x7e70); rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8); set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74)); set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78)); base3 = GET_SMSTATE(u32, smbase, 0x7e7c); ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR); dt.size = GET_SMSTATE(u32, smbase, 0x7e64); dt.address = GET_SMSTATE(u64, smbase, 0x7e68); ctxt->ops->set_gdt(ctxt, &dt); r = rsm_enter_protected_mode(ctxt, cr0, cr4); if (r != X86EMUL_CONTINUE) return r; for (i = 0; i < 6; i++) { r = rsm_load_seg_64(ctxt, smbase, i); if (r != X86EMUL_CONTINUE) return r; } return X86EMUL_CONTINUE; } static int em_rsm(struct x86_emulate_ctxt *ctxt) { unsigned long cr0, cr4, efer; u64 smbase; int ret; if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0) return emulate_ud(ctxt); /* * Get back to real mode, to prepare a safe state in which to load * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU * supports long mode. */ cr4 = ctxt->ops->get_cr(ctxt, 4); if (emulator_has_longmode(ctxt)) { struct desc_struct cs_desc; /* Zero CR4.PCIDE before CR0.PG. */ if (cr4 & X86_CR4_PCIDE) { ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); cr4 &= ~X86_CR4_PCIDE; } /* A 32-bit code segment is required to clear EFER.LMA. */ memset(&cs_desc, 0, sizeof(cs_desc)); cs_desc.type = 0xb; cs_desc.s = cs_desc.g = cs_desc.p = 1; ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS); } /* For the 64-bit case, this will clear EFER.LMA. */ cr0 = ctxt->ops->get_cr(ctxt, 0); if (cr0 & X86_CR0_PE) ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE)); /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */ if (cr4 & X86_CR4_PAE) ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE); /* And finally go back to 32-bit mode. 
*/ efer = 0; ctxt->ops->set_msr(ctxt, MSR_EFER, efer); smbase = ctxt->ops->get_smbase(ctxt); if (emulator_has_longmode(ctxt)) ret = rsm_load_state_64(ctxt, smbase + 0x8000); else ret = rsm_load_state_32(ctxt, smbase + 0x8000); if (ret != X86EMUL_CONTINUE) { /* FIXME: should triple fault */ return X86EMUL_UNHANDLEABLE; } if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) ctxt->ops->set_nmi_mask(ctxt, false); ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK; ctxt->emul_flags &= ~X86EMUL_SMM_MASK; return X86EMUL_CONTINUE; } static void setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, struct desc_struct *cs, struct desc_struct *ss) { cs->l = 0; /* will be adjusted later */ set_desc_base(cs, 0); /* flat segment */ cs->g = 1; /* 4kb granularity */ set_desc_limit(cs, 0xfffff); /* 4GB limit */ cs->type = 0x0b; /* Read, Execute, Accessed */ cs->s = 1; cs->dpl = 0; /* will be adjusted later */ cs->p = 1; cs->d = 1; cs->avl = 0; set_desc_base(ss, 0); /* flat segment */ set_desc_limit(ss, 0xfffff); /* 4GB limit */ ss->g = 1; /* 4kb granularity */ ss->s = 1; ss->type = 0x03; /* Read/Write, Accessed */ ss->d = 1; /* 32bit stack segment */ ss->dpl = 0; ss->p = 1; ss->l = 0; ss->avl = 0; } static bool vendor_intel(struct x86_emulate_ctxt *ctxt) { u32 eax, ebx, ecx, edx; eax = ecx = 0; ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx; } static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; u32 eax, ebx, ecx, edx; /* * syscall should always be enabled in longmode - so only become * vendor specific (cpuid) if other modes are active... */ if (ctxt->mode == X86EMUL_MODE_PROT64) return true; eax = 0x00000000; ecx = 0x00000000; ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); /* * Intel ("GenuineIntel") * remark: Intel CPUs only support "syscall" in 64bit * longmode. Also an 64bit guest with a * 32bit compat-app running will #UD !! While this * behaviour can be fixed (by emulating) into AMD * response - CPUs of AMD can't behave like Intel. */ if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) return false; /* AMD ("AuthenticAMD") */ if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) return true; /* AMD ("AMDisbetter!") */ if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) return true; /* default: (not Intel, not AMD), apply Intel's stricter rules... 
*/ return false; } static int em_syscall(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; /* syscall is not available in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_ud(ctxt); if (!(em_syscall_is_enabled(ctxt))) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_EFER, &efer); setup_syscalls_segments(ctxt, &cs, &ss); if (!(efer & EFER_SCE)) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_STAR, &msr_data); msr_data >>= 32; cs_sel = (u16)(msr_data & 0xfffc); ss_sel = (u16)(msr_data + 8); if (efer & EFER_LMA) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; if (efer & EFER_LMA) { #ifdef CONFIG_X86_64 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; ops->get_msr(ctxt, ctxt->mode == X86EMUL_MODE_PROT64 ? MSR_LSTAR : MSR_CSTAR, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); ctxt->eflags &= ~msr_data; ctxt->eflags |= X86_EFLAGS_FIXED; #endif } else { /* legacy mode */ ops->get_msr(ctxt, MSR_STAR, &msr_data); ctxt->_eip = (u32)msr_data; ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); } return X86EMUL_CONTINUE; } static int em_sysenter(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; ops->get_msr(ctxt, MSR_EFER, &efer); /* inject #GP if in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL) return emulate_gp(ctxt, 0); /* * Not recognized on AMD in compat mode (but is recognized in legacy * mode). */ if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) && !vendor_intel(ctxt)) return emulate_ud(ctxt); /* sysenter/sysexit have not been tested in 64bit mode. */ if (ctxt->mode == X86EMUL_MODE_PROT64) return X86EMUL_UNHANDLEABLE; setup_syscalls_segments(ctxt, &cs, &ss); ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK; ss_sel = cs_sel + 8; if (efer & EFER_LMA) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data; ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? 
msr_data : (u32)msr_data; return X86EMUL_CONTINUE; } static int em_sysexit(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data, rcx, rdx; int usermode; u16 cs_sel = 0, ss_sel = 0; /* inject #GP if in real mode or Virtual 8086 mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_gp(ctxt, 0); setup_syscalls_segments(ctxt, &cs, &ss); if ((ctxt->rex_prefix & 0x8) != 0x0) usermode = X86EMUL_MODE_PROT64; else usermode = X86EMUL_MODE_PROT32; rcx = reg_read(ctxt, VCPU_REGS_RCX); rdx = reg_read(ctxt, VCPU_REGS_RDX); cs.dpl = 3; ss.dpl = 3; ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (usermode) { case X86EMUL_MODE_PROT32: cs_sel = (u16)(msr_data + 16); if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); ss_sel = (u16)(msr_data + 24); rcx = (u32)rcx; rdx = (u32)rdx; break; case X86EMUL_MODE_PROT64: cs_sel = (u16)(msr_data + 32); if (msr_data == 0x0) return emulate_gp(ctxt, 0); ss_sel = cs_sel + 8; cs.d = 0; cs.l = 1; if (is_noncanonical_address(rcx) || is_noncanonical_address(rdx)) return emulate_gp(ctxt, 0); break; } cs_sel |= SEGMENT_RPL_MASK; ss_sel |= SEGMENT_RPL_MASK; ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt->_eip = rdx; *reg_write(ctxt, VCPU_REGS_RSP) = rcx; return X86EMUL_CONTINUE; } static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) { int iopl; if (ctxt->mode == X86EMUL_MODE_REAL) return false; if (ctxt->mode == X86EMUL_MODE_VM86) return true; iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; return ctxt->ops->cpl(ctxt) > iopl; } static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, u16 port, u16 len) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct tr_seg; u32 base3; int r; u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; unsigned mask = (1 << len) - 1; unsigned long base; ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); if (!tr_seg.p) return false; if (desc_limit_scaled(&tr_seg) < 103) return false; base = get_desc_base(&tr_seg); #ifdef CONFIG_X86_64 base |= ((u64)base3) << 32; #endif r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); if (r != X86EMUL_CONTINUE) return false; if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) return false; r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); if (r != X86EMUL_CONTINUE) return false; if ((perm >> bit_idx) & mask) return false; return true; } static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, u16 port, u16 len) { if (ctxt->perm_ok) return true; if (emulator_bad_iopl(ctxt)) if (!emulator_io_port_access_allowed(ctxt, port, len)) return false; ctxt->perm_ok = true; return true; } static void string_registers_quirk(struct x86_emulate_ctxt *ctxt) { /* * Intel CPUs mask the counter and pointers in quite strange * manner when ECX is zero due to REP-string optimizations. 
*/ #ifdef CONFIG_X86_64 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt)) return; *reg_write(ctxt, VCPU_REGS_RCX) = 0; switch (ctxt->b) { case 0xa4: /* movsb */ case 0xa5: /* movsd/w */ *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1; /* fall through */ case 0xaa: /* stosb */ case 0xab: /* stosd/w */ *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1; } #endif } static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { tss->ip = ctxt->_eip; tss->flag = ctxt->eflags; tss->ax = reg_read(ctxt, VCPU_REGS_RAX); tss->cx = reg_read(ctxt, VCPU_REGS_RCX); tss->dx = reg_read(ctxt, VCPU_REGS_RDX); tss->bx = reg_read(ctxt, VCPU_REGS_RBX); tss->sp = reg_read(ctxt, VCPU_REGS_RSP); tss->bp = reg_read(ctxt, VCPU_REGS_RBP); tss->si = reg_read(ctxt, VCPU_REGS_RSI); tss->di = reg_read(ctxt, VCPU_REGS_RDI); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); } static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { int ret; u8 cpl; ctxt->_eip = tss->ip; ctxt->eflags = tss->flag | 2; *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; /* * SDM says that segment selectors are loaded before segment * descriptors */ set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); cpl = tss->cs & 3; /* * Now load segment descriptors. 
If fault happens at this stage * it is handled in a context of new task */ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; return X86EMUL_CONTINUE; } static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_16 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss16(ctxt, &tss_seg); ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; ret = ops->write_std(ctxt, new_tss_base, &tss_seg.prev_task_link, sizeof tss_seg.prev_task_link, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; } return load_state_from_tss16(ctxt, &tss_seg); } static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { /* CR3 and ldt selector are not saved intentionally */ tss->eip = ctxt->_eip; tss->eflags = ctxt->eflags; tss->eax = reg_read(ctxt, VCPU_REGS_RAX); tss->ecx = reg_read(ctxt, VCPU_REGS_RCX); tss->edx = reg_read(ctxt, VCPU_REGS_RDX); tss->ebx = reg_read(ctxt, VCPU_REGS_RBX); tss->esp = reg_read(ctxt, VCPU_REGS_RSP); tss->ebp = reg_read(ctxt, VCPU_REGS_RBP); tss->esi = reg_read(ctxt, VCPU_REGS_RSI); tss->edi = reg_read(ctxt, VCPU_REGS_RDI); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); } static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { int ret; u8 cpl; if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) return emulate_gp(ctxt, 0); ctxt->_eip = tss->eip; ctxt->eflags = tss->eflags | 2; /* General purpose registers */ *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi; *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi; /* * SDM says that segment selectors are loaded before segment * descriptors. This is important because CPL checks will * use CS.RPL. 
*/ set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); /* * If we're switching between Protected Mode and VM86, we need to make * sure to update the mode before loading the segment descriptors so * that the selectors are interpreted correctly. */ if (ctxt->eflags & X86_EFLAGS_VM) { ctxt->mode = X86EMUL_MODE_VM86; cpl = 3; } else { ctxt->mode = X86EMUL_MODE_PROT32; cpl = tss->cs & 3; } /* * Now load segment descriptors. If fault happenes at this stage * it is handled in a context of new task */ ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); return ret; } static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_32 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); u32 eip_offset = offsetof(struct tss_segment_32, eip); u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss32(ctxt, &tss_seg); /* Only GP registers and segment selectors are saved */ ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset - eip_offset, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; ret = ops->write_std(ctxt, new_tss_base, &tss_seg.prev_task_link, sizeof tss_seg.prev_task_link, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; } return load_state_from_tss32(ctxt, &tss_seg); } static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct curr_tss_desc, next_tss_desc; int ret; u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); ulong old_tss_base = ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); u32 desc_limit; ulong desc_addr, dr7; /* FIXME: old_tss_base == ~0 ? 
*/ ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; /* FIXME: check that next_tss_desc is tss */ /* * Check privileges. The three cases are task switch caused by... * * 1. jmp/call/int to task gate: Check against DPL of the task gate * 2. Exception/IRQ/iret: No check is performed * 3. jmp/call to TSS/task-gate: No check is performed since the * hardware checks it before exiting. */ if (reason == TASK_SWITCH_GATE) { if (idt_index != -1) { /* Software interrupts */ struct desc_struct task_gate_desc; int dpl; ret = read_interrupt_descriptor(ctxt, idt_index, &task_gate_desc); if (ret != X86EMUL_CONTINUE) return ret; dpl = task_gate_desc.dpl; if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) return emulate_gp(ctxt, (idt_index << 3) | 0x2); } } desc_limit = desc_limit_scaled(&next_tss_desc); if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) { return emulate_ts(ctxt, tss_selector & 0xfffc); } if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); } if (reason == TASK_SWITCH_IRET) ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; /* set back link to prev task only if NT bit is set in eflags note that old_tss_sel is not used after this point */ if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) old_tss_sel = 0xffff; if (next_tss_desc.type & 8) ret = task_switch_32(ctxt, tss_selector, old_tss_sel, old_tss_base, &next_tss_desc); else ret = task_switch_16(ctxt, tss_selector, old_tss_sel, old_tss_base, &next_tss_desc); if (ret != X86EMUL_CONTINUE) return ret; if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; if (reason != TASK_SWITCH_IRET) { next_tss_desc.type |= (1 << 1); /* set busy flag */ write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); } ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); if (has_error_code) { ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; ctxt->lock_prefix = 0; ctxt->src.val = (unsigned long) error_code; ret = em_push(ctxt); } ops->get_dr(ctxt, 7, &dr7); ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN)); return ret; } int emulator_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { int rc; invalidate_registers(ctxt); ctxt->_eip = ctxt->eip; ctxt->dst.type = OP_NONE; rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, has_error_code, error_code); if (rc == X86EMUL_CONTINUE) { ctxt->eip = ctxt->_eip; writeback_registers(ctxt); } return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; } static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, struct operand *op) { int df = (ctxt->eflags & X86_EFLAGS_DF) ? 
-op->count : op->count; register_address_increment(ctxt, reg, df * op->bytes); op->addr.mem.ea = register_address(ctxt, reg); } static int em_das(struct x86_emulate_ctxt *ctxt) { u8 al, old_al; bool af, cf, old_cf; cf = ctxt->eflags & X86_EFLAGS_CF; al = ctxt->dst.val; old_al = al; old_cf = cf; cf = false; af = ctxt->eflags & X86_EFLAGS_AF; if ((al & 0x0f) > 9 || af) { al -= 6; cf = old_cf | (al >= 250); af = true; } else { af = false; } if (old_al > 0x99 || old_cf) { al -= 0x60; cf = true; } ctxt->dst.val = al; /* Set PF, ZF, SF */ ctxt->src.type = OP_IMM; ctxt->src.val = 0; ctxt->src.bytes = 1; fastop(ctxt, em_or); ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); if (cf) ctxt->eflags |= X86_EFLAGS_CF; if (af) ctxt->eflags |= X86_EFLAGS_AF; return X86EMUL_CONTINUE; } static int em_aam(struct x86_emulate_ctxt *ctxt) { u8 al, ah; if (ctxt->src.val == 0) return emulate_de(ctxt); al = ctxt->dst.val & 0xff; ah = al / ctxt->src.val; al %= ctxt->src.val; ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); /* Set PF, ZF, SF */ ctxt->src.type = OP_IMM; ctxt->src.val = 0; ctxt->src.bytes = 1; fastop(ctxt, em_or); return X86EMUL_CONTINUE; } static int em_aad(struct x86_emulate_ctxt *ctxt) { u8 al = ctxt->dst.val & 0xff; u8 ah = (ctxt->dst.val >> 8) & 0xff; al = (al + (ah * ctxt->src.val)) & 0xff; ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; /* Set PF, ZF, SF */ ctxt->src.type = OP_IMM; ctxt->src.val = 0; ctxt->src.bytes = 1; fastop(ctxt, em_or); return X86EMUL_CONTINUE; } static int em_call(struct x86_emulate_ctxt *ctxt) { int rc; long rel = ctxt->src.val; ctxt->src.val = (unsigned long)ctxt->_eip; rc = jmp_rel(ctxt, rel); if (rc != X86EMUL_CONTINUE) return rc; return em_push(ctxt); } static int em_call_far(struct x86_emulate_ctxt *ctxt) { u16 sel, old_cs; ulong old_eip; int rc; struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; int cpl = ctxt->ops->cpl(ctxt); enum x86emul_mode prev_mode = ctxt->mode; old_eip = ctxt->_eip; ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, X86_TRANSFER_CALL_JMP, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); if (rc != X86EMUL_CONTINUE) goto fail; ctxt->src.val = old_cs; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) goto fail; ctxt->src.val = old_eip; rc = em_push(ctxt); /* If we failed, we tainted the memory, but the very least we should restore cs */ if (rc != X86EMUL_CONTINUE) { pr_warn_once("faulting far call emulation tainted memory\n"); goto fail; } return rc; fail: ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); ctxt->mode = prev_mode; return rc; } static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_near(ctxt, eip); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, ctxt->src.val); return X86EMUL_CONTINUE; } static int em_xchg(struct x86_emulate_ctxt *ctxt) { /* Write back the register source. */ ctxt->src.val = ctxt->dst.val; write_register_operand(&ctxt->src); /* Write back the memory destination with implicit LOCK prefix. 
*/ ctxt->dst.val = ctxt->src.orig_val; ctxt->lock_prefix = 1; return X86EMUL_CONTINUE; } static int em_imul_3op(struct x86_emulate_ctxt *ctxt) { ctxt->dst.val = ctxt->src2.val; return fastop(ctxt, em_imul); } static int em_cwd(struct x86_emulate_ctxt *ctxt) { ctxt->dst.type = OP_REG; ctxt->dst.bytes = ctxt->src.bytes; ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); return X86EMUL_CONTINUE; } static int em_rdtsc(struct x86_emulate_ctxt *ctxt) { u64 tsc = 0; ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc; *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32; return X86EMUL_CONTINUE; } static int em_rdpmc(struct x86_emulate_ctxt *ctxt) { u64 pmc; if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc)) return emulate_gp(ctxt, 0); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc; *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32; return X86EMUL_CONTINUE; } static int em_mov(struct x86_emulate_ctxt *ctxt) { memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr)); return X86EMUL_CONTINUE; } #define FFL(x) bit(X86_FEATURE_##x) static int em_movbe(struct x86_emulate_ctxt *ctxt) { u32 ebx, ecx, edx, eax = 1; u16 tmp; /* * Check MOVBE is set in the guest-visible CPUID leaf. */ ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); if (!(ecx & FFL(MOVBE))) return emulate_ud(ctxt); switch (ctxt->op_bytes) { case 2: /* * From MOVBE definition: "...When the operand size is 16 bits, * the upper word of the destination register remains unchanged * ..." * * Both casting ->valptr and ->val to u16 breaks strict aliasing * rules so we have to do the operation almost per hand. */ tmp = (u16)ctxt->src.val; ctxt->dst.val &= ~0xffffUL; ctxt->dst.val |= (unsigned long)swab16(tmp); break; case 4: ctxt->dst.val = swab32((u32)ctxt->src.val); break; case 8: ctxt->dst.val = swab64(ctxt->src.val); break; default: BUG(); } return X86EMUL_CONTINUE; } static int em_cr_write(struct x86_emulate_ctxt *ctxt) { if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) return emulate_gp(ctxt, 0); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_dr_write(struct x86_emulate_ctxt *ctxt) { unsigned long val; if (ctxt->mode == X86EMUL_MODE_PROT64) val = ctxt->src.val & ~0ULL; else val = ctxt->src.val & ~0U; /* #UD condition is already handled. */ if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0) return emulate_gp(ctxt, 0); /* Disable writeback. 
*/ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_wrmsr(struct x86_emulate_ctxt *ctxt) { u64 msr_data; msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int em_rdmsr(struct x86_emulate_ctxt *ctxt) { u64 msr_data; if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data)) return emulate_gp(ctxt, 0); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data; *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32; return X86EMUL_CONTINUE; } static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) { if (ctxt->modrm_reg > VCPU_SREG_GS) return emulate_ud(ctxt); ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg); if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM) ctxt->dst.bytes = 2; return X86EMUL_CONTINUE; } static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) return emulate_ud(ctxt); if (ctxt->modrm_reg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); } static int em_lldt(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); } static int em_ltr(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); } static int em_invlpg(struct x86_emulate_ctxt *ctxt) { int rc; ulong linear; rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); if (rc == X86EMUL_CONTINUE) ctxt->ops->invlpg(ctxt, linear); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_clts(struct x86_emulate_ctxt *ctxt) { ulong cr0; cr0 = ctxt->ops->get_cr(ctxt, 0); cr0 &= ~X86_CR0_TS; ctxt->ops->set_cr(ctxt, 0, cr0); return X86EMUL_CONTINUE; } static int em_hypercall(struct x86_emulate_ctxt *ctxt) { int rc = ctxt->ops->fix_hypercall(ctxt); if (rc != X86EMUL_CONTINUE) return rc; /* Let the processor re-execute the fixed hypercall */ ctxt->_eip = ctxt->eip; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, void (*get)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *ptr)) { struct desc_ptr desc_ptr; if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; get(ctxt, &desc_ptr); if (ctxt->op_bytes == 2) { ctxt->op_bytes = 4; desc_ptr.address &= 0x00ffffff; } /* Disable writeback. 
*/ ctxt->dst.type = OP_NONE; return segmented_write(ctxt, ctxt->dst.addr.mem, &desc_ptr, 2 + ctxt->op_bytes); } static int em_sgdt(struct x86_emulate_ctxt *ctxt) { return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); } static int em_sidt(struct x86_emulate_ctxt *ctxt) { return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); } static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt) { struct desc_ptr desc_ptr; int rc; if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; rc = read_descriptor(ctxt, ctxt->src.addr.mem, &desc_ptr.size, &desc_ptr.address, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->mode == X86EMUL_MODE_PROT64 && is_noncanonical_address(desc_ptr.address)) return emulate_gp(ctxt, 0); if (lgdt) ctxt->ops->set_gdt(ctxt, &desc_ptr); else ctxt->ops->set_idt(ctxt, &desc_ptr); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_lgdt(struct x86_emulate_ctxt *ctxt) { return em_lgdt_lidt(ctxt, true); } static int em_lidt(struct x86_emulate_ctxt *ctxt) { return em_lgdt_lidt(ctxt, false); } static int em_smsw(struct x86_emulate_ctxt *ctxt) { if (ctxt->dst.type == OP_MEM) ctxt->dst.bytes = 2; ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); return X86EMUL_CONTINUE; } static int em_lmsw(struct x86_emulate_ctxt *ctxt) { ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) | (ctxt->src.val & 0x0f)); ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_loop(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; register_address_increment(ctxt, VCPU_REGS_RCX, -1); if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) rc = jmp_rel(ctxt, ctxt->src.val); return rc; } static int em_jcxz(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) rc = jmp_rel(ctxt, ctxt->src.val); return rc; } static int em_in(struct x86_emulate_ctxt *ctxt) { if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, &ctxt->dst.val)) return X86EMUL_IO_NEEDED; return X86EMUL_CONTINUE; } static int em_out(struct x86_emulate_ctxt *ctxt) { ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, &ctxt->src.val, 1); /* Disable writeback. 
*/ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_cli(struct x86_emulate_ctxt *ctxt) { if (emulator_bad_iopl(ctxt)) return emulate_gp(ctxt, 0); ctxt->eflags &= ~X86_EFLAGS_IF; return X86EMUL_CONTINUE; } static int em_sti(struct x86_emulate_ctxt *ctxt) { if (emulator_bad_iopl(ctxt)) return emulate_gp(ctxt, 0); ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; ctxt->eflags |= X86_EFLAGS_IF; return X86EMUL_CONTINUE; } static int em_cpuid(struct x86_emulate_ctxt *ctxt) { u32 eax, ebx, ecx, edx; eax = reg_read(ctxt, VCPU_REGS_RAX); ecx = reg_read(ctxt, VCPU_REGS_RCX); ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); *reg_write(ctxt, VCPU_REGS_RAX) = eax; *reg_write(ctxt, VCPU_REGS_RBX) = ebx; *reg_write(ctxt, VCPU_REGS_RCX) = ecx; *reg_write(ctxt, VCPU_REGS_RDX) = edx; return X86EMUL_CONTINUE; } static int em_sahf(struct x86_emulate_ctxt *ctxt) { u32 flags; flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF; flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; ctxt->eflags &= ~0xffUL; ctxt->eflags |= flags | X86_EFLAGS_FIXED; return X86EMUL_CONTINUE; } static int em_lahf(struct x86_emulate_ctxt *ctxt) { *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8; return X86EMUL_CONTINUE; } static int em_bswap(struct x86_emulate_ctxt *ctxt) { switch (ctxt->op_bytes) { #ifdef CONFIG_X86_64 case 8: asm("bswap %0" : "+r"(ctxt->dst.val)); break; #endif default: asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val)); break; } return X86EMUL_CONTINUE; } static int em_clflush(struct x86_emulate_ctxt *ctxt) { /* emulating clflush regardless of cpuid */ return X86EMUL_CONTINUE; } static int em_movsxd(struct x86_emulate_ctxt *ctxt) { ctxt->dst.val = (s32) ctxt->src.val; return X86EMUL_CONTINUE; } static bool valid_cr(int nr) { switch (nr) { case 0: case 2 ... 
4: case 8: return true; default: return false; } } static int check_cr_read(struct x86_emulate_ctxt *ctxt) { if (!valid_cr(ctxt->modrm_reg)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_cr_write(struct x86_emulate_ctxt *ctxt) { u64 new_val = ctxt->src.val64; int cr = ctxt->modrm_reg; u64 efer = 0; static u64 cr_reserved_bits[] = { 0xffffffff00000000ULL, 0, 0, 0, /* CR3 checked later */ CR4_RESERVED_BITS, 0, 0, 0, CR8_RESERVED_BITS, }; if (!valid_cr(cr)) return emulate_ud(ctxt); if (new_val & cr_reserved_bits[cr]) return emulate_gp(ctxt, 0); switch (cr) { case 0: { u64 cr4; if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) || ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD))) return emulate_gp(ctxt, 0); cr4 = ctxt->ops->get_cr(ctxt, 4); ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if ((new_val & X86_CR0_PG) && (efer & EFER_LME) && !(cr4 & X86_CR4_PAE)) return emulate_gp(ctxt, 0); break; } case 3: { u64 rsvd = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD; if (new_val & rsvd) return emulate_gp(ctxt, 0); break; } case 4: { ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE)) return emulate_gp(ctxt, 0); break; } } return X86EMUL_CONTINUE; } static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) { unsigned long dr7; ctxt->ops->get_dr(ctxt, 7, &dr7); /* Check if DR7.Global_Enable is set */ return dr7 & (1 << 13); } static int check_dr_read(struct x86_emulate_ctxt *ctxt) { int dr = ctxt->modrm_reg; u64 cr4; if (dr > 7) return emulate_ud(ctxt); cr4 = ctxt->ops->get_cr(ctxt, 4); if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5)) return emulate_ud(ctxt); if (check_dr7_gd(ctxt)) { ulong dr6; ctxt->ops->get_dr(ctxt, 6, &dr6); dr6 &= ~15; dr6 |= DR6_BD | DR6_RTM; ctxt->ops->set_dr(ctxt, 6, dr6); return emulate_db(ctxt); } return X86EMUL_CONTINUE; } static int check_dr_write(struct x86_emulate_ctxt *ctxt) { u64 new_val = ctxt->src.val64; int dr = ctxt->modrm_reg; if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL)) return emulate_gp(ctxt, 0); return check_dr_read(ctxt); } static int check_svme(struct x86_emulate_ctxt *ctxt) { u64 efer; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (!(efer & EFER_SVME)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_svme_pa(struct x86_emulate_ctxt *ctxt) { u64 rax = reg_read(ctxt, VCPU_REGS_RAX); /* Valid physical address? 
*/ if (rax & 0xffff000000000000ULL) return emulate_gp(ctxt, 0); return check_svme(ctxt); } static int check_rdtsc(struct x86_emulate_ctxt *ctxt) { u64 cr4 = ctxt->ops->get_cr(ctxt, 4); if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_rdpmc(struct x86_emulate_ctxt *ctxt) { u64 cr4 = ctxt->ops->get_cr(ctxt, 4); u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || ctxt->ops->check_pmc(ctxt, rcx)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int check_perm_in(struct x86_emulate_ctxt *ctxt) { ctxt->dst.bytes = min(ctxt->dst.bytes, 4u); if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int check_perm_out(struct x86_emulate_ctxt *ctxt) { ctxt->src.bytes = min(ctxt->src.bytes, 4u); if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } #define D(_y) { .flags = (_y) } #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i } #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \ .intercept = x86_intercept_##_i, .check_perm = (_p) } #define N D(NotImpl) #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) } #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) } #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) } #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) } #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } #define II(_f, _e, _i) \ { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i } #define IIP(_f, _e, _i, _p) \ { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \ .intercept = x86_intercept_##_i, .check_perm = (_p) } #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } #define D2bv(_f) D((_f) | ByteOp), D(_f) #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p) #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e) #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e) #define I2bvIP(_f, _e, _i, _p) \ IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p) #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \ F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) static const struct opcode group7_rm0[] = { N, I(SrcNone | Priv | EmulateOnUD, em_hypercall), N, N, N, N, N, N, }; static const struct opcode group7_rm1[] = { DI(SrcNone | Priv, monitor), DI(SrcNone | Priv, mwait), N, N, N, N, N, N, }; static const struct opcode group7_rm3[] = { DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall), DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa), DIP(SrcNone | Prot | Priv, stgi, check_svme), DIP(SrcNone | Prot | Priv, clgi, check_svme), DIP(SrcNone | Prot | Priv, skinit, check_svme), DIP(SrcNone | Prot | Priv, invlpga, check_svme), }; static const struct opcode group7_rm7[] = { N, DIP(SrcNone, rdtscp, check_rdtsc), N, N, N, N, N, N, }; static const struct opcode group1[] = { F(Lock, em_add), F(Lock | PageTable, em_or), F(Lock, em_adc), F(Lock, em_sbb), F(Lock | PageTable, em_and), F(Lock, em_sub), F(Lock, em_xor), 
F(NoWrite, em_cmp), }; static const struct opcode group1A[] = { I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N, }; static const struct opcode group2[] = { F(DstMem | ModRM, em_rol), F(DstMem | ModRM, em_ror), F(DstMem | ModRM, em_rcl), F(DstMem | ModRM, em_rcr), F(DstMem | ModRM, em_shl), F(DstMem | ModRM, em_shr), F(DstMem | ModRM, em_shl), F(DstMem | ModRM, em_sar), }; static const struct opcode group3[] = { F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcNone | Lock, em_not), F(DstMem | SrcNone | Lock, em_neg), F(DstXacc | Src2Mem, em_mul_ex), F(DstXacc | Src2Mem, em_imul_ex), F(DstXacc | Src2Mem, em_div_ex), F(DstXacc | Src2Mem, em_idiv_ex), }; static const struct opcode group4[] = { F(ByteOp | DstMem | SrcNone | Lock, em_inc), F(ByteOp | DstMem | SrcNone | Lock, em_dec), N, N, N, N, N, N, }; static const struct opcode group5[] = { F(DstMem | SrcNone | Lock, em_inc), F(DstMem | SrcNone | Lock, em_dec), I(SrcMem | NearBranch, em_call_near_abs), I(SrcMemFAddr | ImplicitOps, em_call_far), I(SrcMem | NearBranch, em_jmp_abs), I(SrcMemFAddr | ImplicitOps, em_jmp_far), I(SrcMem | Stack, em_push), D(Undefined), }; static const struct opcode group6[] = { DI(Prot | DstMem, sldt), DI(Prot | DstMem, str), II(Prot | Priv | SrcMem16, em_lldt, lldt), II(Prot | Priv | SrcMem16, em_ltr, ltr), N, N, N, N, }; static const struct group_dual group7 = { { II(Mov | DstMem, em_sgdt, sgdt), II(Mov | DstMem, em_sidt, sidt), II(SrcMem | Priv, em_lgdt, lgdt), II(SrcMem | Priv, em_lidt, lidt), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg), }, { EXT(0, group7_rm0), EXT(0, group7_rm1), N, EXT(0, group7_rm3), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7), } }; static const struct opcode group8[] = { N, N, N, N, F(DstMem | SrcImmByte | NoWrite, em_bt), F(DstMem | SrcImmByte | Lock | PageTable, em_bts), F(DstMem | SrcImmByte | Lock, em_btr), F(DstMem | SrcImmByte | Lock | PageTable, em_btc), }; static const struct group_dual group9 = { { N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N, }, { N, N, N, N, N, N, N, N, } }; static const struct opcode group11[] = { I(DstMem | SrcImm | Mov | PageTable, em_mov), X7(D(Undefined)), }; static const struct gprefix pfx_0f_ae_7 = { I(SrcMem | ByteOp, em_clflush), N, N, N, }; static const struct group_dual group15 = { { N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7), }, { N, N, N, N, N, N, N, N, } }; static const struct gprefix pfx_0f_6f_0f_7f = { I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov), }; static const struct instr_dual instr_dual_0f_2b = { I(0, em_mov), N }; static const struct gprefix pfx_0f_2b = { ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N, }; static const struct gprefix pfx_0f_28_0f_29 = { I(Aligned, em_mov), I(Aligned, em_mov), N, N, }; static const struct gprefix pfx_0f_e7 = { N, I(Sse, em_mov), N, N, }; static const struct escape escape_d9 = { { N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw), }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, N, N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct escape escape_db = { { N, N, N, N, N, N, 
N, N, }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, I(ImplicitOps, em_fninit), N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct escape escape_dd = { { N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw), }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, N, N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct instr_dual instr_dual_0f_c3 = { I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N }; static const struct mode_dual mode_dual_63 = { N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd) }; static const struct opcode opcode_table[256] = { /* 0x00 - 0x07 */ F6ALU(Lock, em_add), I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg), /* 0x08 - 0x0F */ F6ALU(Lock | PageTable, em_or), I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg), N, /* 0x10 - 0x17 */ F6ALU(Lock, em_adc), I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg), /* 0x18 - 0x1F */ F6ALU(Lock, em_sbb), I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg), /* 0x20 - 0x27 */ F6ALU(Lock | PageTable, em_and), N, N, /* 0x28 - 0x2F */ F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das), /* 0x30 - 0x37 */ F6ALU(Lock, em_xor), N, N, /* 0x38 - 0x3F */ F6ALU(NoWrite, em_cmp), N, N, /* 0x40 - 0x4F */ X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)), /* 0x50 - 0x57 */ X8(I(SrcReg | Stack, em_push)), /* 0x58 - 0x5F */ X8(I(DstReg | Stack, em_pop)), /* 0x60 - 0x67 */ I(ImplicitOps | Stack | No64, em_pusha), I(ImplicitOps | Stack | No64, em_popa), N, MD(ModRM, &mode_dual_63), N, N, N, N, /* 0x68 - 0x6F */ I(SrcImm | Mov | Stack, em_push), I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), I(SrcImmByte | Mov | Stack, em_push), I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */ I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */ /* 0x70 - 0x7F */ X16(D(SrcImmByte | NearBranch)), /* 0x80 - 0x87 */ G(ByteOp | DstMem | SrcImm, group1), G(DstMem | SrcImm, group1), G(ByteOp | DstMem | SrcImm | No64, group1), G(DstMem | SrcImmByte, group1), F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test), I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg), /* 0x88 - 0x8F */ I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov), I2bv(DstReg | SrcMem | ModRM | Mov, em_mov), I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg), D(ModRM | SrcMem | NoAccess | DstReg), I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm), G(0, group1A), /* 0x90 - 0x97 */ DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), /* 0x98 - 0x9F */ D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), I(SrcImmFAddr | No64, em_call_far), N, II(ImplicitOps | Stack, em_pushf, pushf), II(ImplicitOps | Stack, em_popf, popf), I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf), /* 0xA0 - 0xA7 */ I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), I2bv(DstMem | SrcAcc | Mov | MemAbs | 
PageTable, em_mov), I2bv(SrcSI | DstDI | Mov | String, em_mov), F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r), /* 0xA8 - 0xAF */ F2bv(DstAcc | SrcImm | NoWrite, em_test), I2bv(SrcAcc | DstDI | Mov | String, em_mov), I2bv(SrcSI | DstAcc | Mov | String, em_mov), F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r), /* 0xB0 - 0xB7 */ X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), /* 0xB8 - 0xBF */ X8(I(DstReg | SrcImm64 | Mov, em_mov)), /* 0xC0 - 0xC7 */ G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2), I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm), I(ImplicitOps | NearBranch, em_ret), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg), G(ByteOp, group11), G(0, group11), /* 0xC8 - 0xCF */ I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave), I(ImplicitOps | SrcImmU16, em_ret_far_imm), I(ImplicitOps, em_ret_far), D(ImplicitOps), DI(SrcImmByte, intn), D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), /* 0xD0 - 0xD7 */ G(Src2One | ByteOp, group2), G(Src2One, group2), G(Src2CL | ByteOp, group2), G(Src2CL, group2), I(DstAcc | SrcImmUByte | No64, em_aam), I(DstAcc | SrcImmUByte | No64, em_aad), F(DstAcc | ByteOp | No64, em_salc), I(DstAcc | SrcXLat | ByteOp, em_mov), /* 0xD8 - 0xDF */ N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, /* 0xE0 - 0xE7 */ X3(I(SrcImmByte | NearBranch, em_loop)), I(SrcImmByte | NearBranch, em_jcxz), I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out), /* 0xE8 - 0xEF */ I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch), I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps | NearBranch), I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out), /* 0xF0 - 0xF7 */ N, DI(ImplicitOps, icebp), N, N, DI(ImplicitOps | Priv, hlt), D(ImplicitOps), G(ByteOp, group3), G(0, group3), /* 0xF8 - 0xFF */ D(ImplicitOps), D(ImplicitOps), I(ImplicitOps, em_cli), I(ImplicitOps, em_sti), D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), }; static const struct opcode twobyte_table[256] = { /* 0x00 - 0x0F */ G(0, group6), GD(0, &group7), N, N, N, I(ImplicitOps | EmulateOnUD, em_syscall), II(ImplicitOps | Priv, em_clts, clts), N, DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, /* 0x10 - 0x1F */ N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 0x20 - 0x2F */ DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read), DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write, check_cr_write), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write, check_dr_write), N, N, N, N, GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29), GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29), N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b), N, N, N, N, /* 0x30 - 0x3F */ II(ImplicitOps | Priv, em_wrmsr, wrmsr), IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), II(ImplicitOps | Priv, em_rdmsr, rdmsr), IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc), I(ImplicitOps | EmulateOnUD, em_sysenter), I(ImplicitOps | Priv | EmulateOnUD, em_sysexit), N, N, N, N, N, N, N, N, N, N, /* 0x40 - 0x4F */ X16(D(DstReg | SrcMem | ModRM)), /* 0x50 - 0x5F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, /* 
0x60 - 0x6F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f), /* 0x70 - 0x7F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f), /* 0x80 - 0x8F */ X16(D(SrcImm | NearBranch)), /* 0x90 - 0x9F */ X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)), /* 0xA0 - 0xA7 */ I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg), II(ImplicitOps, em_cpuid, cpuid), F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld), F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N, /* 0xA8 - 0xAF */ I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg), II(EmulateOnUD | ImplicitOps, em_rsm, rsm), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd), F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul), /* 0xB0 - 0xB7 */ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg), I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg), F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr), I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg), I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xB8 - 0xBF */ N, N, G(BitOp, group8), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), I(DstReg | SrcMem | ModRM, em_bsf_c), I(DstReg | SrcMem | ModRM, em_bsr_c), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xC0 - 0xC7 */ F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd), N, ID(0, &instr_dual_0f_c3), N, N, N, GD(0, &group9), /* 0xC8 - 0xCF */ X8(I(DstReg, em_bswap)), /* 0xD0 - 0xDF */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, /* 0xE0 - 0xEF */ N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7), N, N, N, N, N, N, N, N, /* 0xF0 - 0xFF */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N }; static const struct instr_dual instr_dual_0f_38_f0 = { I(DstReg | SrcMem | Mov, em_movbe), N }; static const struct instr_dual instr_dual_0f_38_f1 = { I(DstMem | SrcReg | Mov, em_movbe), N }; static const struct gprefix three_byte_0f_38_f0 = { ID(0, &instr_dual_0f_38_f0), N, N, N }; static const struct gprefix three_byte_0f_38_f1 = { ID(0, &instr_dual_0f_38_f1), N, N, N }; /* * Insns below are selected by the prefix which indexed by the third opcode * byte. */ static const struct opcode opcode_map_0f_38[256] = { /* 0x00 - 0x7f */ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), /* 0x80 - 0xef */ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), /* 0xf0 - 0xf1 */ GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0), GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1), /* 0xf2 - 0xff */ N, N, X4(N), X8(N) }; #undef D #undef N #undef G #undef GD #undef I #undef GP #undef EXT #undef MD #undef ID #undef D2bv #undef D2bvIP #undef I2bv #undef I2bvIP #undef I6ALU static unsigned imm_size(struct x86_emulate_ctxt *ctxt) { unsigned size; size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; if (size == 8) size = 4; return size; } static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned size, bool sign_extension) { int rc = X86EMUL_CONTINUE; op->type = OP_IMM; op->bytes = size; op->addr.mem.ea = ctxt->_eip; /* NB. Immediates are sign-extended as necessary. 
*/ switch (op->bytes) { case 1: op->val = insn_fetch(s8, ctxt); break; case 2: op->val = insn_fetch(s16, ctxt); break; case 4: op->val = insn_fetch(s32, ctxt); break; case 8: op->val = insn_fetch(s64, ctxt); break; } if (!sign_extension) { switch (op->bytes) { case 1: op->val &= 0xff; break; case 2: op->val &= 0xffff; break; case 4: op->val &= 0xffffffff; break; } } done: return rc; } static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned d) { int rc = X86EMUL_CONTINUE; switch (d) { case OpReg: decode_register_operand(ctxt, op); break; case OpImmUByte: rc = decode_imm(ctxt, op, 1, false); break; case OpMem: ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; mem_common: *op = ctxt->memop; ctxt->memopp = op; if (ctxt->d & BitOp) fetch_bit_operand(ctxt); op->orig_val = op->val; break; case OpMem64: ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8; goto mem_common; case OpAcc: op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); fetch_register_operand(op); op->orig_val = op->val; break; case OpAccLo: op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); fetch_register_operand(op); op->orig_val = op->val; break; case OpAccHi: if (ctxt->d & ByteOp) { op->type = OP_NONE; break; } op->type = OP_REG; op->bytes = ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); fetch_register_operand(op); op->orig_val = op->val; break; case OpDI: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, VCPU_REGS_RDI); op->addr.mem.seg = VCPU_SREG_ES; op->val = 0; op->count = 1; break; case OpDX: op->type = OP_REG; op->bytes = 2; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); fetch_register_operand(op); break; case OpCL: op->type = OP_IMM; op->bytes = 1; op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; break; case OpImmByte: rc = decode_imm(ctxt, op, 1, true); break; case OpOne: op->type = OP_IMM; op->bytes = 1; op->val = 1; break; case OpImm: rc = decode_imm(ctxt, op, imm_size(ctxt), true); break; case OpImm64: rc = decode_imm(ctxt, op, ctxt->op_bytes, true); break; case OpMem8: ctxt->memop.bytes = 1; if (ctxt->memop.type == OP_REG) { ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, true); fetch_register_operand(&ctxt->memop); } goto mem_common; case OpMem16: ctxt->memop.bytes = 2; goto mem_common; case OpMem32: ctxt->memop.bytes = 4; goto mem_common; case OpImmU16: rc = decode_imm(ctxt, op, 2, false); break; case OpImmU: rc = decode_imm(ctxt, op, imm_size(ctxt), false); break; case OpSI: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, VCPU_REGS_RSI); op->addr.mem.seg = ctxt->seg_override; op->val = 0; op->count = 1; break; case OpXLat: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; op->addr.mem.ea = address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RBX) + (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); op->addr.mem.seg = ctxt->seg_override; op->val = 0; break; case OpImmFAddr: op->type = OP_IMM; op->addr.mem.ea = ctxt->_eip; op->bytes = ctxt->op_bytes + 2; insn_fetch_arr(op->valptr, op->bytes, ctxt); break; case OpMemFAddr: ctxt->memop.bytes = ctxt->op_bytes + 2; goto mem_common; case OpES: op->type = OP_IMM; op->val = VCPU_SREG_ES; break; case OpCS: op->type = OP_IMM; op->val = VCPU_SREG_CS; break; case OpSS: op->type = OP_IMM; op->val = VCPU_SREG_SS; break; case OpDS: op->type = OP_IMM; op->val = VCPU_SREG_DS; break; case OpFS: op->type = OP_IMM; op->val = VCPU_SREG_FS; break; case OpGS: op->type = OP_IMM; op->val = VCPU_SREG_GS; break; case OpImplicit: /* Special instructions do their own operand decoding. */ default: op->type = OP_NONE; /* Disable writeback. */ break; } done: return rc; } int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) { int rc = X86EMUL_CONTINUE; int mode = ctxt->mode; int def_op_bytes, def_ad_bytes, goffset, simd_prefix; bool op_prefix = false; bool has_seg_override = false; struct opcode opcode; ctxt->memop.type = OP_NONE; ctxt->memopp = NULL; ctxt->_eip = ctxt->eip; ctxt->fetch.ptr = ctxt->fetch.data; ctxt->fetch.end = ctxt->fetch.data + insn_len; ctxt->opcode_len = 1; if (insn_len > 0) memcpy(ctxt->fetch.data, insn, insn_len); else { rc = __do_insn_fetch_bytes(ctxt, 1); if (rc != X86EMUL_CONTINUE) return rc; } switch (mode) { case X86EMUL_MODE_REAL: case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: def_op_bytes = def_ad_bytes = 2; break; case X86EMUL_MODE_PROT32: def_op_bytes = def_ad_bytes = 4; break; #ifdef CONFIG_X86_64 case X86EMUL_MODE_PROT64: def_op_bytes = 4; def_ad_bytes = 8; break; #endif default: return EMULATION_FAILED; } ctxt->op_bytes = def_op_bytes; ctxt->ad_bytes = def_ad_bytes; /* Legacy prefixes. */ for (;;) { switch (ctxt->b = insn_fetch(u8, ctxt)) { case 0x66: /* operand-size override */ op_prefix = true; /* switch between 2/4 bytes */ ctxt->op_bytes = def_op_bytes ^ 6; break; case 0x67: /* address-size override */ if (mode == X86EMUL_MODE_PROT64) /* switch between 4/8 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 12; else /* switch between 2/4 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 6; break; case 0x26: /* ES override */ case 0x2e: /* CS override */ case 0x36: /* SS override */ case 0x3e: /* DS override */ has_seg_override = true; ctxt->seg_override = (ctxt->b >> 3) & 3; break; case 0x64: /* FS override */ case 0x65: /* GS override */ has_seg_override = true; ctxt->seg_override = ctxt->b & 7; break; case 0x40 ... 0x4f: /* REX */ if (mode != X86EMUL_MODE_PROT64) goto done_prefixes; ctxt->rex_prefix = ctxt->b; continue; case 0xf0: /* LOCK */ ctxt->lock_prefix = 1; break; case 0xf2: /* REPNE/REPNZ */ case 0xf3: /* REP/REPE/REPZ */ ctxt->rep_prefix = ctxt->b; break; default: goto done_prefixes; } /* Any legacy prefix after a REX prefix nullifies its effect. */ ctxt->rex_prefix = 0; } done_prefixes: /* REX prefix. */ if (ctxt->rex_prefix & 8) ctxt->op_bytes = 8; /* REX.W */ /* Opcode byte(s). */ opcode = opcode_table[ctxt->b]; /* Two-byte opcode? 
*/ if (ctxt->b == 0x0f) { ctxt->opcode_len = 2; ctxt->b = insn_fetch(u8, ctxt); opcode = twobyte_table[ctxt->b]; /* 0F_38 opcode map */ if (ctxt->b == 0x38) { ctxt->opcode_len = 3; ctxt->b = insn_fetch(u8, ctxt); opcode = opcode_map_0f_38[ctxt->b]; } } ctxt->d = opcode.flags; if (ctxt->d & ModRM) ctxt->modrm = insn_fetch(u8, ctxt); /* vex-prefix instructions are not implemented */ if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) { ctxt->d = NotImpl; } while (ctxt->d & GroupMask) { switch (ctxt->d & GroupMask) { case Group: goffset = (ctxt->modrm >> 3) & 7; opcode = opcode.u.group[goffset]; break; case GroupDual: goffset = (ctxt->modrm >> 3) & 7; if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.gdual->mod3[goffset]; else opcode = opcode.u.gdual->mod012[goffset]; break; case RMExt: goffset = ctxt->modrm & 7; opcode = opcode.u.group[goffset]; break; case Prefix: if (ctxt->rep_prefix && op_prefix) return EMULATION_FAILED; simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix; switch (simd_prefix) { case 0x00: opcode = opcode.u.gprefix->pfx_no; break; case 0x66: opcode = opcode.u.gprefix->pfx_66; break; case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; } break; case Escape: if (ctxt->modrm > 0xbf) opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; else opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; break; case InstrDual: if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.idual->mod3; else opcode = opcode.u.idual->mod012; break; case ModeDual: if (ctxt->mode == X86EMUL_MODE_PROT64) opcode = opcode.u.mdual->mode64; else opcode = opcode.u.mdual->mode32; break; default: return EMULATION_FAILED; } ctxt->d &= ~(u64)GroupMask; ctxt->d |= opcode.flags; } /* Unrecognised? */ if (ctxt->d == 0) return EMULATION_FAILED; ctxt->execute = opcode.u.execute; if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD))) return EMULATION_FAILED; if (unlikely(ctxt->d & (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch| No16))) { /* * These are copied unconditionally here, and checked unconditionally * in x86_emulate_insn. */ ctxt->check_perm = opcode.check_perm; ctxt->intercept = opcode.intercept; if (ctxt->d & NotImpl) return EMULATION_FAILED; if (mode == X86EMUL_MODE_PROT64) { if (ctxt->op_bytes == 4 && (ctxt->d & Stack)) ctxt->op_bytes = 8; else if (ctxt->d & NearBranch) ctxt->op_bytes = 8; } if (ctxt->d & Op3264) { if (mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; else ctxt->op_bytes = 4; } if ((ctxt->d & No16) && ctxt->op_bytes == 2) ctxt->op_bytes = 4; if (ctxt->d & Sse) ctxt->op_bytes = 16; else if (ctxt->d & Mmx) ctxt->op_bytes = 8; } /* ModRM and SIB bytes. */ if (ctxt->d & ModRM) { rc = decode_modrm(ctxt, &ctxt->memop); if (!has_seg_override) { has_seg_override = true; ctxt->seg_override = ctxt->modrm_seg; } } else if (ctxt->d & MemAbs) rc = decode_abs(ctxt, &ctxt->memop); if (rc != X86EMUL_CONTINUE) goto done; if (!has_seg_override) ctxt->seg_override = VCPU_SREG_DS; ctxt->memop.addr.mem.seg = ctxt->seg_override; /* * Decode and fetch the source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* * Decode and fetch the second source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* Decode and fetch the destination operand: register or memory. 
*/ rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); if (ctxt->rip_relative) ctxt->memopp->addr.mem.ea = address_mask(ctxt, ctxt->memopp->addr.mem.ea + ctxt->_eip); done: return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; } bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt) { return ctxt->d & PageTable; } static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) { /* The second termination condition only applies for REPE * and REPNE. Test if the repeat string operation prefix is * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the * corresponding termination condition according to: * - if REPE/REPZ and ZF = 0 then done * - if REPNE/REPNZ and ZF = 1 then done */ if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || (ctxt->b == 0xae) || (ctxt->b == 0xaf)) && (((ctxt->rep_prefix == REPE_PREFIX) && ((ctxt->eflags & X86_EFLAGS_ZF) == 0)) || ((ctxt->rep_prefix == REPNE_PREFIX) && ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF)))) return true; return false; } static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt) { bool fault = false; ctxt->ops->get_fpu(ctxt); asm volatile("1: fwait \n\t" "2: \n\t" ".pushsection .fixup,\"ax\" \n\t" "3: \n\t" "movb $1, %[fault] \n\t" "jmp 2b \n\t" ".popsection \n\t" _ASM_EXTABLE(1b, 3b) : [fault]"+qm"(fault)); ctxt->ops->put_fpu(ctxt); if (unlikely(fault)) return emulate_exception(ctxt, MF_VECTOR, 0, false); return X86EMUL_CONTINUE; } static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt, struct operand *op) { if (op->type == OP_MM) read_mmx_reg(ctxt, &op->mm_val, op->addr.mm); } static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) { ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; if (!(ctxt->d & ByteOp)) fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), [fastop]"+S"(fop) : "c"(ctxt->src2.val)); ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); if (!fop) /* exception is returned in fop variable */ return emulate_de(ctxt); return X86EMUL_CONTINUE; } void init_decode_cache(struct x86_emulate_ctxt *ctxt) { memset(&ctxt->rip_relative, 0, (void *)&ctxt->modrm - (void *)&ctxt->rip_relative); ctxt->io_read.pos = 0; ctxt->io_read.end = 0; ctxt->mem_read.end = 0; } int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; ctxt->mem_read.pos = 0; /* LOCK prefix is allowed only with some instructions */ if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { rc = emulate_ud(ctxt); goto done; } if (unlikely(ctxt->d & (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || (ctxt->d & Undefined)) { rc = emulate_ud(ctxt); goto done; } if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { rc = emulate_nm(ctxt); goto done; } if (ctxt->d & Mmx) { rc = flush_pending_x87_faults(ctxt); if (rc != X86EMUL_CONTINUE) goto done; /* * Now that we know the fpu is exception safe, we can fetch * operands from it. 
*/ fetch_possible_mmx_operand(ctxt, &ctxt->src); fetch_possible_mmx_operand(ctxt, &ctxt->src2); if (!(ctxt->d & Mov)) fetch_possible_mmx_operand(ctxt, &ctxt->dst); } if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } /* Instruction can only be executed in protected mode */ if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { rc = emulate_ud(ctxt); goto done; } /* Privileged instruction can be executed only in CPL=0 */ if ((ctxt->d & Priv) && ops->cpl(ctxt)) { if (ctxt->d & PrivUD) rc = emulate_ud(ctxt); else rc = emulate_gp(ctxt, 0); goto done; } /* Do instruction specific permission checks */ if (ctxt->d & CheckPerm) { rc = ctxt->check_perm(ctxt); if (rc != X86EMUL_CONTINUE) goto done; } if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) { /* All REP prefixes have the same first termination condition */ if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { string_registers_quirk(ctxt); ctxt->eip = ctxt->_eip; ctxt->eflags &= ~X86_EFLAGS_RF; goto done; } } } if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { rc = segmented_read(ctxt, ctxt->src.addr.mem, ctxt->src.valptr, ctxt->src.bytes); if (rc != X86EMUL_CONTINUE) goto done; ctxt->src.orig_val64 = ctxt->src.val64; } if (ctxt->src2.type == OP_MEM) { rc = segmented_read(ctxt, ctxt->src2.addr.mem, &ctxt->src2.val, ctxt->src2.bytes); if (rc != X86EMUL_CONTINUE) goto done; } if ((ctxt->d & DstMask) == ImplicitOps) goto special_insn; if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { /* optimisation - avoid slow emulated read if Mov */ rc = segmented_read(ctxt, ctxt->dst.addr.mem, &ctxt->dst.val, ctxt->dst.bytes); if (rc != X86EMUL_CONTINUE) { if (!(ctxt->d & NoWrite) && rc == X86EMUL_PROPAGATE_FAULT && ctxt->exception.vector == PF_VECTOR) ctxt->exception.error_code |= PFERR_WRITE_MASK; goto done; } } /* Copy full 64-bit value for CMPXCHG8B. */ ctxt->dst.orig_val64 = ctxt->dst.val64; special_insn: if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) ctxt->eflags |= X86_EFLAGS_RF; else ctxt->eflags &= ~X86_EFLAGS_RF; if (ctxt->execute) { if (ctxt->d & Fastop) { void (*fop)(struct fastop *) = (void *)ctxt->execute; rc = fastop(ctxt, fop); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } rc = ctxt->execute(ctxt); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } if (ctxt->opcode_len == 2) goto twobyte_insn; else if (ctxt->opcode_len == 3) goto threebyte_insn; switch (ctxt->b) { case 0x70 ... 0x7f: /* jcc (short) */ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x8d: /* lea r16/r32, m */ ctxt->dst.val = ctxt->src.addr.mem.ea; break; case 0x90 ... 
0x97: /* nop / xchg reg, rax */ if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) ctxt->dst.type = OP_NONE; else rc = em_xchg(ctxt); break; case 0x98: /* cbw/cwde/cdqe */ switch (ctxt->op_bytes) { case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; } break; case 0xcc: /* int3 */ rc = emulate_int(ctxt, 3); break; case 0xcd: /* int n */ rc = emulate_int(ctxt, ctxt->src.val); break; case 0xce: /* into */ if (ctxt->eflags & X86_EFLAGS_OF) rc = emulate_int(ctxt, 4); break; case 0xe9: /* jmp rel */ case 0xeb: /* jmp rel short */ rc = jmp_rel(ctxt, ctxt->src.val); ctxt->dst.type = OP_NONE; /* Disable writeback. */ break; case 0xf4: /* hlt */ ctxt->ops->halt(ctxt); break; case 0xf5: /* cmc */ /* complement carry flag from eflags reg */ ctxt->eflags ^= X86_EFLAGS_CF; break; case 0xf8: /* clc */ ctxt->eflags &= ~X86_EFLAGS_CF; break; case 0xf9: /* stc */ ctxt->eflags |= X86_EFLAGS_CF; break; case 0xfc: /* cld */ ctxt->eflags &= ~X86_EFLAGS_DF; break; case 0xfd: /* std */ ctxt->eflags |= X86_EFLAGS_DF; break; default: goto cannot_emulate; } if (rc != X86EMUL_CONTINUE) goto done; writeback: if (ctxt->d & SrcWrite) { BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); rc = writeback(ctxt, &ctxt->src); if (rc != X86EMUL_CONTINUE) goto done; } if (!(ctxt->d & NoWrite)) { rc = writeback(ctxt, &ctxt->dst); if (rc != X86EMUL_CONTINUE) goto done; } /* * restore dst type in case the decoding will be reused * (happens for string instruction ) */ ctxt->dst.type = saved_dst_type; if ((ctxt->d & SrcMask) == SrcSI) string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src); if ((ctxt->d & DstMask) == DstDI) string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst); if (ctxt->rep_prefix && (ctxt->d & String)) { unsigned int count; struct read_cache *r = &ctxt->io_read; if ((ctxt->d & SrcMask) == SrcSI) count = ctxt->src.count; else count = ctxt->dst.count; register_address_increment(ctxt, VCPU_REGS_RCX, -count); if (!string_insn_completed(ctxt)) { /* * Re-enter guest when pio read ahead buffer is empty * or, if it is not used, after each 1024 iteration. */ if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) && (r->end == 0 || r->end != r->pos)) { /* * Reset read cache. Usually happens before * decode, but since instruction is restarted * we have to do it here. */ ctxt->mem_read.end = 0; writeback_registers(ctxt); return EMULATION_RESTART; } goto done; /* skip rip writeback */ } ctxt->eflags &= ~X86_EFLAGS_RF; } ctxt->eip = ctxt->_eip; done: if (rc == X86EMUL_PROPAGATE_FAULT) { WARN_ON(ctxt->exception.vector > 0x1f); ctxt->have_exception = true; } if (rc == X86EMUL_INTERCEPTED) return EMULATION_INTERCEPTED; if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; twobyte_insn: switch (ctxt->b) { case 0x09: /* wbinvd */ (ctxt->ops->wbinvd)(ctxt); break; case 0x08: /* invd */ case 0x0d: /* GrpP (prefetch) */ case 0x18: /* Grp16 (prefetch/nop) */ case 0x1f: /* nop */ break; case 0x20: /* mov cr, reg */ ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); break; case 0x21: /* mov from dr to reg */ ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); break; case 0x40 ... 0x4f: /* cmov */ if (test_cc(ctxt->b, ctxt->eflags)) ctxt->dst.val = ctxt->src.val; else if (ctxt->op_bytes != 4) ctxt->dst.type = OP_NONE; /* no writeback */ break; case 0x80 ... 
0x8f: /* jnz rel, etc*/ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x90 ... 0x9f: /* setcc r/m8 */ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); break; case 0xb6 ... 0xb7: /* movzx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val : (u16) ctxt->src.val; break; case 0xbe ... 0xbf: /* movsx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : (s16) ctxt->src.val; break; default: goto cannot_emulate; } threebyte_insn: if (rc != X86EMUL_CONTINUE) goto done; goto writeback; cannot_emulate: return EMULATION_FAILED; } void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt) { invalidate_registers(ctxt); } void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt) { writeback_registers(ctxt); }
gpl-2.0
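The decoder above fetches immediates of 1, 2, 4 or 8 bytes and widens them to a 64-bit operand value, either sign- or zero-extending them (decode_imm, with imm_size capping a 64-bit operand size at a 4-byte immediate). A minimal, self-contained sketch of that widening step is shown below; it assumes a flat little-endian byte buffer rather than the emulator's fetch cache, and the function name and buffer cursor are illustrative only.

#include <stdint.h>
#include <string.h>

/* Read a little-endian immediate of 'size' (1, 2, 4 or 8) bytes from 'p'
 * and widen it to 64 bits.  With sign_extend set, the value is treated as
 * signed and the sign bit is propagated into the upper bits, mirroring
 * what decode_imm() achieves via its s8/s16/s32/s64 fetches. */
static uint64_t fetch_imm(const uint8_t *p, unsigned size, int sign_extend)
{
	uint64_t val = 0;

	memcpy(&val, p, size);                  /* little-endian host assumed */
	if (sign_extend && size < 8) {
		uint64_t sign = 1ULL << (size * 8 - 1);
		if (val & sign)
			val |= ~(2 * sign - 1); /* propagate the sign upward */
	}
	return val;
}

/* Example: the byte pair fe ff decodes to 0xfffffffffffffffe (-2) when
 * sign-extended and to 0x000000000000fffe when zero-extended. */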
talnoah/m8_sense
drivers/leds/leds-lm3697.c
124
8805
/* * LM3697 BL Driver * * Simple driver for TEXAS Instruments LM3697 Backlight driver chip * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/i2c.h> #include <linux/leds.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/types.h> #include <linux/regulator/consumer.h> #include <linux/module.h> #include <mach/debug_display.h> #define LM3697_LED_DEV "LM3697-BL" #define LM3697_NAME "lm3697-bl" #define MAX_BRIGHTNESS (255) #define BANK_NONE 0x00 #define BANK_A 0x01 #define BANK_B 0x02 #define LM3697_REVISION_REG 0x00 #define LM3697_SW_RESET_REG 0x01 #define LM3697_HVLED_CURR_SINK_OUT_CFG_REG 0x10 #define LM3697_CTL_A_RAMP_TIME_REG 0x11 #define LM3697_CTL_B_RAMP_TIME_REG 0x12 #define LM3697_CTL_RUNTIME_RAMP_TIME_REG 0x13 #define LM3697_CTL_RUNTIME_RAMP_CFG_REG 0x14 #define LM3697_BRIGHTNESS_CFG_REG 0x16 #define LM3697_CTL_A_FULL_SCALE_CURR_REG 0x17 #define LM3697_CTL_B_FULL_SCALE_CURR_REG 0x18 #define LM3697_HVLED_CURR_SINK_FEEDBACK_REG 0x19 #define LM3697_BOOST_CTL_REG 0x1A #define LM3697_AUTO_FREQ_THRESHOLD_REG 0x1B #define LM3697_PWM_CFG_REG 0x1C #define LM3697_CTL_A_BRIGHTNESS_LSB_REG 0x20 #define LM3697_CTL_A_BRIGHTNESS_MSB_REG 0x21 #define LM3697_CTL_B_BRIGHTNESS_LSB_REG 0x22 #define LM3697_CTL_B_BRIGHTNESS_MSB_REG 0x23 #define LM3697_CTL_B_BANK_EN_REG 0x24 struct lm3697_data { struct led_classdev led_dev; struct i2c_client *client; struct i2c_adapter *adapter; unsigned short addr; struct mutex lock; struct work_struct work; enum led_brightness brightness; bool enable; bool bank_A; bool bank_B; u8 pwm_cfg; u8 boost_ctl; u8 ctl_bank_en; u16 *brt_code_table; }; static inline int platform_write_i2c_block(struct i2c_adapter *i2c_bus , u8 page , u8 offset , u16 count , u8 *values ) { struct i2c_msg msg; u8 *buffer; int ret; buffer = kmalloc(count + 1, GFP_KERNEL); if (!buffer) { pr_err("%s:%d buffer allocation failed\n",__FUNCTION__,__LINE__); return -ENOMEM; } buffer[0] = offset; memmove(&buffer[1], values, count); msg.flags = 0; msg.addr = page >> 1; msg.buf = buffer; msg.len = count + 1; ret = i2c_transfer(i2c_bus, &msg, 1); kfree(buffer); if (ret != 1) { pr_err("%s:%d I2c write failed 0x%02x:0x%02x\n" ,__FUNCTION__,__LINE__, page, offset); ret = -EIO; } else ret = 0; return ret; } static int lm3697_init_registers(struct lm3697_data *drvdata) { int err = 0; platform_write_i2c_block(drvdata->adapter, drvdata->addr, LM3697_BOOST_CTL_REG, 0x01, &drvdata->boost_ctl); platform_write_i2c_block(drvdata->adapter, drvdata->addr, LM3697_PWM_CFG_REG, 0x01, &drvdata->pwm_cfg); platform_write_i2c_block(drvdata->adapter, drvdata->addr, LM3697_CTL_B_BANK_EN_REG, 0x01, &drvdata->ctl_bank_en); drvdata->enable = true; PR_DISP_INFO("%s: \n",__func__); return err; } void lm3697_set_brightness(struct lm3697_data *drvdata, int brt_val) { u8 brt_LSB = 0; u8 brt_MSB = 0; int index = 0, remainder; int code, code1, code2; index = brt_val / 10; remainder = brt_val % 10; code1 = drvdata->brt_code_table[index]; code2 = drvdata->brt_code_table[index+1]; code = (code2 - code1) * remainder / 10 + code1; brt_LSB = code % 0x7; brt_MSB = (code >> 3) & 0xFF; if (drvdata->bank_B) { platform_write_i2c_block(drvdata->adapter, drvdata->addr, LM3697_CTL_B_BRIGHTNESS_LSB_REG, 0x01, &brt_LSB); platform_write_i2c_block(drvdata->adapter, drvdata->addr, LM3697_CTL_B_BRIGHTNESS_MSB_REG, 0x01, &brt_MSB); } if (drvdata->enable == false) lm3697_init_registers(drvdata); 
drvdata->brightness = brt_val; if (drvdata->brightness == 0) drvdata->enable = false; PR_DISP_INFO("%s: brt_val=%d code=%d\n",__func__, brt_val, code); } static void __lm3697_work(struct lm3697_data *led, enum led_brightness value) { mutex_lock(&led->lock); lm3697_set_brightness(led, value); mutex_unlock(&led->lock); } static void lm3697_work(struct work_struct *work) { struct lm3697_data *drvdata = container_of(work, struct lm3697_data, work); __lm3697_work(drvdata, drvdata->led_dev.brightness); return; } static void lm3697_brightness_set(struct led_classdev *led_cdev, enum led_brightness brt_val) { struct lm3697_data *drvdata; drvdata = container_of(led_cdev, struct lm3697_data, led_dev); schedule_work(&drvdata->work); } static int lm3697_get_dt_data(struct device *dev, struct lm3697_data *drvdata) { int rc; u32 tmp; struct device_node *of_node = NULL; int len; const char *data; u32 *buf; int i = 0; of_node = dev->of_node; rc = of_property_read_u32(of_node, "boost-ctl", &tmp); if (rc) { pr_err("%s:%d, dt not specified\n", __func__, __LINE__); return -EINVAL; } drvdata->boost_ctl = (!rc ? tmp : 0); pr_debug("%s : boost_ctl=0x%x\n",__func__, drvdata->boost_ctl); rc = of_property_read_u32(of_node, "pwm-cfg", &tmp); if (rc) { pr_err("%s:%d, dt not specified\n", __func__, __LINE__); return -EINVAL; } drvdata->pwm_cfg = (!rc ? tmp : 0); pr_debug("%s : pwm_cfg=0x%x\n",__func__, drvdata->pwm_cfg); rc = of_property_read_u32(of_node, "ctl-bank-en", &tmp); if (rc) { pr_err("%s:%d, dt not specified\n", __func__, __LINE__); return -EINVAL; } drvdata->ctl_bank_en = (!rc ? tmp : 0); pr_debug("%s : ctl_bank_en=0x%x\n",__func__, drvdata->ctl_bank_en); if (drvdata->ctl_bank_en & 0x01) drvdata->bank_A = true; if (drvdata->ctl_bank_en & 0x02) drvdata->bank_B = true; pr_debug("%s : bank_A=%d bank_B=%d\n",__func__, drvdata->bank_A, drvdata->bank_B); data = of_get_property(of_node, "brt-code-table", &len); if (!data) { pr_err("%s: read brt-code-table failed\n", __func__); return -ENOMEM; } len /= sizeof(u32); drvdata->led_dev.max_brightness = 10 * (len - 1); pr_debug("%s : max_brightness=%d\n",__func__, drvdata->led_dev.max_brightness); buf = kzalloc(len * sizeof(u32), GFP_KERNEL); if (!buf) return -ENOMEM; rc = of_property_read_u32_array(of_node, "brt-code-table", buf, len); if (rc) { pr_err("%s:%d, dt not specified\n",__func__, __LINE__); rc = -EINVAL; goto end; } drvdata->brt_code_table = kzalloc(len * sizeof(u16), GFP_KERNEL); if (!drvdata->brt_code_table) { pr_err("%s:%d, allocate memory failed\n",__func__, __LINE__); rc = -ENOMEM; goto end; } for (i=0; i < len; i++) { drvdata->brt_code_table[i] = (u16) buf[i]; pr_debug("%s : buf=%d i=%d\n",__func__, buf[i], i); } end: kfree(buf); return rc; } static int __devinit lm3697_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct lm3697_data *drvdata; int err = 0; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { pr_err("%s : I2C_FUNC_I2C not supported\n", __func__); err = -EIO; goto err_out; } if (!client->dev.of_node) { pr_err("%s : no device node\n", __func__); err = -ENOMEM; goto err_out; } drvdata = kzalloc(sizeof(struct lm3697_data), GFP_KERNEL); if (drvdata == NULL) { pr_err("%s : kzalloc failed\n", __func__); err = -ENOMEM; goto err_out; } drvdata->client = client; drvdata->adapter = client->adapter; drvdata->addr = client->addr; drvdata->brightness = LED_OFF; drvdata->enable = true; drvdata->led_dev.default_trigger = "bl-led-i2c-trigger"; drvdata->led_dev.name = LM3697_LED_DEV; drvdata->led_dev.brightness_set = 
lm3697_brightness_set; mutex_init(&drvdata->lock); INIT_WORK(&drvdata->work, lm3697_work); err = lm3697_get_dt_data(&client->dev, drvdata); if(err < 0) { pr_err("%s : get dt failed\n", __func__); err = -ENOMEM; goto err_init; } i2c_set_clientdata(client, drvdata); err = led_classdev_register(&client->dev, &drvdata->led_dev); if (err < 0) { pr_err("%s : Register led class failed\n", __func__); err = -ENODEV; goto err_init; } PR_DISP_INFO("%s \n",__func__); return 0; err_init: kfree(drvdata); err_out: return err; } static int __devexit lm3697_remove(struct i2c_client *client) { struct lm3697_data *drvdata = i2c_get_clientdata(client); led_classdev_unregister(&drvdata->led_dev); kfree(drvdata); return 0; } static const struct i2c_device_id lm3697_id[] = { {LM3697_NAME, 0}, {} }; static struct of_device_id match_table[] = { {.compatible = "ti-lm3697",} }; MODULE_DEVICE_TABLE(i2c, lm3697_id); static struct i2c_driver lm3697_i2c_driver = { .probe = lm3697_probe, .remove = __devexit_p(lm3697_remove), .id_table = lm3697_id, .driver = { .name = LM3697_NAME, .owner = THIS_MODULE, .of_match_table = match_table, }, }; module_i2c_driver(lm3697_i2c_driver); MODULE_DESCRIPTION("Back Light driver for LM3697"); MODULE_LICENSE("GPL v2");
gpl-2.0
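The backlight driver above maps the LED-class brightness onto an 11-bit LM3697 code by linearly interpolating a calibration table read from device tree, then programs the result through the two brightness registers. A minimal sketch of that mapping is shown below, assuming the usual LM3697 register layout of code bits [2:0] in the LSB register and bits [10:3] in the MSB register; the table contents are invented for illustration, and note that the driver's "code % 0x7" for the LSB looks like it was meant to be a bitwise "code & 0x07".

#include <stdint.h>

/* Illustrative calibration table: index i covers brightness i*10 .. i*10+9. */
static const uint16_t brt_code_table[] = {
	0, 96, 192, 320, 480, 672, 896, 1152, 1440, 1760, 2047
};

/* Interpolate an 11-bit brightness code and split it into the LSB/MSB
 * register values (assumed bits [2:0] and [10:3] respectively). */
static void brightness_to_regs(int brt_val, uint8_t *lsb, uint8_t *msb)
{
	int last = (int)(sizeof(brt_code_table) / sizeof(brt_code_table[0])) - 1;
	int index = brt_val / 10;
	int rem = brt_val % 10;
	int code;

	if (index >= last) {
		code = brt_code_table[last];            /* clamp at the table top */
	} else {
		int lo = brt_code_table[index];
		int hi = brt_code_table[index + 1];
		code = lo + (hi - lo) * rem / 10;       /* linear interpolation */
	}
	*lsb = code & 0x07;                             /* -> CTL_x_BRIGHTNESS_LSB */
	*msb = (code >> 3) & 0xff;                      /* -> CTL_x_BRIGHTNESS_MSB */
}

A real driver would then issue two register writes over I2C, much as platform_write_i2c_block() is used above.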
ultrasystem/uavlinux-x3
fs/xfs/xfs_mount.c
124
52159
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_bit.h" #include "xfs_inum.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_da_format.h" #include "xfs_inode.h" #include "xfs_dir2.h" #include "xfs_ialloc.h" #include "xfs_alloc.h" #include "xfs_rtalloc.h" #include "xfs_bmap.h" #include "xfs_trans.h" #include "xfs_trans_priv.h" #include "xfs_log.h" #include "xfs_error.h" #include "xfs_quota.h" #include "xfs_fsops.h" #include "xfs_trace.h" #include "xfs_icache.h" #include "xfs_dinode.h" #ifdef HAVE_PERCPU_SB STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, int); STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t, int); STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t); #else #define xfs_icsb_balance_counter(mp, a, b) do { } while (0) #define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0) #endif static DEFINE_MUTEX(xfs_uuid_table_mutex); static int xfs_uuid_table_size; static uuid_t *xfs_uuid_table; /* * See if the UUID is unique among mounted XFS filesystems. * Mount fails if UUID is nil or a FS with the same UUID is already mounted. 
*/ STATIC int xfs_uuid_mount( struct xfs_mount *mp) { uuid_t *uuid = &mp->m_sb.sb_uuid; int hole, i; if (mp->m_flags & XFS_MOUNT_NOUUID) return 0; if (uuid_is_nil(uuid)) { xfs_warn(mp, "Filesystem has nil UUID - can't mount"); return XFS_ERROR(EINVAL); } mutex_lock(&xfs_uuid_table_mutex); for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) { if (uuid_is_nil(&xfs_uuid_table[i])) { hole = i; continue; } if (uuid_equal(uuid, &xfs_uuid_table[i])) goto out_duplicate; } if (hole < 0) { xfs_uuid_table = kmem_realloc(xfs_uuid_table, (xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table), xfs_uuid_table_size * sizeof(*xfs_uuid_table), KM_SLEEP); hole = xfs_uuid_table_size++; } xfs_uuid_table[hole] = *uuid; mutex_unlock(&xfs_uuid_table_mutex); return 0; out_duplicate: mutex_unlock(&xfs_uuid_table_mutex); xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid); return XFS_ERROR(EINVAL); } STATIC void xfs_uuid_unmount( struct xfs_mount *mp) { uuid_t *uuid = &mp->m_sb.sb_uuid; int i; if (mp->m_flags & XFS_MOUNT_NOUUID) return; mutex_lock(&xfs_uuid_table_mutex); for (i = 0; i < xfs_uuid_table_size; i++) { if (uuid_is_nil(&xfs_uuid_table[i])) continue; if (!uuid_equal(uuid, &xfs_uuid_table[i])) continue; memset(&xfs_uuid_table[i], 0, sizeof(uuid_t)); break; } ASSERT(i < xfs_uuid_table_size); mutex_unlock(&xfs_uuid_table_mutex); } STATIC void __xfs_free_perag( struct rcu_head *head) { struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head); ASSERT(atomic_read(&pag->pag_ref) == 0); kmem_free(pag); } /* * Free up the per-ag resources associated with the mount structure. */ STATIC void xfs_free_perag( xfs_mount_t *mp) { xfs_agnumber_t agno; struct xfs_perag *pag; for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { spin_lock(&mp->m_perag_lock); pag = radix_tree_delete(&mp->m_perag_tree, agno); spin_unlock(&mp->m_perag_lock); ASSERT(pag); ASSERT(atomic_read(&pag->pag_ref) == 0); call_rcu(&pag->rcu_head, __xfs_free_perag); } } /* * Check size of device based on the (data/realtime) block count. * Note: this check is used by the growfs code as well as mount. */ int xfs_sb_validate_fsb_count( xfs_sb_t *sbp, __uint64_t nblocks) { ASSERT(PAGE_SHIFT >= sbp->sb_blocklog); ASSERT(sbp->sb_blocklog >= BBSHIFT); #if XFS_BIG_BLKNOS /* Limited by ULONG_MAX of page cache index */ if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX) return EFBIG; #else /* Limited by UINT_MAX of sectors */ if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX) return EFBIG; #endif return 0; } int xfs_initialize_perag( xfs_mount_t *mp, xfs_agnumber_t agcount, xfs_agnumber_t *maxagi) { xfs_agnumber_t index; xfs_agnumber_t first_initialised = 0; xfs_perag_t *pag; xfs_agino_t agino; xfs_ino_t ino; xfs_sb_t *sbp = &mp->m_sb; int error = -ENOMEM; /* * Walk the current per-ag tree so we don't try to initialise AGs * that already exist (growfs case). Allocate and insert all the * AGs we don't find ready for initialisation. 
*/ for (index = 0; index < agcount; index++) { pag = xfs_perag_get(mp, index); if (pag) { xfs_perag_put(pag); continue; } if (!first_initialised) first_initialised = index; pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL); if (!pag) goto out_unwind; pag->pag_agno = index; pag->pag_mount = mp; spin_lock_init(&pag->pag_ici_lock); mutex_init(&pag->pag_ici_reclaim_lock); INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); spin_lock_init(&pag->pag_buf_lock); pag->pag_buf_tree = RB_ROOT; if (radix_tree_preload(GFP_NOFS)) goto out_unwind; spin_lock(&mp->m_perag_lock); if (radix_tree_insert(&mp->m_perag_tree, index, pag)) { BUG(); spin_unlock(&mp->m_perag_lock); radix_tree_preload_end(); error = -EEXIST; goto out_unwind; } spin_unlock(&mp->m_perag_lock); radix_tree_preload_end(); } /* * If we mount with the inode64 option, or no inode overflows * the legacy 32-bit address space clear the inode32 option. */ agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0); ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino); if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32) mp->m_flags |= XFS_MOUNT_32BITINODES; else mp->m_flags &= ~XFS_MOUNT_32BITINODES; if (mp->m_flags & XFS_MOUNT_32BITINODES) index = xfs_set_inode32(mp); else index = xfs_set_inode64(mp); if (maxagi) *maxagi = index; return 0; out_unwind: kmem_free(pag); for (; index > first_initialised; index--) { pag = radix_tree_delete(&mp->m_perag_tree, index); kmem_free(pag); } return error; } /* * xfs_readsb * * Does the initial read of the superblock. */ int xfs_readsb( struct xfs_mount *mp, int flags) { unsigned int sector_size; struct xfs_buf *bp; struct xfs_sb *sbp = &mp->m_sb; int error; int loud = !(flags & XFS_MFSI_QUIET); const struct xfs_buf_ops *buf_ops; ASSERT(mp->m_sb_bp == NULL); ASSERT(mp->m_ddev_targp != NULL); /* * For the initial read, we must guess at the sector * size based on the block device. It's enough to * get the sb_sectsize out of the superblock and * then reread with the proper length. * We don't verify it yet, because it may not be complete. */ sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); buf_ops = NULL; /* * Allocate a (locked) buffer to hold the superblock. * This will be kept around at all times to optimize * access to the superblock. */ reread: bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size), 0, buf_ops); if (!bp) { if (loud) xfs_warn(mp, "SB buffer read failed"); return EIO; } if (bp->b_error) { error = bp->b_error; if (loud) xfs_warn(mp, "SB validate failed with error %d.", error); goto release_buf; } /* * Initialize the mount structure from the superblock. */ xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); /* * If we haven't validated the superblock, do so now before we try * to check the sector size and reread the superblock appropriately. */ if (sbp->sb_magicnum != XFS_SB_MAGIC) { if (loud) xfs_warn(mp, "Invalid superblock magic number"); error = EINVAL; goto release_buf; } /* * We must be able to do sector-sized and sector-aligned IO. */ if (sector_size > sbp->sb_sectsize) { if (loud) xfs_warn(mp, "device supports %u byte sectors (not %u)", sector_size, sbp->sb_sectsize); error = ENOSYS; goto release_buf; } if (buf_ops == NULL) { /* * Re-read the superblock so the buffer is correctly sized, * and properly verified. */ xfs_buf_relse(bp); sector_size = sbp->sb_sectsize; buf_ops = loud ? 
&xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops; goto reread; } /* Initialize per-cpu counters */ xfs_icsb_reinit_counters(mp); /* no need to be quiet anymore, so reset the buf ops */ bp->b_ops = &xfs_sb_buf_ops; mp->m_sb_bp = bp; xfs_buf_unlock(bp); return 0; release_buf: xfs_buf_relse(bp); return error; } /* * Update alignment values based on mount options and sb values */ STATIC int xfs_update_alignment(xfs_mount_t *mp) { xfs_sb_t *sbp = &(mp->m_sb); if (mp->m_dalign) { /* * If stripe unit and stripe width are not multiples * of the fs blocksize turn off alignment. */ if ((BBTOB(mp->m_dalign) & mp->m_blockmask) || (BBTOB(mp->m_swidth) & mp->m_blockmask)) { xfs_warn(mp, "alignment check failed: sunit/swidth vs. blocksize(%d)", sbp->sb_blocksize); return XFS_ERROR(EINVAL); } else { /* * Convert the stripe unit and width to FSBs. */ mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign); if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) { xfs_warn(mp, "alignment check failed: sunit/swidth vs. agsize(%d)", sbp->sb_agblocks); return XFS_ERROR(EINVAL); } else if (mp->m_dalign) { mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); } else { xfs_warn(mp, "alignment check failed: sunit(%d) less than bsize(%d)", mp->m_dalign, sbp->sb_blocksize); return XFS_ERROR(EINVAL); } } /* * Update superblock with new values * and log changes */ if (xfs_sb_version_hasdalign(sbp)) { if (sbp->sb_unit != mp->m_dalign) { sbp->sb_unit = mp->m_dalign; mp->m_update_flags |= XFS_SB_UNIT; } if (sbp->sb_width != mp->m_swidth) { sbp->sb_width = mp->m_swidth; mp->m_update_flags |= XFS_SB_WIDTH; } } else { xfs_warn(mp, "cannot change alignment: superblock does not support data alignment"); return XFS_ERROR(EINVAL); } } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && xfs_sb_version_hasdalign(&mp->m_sb)) { mp->m_dalign = sbp->sb_unit; mp->m_swidth = sbp->sb_width; } return 0; } /* * Set the maximum inode count for this filesystem */ STATIC void xfs_set_maxicount(xfs_mount_t *mp) { xfs_sb_t *sbp = &(mp->m_sb); __uint64_t icount; if (sbp->sb_imax_pct) { /* * Make sure the maximum inode count is a multiple * of the units we allocate inodes in. */ icount = sbp->sb_dblocks * sbp->sb_imax_pct; do_div(icount, 100); do_div(icount, mp->m_ialloc_blks); mp->m_maxicount = (icount * mp->m_ialloc_blks) << sbp->sb_inopblog; } else { mp->m_maxicount = 0; } } /* * Set the default minimum read and write sizes unless * already specified in a mount option. * We use smaller I/O sizes when the file system * is being used for NFS service (wsync mount option). */ STATIC void xfs_set_rw_sizes(xfs_mount_t *mp) { xfs_sb_t *sbp = &(mp->m_sb); int readio_log, writeio_log; if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { if (mp->m_flags & XFS_MOUNT_WSYNC) { readio_log = XFS_WSYNC_READIO_LOG; writeio_log = XFS_WSYNC_WRITEIO_LOG; } else { readio_log = XFS_READIO_LOG_LARGE; writeio_log = XFS_WRITEIO_LOG_LARGE; } } else { readio_log = mp->m_readio_log; writeio_log = mp->m_writeio_log; } if (sbp->sb_blocklog > readio_log) { mp->m_readio_log = sbp->sb_blocklog; } else { mp->m_readio_log = readio_log; } mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog); if (sbp->sb_blocklog > writeio_log) { mp->m_writeio_log = sbp->sb_blocklog; } else { mp->m_writeio_log = writeio_log; } mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog); } /* * precalculate the low space thresholds for dynamic speculative preallocation. 
*/ void xfs_set_low_space_thresholds( struct xfs_mount *mp) { int i; for (i = 0; i < XFS_LOWSP_MAX; i++) { __uint64_t space = mp->m_sb.sb_dblocks; do_div(space, 100); mp->m_low_space[i] = space * (i + 1); } } /* * Set whether we're using inode alignment. */ STATIC void xfs_set_inoalignment(xfs_mount_t *mp) { if (xfs_sb_version_hasalign(&mp->m_sb) && mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1; else mp->m_inoalign_mask = 0; /* * If we are using stripe alignment, check whether * the stripe unit is a multiple of the inode alignment */ if (mp->m_dalign && mp->m_inoalign_mask && !(mp->m_dalign & mp->m_inoalign_mask)) mp->m_sinoalign = mp->m_dalign; else mp->m_sinoalign = 0; } /* * Check that the data (and log if separate) is an ok size. */ STATIC int xfs_check_sizes(xfs_mount_t *mp) { xfs_buf_t *bp; xfs_daddr_t d; d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { xfs_warn(mp, "filesystem size mismatch detected"); return XFS_ERROR(EFBIG); } bp = xfs_buf_read_uncached(mp->m_ddev_targp, d - XFS_FSS_TO_BB(mp, 1), XFS_FSS_TO_BB(mp, 1), 0, NULL); if (!bp) { xfs_warn(mp, "last sector read failed"); return EIO; } xfs_buf_relse(bp); if (mp->m_logdev_targp != mp->m_ddev_targp) { d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { xfs_warn(mp, "log size mismatch detected"); return XFS_ERROR(EFBIG); } bp = xfs_buf_read_uncached(mp->m_logdev_targp, d - XFS_FSB_TO_BB(mp, 1), XFS_FSB_TO_BB(mp, 1), 0, NULL); if (!bp) { xfs_warn(mp, "log device read failed"); return EIO; } xfs_buf_relse(bp); } return 0; } /* * Clear the quotaflags in memory and in the superblock. */ int xfs_mount_reset_sbqflags( struct xfs_mount *mp) { int error; struct xfs_trans *tp; mp->m_qflags = 0; /* * It is OK to look at sb_qflags here in mount path, * without m_sb_lock. */ if (mp->m_sb.sb_qflags == 0) return 0; spin_lock(&mp->m_sb_lock); mp->m_sb.sb_qflags = 0; spin_unlock(&mp->m_sb_lock); /* * If the fs is readonly, let the incore superblock run * with quotas off but don't flush the update out to disk */ if (mp->m_flags & XFS_MOUNT_RDONLY) return 0; tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0); if (error) { xfs_trans_cancel(tp, 0); xfs_alert(mp, "%s: Superblock update failed!", __func__); return error; } xfs_mod_sb(tp, XFS_SB_QFLAGS); return xfs_trans_commit(tp, 0); } __uint64_t xfs_default_resblks(xfs_mount_t *mp) { __uint64_t resblks; /* * We default to 5% or 8192 fsbs of space reserved, whichever is * smaller. This is intended to cover concurrent allocation * transactions when we initially hit enospc. These each require a 4 * block reservation. Hence by default we cover roughly 2000 concurrent * allocation reservations. 
*/ resblks = mp->m_sb.sb_dblocks; do_div(resblks, 20); resblks = min_t(__uint64_t, resblks, 8192); return resblks; } /* * This function does the following on an initial mount of a file system: * - reads the superblock from disk and init the mount struct * - if we're a 32-bit kernel, do a size check on the superblock * so we don't mount terabyte filesystems * - init mount struct realtime fields * - allocate inode hash table for fs * - init directory manager * - perform recovery and init the log manager */ int xfs_mountfs( xfs_mount_t *mp) { xfs_sb_t *sbp = &(mp->m_sb); xfs_inode_t *rip; __uint64_t resblks; uint quotamount = 0; uint quotaflags = 0; int error = 0; xfs_sb_mount_common(mp, sbp); /* * Check for a mismatched features2 values. Older kernels * read & wrote into the wrong sb offset for sb_features2 * on some platforms due to xfs_sb_t not being 64bit size aligned * when sb_features2 was added, which made older superblock * reading/writing routines swap it as a 64-bit value. * * For backwards compatibility, we make both slots equal. * * If we detect a mismatched field, we OR the set bits into the * existing features2 field in case it has already been modified; we * don't want to lose any features. We then update the bad location * with the ORed value so that older kernels will see any features2 * flags, and mark the two fields as needing updates once the * transaction subsystem is online. */ if (xfs_sb_has_mismatched_features2(sbp)) { xfs_warn(mp, "correcting sb_features alignment problem"); sbp->sb_features2 |= sbp->sb_bad_features2; sbp->sb_bad_features2 = sbp->sb_features2; mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2; /* * Re-check for ATTR2 in case it was found in bad_features2 * slot. */ if (xfs_sb_version_hasattr2(&mp->m_sb) && !(mp->m_flags & XFS_MOUNT_NOATTR2)) mp->m_flags |= XFS_MOUNT_ATTR2; } if (xfs_sb_version_hasattr2(&mp->m_sb) && (mp->m_flags & XFS_MOUNT_NOATTR2)) { xfs_sb_version_removeattr2(&mp->m_sb); mp->m_update_flags |= XFS_SB_FEATURES2; /* update sb_versionnum for the clearing of the morebits */ if (!sbp->sb_features2) mp->m_update_flags |= XFS_SB_VERSIONNUM; } /* * Check if sb_agblocks is aligned at stripe boundary * If sb_agblocks is NOT aligned turn off m_dalign since * allocator alignment is within an ag, therefore ag has * to be aligned at stripe boundary. */ error = xfs_update_alignment(mp); if (error) goto out; xfs_alloc_compute_maxlevels(mp); xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK); xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK); xfs_ialloc_compute_maxlevels(mp); xfs_set_maxicount(mp); error = xfs_uuid_mount(mp); if (error) goto out; /* * Set the minimum read and write sizes */ xfs_set_rw_sizes(mp); /* set the low space thresholds for dynamic preallocation */ xfs_set_low_space_thresholds(mp); /* * Set the inode cluster size. * This may still be overridden by the file system * block size if it is larger than the chosen cluster size. * * For v5 filesystems, scale the cluster size with the inode size to * keep a constant ratio of inode per cluster buffer, but only if mkfs * has set the inode alignment value appropriately for larger cluster * sizes. 
*/ mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE; if (xfs_sb_version_hascrc(&mp->m_sb)) { int new_size = mp->m_inode_cluster_size; new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE; if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size)) mp->m_inode_cluster_size = new_size; xfs_info(mp, "Using inode cluster size of %d bytes", mp->m_inode_cluster_size); } /* * Set inode alignment fields */ xfs_set_inoalignment(mp); /* * Check that the data (and log if separate) is an ok size. */ error = xfs_check_sizes(mp); if (error) goto out_remove_uuid; /* * Initialize realtime fields in the mount structure */ error = xfs_rtmount_init(mp); if (error) { xfs_warn(mp, "RT mount failed"); goto out_remove_uuid; } /* * Copies the low order bits of the timestamp and the randomly * set "sequence" number out of a UUID. */ uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid); mp->m_dmevmask = 0; /* not persistent; set after each mount */ xfs_dir_mount(mp); /* * Initialize the attribute manager's entries. */ mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100; /* * Initialize the precomputed transaction reservations values. */ xfs_trans_init(mp); /* * Allocate and initialize the per-ag data. */ spin_lock_init(&mp->m_perag_lock); INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC); error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi); if (error) { xfs_warn(mp, "Failed per-ag init: %d", error); goto out_remove_uuid; } if (!sbp->sb_logblocks) { xfs_warn(mp, "no log defined"); XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto out_free_perag; } /* * log's mount-time initialization. Perform 1st part recovery if needed */ error = xfs_log_mount(mp, mp->m_logdev_targp, XFS_FSB_TO_DADDR(mp, sbp->sb_logstart), XFS_FSB_TO_BB(mp, sbp->sb_logblocks)); if (error) { xfs_warn(mp, "log mount failed"); goto out_fail_wait; } /* * Now the log is mounted, we know if it was an unclean shutdown or * not. If it was, with the first phase of recovery has completed, we * have consistent AG blocks on disk. We have not recovered EFIs yet, * but they are recovered transactionally in the second recovery phase * later. * * Hence we can safely re-initialise incore superblock counters from * the per-ag data. These may not be correct if the filesystem was not * cleanly unmounted, so we need to wait for recovery to finish before * doing this. * * If the filesystem was cleanly unmounted, then we can trust the * values in the superblock to be correct and we don't need to do * anything here. * * If we are currently making the filesystem, the initialisation will * fail as the perag data is in an undefined state. */ if (xfs_sb_version_haslazysbcount(&mp->m_sb) && !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) && !mp->m_sb.sb_inprogress) { error = xfs_initialize_perag_data(mp, sbp->sb_agcount); if (error) goto out_fail_wait; } /* * Get and sanity-check the root inode. * Save the pointer to it in the mount structure. 
*/ error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip); if (error) { xfs_warn(mp, "failed to read root inode"); goto out_log_dealloc; } ASSERT(rip != NULL); if (unlikely(!S_ISDIR(rip->i_d.di_mode))) { xfs_warn(mp, "corrupted root inode %llu: not a directory", (unsigned long long)rip->i_ino); xfs_iunlock(rip, XFS_ILOCK_EXCL); XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto out_rele_rip; } mp->m_rootip = rip; /* save it */ xfs_iunlock(rip, XFS_ILOCK_EXCL); /* * Initialize realtime inode pointers in the mount structure */ error = xfs_rtmount_inodes(mp); if (error) { /* * Free up the root inode. */ xfs_warn(mp, "failed to read RT inodes"); goto out_rele_rip; } /* * If this is a read-only mount defer the superblock updates until * the next remount into writeable mode. Otherwise we would never * perform the update e.g. for the root filesystem. */ if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) { error = xfs_mount_log_sb(mp, mp->m_update_flags); if (error) { xfs_warn(mp, "failed to write sb changes"); goto out_rtunmount; } } /* * Initialise the XFS quota management subsystem for this mount */ if (XFS_IS_QUOTA_RUNNING(mp)) { error = xfs_qm_newmount(mp, &quotamount, &quotaflags); if (error) goto out_rtunmount; } else { ASSERT(!XFS_IS_QUOTA_ON(mp)); /* * If a file system had quotas running earlier, but decided to * mount without -o uquota/pquota/gquota options, revoke the * quotachecked license. */ if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) { xfs_notice(mp, "resetting quota flags"); error = xfs_mount_reset_sbqflags(mp); if (error) return error; } } /* * Finish recovering the file system. This part needed to be * delayed until after the root and real-time bitmap inodes * were consistently read in. */ error = xfs_log_mount_finish(mp); if (error) { xfs_warn(mp, "log mount finish failed"); goto out_rtunmount; } /* * Complete the quota initialisation, post-log-replay component. */ if (quotamount) { ASSERT(mp->m_qflags == 0); mp->m_qflags = quotaflags; xfs_qm_mount_quotas(mp); } /* * Now we are mounted, reserve a small amount of unused space for * privileged transactions. This is needed so that transaction * space required for critical operations can dip into this pool * when at ENOSPC. This is needed for operations like create with * attr, unwritten extent conversion at ENOSPC, etc. Data allocations * are not allowed to use this reserved space. * * This may drive us straight to ENOSPC on mount, but that implies * we were already there on the last unmount. Warn if this occurs. */ if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { resblks = xfs_default_resblks(mp); error = xfs_reserve_blocks(mp, &resblks, NULL); if (error) xfs_warn(mp, "Unable to allocate reserve blocks. Continuing without reserve pool."); } return 0; out_rtunmount: xfs_rtunmount_inodes(mp); out_rele_rip: IRELE(rip); out_log_dealloc: xfs_log_unmount(mp); out_fail_wait: if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) xfs_wait_buftarg(mp->m_logdev_targp); xfs_wait_buftarg(mp->m_ddev_targp); out_free_perag: xfs_free_perag(mp); out_remove_uuid: xfs_uuid_unmount(mp); out: return error; } /* * This flushes out the inodes,dquots and the superblock, unmounts the * log and makes sure that incore structures are freed. 
*/ void xfs_unmountfs( struct xfs_mount *mp) { __uint64_t resblks; int error; cancel_delayed_work_sync(&mp->m_eofblocks_work); xfs_qm_unmount_quotas(mp); xfs_rtunmount_inodes(mp); IRELE(mp->m_rootip); /* * We can potentially deadlock here if we have an inode cluster * that has been freed has its buffer still pinned in memory because * the transaction is still sitting in a iclog. The stale inodes * on that buffer will have their flush locks held until the * transaction hits the disk and the callbacks run. the inode * flush takes the flush lock unconditionally and with nothing to * push out the iclog we will never get that unlocked. hence we * need to force the log first. */ xfs_log_force(mp, XFS_LOG_SYNC); /* * Flush all pending changes from the AIL. */ xfs_ail_push_all_sync(mp->m_ail); /* * And reclaim all inodes. At this point there should be no dirty * inodes and none should be pinned or locked, but use synchronous * reclaim just to be sure. We can stop background inode reclaim * here as well if it is still running. */ cancel_delayed_work_sync(&mp->m_reclaim_work); xfs_reclaim_inodes(mp, SYNC_WAIT); xfs_qm_unmount(mp); /* * Unreserve any blocks we have so that when we unmount we don't account * the reserved free space as used. This is really only necessary for * lazy superblock counting because it trusts the incore superblock * counters to be absolutely correct on clean unmount. * * We don't bother correcting this elsewhere for lazy superblock * counting because on mount of an unclean filesystem we reconstruct the * correct counter value and this is irrelevant. * * For non-lazy counter filesystems, this doesn't matter at all because * we only every apply deltas to the superblock and hence the incore * value does not matter.... */ resblks = 0; error = xfs_reserve_blocks(mp, &resblks, NULL); if (error) xfs_warn(mp, "Unable to free reserved block pool. " "Freespace may not be correct on next mount."); error = xfs_log_sbcount(mp); if (error) xfs_warn(mp, "Unable to update superblock counters. " "Freespace may not be correct on next mount."); xfs_log_unmount(mp); xfs_uuid_unmount(mp); #if defined(DEBUG) xfs_errortag_clearall(mp, 0); #endif xfs_free_perag(mp); } int xfs_fs_writable(xfs_mount_t *mp) { return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY)); } /* * xfs_log_sbcount * * Sync the superblock counters to disk. * * Note this code can be called during the process of freezing, so * we may need to use the transaction allocator which does not * block when the transaction subsystem is in its frozen state. */ int xfs_log_sbcount(xfs_mount_t *mp) { xfs_trans_t *tp; int error; if (!xfs_fs_writable(mp)) return 0; xfs_icsb_sync_counters(mp, 0); /* * we don't need to do this if we are updating the superblock * counters on every modification. */ if (!xfs_sb_version_haslazysbcount(&mp->m_sb)) return 0; tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP); error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0); if (error) { xfs_trans_cancel(tp, 0); return error; } xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS); xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, 0); return error; } /* * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply * a delta to a specified field in the in-core superblock. Simply * switch on the field indicated and apply the delta to that field. * Fields are not allowed to dip below zero, so if the delta would * do this do not apply it and return EINVAL. 
* * The m_sb_lock must be held when this routine is called. */ STATIC int xfs_mod_incore_sb_unlocked( xfs_mount_t *mp, xfs_sb_field_t field, int64_t delta, int rsvd) { int scounter; /* short counter for 32 bit fields */ long long lcounter; /* long counter for 64 bit fields */ long long res_used, rem; /* * With the in-core superblock spin lock held, switch * on the indicated field. Apply the delta to the * proper field. If the fields value would dip below * 0, then do not apply the delta and return EINVAL. */ switch (field) { case XFS_SBS_ICOUNT: lcounter = (long long)mp->m_sb.sb_icount; lcounter += delta; if (lcounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_icount = lcounter; return 0; case XFS_SBS_IFREE: lcounter = (long long)mp->m_sb.sb_ifree; lcounter += delta; if (lcounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_ifree = lcounter; return 0; case XFS_SBS_FDBLOCKS: lcounter = (long long) mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); res_used = (long long)(mp->m_resblks - mp->m_resblks_avail); if (delta > 0) { /* Putting blocks back */ if (res_used > delta) { mp->m_resblks_avail += delta; } else { rem = delta - res_used; mp->m_resblks_avail = mp->m_resblks; lcounter += rem; } } else { /* Taking blocks away */ lcounter += delta; if (lcounter >= 0) { mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); return 0; } /* * We are out of blocks, use any available reserved * blocks if were allowed to. */ if (!rsvd) return XFS_ERROR(ENOSPC); lcounter = (long long)mp->m_resblks_avail + delta; if (lcounter >= 0) { mp->m_resblks_avail = lcounter; return 0; } printk_once(KERN_WARNING "Filesystem \"%s\": reserve blocks depleted! " "Consider increasing reserve pool size.", mp->m_fsname); return XFS_ERROR(ENOSPC); } mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); return 0; case XFS_SBS_FREXTENTS: lcounter = (long long)mp->m_sb.sb_frextents; lcounter += delta; if (lcounter < 0) { return XFS_ERROR(ENOSPC); } mp->m_sb.sb_frextents = lcounter; return 0; case XFS_SBS_DBLOCKS: lcounter = (long long)mp->m_sb.sb_dblocks; lcounter += delta; if (lcounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_dblocks = lcounter; return 0; case XFS_SBS_AGCOUNT: scounter = mp->m_sb.sb_agcount; scounter += delta; if (scounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_agcount = scounter; return 0; case XFS_SBS_IMAX_PCT: scounter = mp->m_sb.sb_imax_pct; scounter += delta; if (scounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_imax_pct = scounter; return 0; case XFS_SBS_REXTSIZE: scounter = mp->m_sb.sb_rextsize; scounter += delta; if (scounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_rextsize = scounter; return 0; case XFS_SBS_RBMBLOCKS: scounter = mp->m_sb.sb_rbmblocks; scounter += delta; if (scounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_rbmblocks = scounter; return 0; case XFS_SBS_RBLOCKS: lcounter = (long long)mp->m_sb.sb_rblocks; lcounter += delta; if (lcounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_rblocks = lcounter; return 0; case XFS_SBS_REXTENTS: lcounter = (long long)mp->m_sb.sb_rextents; lcounter += delta; if (lcounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_rextents = lcounter; return 0; case XFS_SBS_REXTSLOG: scounter = mp->m_sb.sb_rextslog; scounter += delta; if (scounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_rextslog = scounter; return 0; default: ASSERT(0); return XFS_ERROR(EINVAL); } } /* * xfs_mod_incore_sb() is used to 
change a field in the in-core * superblock structure by the specified delta. This modification * is protected by the m_sb_lock. Just use the xfs_mod_incore_sb_unlocked() * routine to do the work. */ int xfs_mod_incore_sb( struct xfs_mount *mp, xfs_sb_field_t field, int64_t delta, int rsvd) { int status; #ifdef HAVE_PERCPU_SB ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS); #endif spin_lock(&mp->m_sb_lock); status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); spin_unlock(&mp->m_sb_lock); return status; } /* * Change more than one field in the in-core superblock structure at a time. * * The fields and changes to those fields are specified in the array of * xfs_mod_sb structures passed in. Either all of the specified deltas * will be applied or none of them will. If any modified field dips below 0, * then all modifications will be backed out and EINVAL will be returned. * * Note that this function may not be used for the superblock values that * are tracked with the in-memory per-cpu counters - a direct call to * xfs_icsb_modify_counters is required for these. */ int xfs_mod_incore_sb_batch( struct xfs_mount *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) { xfs_mod_sb_t *msbp; int error = 0; /* * Loop through the array of mod structures and apply each individually. * If any fail, then back out all those which have already been applied. * Do all of this within the scope of the m_sb_lock so that all of the * changes will be atomic. */ spin_lock(&mp->m_sb_lock); for (msbp = msb; msbp < (msb + nmsb); msbp++) { ASSERT(msbp->msb_field < XFS_SBS_ICOUNT || msbp->msb_field > XFS_SBS_FDBLOCKS); error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, msbp->msb_delta, rsvd); if (error) goto unwind; } spin_unlock(&mp->m_sb_lock); return 0; unwind: while (--msbp >= msb) { error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, -msbp->msb_delta, rsvd); ASSERT(error == 0); } spin_unlock(&mp->m_sb_lock); return error; } /* * xfs_getsb() is called to obtain the buffer for the superblock. * The buffer is returned locked and read in from disk. * The buffer should be released with a call to xfs_brelse(). * * If the flags parameter is BUF_TRYLOCK, then we'll only return * the superblock buffer if it can be locked without sleeping. * If it can't then we'll return NULL. */ struct xfs_buf * xfs_getsb( struct xfs_mount *mp, int flags) { struct xfs_buf *bp = mp->m_sb_bp; if (!xfs_buf_trylock(bp)) { if (flags & XBF_TRYLOCK) return NULL; xfs_buf_lock(bp); } xfs_buf_hold(bp); ASSERT(XFS_BUF_ISDONE(bp)); return bp; } /* * Used to free the superblock along various error paths. */ void xfs_freesb( struct xfs_mount *mp) { struct xfs_buf *bp = mp->m_sb_bp; xfs_buf_lock(bp); mp->m_sb_bp = NULL; xfs_buf_relse(bp); } /* * Used to log changes to the superblock unit and width fields which could * be altered by the mount options, as well as any potential sb_features2 * fixup. Only the first superblock is updated. */ int xfs_mount_log_sb( xfs_mount_t *mp, __int64_t fields) { xfs_trans_t *tp; int error; ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID | XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 | XFS_SB_VERSIONNUM)); tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT); error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0); if (error) { xfs_trans_cancel(tp, 0); return error; } xfs_mod_sb(tp, fields); error = xfs_trans_commit(tp, 0); return error; } /* * If the underlying (data/log/rt) device is readonly, there are some * operations that cannot proceed. 
*/ int xfs_dev_is_read_only( struct xfs_mount *mp, char *message) { if (xfs_readonly_buftarg(mp->m_ddev_targp) || xfs_readonly_buftarg(mp->m_logdev_targp) || (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) { xfs_notice(mp, "%s required on read-only device.", message); xfs_notice(mp, "write access unavailable, cannot proceed."); return EROFS; } return 0; } #ifdef HAVE_PERCPU_SB /* * Per-cpu incore superblock counters * * Simple concept, difficult implementation * * Basically, replace the incore superblock counters with a distributed per cpu * counter for contended fields (e.g. free block count). * * Difficulties arise in that the incore sb is used for ENOSPC checking, and * hence needs to be accurately read when we are running low on space. Hence * there is a method to enable and disable the per-cpu counters based on how * much "stuff" is available in them. * * Basically, a counter is enabled if there is enough free resource to justify * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local * ENOSPC), then we disable the counters to synchronise all callers and * re-distribute the available resources. * * If, once we redistributed the available resources, we still get a failure, * we disable the per-cpu counter and go through the slow path. * * The slow path is the current xfs_mod_incore_sb() function. This means that * when we disable a per-cpu counter, we need to drain its resources back to * the global superblock. We do this after disabling the counter to prevent * more threads from queueing up on the counter. * * Essentially, this means that we still need a lock in the fast path to enable * synchronisation between the global counters and the per-cpu counters. This * is not a problem because the lock will be local to a CPU almost all the time * and have little contention except when we get to ENOSPC conditions. * * Basically, this lock becomes a barrier that enables us to lock out the fast * path while we do things like enabling and disabling counters and * synchronising the counters. * * Locking rules: * * 1. m_sb_lock before picking up per-cpu locks * 2. per-cpu locks always picked up via for_each_online_cpu() order * 3. accurate counter sync requires m_sb_lock + per cpu locks * 4. modifying per-cpu counters requires holding per-cpu lock * 5. modifying global counters requires holding m_sb_lock * 6. enabling or disabling a counter requires holding the m_sb_lock * and _none_ of the per-cpu locks. * * Disabled counters are only ever re-enabled by a balance operation * that results in more free resources per CPU than a given threshold. * To ensure counters don't remain disabled, they are rebalanced when * the global resource goes above a higher threshold (i.e. some hysteresis * is present to prevent thrashing). */ #ifdef CONFIG_HOTPLUG_CPU /* * hot-plug CPU notifier support. * * We need a notifier per filesystem as we need to be able to identify * the filesystem to balance the counters out. This is achieved by * having a notifier block embedded in the xfs_mount_t and doing pointer * magic to get the mount pointer from the notifier block address. 
*/ STATIC int xfs_icsb_cpu_notify( struct notifier_block *nfb, unsigned long action, void *hcpu) { xfs_icsb_cnts_t *cntp; xfs_mount_t *mp; mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier); cntp = (xfs_icsb_cnts_t *) per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu); switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: /* Easy Case - initialize the area and locks, and * then rebalance when online does everything else for us. */ memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: xfs_icsb_lock(mp); xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); xfs_icsb_unlock(mp); break; case CPU_DEAD: case CPU_DEAD_FROZEN: /* Disable all the counters, then fold the dead cpu's * count into the total on the global superblock and * re-enable the counters. */ xfs_icsb_lock(mp); spin_lock(&mp->m_sb_lock); xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT); xfs_icsb_disable_counter(mp, XFS_SBS_IFREE); xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS); mp->m_sb.sb_icount += cntp->icsb_icount; mp->m_sb.sb_ifree += cntp->icsb_ifree; mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks; memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0); xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0); xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0); spin_unlock(&mp->m_sb_lock); xfs_icsb_unlock(mp); break; } return NOTIFY_OK; } #endif /* CONFIG_HOTPLUG_CPU */ int xfs_icsb_init_counters( xfs_mount_t *mp) { xfs_icsb_cnts_t *cntp; int i; mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t); if (mp->m_sb_cnts == NULL) return -ENOMEM; for_each_online_cpu(i) { cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); } mutex_init(&mp->m_icsb_mutex); /* * start with all counters disabled so that the * initial balance kicks us off correctly */ mp->m_icsb_counters = -1; #ifdef CONFIG_HOTPLUG_CPU mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify; mp->m_icsb_notifier.priority = 0; register_hotcpu_notifier(&mp->m_icsb_notifier); #endif /* CONFIG_HOTPLUG_CPU */ return 0; } void xfs_icsb_reinit_counters( xfs_mount_t *mp) { xfs_icsb_lock(mp); /* * start with all counters disabled so that the * initial balance kicks us off correctly */ mp->m_icsb_counters = -1; xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); xfs_icsb_unlock(mp); } void xfs_icsb_destroy_counters( xfs_mount_t *mp) { if (mp->m_sb_cnts) { unregister_hotcpu_notifier(&mp->m_icsb_notifier); free_percpu(mp->m_sb_cnts); } mutex_destroy(&mp->m_icsb_mutex); } STATIC void xfs_icsb_lock_cntr( xfs_icsb_cnts_t *icsbp) { while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) { ndelay(1000); } } STATIC void xfs_icsb_unlock_cntr( xfs_icsb_cnts_t *icsbp) { clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags); } STATIC void xfs_icsb_lock_all_counters( xfs_mount_t *mp) { xfs_icsb_cnts_t *cntp; int i; for_each_online_cpu(i) { cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); xfs_icsb_lock_cntr(cntp); } } STATIC void xfs_icsb_unlock_all_counters( xfs_mount_t *mp) { xfs_icsb_cnts_t *cntp; int i; for_each_online_cpu(i) { cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); xfs_icsb_unlock_cntr(cntp); } } STATIC void xfs_icsb_count( xfs_mount_t *mp, xfs_icsb_cnts_t *cnt, int flags) { xfs_icsb_cnts_t *cntp; int i; memset(cnt, 0, 
sizeof(xfs_icsb_cnts_t)); if (!(flags & XFS_ICSB_LAZY_COUNT)) xfs_icsb_lock_all_counters(mp); for_each_online_cpu(i) { cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); cnt->icsb_icount += cntp->icsb_icount; cnt->icsb_ifree += cntp->icsb_ifree; cnt->icsb_fdblocks += cntp->icsb_fdblocks; } if (!(flags & XFS_ICSB_LAZY_COUNT)) xfs_icsb_unlock_all_counters(mp); } STATIC int xfs_icsb_counter_disabled( xfs_mount_t *mp, xfs_sb_field_t field) { ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); return test_bit(field, &mp->m_icsb_counters); } STATIC void xfs_icsb_disable_counter( xfs_mount_t *mp, xfs_sb_field_t field) { xfs_icsb_cnts_t cnt; ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); /* * If we are already disabled, then there is nothing to do * here. We check before locking all the counters to avoid * the expensive lock operation when being called in the * slow path and the counter is already disabled. This is * safe because the only time we set or clear this state is under * the m_icsb_mutex. */ if (xfs_icsb_counter_disabled(mp, field)) return; xfs_icsb_lock_all_counters(mp); if (!test_and_set_bit(field, &mp->m_icsb_counters)) { /* drain back to superblock */ xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT); switch(field) { case XFS_SBS_ICOUNT: mp->m_sb.sb_icount = cnt.icsb_icount; break; case XFS_SBS_IFREE: mp->m_sb.sb_ifree = cnt.icsb_ifree; break; case XFS_SBS_FDBLOCKS: mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; break; default: BUG(); } } xfs_icsb_unlock_all_counters(mp); } STATIC void xfs_icsb_enable_counter( xfs_mount_t *mp, xfs_sb_field_t field, uint64_t count, uint64_t resid) { xfs_icsb_cnts_t *cntp; int i; ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); xfs_icsb_lock_all_counters(mp); for_each_online_cpu(i) { cntp = per_cpu_ptr(mp->m_sb_cnts, i); switch (field) { case XFS_SBS_ICOUNT: cntp->icsb_icount = count + resid; break; case XFS_SBS_IFREE: cntp->icsb_ifree = count + resid; break; case XFS_SBS_FDBLOCKS: cntp->icsb_fdblocks = count + resid; break; default: BUG(); break; } resid = 0; } clear_bit(field, &mp->m_icsb_counters); xfs_icsb_unlock_all_counters(mp); } void xfs_icsb_sync_counters_locked( xfs_mount_t *mp, int flags) { xfs_icsb_cnts_t cnt; xfs_icsb_count(mp, &cnt, flags); if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT)) mp->m_sb.sb_icount = cnt.icsb_icount; if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE)) mp->m_sb.sb_ifree = cnt.icsb_ifree; if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS)) mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; } /* * Accurate update of per-cpu counters to incore superblock */ void xfs_icsb_sync_counters( xfs_mount_t *mp, int flags) { spin_lock(&mp->m_sb_lock); xfs_icsb_sync_counters_locked(mp, flags); spin_unlock(&mp->m_sb_lock); } /* * Balance and enable/disable counters as necessary. * * Thresholds for re-enabling counters are somewhat magic. inode counts are * chosen to be the same number as single on disk allocation chunk per CPU, and * free blocks is something far enough zero that we aren't going thrash when we * get near ENOSPC. We also need to supply a minimum we require per cpu to * prevent looping endlessly when xfs_alloc_space asks for more than will * be distributed to a single CPU but each CPU has enough blocks to be * reenabled. * * Note that we can be called when counters are already disabled. * xfs_icsb_disable_counter() optimises the counter locking in this case to * prevent locking every per-cpu counter needlessly. 
*/ #define XFS_ICSB_INO_CNTR_REENABLE (uint64_t)64 #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \ (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp)) STATIC void xfs_icsb_balance_counter_locked( xfs_mount_t *mp, xfs_sb_field_t field, int min_per_cpu) { uint64_t count, resid; int weight = num_online_cpus(); uint64_t min = (uint64_t)min_per_cpu; /* disable counter and sync counter */ xfs_icsb_disable_counter(mp, field); /* update counters - first CPU gets residual*/ switch (field) { case XFS_SBS_ICOUNT: count = mp->m_sb.sb_icount; resid = do_div(count, weight); if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) return; break; case XFS_SBS_IFREE: count = mp->m_sb.sb_ifree; resid = do_div(count, weight); if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) return; break; case XFS_SBS_FDBLOCKS: count = mp->m_sb.sb_fdblocks; resid = do_div(count, weight); if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp))) return; break; default: BUG(); count = resid = 0; /* quiet, gcc */ break; } xfs_icsb_enable_counter(mp, field, count, resid); } STATIC void xfs_icsb_balance_counter( xfs_mount_t *mp, xfs_sb_field_t fields, int min_per_cpu) { spin_lock(&mp->m_sb_lock); xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu); spin_unlock(&mp->m_sb_lock); } int xfs_icsb_modify_counters( xfs_mount_t *mp, xfs_sb_field_t field, int64_t delta, int rsvd) { xfs_icsb_cnts_t *icsbp; long long lcounter; /* long counter for 64 bit fields */ int ret = 0; might_sleep(); again: preempt_disable(); icsbp = this_cpu_ptr(mp->m_sb_cnts); /* * if the counter is disabled, go to slow path */ if (unlikely(xfs_icsb_counter_disabled(mp, field))) goto slow_path; xfs_icsb_lock_cntr(icsbp); if (unlikely(xfs_icsb_counter_disabled(mp, field))) { xfs_icsb_unlock_cntr(icsbp); goto slow_path; } switch (field) { case XFS_SBS_ICOUNT: lcounter = icsbp->icsb_icount; lcounter += delta; if (unlikely(lcounter < 0)) goto balance_counter; icsbp->icsb_icount = lcounter; break; case XFS_SBS_IFREE: lcounter = icsbp->icsb_ifree; lcounter += delta; if (unlikely(lcounter < 0)) goto balance_counter; icsbp->icsb_ifree = lcounter; break; case XFS_SBS_FDBLOCKS: BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0); lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); lcounter += delta; if (unlikely(lcounter < 0)) goto balance_counter; icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); break; default: BUG(); break; } xfs_icsb_unlock_cntr(icsbp); preempt_enable(); return 0; slow_path: preempt_enable(); /* * serialise with a mutex so we don't burn lots of cpu on * the superblock lock. We still need to hold the superblock * lock, however, when we modify the global structures. */ xfs_icsb_lock(mp); /* * Now running atomically. * * If the counter is enabled, someone has beaten us to rebalancing. * Drop the lock and try again in the fast path.... */ if (!(xfs_icsb_counter_disabled(mp, field))) { xfs_icsb_unlock(mp); goto again; } /* * The counter is currently disabled. Because we are * running atomically here, we know a rebalance cannot * be in progress. Hence we can go straight to operating * on the global superblock. We do not call xfs_mod_incore_sb() * here even though we need to get the m_sb_lock. Doing so * will cause us to re-enter this function and deadlock. * Hence we get the m_sb_lock ourselves and then call * xfs_mod_incore_sb_unlocked() as the unlocked path operates * directly on the global counters. 
*/ spin_lock(&mp->m_sb_lock); ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); spin_unlock(&mp->m_sb_lock); /* * Now that we've modified the global superblock, we * may be able to re-enable the distributed counters * (e.g. lots of space just got freed). After that * we are done. */ if (ret != ENOSPC) xfs_icsb_balance_counter(mp, field, 0); xfs_icsb_unlock(mp); return ret; balance_counter: xfs_icsb_unlock_cntr(icsbp); preempt_enable(); /* * We may have multiple threads here if multiple per-cpu * counters run dry at the same time. This will mean we can * do more balances than strictly necessary but it is not * the common slowpath case. */ xfs_icsb_lock(mp); /* * running atomically. * * This will leave the counter in the correct state for future * accesses. After the rebalance, we simply try again and our retry * will either succeed through the fast path or slow path without * another balance operation being required. */ xfs_icsb_balance_counter(mp, field, delta); xfs_icsb_unlock(mp); goto again; } #endif
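The balance step described in the comments above follows a fixed shape: disable the counter, fold every CPU's private portion back into the global value, divide that value evenly across CPUs with the residual handed to the first one, and re-enable only if each CPU's share clears a threshold. A minimal standalone user-space sketch of that shape follows; the plain array standing in for real per-CPU data and the names (counter_balance, REENABLE_MIN) are illustrative assumptions, not code from xfs_mount.c.

/* Standalone sketch of the disable/fold/redistribute pattern used by
 * xfs_icsb_balance_counter_locked(). A plain array stands in for per-CPU
 * data and no locking is shown; this is not XFS code.
 */
#include <stdio.h>
#include <stdint.h>

#define NCPUS		4
#define REENABLE_MIN	64	/* per-CPU threshold, cf. XFS_ICSB_INO_CNTR_REENABLE */

struct cnts {
	uint64_t count[NCPUS];	/* per-CPU portions */
	uint64_t global;	/* authoritative value while disabled */
	int	 disabled;
};

/* Fold all per-CPU portions back into the global counter. */
static void counter_disable(struct cnts *c)
{
	int i;

	if (c->disabled)
		return;
	c->disabled = 1;
	for (i = 0; i < NCPUS; i++) {
		c->global += c->count[i];
		c->count[i] = 0;
	}
}

/* Redistribute the global value; the first CPU gets the residual.
 * Only re-enable if every CPU ends up at or above the threshold.
 */
static void counter_balance(struct cnts *c)
{
	uint64_t per_cpu, resid;
	int i;

	counter_disable(c);

	per_cpu = c->global / NCPUS;
	resid	= c->global % NCPUS;
	if (per_cpu < REENABLE_MIN)
		return;			/* stay disabled, callers use the slow path */

	for (i = 0; i < NCPUS; i++)
		c->count[i] = per_cpu + (i == 0 ? resid : 0);
	c->global = 0;
	c->disabled = 0;
}

int main(void)
{
	struct cnts c = { .global = 1000, .disabled = 1 };

	counter_balance(&c);
	printf("disabled=%d cpu0=%llu cpu1=%llu\n", c.disabled,
	       (unsigned long long)c.count[0],
	       (unsigned long long)c.count[1]);
	return 0;
}

The same threshold check is what keeps a counter disabled near ENOSPC, which forces callers through the serialised slow path in xfs_icsb_modify_counters() above.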
gpl-2.0
ztemt/NX507J_5.1_kernel
net/netfilter/nf_conntrack_core.c
380
46104
/* Connection state tracking for netfilter. This is separated from, but required by, the NAT layer; it can also be used by an iptables extension. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/netfilter.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/vmalloc.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/jhash.h> #include <linux/err.h> #include <linux/percpu.h> #include <linux/moduleparam.h> #include <linux/notifier.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/socket.h> #include <linux/mm.h> #include <linux/nsproxy.h> #include <linux/rculist_nulls.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_l3proto.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_extend.h> #include <net/netfilter/nf_conntrack_acct.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/nf_conntrack_timestamp.h> #include <net/netfilter/nf_conntrack_timeout.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_core.h> #define NF_CONNTRACK_VERSION "0.5.0" int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct, enum nf_nat_manip_type manip, const struct nlattr *attr) __read_mostly; EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook); DEFINE_SPINLOCK(nf_conntrack_lock); EXPORT_SYMBOL_GPL(nf_conntrack_lock); unsigned int nf_conntrack_htable_size __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); unsigned int nf_conntrack_max __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_max); DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); unsigned int nf_conntrack_hash_rnd __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd); static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone) { unsigned int n; /* The direction must be ignored, so we hash everything up to the * destination ports (which is a multiple of 4) and treat the last * three bytes manually. 
*/ n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^ (((__force __u16)tuple->dst.u.all << 16) | tuple->dst.protonum)); } static u32 __hash_bucket(u32 hash, unsigned int size) { return ((u64)hash * size) >> 32; } static u32 hash_bucket(u32 hash, const struct net *net) { return __hash_bucket(hash, net->ct.htable_size); } static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, u16 zone, unsigned int size) { return __hash_bucket(hash_conntrack_raw(tuple, zone), size); } static inline u_int32_t hash_conntrack(const struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple) { return __hash_conntrack(tuple, zone, net->ct.htable_size); } bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff, unsigned int dataoff, u_int16_t l3num, u_int8_t protonum, struct nf_conntrack_tuple *tuple, const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *l4proto) { memset(tuple, 0, sizeof(*tuple)); tuple->src.l3num = l3num; if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0) return false; tuple->dst.protonum = protonum; tuple->dst.dir = IP_CT_DIR_ORIGINAL; return l4proto->pkt_to_tuple(skb, dataoff, tuple); } EXPORT_SYMBOL_GPL(nf_ct_get_tuple); bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, u_int16_t l3num, struct nf_conntrack_tuple *tuple) { struct nf_conntrack_l3proto *l3proto; struct nf_conntrack_l4proto *l4proto; unsigned int protoff; u_int8_t protonum; int ret; rcu_read_lock(); l3proto = __nf_ct_l3proto_find(l3num); ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum); if (ret != NF_ACCEPT) { rcu_read_unlock(); return false; } l4proto = __nf_ct_l4proto_find(l3num, protonum); ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple, l3proto, l4proto); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr); bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, const struct nf_conntrack_tuple *orig, const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *l4proto) { memset(inverse, 0, sizeof(*inverse)); inverse->src.l3num = orig->src.l3num; if (l3proto->invert_tuple(inverse, orig) == 0) return false; inverse->dst.dir = !orig->dst.dir; inverse->dst.protonum = orig->dst.protonum; return l4proto->invert_tuple(inverse, orig); } EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); static void clean_from_lists(struct nf_conn *ct) { pr_debug("clean_from_lists(%p)\n", ct); hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode); /* Destroy all pending expectations */ nf_ct_remove_expectations(ct); } static void destroy_conntrack(struct nf_conntrack *nfct) { struct nf_conn *ct = (struct nf_conn *)nfct; struct net *net = nf_ct_net(ct); struct nf_conntrack_l4proto *l4proto; pr_debug("destroy_conntrack(%p)\n", ct); NF_CT_ASSERT(atomic_read(&nfct->use) == 0); NF_CT_ASSERT(!timer_pending(&ct->timeout)); /* To make sure we don't get any weird locking issues here: * destroy_conntrack() MUST NOT be called with a write lock * to nf_conntrack_lock!!! -HW */ rcu_read_lock(); l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); if (l4proto && l4proto->destroy) l4proto->destroy(ct); rcu_read_unlock(); spin_lock_bh(&nf_conntrack_lock); /* Expectations will have been removed in clean_from_lists, * except TFTP can create an expectation on the first packet, * before connection is in the list, so we need to clean here, * too. 
*/ nf_ct_remove_expectations(ct); /* We overload first tuple to link into unconfirmed list. */ if (!nf_ct_is_confirmed(ct)) { BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode)); hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); } NF_CT_STAT_INC(net, delete); spin_unlock_bh(&nf_conntrack_lock); if (ct->master) nf_ct_put(ct->master); pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct); nf_conntrack_free(ct); } void nf_ct_delete_from_lists(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); nf_ct_helper_destroy(ct); spin_lock_bh(&nf_conntrack_lock); /* Inside lock so preempt is disabled on module removal path. * Otherwise we can get spurious warnings. */ NF_CT_STAT_INC(net, delete_list); clean_from_lists(ct); spin_unlock_bh(&nf_conntrack_lock); } EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists); static void death_by_event(unsigned long ul_conntrack) { struct nf_conn *ct = (void *)ul_conntrack; struct net *net = nf_ct_net(ct); if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { /* bad luck, let's retry again */ ct->timeout.expires = jiffies + (random32() % net->ct.sysctl_events_retry_timeout); add_timer(&ct->timeout); return; } /* we've got the event delivered, now it's dying */ set_bit(IPS_DYING_BIT, &ct->status); spin_lock(&nf_conntrack_lock); hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); spin_unlock(&nf_conntrack_lock); nf_ct_put(ct); } void nf_ct_insert_dying_list(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); /* add this conntrack to the dying list */ spin_lock_bh(&nf_conntrack_lock); hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, &net->ct.dying); spin_unlock_bh(&nf_conntrack_lock); /* set a new timer to retry event delivery */ setup_timer(&ct->timeout, death_by_event, (unsigned long)ct); ct->timeout.expires = jiffies + (random32() % net->ct.sysctl_events_retry_timeout); add_timer(&ct->timeout); } EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); static void death_by_timeout(unsigned long ul_conntrack) { struct nf_conn *ct = (void *)ul_conntrack; struct nf_conn_tstamp *tstamp; tstamp = nf_conn_tstamp_find(ct); if (tstamp && tstamp->stop == 0) tstamp->stop = ktime_to_ns(ktime_get_real()); if (!test_bit(IPS_DYING_BIT, &ct->status) && unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) { /* destroy event was not delivered */ nf_ct_delete_from_lists(ct); nf_ct_insert_dying_list(ct); return; } set_bit(IPS_DYING_BIT, &ct->status); nf_ct_delete_from_lists(ct); nf_ct_put(ct); } /* * Warning : * - Caller must take a reference on returned object * and recheck nf_ct_tuple_equal(tuple, &h->tuple) * OR * - Caller must lock nf_conntrack_lock before calling this function */ static struct nf_conntrack_tuple_hash * ____nf_conntrack_find(struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple, u32 hash) { struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; unsigned int bucket = hash_bucket(hash, net); /* Disable BHs the entire time since we normally need to disable them * at least once for the stats anyway. */ local_bh_disable(); begin: hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { if (nf_ct_tuple_equal(tuple, &h->tuple) && nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) { NF_CT_STAT_INC(net, found); local_bh_enable(); return h; } NF_CT_STAT_INC(net, searched); } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. 
*/ if (get_nulls_value(n) != bucket) { NF_CT_STAT_INC(net, search_restart); goto begin; } local_bh_enable(); return NULL; } struct nf_conntrack_tuple_hash * __nf_conntrack_find(struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple) { return ____nf_conntrack_find(net, zone, tuple, hash_conntrack_raw(tuple, zone)); } EXPORT_SYMBOL_GPL(__nf_conntrack_find); /* Find a connection corresponding to a tuple. */ static struct nf_conntrack_tuple_hash * __nf_conntrack_find_get(struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple, u32 hash) { struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; rcu_read_lock(); begin: h = ____nf_conntrack_find(net, zone, tuple, hash); if (h) { ct = nf_ct_tuplehash_to_ctrack(h); if (unlikely(nf_ct_is_dying(ct) || !atomic_inc_not_zero(&ct->ct_general.use))) h = NULL; else { if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) || nf_ct_zone(ct) != zone)) { nf_ct_put(ct); goto begin; } } } rcu_read_unlock(); return h; } struct nf_conntrack_tuple_hash * nf_conntrack_find_get(struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple) { return __nf_conntrack_find_get(net, zone, tuple, hash_conntrack_raw(tuple, zone)); } EXPORT_SYMBOL_GPL(nf_conntrack_find_get); static void __nf_conntrack_hash_insert(struct nf_conn *ct, unsigned int hash, unsigned int repl_hash) { struct net *net = nf_ct_net(ct); hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, &net->ct.hash[hash]); hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, &net->ct.hash[repl_hash]); } int nf_conntrack_hash_check_insert(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); unsigned int hash, repl_hash; struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; u16 zone; zone = nf_ct_zone(ct); hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); spin_lock_bh(&nf_conntrack_lock); /* See if there's one in the list already, including reverse */ hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, &h->tuple) && zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) goto out; hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode) if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, &h->tuple) && zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) goto out; add_timer(&ct->timeout); nf_conntrack_get(&ct->ct_general); __nf_conntrack_hash_insert(ct, hash, repl_hash); NF_CT_STAT_INC(net, insert); spin_unlock_bh(&nf_conntrack_lock); return 0; out: NF_CT_STAT_INC(net, insert_failed); spin_unlock_bh(&nf_conntrack_lock); return -EEXIST; } EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); /* Confirm a connection given skb; places it in hash table */ int __nf_conntrack_confirm(struct sk_buff *skb) { unsigned int hash, repl_hash; struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; struct nf_conn_help *help; struct nf_conn_tstamp *tstamp; struct hlist_nulls_node *n; enum ip_conntrack_info ctinfo; struct net *net; u16 zone; ct = nf_ct_get(skb, &ctinfo); net = nf_ct_net(ct); /* ipt_REJECT uses nf_conntrack_attach to attach related ICMP/TCP RST packets in other direction. Actual packet which created connection will be IP_CT_NEW or for an expected connection, IP_CT_RELATED. 
*/ if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) return NF_ACCEPT; zone = nf_ct_zone(ct); /* reuse the hash saved before */ hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev; hash = hash_bucket(hash, net); repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); /* We're not in hash table, and we refuse to set up related connections for unconfirmed conns. But packet copies and REJECT will give spurious warnings here. */ /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */ /* No external references means no one else could have confirmed us. */ NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); pr_debug("Confirming conntrack %p\n", ct); spin_lock_bh(&nf_conntrack_lock); /* We have to check the DYING flag inside the lock to prevent a race against nf_ct_get_next_corpse() possibly called from user context, else we insert an already 'dead' hash, blocking further use of that particular connection -JM */ if (unlikely(nf_ct_is_dying(ct))) { spin_unlock_bh(&nf_conntrack_lock); return NF_ACCEPT; } /* See if there's one in the list already, including reverse: NAT could have grabbed it without realizing, since we're not in the hash. If there is, we lost race. */ hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, &h->tuple) && zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) goto out; hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode) if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, &h->tuple) && zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) goto out; /* Remove from unconfirmed list */ hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); /* Timer relative to confirmation time, not original setting time, otherwise we'd get timer wrap in weird delay cases. */ ct->timeout.expires += jiffies; add_timer(&ct->timeout); atomic_inc(&ct->ct_general.use); ct->status |= IPS_CONFIRMED; /* set conntrack timestamp, if enabled. */ tstamp = nf_conn_tstamp_find(ct); if (tstamp) { if (skb->tstamp.tv64 == 0) __net_timestamp((struct sk_buff *)skb); tstamp->start = ktime_to_ns(skb->tstamp); } /* Since the lookup is lockless, hash insertion must be done after * starting the timer and setting the CONFIRMED bit. The RCU barriers * guarantee that no other CPU can find the conntrack before the above * stores are visible. */ __nf_conntrack_hash_insert(ct, hash, repl_hash); NF_CT_STAT_INC(net, insert); spin_unlock_bh(&nf_conntrack_lock); help = nfct_help(ct); if (help && help->helper) nf_conntrack_event_cache(IPCT_HELPER, ct); nf_conntrack_event_cache(master_ct(ct) ? IPCT_RELATED : IPCT_NEW, ct); return NF_ACCEPT; out: NF_CT_STAT_INC(net, insert_failed); spin_unlock_bh(&nf_conntrack_lock); return NF_DROP; } EXPORT_SYMBOL_GPL(__nf_conntrack_confirm); /* Returns true if a connection correspondings to the tuple (required for NAT). */ int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, const struct nf_conn *ignored_conntrack) { struct net *net = nf_ct_net(ignored_conntrack); struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; struct nf_conn *ct; u16 zone = nf_ct_zone(ignored_conntrack); unsigned int hash = hash_conntrack(net, zone, tuple); /* Disable BHs the entire time since we need to disable them at * least once for the stats anyway. 
*/ rcu_read_lock_bh(); hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (ct != ignored_conntrack && nf_ct_tuple_equal(tuple, &h->tuple) && nf_ct_zone(ct) == zone) { NF_CT_STAT_INC(net, found); rcu_read_unlock_bh(); return 1; } NF_CT_STAT_INC(net, searched); } rcu_read_unlock_bh(); return 0; } EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken); #define NF_CT_EVICTION_RANGE 8 /* There's a small race here where we may free a just-assured connection. Too bad: we're in trouble anyway. */ static noinline int early_drop(struct net *net, unsigned int hash) { /* Use oldest entry, which is roughly LRU */ struct nf_conntrack_tuple_hash *h; struct nf_conn *ct = NULL, *tmp; struct hlist_nulls_node *n; unsigned int i, cnt = 0; int dropped = 0; rcu_read_lock(); for (i = 0; i < net->ct.htable_size; i++) { hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { tmp = nf_ct_tuplehash_to_ctrack(h); if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) ct = tmp; cnt++; } if (ct != NULL) { if (likely(!nf_ct_is_dying(ct) && atomic_inc_not_zero(&ct->ct_general.use))) break; else ct = NULL; } if (cnt >= NF_CT_EVICTION_RANGE) break; hash = (hash + 1) % net->ct.htable_size; } rcu_read_unlock(); if (!ct) return dropped; if (del_timer(&ct->timeout)) { death_by_timeout((unsigned long)ct); /* Check if we indeed killed this entry. Reliable event delivery may have inserted it into the dying list. */ if (test_bit(IPS_DYING_BIT, &ct->status)) { dropped = 1; NF_CT_STAT_INC_ATOMIC(net, early_drop); } } nf_ct_put(ct); return dropped; } void init_nf_conntrack_hash_rnd(void) { unsigned int rand; /* * Why not initialize nf_conntrack_rnd in a "init()" function ? * Because there isn't enough entropy when system initializing, * and we initialize it as late as possible. */ do { get_random_bytes(&rand, sizeof(rand)); } while (!rand); cmpxchg(&nf_conntrack_hash_rnd, 0, rand); } static struct nf_conn * __nf_conntrack_alloc(struct net *net, u16 zone, const struct nf_conntrack_tuple *orig, const struct nf_conntrack_tuple *repl, gfp_t gfp, u32 hash) { struct nf_conn *ct; if (unlikely(!nf_conntrack_hash_rnd)) { init_nf_conntrack_hash_rnd(); /* recompute the hash as nf_conntrack_hash_rnd is initialized */ hash = hash_conntrack_raw(orig, zone); } /* We don't want any race condition at early drop stage */ atomic_inc(&net->ct.count); if (nf_conntrack_max && unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { if (!early_drop(net, hash_bucket(hash, net))) { atomic_dec(&net->ct.count); if (net_ratelimit()) printk(KERN_WARNING "nf_conntrack: table full, dropping" " packet.\n"); return ERR_PTR(-ENOMEM); } } /* * Do not use kmem_cache_zalloc(), as this cache uses * SLAB_DESTROY_BY_RCU. */ ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); if (ct == NULL) { atomic_dec(&net->ct.count); return ERR_PTR(-ENOMEM); } /* * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged. 
*/ memset(&ct->tuplehash[IP_CT_DIR_MAX], 0, offsetof(struct nf_conn, proto) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX])); spin_lock_init(&ct->lock); ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL; ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; /* save hash for reusing when confirming */ *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash; /* Don't set timer yet: wait for confirmation */ setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); write_pnet(&ct->ct_net, net); #if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE) ct->nattype_entry = 0; #endif #ifdef CONFIG_NF_CONNTRACK_ZONES if (zone) { struct nf_conntrack_zone *nf_ct_zone; nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC); if (!nf_ct_zone) goto out_free; nf_ct_zone->id = zone; } #endif /* * changes to lookup keys must be done before setting refcnt to 1 */ smp_wmb(); atomic_set(&ct->ct_general.use, 1); return ct; #ifdef CONFIG_NF_CONNTRACK_ZONES out_free: atomic_dec(&net->ct.count); kmem_cache_free(net->ct.nf_conntrack_cachep, ct); return ERR_PTR(-ENOMEM); #endif } struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone, const struct nf_conntrack_tuple *orig, const struct nf_conntrack_tuple *repl, gfp_t gfp) { return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0); } EXPORT_SYMBOL_GPL(nf_conntrack_alloc); void nf_conntrack_free(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); nf_ct_ext_destroy(ct); nf_ct_ext_free(ct); kmem_cache_free(net->ct.nf_conntrack_cachep, ct); smp_mb__before_atomic_dec(); atomic_dec(&net->ct.count); } EXPORT_SYMBOL_GPL(nf_conntrack_free); /* Allocate a new conntrack: we return -ENOMEM if classification failed due to stress. Otherwise it really is unclassifiable. */ static struct nf_conntrack_tuple_hash * init_conntrack(struct net *net, struct nf_conn *tmpl, const struct nf_conntrack_tuple *tuple, struct nf_conntrack_l3proto *l3proto, struct nf_conntrack_l4proto *l4proto, struct sk_buff *skb, unsigned int dataoff, u32 hash) { struct nf_conn *ct; struct nf_conn_help *help; struct nf_conntrack_tuple repl_tuple; struct nf_conntrack_ecache *ecache; struct nf_conntrack_expect *exp; u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; struct nf_conn_timeout *timeout_ext; unsigned int *timeouts; if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { pr_debug("Can't invert tuple.\n"); return NULL; } ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC, hash); if (IS_ERR(ct)) return (struct nf_conntrack_tuple_hash *)ct; timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL; if (timeout_ext) timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext); else timeouts = l4proto->get_timeouts(net); if (!l4proto->new(ct, skb, dataoff, timeouts)) { nf_conntrack_free(ct); pr_debug("init conntrack: can't track with proto module\n"); return NULL; } if (timeout_ext) nf_ct_timeout_ext_add(ct, timeout_ext->timeout, GFP_ATOMIC); nf_ct_acct_ext_add(ct, GFP_ATOMIC); nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL; nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0, ecache ? ecache->expmask : 0, GFP_ATOMIC); spin_lock_bh(&nf_conntrack_lock); exp = nf_ct_find_expectation(net, zone, tuple); if (exp) { pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", ct, exp); /* Welcome, Mr. Bond. We've been expecting you... 
*/ __set_bit(IPS_EXPECTED_BIT, &ct->status); ct->master = exp->master; if (exp->helper) { help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); if (help) rcu_assign_pointer(help->helper, exp->helper); } #ifdef CONFIG_NF_CONNTRACK_MARK ct->mark = exp->master->mark; #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK ct->secmark = exp->master->secmark; #endif /* Intialize the NAT type entry. */ #if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE) ct->nattype_entry = 0; #endif nf_conntrack_get(&ct->master->ct_general); NF_CT_STAT_INC(net, expect_new); } else { __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); NF_CT_STAT_INC(net, new); } /* Overload tuple linked list to put us in unconfirmed list. */ hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, &net->ct.unconfirmed); spin_unlock_bh(&nf_conntrack_lock); if (exp) { if (exp->expectfn) exp->expectfn(ct, exp); nf_ct_expect_put(exp); } return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; } /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ static inline struct nf_conn * resolve_normal_ct(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, u_int16_t l3num, u_int8_t protonum, struct nf_conntrack_l3proto *l3proto, struct nf_conntrack_l4proto *l4proto, int *set_reply, enum ip_conntrack_info *ctinfo) { struct nf_conntrack_tuple tuple; struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; u32 hash; if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num, protonum, &tuple, l3proto, l4proto)) { pr_debug("resolve_normal_ct: Can't get tuple\n"); return NULL; } /* look for tuple match */ hash = hash_conntrack_raw(&tuple, zone); h = __nf_conntrack_find_get(net, zone, &tuple, hash); if (!h) { h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, skb, dataoff, hash); if (!h) return NULL; if (IS_ERR(h)) return (void *)h; } ct = nf_ct_tuplehash_to_ctrack(h); /* It exists; we have (non-exclusive) reference. */ if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) { *ctinfo = IP_CT_ESTABLISHED_REPLY; /* Please set reply bit if this packet OK */ *set_reply = 1; } else { /* Once we've had two way comms, always ESTABLISHED. */ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { pr_debug("nf_conntrack_in: normal packet for %p\n", ct); *ctinfo = IP_CT_ESTABLISHED; } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { pr_debug("nf_conntrack_in: related packet for %p\n", ct); *ctinfo = IP_CT_RELATED; } else { pr_debug("nf_conntrack_in: new packet for %p\n", ct); *ctinfo = IP_CT_NEW; } *set_reply = 0; } skb->nfct = &ct->ct_general; skb->nfctinfo = *ctinfo; return ct; } unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, struct sk_buff *skb) { struct nf_conn *ct, *tmpl = NULL; enum ip_conntrack_info ctinfo; struct nf_conntrack_l3proto *l3proto; struct nf_conntrack_l4proto *l4proto; struct nf_conn_timeout *timeout_ext; unsigned int *timeouts; unsigned int dataoff; u_int8_t protonum; int set_reply = 0; int ret; if (skb->nfct) { /* Previously seen (loopback or untracked)? Ignore. 
*/ tmpl = (struct nf_conn *)skb->nfct; if (!nf_ct_is_template(tmpl)) { NF_CT_STAT_INC_ATOMIC(net, ignore); return NF_ACCEPT; } skb->nfct = NULL; } /* rcu_read_lock()ed by nf_hook_slow */ l3proto = __nf_ct_l3proto_find(pf); ret = l3proto->get_l4proto(skb, skb_network_offset(skb), &dataoff, &protonum); if (ret <= 0) { pr_debug("not prepared to track yet or error occurred\n"); NF_CT_STAT_INC_ATOMIC(net, error); NF_CT_STAT_INC_ATOMIC(net, invalid); ret = -ret; goto out; } l4proto = __nf_ct_l4proto_find(pf, protonum); /* It may be an special packet, error, unclean... * inverse of the return code tells to the netfilter * core what to do with the packet. */ if (l4proto->error != NULL) { ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo, pf, hooknum); if (ret <= 0) { NF_CT_STAT_INC_ATOMIC(net, error); NF_CT_STAT_INC_ATOMIC(net, invalid); ret = -ret; goto out; } /* ICMP[v6] protocol trackers may assign one conntrack. */ if (skb->nfct) goto out; } ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, l3proto, l4proto, &set_reply, &ctinfo); if (!ct) { /* Not valid part of a connection */ NF_CT_STAT_INC_ATOMIC(net, invalid); ret = NF_ACCEPT; goto out; } if (IS_ERR(ct)) { /* Too stressed to deal. */ NF_CT_STAT_INC_ATOMIC(net, drop); ret = NF_DROP; goto out; } NF_CT_ASSERT(skb->nfct); /* Decide what timeout policy we want to apply to this flow. */ timeout_ext = nf_ct_timeout_find(ct); if (timeout_ext) timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext); else timeouts = l4proto->get_timeouts(net); ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts); if (ret <= 0) { /* Invalid: inverse of the return code tells * the netfilter core what to do */ pr_debug("nf_conntrack_in: Can't track with proto module\n"); nf_conntrack_put(skb->nfct); skb->nfct = NULL; NF_CT_STAT_INC_ATOMIC(net, invalid); if (ret == -NF_DROP) NF_CT_STAT_INC_ATOMIC(net, drop); ret = -ret; goto out; } if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) nf_conntrack_event_cache(IPCT_REPLY, ct); out: if (tmpl) { /* Special case: we have to repeat this hook, assign the * template again to this packet. We assume that this packet * has no conntrack assigned. This is used by nf_ct_tcp. */ if (ret == NF_REPEAT) skb->nfct = (struct nf_conntrack *)tmpl; else nf_ct_put(tmpl); } return ret; } EXPORT_SYMBOL_GPL(nf_conntrack_in); bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, const struct nf_conntrack_tuple *orig) { bool ret; rcu_read_lock(); ret = nf_ct_invert_tuple(inverse, orig, __nf_ct_l3proto_find(orig->src.l3num), __nf_ct_l4proto_find(orig->src.l3num, orig->dst.protonum)); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr); /* Alter reply tuple (maybe alter helper). 
This is for NAT, and is implicitly racy: see __nf_conntrack_confirm */ void nf_conntrack_alter_reply(struct nf_conn *ct, const struct nf_conntrack_tuple *newreply) { struct nf_conn_help *help = nfct_help(ct); /* Should be unconfirmed, so not in hash table yet */ NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); pr_debug("Altering reply tuple of %p to ", ct); nf_ct_dump_tuple(newreply); ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; if (ct->master || (help && !hlist_empty(&help->expectations))) return; rcu_read_lock(); __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo, const struct sk_buff *skb, unsigned long extra_jiffies, int do_acct) { NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct); NF_CT_ASSERT(skb); /* Only update if this is not a fixed timeout */ if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) goto acct; /* If not in hash table, timer will not be active yet */ if (!nf_ct_is_confirmed(ct)) { ct->timeout.expires = extra_jiffies; } else { unsigned long newtime = jiffies + extra_jiffies; /* Only update the timeout if the new timeout is at least HZ jiffies from the old timeout. Need del_timer for race avoidance (may already be dying). */ if (newtime - ct->timeout.expires >= HZ) mod_timer_pending(&ct->timeout, newtime); } /* Refresh the NAT type entry. */ #if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE) (void)nattype_refresh_timer(ct->nattype_entry, ct->timeout.expires); #endif acct: if (do_acct) { struct nf_conn_counter *acct; acct = nf_conn_acct_find(ct); if (acct) { atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets); atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes); } } } EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); bool __nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo, const struct sk_buff *skb, int do_acct) { if (do_acct) { struct nf_conn_counter *acct; acct = nf_conn_acct_find(ct); if (acct) { atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets); atomic64_add(skb->len - skb_network_offset(skb), &acct[CTINFO2DIR(ctinfo)].bytes); } } if (del_timer(&ct->timeout)) { ct->timeout.function((unsigned long)ct); return true; } return false; } EXPORT_SYMBOL_GPL(__nf_ct_kill_acct); #ifdef CONFIG_NF_CONNTRACK_ZONES static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = { .len = sizeof(struct nf_conntrack_zone), .align = __alignof__(struct nf_conntrack_zone), .id = NF_CT_EXT_ZONE, }; #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_conntrack.h> #include <linux/mutex.h> /* Generic function for tcp/udp/sctp/dccp and alike. 
This needs to be * in ip_conntrack_core, since we don't want the protocols to autoload * or depend on ctnetlink */ int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port); NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port); return 0; nla_put_failure: return -1; } EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr); const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = { [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 }, [CTA_PROTO_DST_PORT] = { .type = NLA_U16 }, }; EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy); int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *t) { if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT]) return -EINVAL; t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]); t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]); return 0; } EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple); int nf_ct_port_nlattr_tuple_size(void) { return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1); } EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size); #endif /* Used by ipt_REJECT and ip6t_REJECT. */ static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; /* This ICMP is in reverse direction to the packet which caused it */ ct = nf_ct_get(skb, &ctinfo); if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ctinfo = IP_CT_RELATED_REPLY; else ctinfo = IP_CT_RELATED; /* Attach to new skbuff, and increment count */ nskb->nfct = &ct->ct_general; nskb->nfctinfo = ctinfo; nf_conntrack_get(nskb->nfct); } /* Bring out ya dead! */ static struct nf_conn * get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data, unsigned int *bucket) { struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; struct hlist_nulls_node *n; spin_lock_bh(&nf_conntrack_lock); for (; *bucket < net->ct.htable_size; (*bucket)++) { hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (iter(ct, data)) goto found; } } hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (iter(ct, data)) set_bit(IPS_DYING_BIT, &ct->status); } spin_unlock_bh(&nf_conntrack_lock); return NULL; found: atomic_inc(&ct->ct_general.use); spin_unlock_bh(&nf_conntrack_lock); return ct; } void nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data) { struct nf_conn *ct; unsigned int bucket = 0; while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { /* Time to push up daises... */ if (del_timer(&ct->timeout)) death_by_timeout((unsigned long)ct); /* ... else the timer will get him soon. */ nf_ct_put(ct); } } EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); struct __nf_ct_flush_report { u32 pid; int report; }; static int kill_report(struct nf_conn *i, void *data) { struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data; struct nf_conn_tstamp *tstamp; tstamp = nf_conn_tstamp_find(i); if (tstamp && tstamp->stop == 0) tstamp->stop = ktime_to_ns(ktime_get_real()); /* If we fail to deliver the event, death_by_timeout() will retry */ if (nf_conntrack_event_report(IPCT_DESTROY, i, fr->pid, fr->report) < 0) return 1; /* Avoid the delivery of the destroy event in death_by_timeout(). 
*/ set_bit(IPS_DYING_BIT, &i->status); return 1; } static int kill_all(struct nf_conn *i, void *data) { return 1; } void nf_ct_free_hashtable(void *hash, unsigned int size) { if (is_vmalloc_addr(hash)) vfree(hash); else free_pages((unsigned long)hash, get_order(sizeof(struct hlist_head) * size)); } EXPORT_SYMBOL_GPL(nf_ct_free_hashtable); void nf_conntrack_flush_report(struct net *net, u32 pid, int report) { struct __nf_ct_flush_report fr = { .pid = pid, .report = report, }; nf_ct_iterate_cleanup(net, kill_report, &fr); } EXPORT_SYMBOL_GPL(nf_conntrack_flush_report); static void nf_ct_release_dying_list(struct net *net) { struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; struct hlist_nulls_node *n; spin_lock_bh(&nf_conntrack_lock); hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); /* never fails to remove them, no listeners at this point */ nf_ct_kill(ct); } spin_unlock_bh(&nf_conntrack_lock); } static int untrack_refs(void) { int cnt = 0, cpu; for_each_possible_cpu(cpu) { struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); cnt += atomic_read(&ct->ct_general.use) - 1; } return cnt; } static void nf_conntrack_cleanup_init_net(void) { while (untrack_refs() > 0) schedule(); nf_conntrack_helper_fini(); nf_conntrack_proto_fini(); #ifdef CONFIG_NF_CONNTRACK_ZONES nf_ct_extend_unregister(&nf_ct_zone_extend); #endif } static void nf_conntrack_cleanup_net(struct net *net) { i_see_dead_people: nf_ct_iterate_cleanup(net, kill_all, NULL); nf_ct_release_dying_list(net); if (atomic_read(&net->ct.count) != 0) { schedule(); goto i_see_dead_people; } nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); nf_conntrack_timeout_fini(net); nf_conntrack_ecache_fini(net); nf_conntrack_tstamp_fini(net); nf_conntrack_acct_fini(net); nf_conntrack_expect_fini(net); kmem_cache_destroy(net->ct.nf_conntrack_cachep); kfree(net->ct.slabname); free_percpu(net->ct.stat); } /* Mishearing the voices in his head, our hero wonders how he's supposed to kill the mall. */ void nf_conntrack_cleanup(struct net *net) { if (net_eq(net, &init_net)) RCU_INIT_POINTER(ip_ct_attach, NULL); /* This makes sure all current packets have passed through netfilter framework. Roll on, two-stage module delete... */ synchronize_net(); nf_conntrack_cleanup_net(net); if (net_eq(net, &init_net)) { RCU_INIT_POINTER(nf_ct_destroy, NULL); nf_conntrack_cleanup_init_net(); } } void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) { struct hlist_nulls_head *hash; unsigned int nr_slots, i; size_t sz; BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); sz = nr_slots * sizeof(struct hlist_nulls_head); hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, get_order(sz)); if (!hash) { printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); hash = vzalloc(sz); } if (hash && nulls) for (i = 0; i < nr_slots; i++) INIT_HLIST_NULLS_HEAD(&hash[i], i); return hash; } EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable); int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) { int i, bucket; unsigned int hashsize, old_size; struct hlist_nulls_head *hash, *old_hash; struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; if (current->nsproxy->net_ns != &init_net) return -EOPNOTSUPP; /* On boot, we can set this without any fancy locking. 
*/ if (!nf_conntrack_htable_size) return param_set_uint(val, kp); hashsize = simple_strtoul(val, NULL, 0); if (!hashsize) return -EINVAL; hash = nf_ct_alloc_hashtable(&hashsize, 1); if (!hash) return -ENOMEM; /* Lookups in the old hash might happen in parallel, which means we * might get false negatives during connection lookup. New connections * created because of a false negative won't make it into the hash * though since that required taking the lock. */ spin_lock_bh(&nf_conntrack_lock); for (i = 0; i < init_net.ct.htable_size; i++) { while (!hlist_nulls_empty(&init_net.ct.hash[i])) { h = hlist_nulls_entry(init_net.ct.hash[i].first, struct nf_conntrack_tuple_hash, hnnode); ct = nf_ct_tuplehash_to_ctrack(h); hlist_nulls_del_rcu(&h->hnnode); bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct), hashsize); hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); } } old_size = init_net.ct.htable_size; old_hash = init_net.ct.hash; init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; init_net.ct.hash = hash; spin_unlock_bh(&nf_conntrack_lock); nf_ct_free_hashtable(old_hash, old_size); return 0; } EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, &nf_conntrack_htable_size, 0600); void nf_ct_untracked_status_or(unsigned long bits) { int cpu; for_each_possible_cpu(cpu) per_cpu(nf_conntrack_untracked, cpu).status |= bits; } EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or); static int nf_conntrack_init_init_net(void) { int max_factor = 8; int ret, cpu; /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB * machine has 512 buckets. >= 1GB machines have 16384 buckets. */ if (!nf_conntrack_htable_size) { nf_conntrack_htable_size = (((totalram_pages << PAGE_SHIFT) / 16384) / sizeof(struct hlist_head)); if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) nf_conntrack_htable_size = 16384; if (nf_conntrack_htable_size < 32) nf_conntrack_htable_size = 32; /* Use a max. factor of four by default to get the same max as * with the old struct list_heads. When a table size is given * we use the old value of 8 to avoid reducing the max. * entries. 
*/ max_factor = 4; } nf_conntrack_max = max_factor * nf_conntrack_htable_size; printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n", NF_CONNTRACK_VERSION, nf_conntrack_htable_size, nf_conntrack_max); ret = nf_conntrack_proto_init(); if (ret < 0) goto err_proto; ret = nf_conntrack_helper_init(); if (ret < 0) goto err_helper; #ifdef CONFIG_NF_CONNTRACK_ZONES ret = nf_ct_extend_register(&nf_ct_zone_extend); if (ret < 0) goto err_extend; #endif /* Set up fake conntrack: to never be deleted, not in any hashes */ for_each_possible_cpu(cpu) { struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); write_pnet(&ct->ct_net, &init_net); atomic_set(&ct->ct_general.use, 1); } /* - and look it like as a confirmed connection */ nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); return 0; #ifdef CONFIG_NF_CONNTRACK_ZONES err_extend: nf_conntrack_helper_fini(); #endif err_helper: nf_conntrack_proto_fini(); err_proto: return ret; } /* * We need to use special "null" values, not used in hash table */ #define UNCONFIRMED_NULLS_VAL ((1<<30)+0) #define DYING_NULLS_VAL ((1<<30)+1) static int nf_conntrack_init_net(struct net *net) { int ret; atomic_set(&net->ct.count, 0); INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL); INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL); net->ct.stat = alloc_percpu(struct ip_conntrack_stat); if (!net->ct.stat) { ret = -ENOMEM; goto err_stat; } net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); if (!net->ct.slabname) { ret = -ENOMEM; goto err_slabname; } net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname, sizeof(struct nf_conn), 0, SLAB_DESTROY_BY_RCU, NULL); if (!net->ct.nf_conntrack_cachep) { printk(KERN_ERR "Unable to create nf_conn slab cache\n"); ret = -ENOMEM; goto err_cache; } net->ct.htable_size = nf_conntrack_htable_size; net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1); if (!net->ct.hash) { ret = -ENOMEM; printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); goto err_hash; } ret = nf_conntrack_expect_init(net); if (ret < 0) goto err_expect; ret = nf_conntrack_acct_init(net); if (ret < 0) goto err_acct; ret = nf_conntrack_tstamp_init(net); if (ret < 0) goto err_tstamp; ret = nf_conntrack_ecache_init(net); if (ret < 0) goto err_ecache; ret = nf_conntrack_timeout_init(net); if (ret < 0) goto err_timeout; return 0; err_timeout: nf_conntrack_ecache_fini(net); err_ecache: nf_conntrack_tstamp_fini(net); err_tstamp: nf_conntrack_acct_fini(net); err_acct: nf_conntrack_expect_fini(net); err_expect: nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); err_hash: kmem_cache_destroy(net->ct.nf_conntrack_cachep); err_cache: kfree(net->ct.slabname); err_slabname: free_percpu(net->ct.stat); err_stat: return ret; } s16 (*nf_ct_nat_offset)(const struct nf_conn *ct, enum ip_conntrack_dir dir, u32 seq); EXPORT_SYMBOL_GPL(nf_ct_nat_offset); int nf_conntrack_init(struct net *net) { int ret; if (net_eq(net, &init_net)) { ret = nf_conntrack_init_init_net(); if (ret < 0) goto out_init_net; } ret = nf_conntrack_init_net(net); if (ret < 0) goto out_net; if (net_eq(net, &init_net)) { /* For use by REJECT target */ RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach); RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack); /* Howto get NAT offsets */ RCU_INIT_POINTER(nf_ct_nat_offset, NULL); } return 0; out_net: if (net_eq(net, &init_net)) nf_conntrack_cleanup_init_net(); out_init_net: return ret; }
gpl-2.0
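The nf_conntrack_set_hashsize() path in the record above follows a common resize pattern: allocate the new table first, move every entry into its new bucket while holding the lock, swap the table pointers, and only then free the old array. A minimal userspace sketch of that pattern with a toy singly-linked hash table (the toy_resize name and modulo hash are illustrative only; the kernel version uses hlist_nulls buckets and nf_conntrack_lock):

#include <stdio.h>
#include <stdlib.h>

struct node { unsigned int key; struct node *next; };

/* Toy resize: allocate new buckets, rehash every entry, then free the old
 * array.  The kernel does the rehash step under nf_conntrack_lock and swaps
 * the table pointer before freeing. */
static struct node **toy_resize(struct node **old, size_t old_size,
                                size_t new_size)
{
    struct node **tab = calloc(new_size, sizeof(*tab));
    size_t i;

    if (!tab)
        return old;                        /* keep the old table on failure */

    for (i = 0; i < old_size; i++) {
        while (old[i]) {
            struct node *n = old[i];

            old[i] = n->next;              /* unlink from the old bucket */
            n->next = tab[n->key % new_size];
            tab[n->key % new_size] = n;    /* push onto the new bucket */
        }
    }
    free(old);
    return tab;
}

int main(void)
{
    size_t size = 4;
    struct node **tab = calloc(size, sizeof(*tab));
    unsigned int k;

    for (k = 0; k < 10; k++) {
        struct node *n = malloc(sizeof(*n));

        n->key = k;
        n->next = tab[k % size];
        tab[k % size] = n;
    }
    tab = toy_resize(tab, size, 16);
    printf("key 7 now hashes to bucket %u\n", 7u % 16u);
    return 0;
}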
pgavin/or1k-src
readline/tilde.c
380
13192
/* tilde.c -- Tilde expansion code (~/foo := $HOME/foo). */ /* Copyright (C) 1988-2009 Free Software Foundation, Inc. This file is part of the GNU Readline Library (Readline), a library for reading lines of text with interactive input and history editing. Readline is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Readline is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Readline. If not, see <http://www.gnu.org/licenses/>. */ #if defined (HAVE_CONFIG_H) # include <config.h> #endif #if defined (HAVE_UNISTD_H) # ifdef _MINIX # include <sys/types.h> # endif # include <unistd.h> #endif #if defined (HAVE_STRING_H) # include <string.h> #else /* !HAVE_STRING_H */ # include <strings.h> #endif /* !HAVE_STRING_H */ #if defined (HAVE_STDLIB_H) # include <stdlib.h> #else # include "ansi_stdlib.h" #endif /* HAVE_STDLIB_H */ #include <sys/types.h> #if defined (HAVE_PWD_H) #include <pwd.h> #endif #include "tilde.h" #if defined (TEST) || defined (STATIC_MALLOC) static void *xmalloc (), *xrealloc (); #else # include "xmalloc.h" #endif /* TEST || STATIC_MALLOC */ #if !defined (HAVE_GETPW_DECLS) # if defined (HAVE_GETPWUID) extern struct passwd *getpwuid PARAMS((uid_t)); # endif # if defined (HAVE_GETPWNAM) extern struct passwd *getpwnam PARAMS((const char *)); # endif #endif /* !HAVE_GETPW_DECLS */ #if !defined (savestring) #define savestring(x) strcpy ((char *)xmalloc (1 + strlen (x)), (x)) #endif /* !savestring */ #if !defined (NULL) # if defined (__STDC__) # define NULL ((void *) 0) # else # define NULL 0x0 # endif /* !__STDC__ */ #endif /* !NULL */ /* If being compiled as part of bash, these will be satisfied from variables.o. If being compiled as part of readline, they will be satisfied from shell.o. */ extern char *sh_get_home_dir PARAMS((void)); extern char *sh_get_env_value PARAMS((const char *)); /* The default value of tilde_additional_prefixes. This is set to whitespace preceding a tilde so that simple programs which do not perform any word separation get desired behaviour. */ static const char *default_prefixes[] = { " ~", "\t~", (const char *)NULL }; /* The default value of tilde_additional_suffixes. This is set to whitespace or newline so that simple programs which do not perform any word separation get desired behaviour. */ static const char *default_suffixes[] = { " ", "\n", (const char *)NULL }; /* If non-null, this contains the address of a function that the application wants called before trying the standard tilde expansions. The function is called with the text sans tilde, and returns a malloc()'ed string which is the expansion, or a NULL pointer if the expansion fails. */ tilde_hook_func_t *tilde_expansion_preexpansion_hook = (tilde_hook_func_t *)NULL; /* If non-null, this contains the address of a function to call if the standard meaning for expanding a tilde fails. The function is called with the text (sans tilde, as in "foo"), and returns a malloc()'ed string which is the expansion, or a NULL pointer if there is no expansion. 
*/ tilde_hook_func_t *tilde_expansion_failure_hook = (tilde_hook_func_t *)NULL; /* When non-null, this is a NULL terminated array of strings which are duplicates for a tilde prefix. Bash uses this to expand `=~' and `:~'. */ char **tilde_additional_prefixes = (char **)default_prefixes; /* When non-null, this is a NULL terminated array of strings which match the end of a username, instead of just "/". Bash sets this to `:' and `=~'. */ char **tilde_additional_suffixes = (char **)default_suffixes; static int tilde_find_prefix PARAMS((const char *, int *)); static int tilde_find_suffix PARAMS((const char *)); static char *isolate_tilde_prefix PARAMS((const char *, int *)); static char *glue_prefix_and_suffix PARAMS((char *, const char *, int)); /* Find the start of a tilde expansion in STRING, and return the index of the tilde which starts the expansion. Place the length of the text which identified this tilde starter in LEN, excluding the tilde itself. */ static int tilde_find_prefix (string, len) const char *string; int *len; { register int i, j, string_len; register char **prefixes; prefixes = tilde_additional_prefixes; string_len = strlen (string); *len = 0; if (*string == '\0' || *string == '~') return (0); if (prefixes) { for (i = 0; i < string_len; i++) { for (j = 0; prefixes[j]; j++) { if (strncmp (string + i, prefixes[j], strlen (prefixes[j])) == 0) { *len = strlen (prefixes[j]) - 1; return (i + *len); } } } } return (string_len); } /* Find the end of a tilde expansion in STRING, and return the index of the character which ends the tilde definition. */ static int tilde_find_suffix (string) const char *string; { register int i, j, string_len; register char **suffixes; suffixes = tilde_additional_suffixes; string_len = strlen (string); for (i = 0; i < string_len; i++) { #if defined (__MSDOS__) if (string[i] == '/' || string[i] == '\\' /* || !string[i] */) #else if (string[i] == '/' /* || !string[i] */) #endif break; for (j = 0; suffixes && suffixes[j]; j++) { if (strncmp (string + i, suffixes[j], strlen (suffixes[j])) == 0) return (i); } } return (i); } /* Return a new string which is the result of tilde expanding STRING. */ char * tilde_expand (string) const char *string; { char *result; int result_size, result_index; result_index = result_size = 0; if (result = strchr (string, '~')) result = (char *)xmalloc (result_size = (strlen (string) + 16)); else result = (char *)xmalloc (result_size = (strlen (string) + 1)); /* Scan through STRING expanding tildes as we come to them. */ while (1) { register int start, end; char *tilde_word, *expansion; int len; /* Make START point to the tilde which starts the expansion. */ start = tilde_find_prefix (string, &len); /* Copy the skipped text into the result. */ if ((result_index + start + 1) > result_size) result = (char *)xrealloc (result, 1 + (result_size += (start + 20))); strncpy (result + result_index, string, start); result_index += start; /* Advance STRING to the starting tilde. */ string += start; /* Make END be the index of one after the last character of the username. */ end = tilde_find_suffix (string); /* If both START and END are zero, we are all done. */ if (!start && !end) break; /* Expand the entire tilde word, and copy it into RESULT. 
*/ tilde_word = (char *)xmalloc (1 + end); strncpy (tilde_word, string, end); tilde_word[end] = '\0'; string += end; expansion = tilde_expand_word (tilde_word); xfree (tilde_word); len = strlen (expansion); #ifdef __CYGWIN__ /* Fix for Cygwin to prevent ~user/xxx from expanding to //xxx when $HOME for `user' is /. On cygwin, // denotes a network drive. */ if (len > 1 || *expansion != '/' || *string != '/') #endif { if ((result_index + len + 1) > result_size) result = (char *)xrealloc (result, 1 + (result_size += (len + 20))); strcpy (result + result_index, expansion); result_index += len; } xfree (expansion); } result[result_index] = '\0'; return (result); } /* Take FNAME and return the tilde prefix we want expanded. If LENP is non-null, the index of the end of the prefix into FNAME is returned in the location it points to. */ static char * isolate_tilde_prefix (fname, lenp) const char *fname; int *lenp; { char *ret; int i; ret = (char *)xmalloc (strlen (fname)); #if defined (__MSDOS__) for (i = 1; fname[i] && fname[i] != '/' && fname[i] != '\\'; i++) #else for (i = 1; fname[i] && fname[i] != '/'; i++) #endif ret[i - 1] = fname[i]; ret[i - 1] = '\0'; if (lenp) *lenp = i; return ret; } #if 0 /* Public function to scan a string (FNAME) beginning with a tilde and find the portion of the string that should be passed to the tilde expansion function. Right now, it just calls tilde_find_suffix and allocates new memory, but it can be expanded to do different things later. */ char * tilde_find_word (fname, flags, lenp) const char *fname; int flags, *lenp; { int x; char *r; x = tilde_find_suffix (fname); if (x == 0) { r = savestring (fname); if (lenp) *lenp = 0; } else { r = (char *)xmalloc (1 + x); strncpy (r, fname, x); r[x] = '\0'; if (lenp) *lenp = x; } return r; } #endif /* Return a string that is PREFIX concatenated with SUFFIX starting at SUFFIND. */ static char * glue_prefix_and_suffix (prefix, suffix, suffind) char *prefix; const char *suffix; int suffind; { char *ret; int plen, slen; plen = (prefix && *prefix) ? strlen (prefix) : 0; slen = strlen (suffix + suffind); ret = (char *)xmalloc (plen + slen + 1); if (plen) strcpy (ret, prefix); strcpy (ret + plen, suffix + suffind); return ret; } /* Do the work of tilde expansion on FILENAME. FILENAME starts with a tilde. If there is no expansion, call tilde_expansion_failure_hook. This always returns a newly-allocated string, never static storage. */ char * tilde_expand_word (filename) const char *filename; { char *dirname, *expansion, *username; int user_len; struct passwd *user_entry; if (filename == 0) return ((char *)NULL); if (*filename != '~') return (savestring (filename)); /* A leading `~/' or a bare `~' is *always* translated to the value of $HOME or the home directory of the current user, regardless of any preexpansion hook. */ if (filename[1] == '\0' || filename[1] == '/') { /* Prefix $HOME to the rest of the string. */ expansion = sh_get_env_value ("HOME"); /* If there is no HOME variable, look up the directory in the password database. */ if (expansion == 0) expansion = sh_get_home_dir (); return (glue_prefix_and_suffix (expansion, filename, 1)); } username = isolate_tilde_prefix (filename, &user_len); if (tilde_expansion_preexpansion_hook) { expansion = (*tilde_expansion_preexpansion_hook) (username); if (expansion) { dirname = glue_prefix_and_suffix (expansion, filename, user_len); xfree (username); xfree (expansion); return (dirname); } } /* No preexpansion hook, or the preexpansion hook failed. Look in the password database. 
*/ dirname = (char *)NULL; #if defined (HAVE_GETPWNAM) user_entry = getpwnam (username); #else user_entry = 0; #endif if (user_entry == 0) { /* If the calling program has a special syntax for expanding tildes, and we couldn't find a standard expansion, then let them try. */ if (tilde_expansion_failure_hook) { expansion = (*tilde_expansion_failure_hook) (username); if (expansion) { dirname = glue_prefix_and_suffix (expansion, filename, user_len); xfree (expansion); } } /* If we don't have a failure hook, or if the failure hook did not expand the tilde, return a copy of what we were passed. */ if (dirname == 0) dirname = savestring (filename); } #if defined (HAVE_GETPWENT) else dirname = glue_prefix_and_suffix (user_entry->pw_dir, filename, user_len); #endif xfree (username); #if defined (HAVE_GETPWENT) endpwent (); #endif return (dirname); } #if defined (TEST) #undef NULL #include <stdio.h> main (argc, argv) int argc; char **argv; { char *result, line[512]; int done = 0; while (!done) { printf ("~expand: "); fflush (stdout); if (!gets (line)) strcpy (line, "done"); if ((strcmp (line, "done") == 0) || (strcmp (line, "quit") == 0) || (strcmp (line, "exit") == 0)) { done = 1; break; } result = tilde_expand (line); printf (" --> %s\n", result); free (result); } exit (0); } static void memory_error_and_abort (); static void * xmalloc (bytes) size_t bytes; { void *temp = (char *)malloc (bytes); if (!temp) memory_error_and_abort (); return (temp); } static void * xrealloc (pointer, bytes) void *pointer; int bytes; { void *temp; if (!pointer) temp = malloc (bytes); else temp = realloc (pointer, bytes); if (!temp) memory_error_and_abort (); return (temp); } static void memory_error_and_abort () { fprintf (stderr, "readline: out of virtual memory\n"); abort (); } /* * Local variables: * compile-command: "gcc -g -DTEST -o tilde tilde.c" * end: */ #endif /* TEST */
gpl-2.0
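tilde.c above exposes a small public API: tilde_expand() scans a whole string and expands each tilde word, tilde_expand_word() handles a single "~user" token, and the pre-/failure-expansion hook pointers let the host application override the password-database lookup. A hedged usage sketch follows; the my_pre_hook name and the "/srv/build" path are made up for illustration, and when tilde.c is built outside bash/readline the host must also provide sh_get_env_value() and sh_get_home_dir(), as the file's comments note:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "tilde.h"

/* Pre-expansion hook: resolve "~build" without touching the passwd database.
 * A hook must return a malloc()'ed string, or NULL to fall through. */
static char *my_pre_hook(char *name)
{
    if (strcmp(name, "build") == 0)
        return strdup("/srv/build");
    return NULL;
}

int main(void)
{
    char *r;

    tilde_expansion_preexpansion_hook = my_pre_hook;

    r = tilde_expand("~build/output and ~/notes.txt");
    printf("%s\n", r);    /* e.g. /srv/build/output and $HOME/notes.txt */
    free(r);
    return 0;
}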
walter79/jordan-kernel
drivers/mmc/host/mvsdio.c
636
25616
/* * Marvell MMC/SD/SDIO driver * * Authors: Maen Suleiman, Nicolas Pitre * Copyright (C) 2008-2009 Marvell Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/mbus.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/mmc/host.h> #include <asm/sizes.h> #include <asm/unaligned.h> #include <plat/mvsdio.h> #include "mvsdio.h" #define DRIVER_NAME "mvsdio" static int maxfreq = MVSD_CLOCKRATE_MAX; static int nodma; struct mvsd_host { void __iomem *base; struct mmc_request *mrq; spinlock_t lock; unsigned int xfer_mode; unsigned int intr_en; unsigned int ctrl; unsigned int pio_size; void *pio_ptr; unsigned int sg_frags; unsigned int ns_per_clk; unsigned int clock; unsigned int base_clock; struct timer_list timer; struct mmc_host *mmc; struct device *dev; struct resource *res; int irq; int gpio_card_detect; int gpio_write_protect; }; #define mvsd_write(offs, val) writel(val, iobase + (offs)) #define mvsd_read(offs) readl(iobase + (offs)) static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data) { void __iomem *iobase = host->base; unsigned int tmout; int tmout_index; /* * Hardware weirdness. The FIFO_EMPTY bit of the HW_STATE * register is sometimes not set before a while when some * "unusual" data block sizes are used (such as with the SWITCH * command), even despite the fact that the XFER_DONE interrupt * was raised. And if another data transfer starts before * this bit comes to good sense (which eventually happens by * itself) then the new transfer simply fails with a timeout. */ if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) { unsigned long t = jiffies + HZ; unsigned int hw_state, count = 0; do { if (time_after(jiffies, t)) { dev_warn(host->dev, "FIFO_EMPTY bit missing\n"); break; } hw_state = mvsd_read(MVSD_HW_STATE); count++; } while (!(hw_state & (1 << 13))); dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit " "(hw=0x%04x, count=%d, jiffies=%ld)\n", hw_state, count, jiffies - (t - HZ)); } /* If timeout=0 then maximum timeout index is used. */ tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk); tmout += data->timeout_clks; tmout_index = fls(tmout - 1) - 12; if (tmout_index < 0) tmout_index = 0; if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX) tmout_index = MVSD_HOST_CTRL_TMOUT_MAX; dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u (%d)\n", (data->flags & MMC_DATA_READ) ? "read" : "write", (u32)sg_virt(data->sg), data->blocks, data->blksz, tmout, tmout_index); host->ctrl &= ~MVSD_HOST_CTRL_TMOUT_MASK; host->ctrl |= MVSD_HOST_CTRL_TMOUT(tmout_index); mvsd_write(MVSD_HOST_CTRL, host->ctrl); mvsd_write(MVSD_BLK_COUNT, data->blocks); mvsd_write(MVSD_BLK_SIZE, data->blksz); if (nodma || (data->blksz | data->sg->offset) & 3) { /* * We cannot do DMA on a buffer which offset or size * is not aligned on a 4-byte boundary. */ host->pio_size = data->blocks * data->blksz; host->pio_ptr = sg_virt(data->sg); if (!nodma) printk(KERN_DEBUG "%s: fallback to PIO for data " "at 0x%p size %d\n", mmc_hostname(host->mmc), host->pio_ptr, host->pio_size); return 1; } else { dma_addr_t phys_addr; int dma_dir = (data->flags & MMC_DATA_READ) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, dma_dir); phys_addr = sg_dma_address(data->sg); mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff); mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16); return 0; } } static void mvsd_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct mvsd_host *host = mmc_priv(mmc); void __iomem *iobase = host->base; struct mmc_command *cmd = mrq->cmd; u32 cmdreg = 0, xfer = 0, intr = 0; unsigned long flags; BUG_ON(host->mrq != NULL); host->mrq = mrq; dev_dbg(host->dev, "cmd %d (hw state 0x%04x)\n", cmd->opcode, mvsd_read(MVSD_HW_STATE)); cmdreg = MVSD_CMD_INDEX(cmd->opcode); if (cmd->flags & MMC_RSP_BUSY) cmdreg |= MVSD_CMD_RSP_48BUSY; else if (cmd->flags & MMC_RSP_136) cmdreg |= MVSD_CMD_RSP_136; else if (cmd->flags & MMC_RSP_PRESENT) cmdreg |= MVSD_CMD_RSP_48; else cmdreg |= MVSD_CMD_RSP_NONE; if (cmd->flags & MMC_RSP_CRC) cmdreg |= MVSD_CMD_CHECK_CMDCRC; if (cmd->flags & MMC_RSP_OPCODE) cmdreg |= MVSD_CMD_INDX_CHECK; if (cmd->flags & MMC_RSP_PRESENT) { cmdreg |= MVSD_UNEXPECTED_RESP; intr |= MVSD_NOR_UNEXP_RSP; } if (mrq->data) { struct mmc_data *data = mrq->data; int pio; cmdreg |= MVSD_CMD_DATA_PRESENT | MVSD_CMD_CHECK_DATACRC16; xfer |= MVSD_XFER_MODE_HW_WR_DATA_EN; if (data->flags & MMC_DATA_READ) xfer |= MVSD_XFER_MODE_TO_HOST; pio = mvsd_setup_data(host, data); if (pio) { xfer |= MVSD_XFER_MODE_PIO; /* PIO section of mvsd_irq has comments on those bits */ if (data->flags & MMC_DATA_WRITE) intr |= MVSD_NOR_TX_AVAIL; else if (host->pio_size > 32) intr |= MVSD_NOR_RX_FIFO_8W; else intr |= MVSD_NOR_RX_READY; } if (data->stop) { struct mmc_command *stop = data->stop; u32 cmd12reg = 0; mvsd_write(MVSD_AUTOCMD12_ARG_LOW, stop->arg & 0xffff); mvsd_write(MVSD_AUTOCMD12_ARG_HI, stop->arg >> 16); if (stop->flags & MMC_RSP_BUSY) cmd12reg |= MVSD_AUTOCMD12_BUSY; if (stop->flags & MMC_RSP_OPCODE) cmd12reg |= MVSD_AUTOCMD12_INDX_CHECK; cmd12reg |= MVSD_AUTOCMD12_INDEX(stop->opcode); mvsd_write(MVSD_AUTOCMD12_CMD, cmd12reg); xfer |= MVSD_XFER_MODE_AUTO_CMD12; intr |= MVSD_NOR_AUTOCMD12_DONE; } else { intr |= MVSD_NOR_XFER_DONE; } } else { intr |= MVSD_NOR_CMD_DONE; } mvsd_write(MVSD_ARG_LOW, cmd->arg & 0xffff); mvsd_write(MVSD_ARG_HI, cmd->arg >> 16); spin_lock_irqsave(&host->lock, flags); host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN; host->xfer_mode |= xfer; mvsd_write(MVSD_XFER_MODE, host->xfer_mode); mvsd_write(MVSD_NOR_INTR_STATUS, ~MVSD_NOR_CARD_INT); mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); mvsd_write(MVSD_CMD, cmdreg); host->intr_en &= MVSD_NOR_CARD_INT; host->intr_en |= intr | MVSD_NOR_ERROR; mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_write(MVSD_ERR_INTR_EN, 0xffff); mod_timer(&host->timer, jiffies + 5 * HZ); spin_unlock_irqrestore(&host->lock, flags); } static u32 mvsd_finish_cmd(struct mvsd_host *host, struct mmc_command *cmd, u32 err_status) { void __iomem *iobase = host->base; if (cmd->flags & MMC_RSP_136) { unsigned int response[8], i; for (i = 0; i < 8; i++) response[i] = mvsd_read(MVSD_RSP(i)); cmd->resp[0] = ((response[0] & 0x03ff) << 22) | ((response[1] & 0xffff) << 6) | ((response[2] & 0xfc00) >> 10); cmd->resp[1] = ((response[2] & 0x03ff) << 22) | ((response[3] & 0xffff) << 6) | ((response[4] & 0xfc00) >> 10); cmd->resp[2] = ((response[4] & 0x03ff) << 22) | ((response[5] & 0xffff) << 6) | ((response[6] & 0xfc00) >> 10); cmd->resp[3] = ((response[6] & 0x03ff) << 22) | ((response[7] & 0x3fff) << 8); } else if (cmd->flags & MMC_RSP_PRESENT) { unsigned int response[3], 
i; for (i = 0; i < 3; i++) response[i] = mvsd_read(MVSD_RSP(i)); cmd->resp[0] = ((response[2] & 0x003f) << (8 - 8)) | ((response[1] & 0xffff) << (14 - 8)) | ((response[0] & 0x03ff) << (30 - 8)); cmd->resp[1] = ((response[0] & 0xfc00) >> 10); cmd->resp[2] = 0; cmd->resp[3] = 0; } if (err_status & MVSD_ERR_CMD_TIMEOUT) { cmd->error = -ETIMEDOUT; } else if (err_status & (MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT)) { cmd->error = -EILSEQ; } err_status &= ~(MVSD_ERR_CMD_TIMEOUT | MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT); return err_status; } static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data, u32 err_status) { void __iomem *iobase = host->base; if (host->pio_ptr) { host->pio_ptr = NULL; host->pio_size = 0; } else { dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags, (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } if (err_status & MVSD_ERR_DATA_TIMEOUT) data->error = -ETIMEDOUT; else if (err_status & (MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT)) data->error = -EILSEQ; else if (err_status & MVSD_ERR_XFER_SIZE) data->error = -EBADE; err_status &= ~(MVSD_ERR_DATA_TIMEOUT | MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT | MVSD_ERR_XFER_SIZE); dev_dbg(host->dev, "data done: blocks_left=%d, bytes_left=%d\n", mvsd_read(MVSD_CURR_BLK_LEFT), mvsd_read(MVSD_CURR_BYTE_LEFT)); data->bytes_xfered = (data->blocks - mvsd_read(MVSD_CURR_BLK_LEFT)) * data->blksz; /* We can't be sure about the last block when errors are detected */ if (data->bytes_xfered && data->error) data->bytes_xfered -= data->blksz; /* Handle Auto cmd 12 response */ if (data->stop) { unsigned int response[3], i; for (i = 0; i < 3; i++) response[i] = mvsd_read(MVSD_AUTO_RSP(i)); data->stop->resp[0] = ((response[2] & 0x003f) << (8 - 8)) | ((response[1] & 0xffff) << (14 - 8)) | ((response[0] & 0x03ff) << (30 - 8)); data->stop->resp[1] = ((response[0] & 0xfc00) >> 10); data->stop->resp[2] = 0; data->stop->resp[3] = 0; if (err_status & MVSD_ERR_AUTOCMD12) { u32 err_cmd12 = mvsd_read(MVSD_AUTOCMD12_ERR_STATUS); dev_dbg(host->dev, "c12err 0x%04x\n", err_cmd12); if (err_cmd12 & MVSD_AUTOCMD12_ERR_NOTEXE) data->stop->error = -ENOEXEC; else if (err_cmd12 & MVSD_AUTOCMD12_ERR_TIMEOUT) data->stop->error = -ETIMEDOUT; else if (err_cmd12) data->stop->error = -EILSEQ; err_status &= ~MVSD_ERR_AUTOCMD12; } } return err_status; } static irqreturn_t mvsd_irq(int irq, void *dev) { struct mvsd_host *host = dev; void __iomem *iobase = host->base; u32 intr_status, intr_done_mask; int irq_handled = 0; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n", intr_status, mvsd_read(MVSD_NOR_INTR_EN), mvsd_read(MVSD_HW_STATE)); spin_lock(&host->lock); /* PIO handling, if needed. Messy business... */ if (host->pio_size && (intr_status & host->intr_en & (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) { u16 *p = host->pio_ptr; int s = host->pio_size; while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) { readsw(iobase + MVSD_FIFO, p, 16); p += 16; s -= 32; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } /* * Normally we'd use < 32 here, but the RX_FIFO_8W bit * doesn't appear to assert when there is exactly 32 bytes * (8 words) left to fetch in a transfer. 
*/ if (s <= 32) { while (s >= 4 && (intr_status & MVSD_NOR_RX_READY)) { put_unaligned(mvsd_read(MVSD_FIFO), p++); put_unaligned(mvsd_read(MVSD_FIFO), p++); s -= 4; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) { u16 val[2] = {0, 0}; val[0] = mvsd_read(MVSD_FIFO); val[1] = mvsd_read(MVSD_FIFO); memcpy(p, ((void *)&val) + 4 - s, s); s = 0; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s == 0) { host->intr_en &= ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W); mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); } else if (host->intr_en & MVSD_NOR_RX_FIFO_8W) { host->intr_en &= ~MVSD_NOR_RX_FIFO_8W; host->intr_en |= MVSD_NOR_RX_READY; mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); } } dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n", s, intr_status, mvsd_read(MVSD_HW_STATE)); host->pio_ptr = p; host->pio_size = s; irq_handled = 1; } else if (host->pio_size && (intr_status & host->intr_en & (MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) { u16 *p = host->pio_ptr; int s = host->pio_size; /* * The TX_FIFO_8W bit is unreliable. When set, bursting * 16 halfwords all at once in the FIFO drops data. Actually * TX_AVAIL does go off after only one word is pushed even if * TX_FIFO_8W remains set. */ while (s >= 4 && (intr_status & MVSD_NOR_TX_AVAIL)) { mvsd_write(MVSD_FIFO, get_unaligned(p++)); mvsd_write(MVSD_FIFO, get_unaligned(p++)); s -= 4; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s < 4) { if (s && (intr_status & MVSD_NOR_TX_AVAIL)) { u16 val[2] = {0, 0}; memcpy(((void *)&val) + 4 - s, p, s); mvsd_write(MVSD_FIFO, val[0]); mvsd_write(MVSD_FIFO, val[1]); s = 0; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s == 0) { host->intr_en &= ~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W); mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); } } dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n", s, intr_status, mvsd_read(MVSD_HW_STATE)); host->pio_ptr = p; host->pio_size = s; irq_handled = 1; } mvsd_write(MVSD_NOR_INTR_STATUS, intr_status); intr_done_mask = MVSD_NOR_CARD_INT | MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W | MVSD_NOR_TX_FIFO_8W; if (intr_status & host->intr_en & ~intr_done_mask) { struct mmc_request *mrq = host->mrq; struct mmc_command *cmd = mrq->cmd; u32 err_status = 0; del_timer(&host->timer); host->mrq = NULL; host->intr_en &= MVSD_NOR_CARD_INT; mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_write(MVSD_ERR_INTR_EN, 0); spin_unlock(&host->lock); if (intr_status & MVSD_NOR_UNEXP_RSP) { cmd->error = -EPROTO; } else if (intr_status & MVSD_NOR_ERROR) { err_status = mvsd_read(MVSD_ERR_INTR_STATUS); dev_dbg(host->dev, "err 0x%04x\n", err_status); } err_status = mvsd_finish_cmd(host, cmd, err_status); if (mrq->data) err_status = mvsd_finish_data(host, mrq->data, err_status); if (err_status) { printk(KERN_ERR "%s: unhandled error status %#04x\n", mmc_hostname(host->mmc), err_status); cmd->error = -ENOMSG; } mmc_request_done(host->mmc, mrq); irq_handled = 1; } else spin_unlock(&host->lock); if (intr_status & MVSD_NOR_CARD_INT) { mmc_signal_sdio_irq(host->mmc); irq_handled = 1; } if (irq_handled) return IRQ_HANDLED; printk(KERN_ERR "%s: unhandled interrupt status=0x%04x en=0x%04x " "pio=%d\n", mmc_hostname(host->mmc), intr_status, host->intr_en, host->pio_size); return IRQ_NONE; } static void mvsd_timeout_timer(unsigned long data) { struct mvsd_host *host = (struct mvsd_host *)data; void __iomem *iobase = host->base; struct mmc_request *mrq; unsigned long flags; spin_lock_irqsave(&host->lock, flags); mrq = host->mrq; if (mrq) { 
printk(KERN_ERR "%s: Timeout waiting for hardware interrupt.\n", mmc_hostname(host->mmc)); printk(KERN_ERR "%s: hw_state=0x%04x, intr_status=0x%04x " "intr_en=0x%04x\n", mmc_hostname(host->mmc), mvsd_read(MVSD_HW_STATE), mvsd_read(MVSD_NOR_INTR_STATUS), mvsd_read(MVSD_NOR_INTR_EN)); host->mrq = NULL; mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW); host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN; mvsd_write(MVSD_XFER_MODE, host->xfer_mode); host->intr_en &= MVSD_NOR_CARD_INT; mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_write(MVSD_ERR_INTR_EN, 0); mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); mrq->cmd->error = -ETIMEDOUT; mvsd_finish_cmd(host, mrq->cmd, 0); if (mrq->data) { mrq->data->error = -ETIMEDOUT; mvsd_finish_data(host, mrq->data, 0); } } spin_unlock_irqrestore(&host->lock, flags); if (mrq) mmc_request_done(host->mmc, mrq); } static irqreturn_t mvsd_card_detect_irq(int irq, void *dev) { struct mvsd_host *host = dev; mmc_detect_change(host->mmc, msecs_to_jiffies(100)); return IRQ_HANDLED; } static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct mvsd_host *host = mmc_priv(mmc); void __iomem *iobase = host->base; unsigned long flags; spin_lock_irqsave(&host->lock, flags); if (enable) { host->xfer_mode |= MVSD_XFER_MODE_INT_CHK_EN; host->intr_en |= MVSD_NOR_CARD_INT; } else { host->xfer_mode &= ~MVSD_XFER_MODE_INT_CHK_EN; host->intr_en &= ~MVSD_NOR_CARD_INT; } mvsd_write(MVSD_XFER_MODE, host->xfer_mode); mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); spin_unlock_irqrestore(&host->lock, flags); } static int mvsd_get_ro(struct mmc_host *mmc) { struct mvsd_host *host = mmc_priv(mmc); if (host->gpio_write_protect) return gpio_get_value(host->gpio_write_protect); /* * Board doesn't support read only detection; let the mmc core * decide what to do. 
*/ return -ENOSYS; } static void mvsd_power_up(struct mvsd_host *host) { void __iomem *iobase = host->base; dev_dbg(host->dev, "power up\n"); mvsd_write(MVSD_NOR_INTR_EN, 0); mvsd_write(MVSD_ERR_INTR_EN, 0); mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW); mvsd_write(MVSD_XFER_MODE, 0); mvsd_write(MVSD_NOR_STATUS_EN, 0xffff); mvsd_write(MVSD_ERR_STATUS_EN, 0xffff); mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff); mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); } static void mvsd_power_down(struct mvsd_host *host) { void __iomem *iobase = host->base; dev_dbg(host->dev, "power down\n"); mvsd_write(MVSD_NOR_INTR_EN, 0); mvsd_write(MVSD_ERR_INTR_EN, 0); mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW); mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK); mvsd_write(MVSD_NOR_STATUS_EN, 0); mvsd_write(MVSD_ERR_STATUS_EN, 0); mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff); mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); } static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct mvsd_host *host = mmc_priv(mmc); void __iomem *iobase = host->base; u32 ctrl_reg = 0; if (ios->power_mode == MMC_POWER_UP) mvsd_power_up(host); if (ios->clock == 0) { mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK); mvsd_write(MVSD_CLK_DIV, MVSD_BASE_DIV_MAX); host->clock = 0; dev_dbg(host->dev, "clock off\n"); } else if (ios->clock != host->clock) { u32 m = DIV_ROUND_UP(host->base_clock, ios->clock) - 1; if (m > MVSD_BASE_DIV_MAX) m = MVSD_BASE_DIV_MAX; mvsd_write(MVSD_CLK_DIV, m); host->clock = ios->clock; host->ns_per_clk = 1000000000 / (host->base_clock / (m+1)); dev_dbg(host->dev, "clock=%d (%d), div=0x%04x\n", ios->clock, host->base_clock / (m+1), m); } /* default transfer mode */ ctrl_reg |= MVSD_HOST_CTRL_BIG_ENDIAN; ctrl_reg &= ~MVSD_HOST_CTRL_LSB_FIRST; /* default to maximum timeout */ ctrl_reg |= MVSD_HOST_CTRL_TMOUT_MASK; ctrl_reg |= MVSD_HOST_CTRL_TMOUT_EN; if (ios->bus_mode == MMC_BUSMODE_PUSHPULL) ctrl_reg |= MVSD_HOST_CTRL_PUSH_PULL_EN; if (ios->bus_width == MMC_BUS_WIDTH_4) ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS; /* * The HI_SPEED_EN bit is causing trouble with many (but not all) * high speed SD, SDHC and SDIO cards. Not enabling that bit * makes all cards work. So let's just ignore that bit for now * and revisit this issue if problems for not enabling this bit * are ever reported. */ #if 0 if (ios->timing == MMC_TIMING_MMC_HS || ios->timing == MMC_TIMING_SD_HS) ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN; #endif host->ctrl = ctrl_reg; mvsd_write(MVSD_HOST_CTRL, ctrl_reg); dev_dbg(host->dev, "ctrl 0x%04x: %s %s %s\n", ctrl_reg, (ctrl_reg & MVSD_HOST_CTRL_PUSH_PULL_EN) ? "push-pull" : "open-drain", (ctrl_reg & MVSD_HOST_CTRL_DATA_WIDTH_4_BITS) ? "4bit-width" : "1bit-width", (ctrl_reg & MVSD_HOST_CTRL_HI_SPEED_EN) ? 
"high-speed" : ""); if (ios->power_mode == MMC_POWER_OFF) mvsd_power_down(host); } static const struct mmc_host_ops mvsd_ops = { .request = mvsd_request, .get_ro = mvsd_get_ro, .set_ios = mvsd_set_ios, .enable_sdio_irq = mvsd_enable_sdio_irq, }; static void __init mv_conf_mbus_windows(struct mvsd_host *host, struct mbus_dram_target_info *dram) { void __iomem *iobase = host->base; int i; for (i = 0; i < 4; i++) { writel(0, iobase + MVSD_WINDOW_CTRL(i)); writel(0, iobase + MVSD_WINDOW_BASE(i)); } for (i = 0; i < dram->num_cs; i++) { struct mbus_dram_window *cs = dram->cs + i; writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) | (dram->mbus_dram_target_id << 4) | 1, iobase + MVSD_WINDOW_CTRL(i)); writel(cs->base, iobase + MVSD_WINDOW_BASE(i)); } } static int __init mvsd_probe(struct platform_device *pdev) { struct mmc_host *mmc = NULL; struct mvsd_host *host = NULL; const struct mvsdio_platform_data *mvsd_data; struct resource *r; int ret, irq; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); mvsd_data = pdev->dev.platform_data; if (!r || irq < 0 || !mvsd_data) return -ENXIO; r = request_mem_region(r->start, SZ_1K, DRIVER_NAME); if (!r) return -EBUSY; mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto out; } host = mmc_priv(mmc); host->mmc = mmc; host->dev = &pdev->dev; host->res = r; host->base_clock = mvsd_data->clock / 2; mmc->ops = &mvsd_ops; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ | MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX); mmc->f_max = maxfreq; mmc->max_blk_size = 2048; mmc->max_blk_count = 65535; mmc->max_hw_segs = 1; mmc->max_phys_segs = 1; mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; spin_lock_init(&host->lock); host->base = ioremap(r->start, SZ_4K); if (!host->base) { ret = -ENOMEM; goto out; } /* (Re-)program MBUS remapping windows if we are asked to. 
*/ if (mvsd_data->dram != NULL) mv_conf_mbus_windows(host, mvsd_data->dram); mvsd_power_down(host); ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host); if (ret) { printk(KERN_ERR "%s: cannot assign irq %d\n", DRIVER_NAME, irq); goto out; } else host->irq = irq; if (mvsd_data->gpio_card_detect) { ret = gpio_request(mvsd_data->gpio_card_detect, DRIVER_NAME " cd"); if (ret == 0) { gpio_direction_input(mvsd_data->gpio_card_detect); irq = gpio_to_irq(mvsd_data->gpio_card_detect); ret = request_irq(irq, mvsd_card_detect_irq, IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING, DRIVER_NAME " cd", host); if (ret == 0) host->gpio_card_detect = mvsd_data->gpio_card_detect; else gpio_free(mvsd_data->gpio_card_detect); } } if (!host->gpio_card_detect) mmc->caps |= MMC_CAP_NEEDS_POLL; if (mvsd_data->gpio_write_protect) { ret = gpio_request(mvsd_data->gpio_write_protect, DRIVER_NAME " wp"); if (ret == 0) { gpio_direction_input(mvsd_data->gpio_write_protect); host->gpio_write_protect = mvsd_data->gpio_write_protect; } } setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host); platform_set_drvdata(pdev, mmc); ret = mmc_add_host(mmc); if (ret) goto out; printk(KERN_NOTICE "%s: %s driver initialized, ", mmc_hostname(mmc), DRIVER_NAME); if (host->gpio_card_detect) printk("using GPIO %d for card detection\n", host->gpio_card_detect); else printk("lacking card detect (fall back to polling)\n"); return 0; out: if (host) { if (host->irq) free_irq(host->irq, host); if (host->gpio_card_detect) { free_irq(gpio_to_irq(host->gpio_card_detect), host); gpio_free(host->gpio_card_detect); } if (host->gpio_write_protect) gpio_free(host->gpio_write_protect); if (host->base) iounmap(host->base); } if (r) release_resource(r); if (mmc) mmc_free_host(mmc); return ret; } static int __exit mvsd_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); if (mmc) { struct mvsd_host *host = mmc_priv(mmc); if (host->gpio_card_detect) { free_irq(gpio_to_irq(host->gpio_card_detect), host); gpio_free(host->gpio_card_detect); } mmc_remove_host(mmc); free_irq(host->irq, host); if (host->gpio_write_protect) gpio_free(host->gpio_write_protect); del_timer_sync(&host->timer); mvsd_power_down(host); iounmap(host->base); release_resource(host->res); mmc_free_host(mmc); } platform_set_drvdata(pdev, NULL); return 0; } #ifdef CONFIG_PM static int mvsd_suspend(struct platform_device *dev, pm_message_t state) { struct mmc_host *mmc = platform_get_drvdata(dev); int ret = 0; if (mmc) ret = mmc_suspend_host(mmc, state); return ret; } static int mvsd_resume(struct platform_device *dev) { struct mmc_host *mmc = platform_get_drvdata(dev); int ret = 0; if (mmc) ret = mmc_resume_host(mmc); return ret; } #else #define mvsd_suspend NULL #define mvsd_resume NULL #endif static struct platform_driver mvsd_driver = { .remove = __exit_p(mvsd_remove), .suspend = mvsd_suspend, .resume = mvsd_resume, .driver = { .name = DRIVER_NAME, }, }; static int __init mvsd_init(void) { return platform_driver_probe(&mvsd_driver, mvsd_probe); } static void __exit mvsd_exit(void) { platform_driver_unregister(&mvsd_driver); } module_init(mvsd_init); module_exit(mvsd_exit); /* maximum card clock frequency (default 50MHz) */ module_param(maxfreq, int, 0); /* force PIO transfers all the time */ module_param(nodma, int, 0); MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre"); MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:mvsdio");
gpl-2.0
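In mvsd_set_ios() above the card clock is derived from the controller's base clock with a simple divider: m = DIV_ROUND_UP(base_clock, requested) - 1, clamped to MVSD_BASE_DIV_MAX, and ns_per_clk is recomputed from the resulting rate. A small standalone sketch of that arithmetic (the 100 MHz base clock, 50 MHz request and the divider limit used here are example numbers, not values taken from the hardware header):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define BASE_DIV_MAX        0x3ff          /* illustrative limit for the sketch */

int main(void)
{
    unsigned int base_clock = 100000000;   /* example controller base clock, Hz */
    unsigned int requested  = 50000000;    /* clock asked for by the MMC core */

    unsigned int m = DIV_ROUND_UP(base_clock, requested) - 1;
    if (m > BASE_DIV_MAX)
        m = BASE_DIV_MAX;

    unsigned int actual     = base_clock / (m + 1);
    unsigned int ns_per_clk = 1000000000 / actual;

    printf("div=%u actual=%u Hz, %u ns per clock\n", m, actual, ns_per_clk);
    return 0;
}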
SK4G/android_kernel_samsung_sidekick4g
drivers/hwmon/ams/ams-i2c.c
636
6365
/* * Apple Motion Sensor driver (I2C variant) * * Copyright (C) 2005 Stelian Pop (stelian@popies.net) * Copyright (C) 2006 Michael Hanselmann (linux-kernel@hansmi.ch) * * Clean room implementation based on the reverse engineered Mac OS X driver by * Johannes Berg <johannes@sipsolutions.net>, documentation available at * http://johannes.sipsolutions.net/PowerBook/Apple_Motion_Sensor_Specification * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/delay.h> #include "ams.h" /* AMS registers */ #define AMS_COMMAND 0x00 /* command register */ #define AMS_STATUS 0x01 /* status register */ #define AMS_CTRL1 0x02 /* read control 1 (number of values) */ #define AMS_CTRL2 0x03 /* read control 2 (offset?) */ #define AMS_CTRL3 0x04 /* read control 3 (size of each value?) */ #define AMS_DATA1 0x05 /* read data 1 */ #define AMS_DATA2 0x06 /* read data 2 */ #define AMS_DATA3 0x07 /* read data 3 */ #define AMS_DATA4 0x08 /* read data 4 */ #define AMS_DATAX 0x20 /* data X */ #define AMS_DATAY 0x21 /* data Y */ #define AMS_DATAZ 0x22 /* data Z */ #define AMS_FREEFALL 0x24 /* freefall int control */ #define AMS_SHOCK 0x25 /* shock int control */ #define AMS_SENSLOW 0x26 /* sensitivity low limit */ #define AMS_SENSHIGH 0x27 /* sensitivity high limit */ #define AMS_CTRLX 0x28 /* control X */ #define AMS_CTRLY 0x29 /* control Y */ #define AMS_CTRLZ 0x2A /* control Z */ #define AMS_UNKNOWN1 0x2B /* unknown 1 */ #define AMS_UNKNOWN2 0x2C /* unknown 2 */ #define AMS_UNKNOWN3 0x2D /* unknown 3 */ #define AMS_VENDOR 0x2E /* vendor */ /* AMS commands - use with the AMS_COMMAND register */ enum ams_i2c_cmd { AMS_CMD_NOOP = 0, AMS_CMD_VERSION, AMS_CMD_READMEM, AMS_CMD_WRITEMEM, AMS_CMD_ERASEMEM, AMS_CMD_READEE, AMS_CMD_WRITEEE, AMS_CMD_RESET, AMS_CMD_START, }; static int ams_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id); static int ams_i2c_remove(struct i2c_client *client); static const struct i2c_device_id ams_id[] = { { "ams", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ams_id); static struct i2c_driver ams_i2c_driver = { .driver = { .name = "ams", .owner = THIS_MODULE, }, .probe = ams_i2c_probe, .remove = ams_i2c_remove, .id_table = ams_id, }; static s32 ams_i2c_read(u8 reg) { return i2c_smbus_read_byte_data(ams_info.i2c_client, reg); } static int ams_i2c_write(u8 reg, u8 value) { return i2c_smbus_write_byte_data(ams_info.i2c_client, reg, value); } static int ams_i2c_cmd(enum ams_i2c_cmd cmd) { s32 result; int count = 3; ams_i2c_write(AMS_COMMAND, cmd); msleep(5); while (count--) { result = ams_i2c_read(AMS_COMMAND); if (result == 0 || result & 0x80) return 0; schedule_timeout_uninterruptible(HZ / 20); } return -1; } static void ams_i2c_set_irq(enum ams_irq reg, char enable) { if (reg & AMS_IRQ_FREEFALL) { u8 val = ams_i2c_read(AMS_CTRLX); if (enable) val |= 0x80; else val &= ~0x80; ams_i2c_write(AMS_CTRLX, val); } if (reg & AMS_IRQ_SHOCK) { u8 val = ams_i2c_read(AMS_CTRLY); if (enable) val |= 0x80; else val &= ~0x80; ams_i2c_write(AMS_CTRLY, val); } if (reg & AMS_IRQ_GLOBAL) { u8 val = ams_i2c_read(AMS_CTRLZ); if (enable) val |= 0x80; else val &= ~0x80; ams_i2c_write(AMS_CTRLZ, val); } } static void ams_i2c_clear_irq(enum ams_irq reg) { if (reg & AMS_IRQ_FREEFALL) 
ams_i2c_write(AMS_FREEFALL, 0); if (reg & AMS_IRQ_SHOCK) ams_i2c_write(AMS_SHOCK, 0); } static u8 ams_i2c_get_vendor(void) { return ams_i2c_read(AMS_VENDOR); } static void ams_i2c_get_xyz(s8 *x, s8 *y, s8 *z) { *x = ams_i2c_read(AMS_DATAX); *y = ams_i2c_read(AMS_DATAY); *z = ams_i2c_read(AMS_DATAZ); } static int ams_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { int vmaj, vmin; int result; /* There can be only one */ if (unlikely(ams_info.has_device)) return -ENODEV; ams_info.i2c_client = client; if (ams_i2c_cmd(AMS_CMD_RESET)) { printk(KERN_INFO "ams: Failed to reset the device\n"); return -ENODEV; } if (ams_i2c_cmd(AMS_CMD_START)) { printk(KERN_INFO "ams: Failed to start the device\n"); return -ENODEV; } /* get version/vendor information */ ams_i2c_write(AMS_CTRL1, 0x02); ams_i2c_write(AMS_CTRL2, 0x85); ams_i2c_write(AMS_CTRL3, 0x01); ams_i2c_cmd(AMS_CMD_READMEM); vmaj = ams_i2c_read(AMS_DATA1); vmin = ams_i2c_read(AMS_DATA2); if (vmaj != 1 || vmin != 52) { printk(KERN_INFO "ams: Incorrect device version (%d.%d)\n", vmaj, vmin); return -ENODEV; } ams_i2c_cmd(AMS_CMD_VERSION); vmaj = ams_i2c_read(AMS_DATA1); vmin = ams_i2c_read(AMS_DATA2); if (vmaj != 0 || vmin != 1) { printk(KERN_INFO "ams: Incorrect firmware version (%d.%d)\n", vmaj, vmin); return -ENODEV; } /* Disable interrupts */ ams_i2c_set_irq(AMS_IRQ_ALL, 0); result = ams_sensor_attach(); if (result < 0) return result; /* Set default values */ ams_i2c_write(AMS_SENSLOW, 0x15); ams_i2c_write(AMS_SENSHIGH, 0x60); ams_i2c_write(AMS_CTRLX, 0x08); ams_i2c_write(AMS_CTRLY, 0x0F); ams_i2c_write(AMS_CTRLZ, 0x4F); ams_i2c_write(AMS_UNKNOWN1, 0x14); /* Clear interrupts */ ams_i2c_clear_irq(AMS_IRQ_ALL); ams_info.has_device = 1; /* Enable interrupts */ ams_i2c_set_irq(AMS_IRQ_ALL, 1); printk(KERN_INFO "ams: Found I2C based motion sensor\n"); return 0; } static int ams_i2c_remove(struct i2c_client *client) { if (ams_info.has_device) { /* Disable interrupts */ ams_i2c_set_irq(AMS_IRQ_ALL, 0); /* Clear interrupts */ ams_i2c_clear_irq(AMS_IRQ_ALL); printk(KERN_INFO "ams: Unloading\n"); ams_info.has_device = 0; } return 0; } static void ams_i2c_exit(void) { i2c_del_driver(&ams_i2c_driver); } int __init ams_i2c_init(struct device_node *np) { int result; /* Set implementation stuff */ ams_info.of_node = np; ams_info.exit = ams_i2c_exit; ams_info.get_vendor = ams_i2c_get_vendor; ams_info.get_xyz = ams_i2c_get_xyz; ams_info.clear_irq = ams_i2c_clear_irq; ams_info.bustype = BUS_I2C; result = i2c_add_driver(&ams_i2c_driver); return result; }
gpl-2.0
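ams_i2c_set_irq() above is a plain SMBus read-modify-write: read the control byte, set or clear the 0x80 bit, write it back. The same pattern factored into one hedged helper (ams_update_bit is a made-up name; the i2c_smbus_*_byte_data calls are the kernel API the driver already wraps, so this is a kernel-side sketch rather than a standalone program):

#include <linux/types.h>
#include <linux/i2c.h>

/* Read-modify-write one bit of a byte-wide register, as ams_i2c_set_irq()
 * does for AMS_CTRLX/Y/Z.  Returns 0 on success or a negative errno. */
static int ams_update_bit(struct i2c_client *client, u8 reg, u8 mask, bool set)
{
    s32 val = i2c_smbus_read_byte_data(client, reg);

    if (val < 0)
        return val;
    val = set ? (val | mask) : (val & ~(s32)mask);
    return i2c_smbus_write_byte_data(client, reg, (u8)val);
}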
DirtyUnicorns/android_kernel_samsung_smdk4412
drivers/video/samsung/s3cfb2_fimd4x.c
892
13271
/* linux/drivers/video/samsung/s3cfb2_fimd4x.c * * Register interface file for Samsung Display Controller (FIMD) driver * * Jinsung Yang, Copyright (c) 2009 Samsung Electronics * http://www.samsungsemi.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/device.h> #include <linux/fb.h> #include <linux/io.h> #include <mach/map.h> #include <plat/clock.h> #include <plat/fb.h> #include <plat/regs-fb.h> #include "s3cfb2.h" void s3cfb_check_line_count(struct s3cfb_global *ctrl) { int timeout = 30 * 5300; int i = 0; do { if (!(readl(ctrl->regs + S3C_VIDCON1) & 0x7ff0000)) break; i++; } while (i < timeout); if (i == timeout) { dev_err(ctrl->dev, "line count mismatch\n"); s3cfb_display_on(ctrl); } } int s3cfb_set_output(struct s3cfb_global *ctrl) { unsigned int cfg; cfg = readl(ctrl->regs + S3C_VIDCON0); cfg &= ~S3C_VIDCON0_VIDOUT_MASK; if (ctrl->output == OUTPUT_RGB) cfg |= S3C_VIDCON0_VIDOUT_RGB; else if (ctrl->output == OUTPUT_ITU) cfg |= S3C_VIDCON0_VIDOUT_ITU; else if (ctrl->output == OUTPUT_I80LDI0) cfg |= S3C_VIDCON0_VIDOUT_I80LDI0; else if (ctrl->output == OUTPUT_I80LDI1) cfg |= S3C_VIDCON0_VIDOUT_I80LDI1; else { dev_err(ctrl->dev, "invalid output type: %d\n", ctrl->output); return -EINVAL; } writel(cfg, ctrl->regs + S3C_VIDCON0); return 0; } int s3cfb_set_display_mode(struct s3cfb_global *ctrl) { unsigned int cfg; cfg = readl(ctrl->regs + S3C_VIDCON0); cfg &= ~S3C_VIDCON0_PNRMODE_MASK; cfg |= (ctrl->rgb_mode << S3C_VIDCON0_PNRMODE_SHIFT); writel(cfg, ctrl->regs + S3C_VIDCON0); return 0; } int s3cfb_display_on(struct s3cfb_global *ctrl) { unsigned int cfg; cfg = readl(ctrl->regs + S3C_VIDCON0); cfg |= (S3C_VIDCON0_ENVID_ENABLE | S3C_VIDCON0_ENVID_F_ENABLE); writel(cfg, ctrl->regs + S3C_VIDCON0); dev_dbg(ctrl->dev, "global display is on\n"); return 0; } int s3cfb_display_off(struct s3cfb_global *ctrl) { unsigned int cfg; cfg = readl(ctrl->regs + S3C_VIDCON0); cfg &= ~S3C_VIDCON0_ENVID_ENABLE; writel(cfg, ctrl->regs + S3C_VIDCON0); cfg &= ~S3C_VIDCON0_ENVID_F_ENABLE; writel(cfg, ctrl->regs + S3C_VIDCON0); dev_dbg(ctrl->dev, "global display is off\n"); return 0; } int s3cfb_frame_off(struct s3cfb_global *ctrl) { unsigned int cfg; cfg = readl(ctrl->regs + S3C_VIDCON0); cfg &= ~S3C_VIDCON0_ENVID_F_ENABLE; writel(cfg, ctrl->regs + S3C_VIDCON0); dev_dbg(ctrl->dev, "current frame display is off\n"); return 0; } int s3cfb_set_clock(struct s3cfb_global *ctrl) { struct s3c_platform_fb *pdata = to_fb_plat(ctrl->dev); unsigned int cfg, maxclk, src_clk, vclk, div; maxclk = 66 * 1000000; /* fixed clock source: hclk */ cfg = readl(ctrl->regs + S3C_VIDCON0); cfg &= ~(S3C_VIDCON0_CLKSEL_MASK | S3C_VIDCON0_CLKVALUP_MASK | S3C_VIDCON0_VCLKEN_MASK | S3C_VIDCON0_CLKDIR_MASK); cfg |= (S3C_VIDCON0_CLKSEL_HCLK | S3C_VIDCON0_CLKVALUP_ALWAYS | S3C_VIDCON0_VCLKEN_NORMAL | S3C_VIDCON0_CLKDIR_DIVIDED); src_clk = ctrl->clock->parent->rate; vclk = ctrl->fb[pdata->default_win]->var.pixclock; if (vclk > maxclk) vclk = maxclk; div = src_clk / vclk; if (src_clk % vclk) div++; cfg |= S3C_VIDCON0_CLKVAL_F(div - 1); writel(cfg, ctrl->regs + S3C_VIDCON0); dev_dbg(ctrl->dev, "parent clock: %d, vclk: %d, vclk div: %d\n", src_clk, vclk, div); return 0; } int s3cfb_set_polarity(struct s3cfb_global *ctrl) { struct s3cfb_lcd_polarity *pol; unsigned int cfg; pol = &ctrl->lcd->polarity; cfg = 0; if (pol->rise_vclk) cfg |= S3C_VIDCON1_IVCLK_RISING_EDGE; if 
(pol->inv_hsync) cfg |= S3C_VIDCON1_IHSYNC_INVERT; if (pol->inv_vsync) cfg |= S3C_VIDCON1_IVSYNC_INVERT; if (pol->inv_vden) cfg |= S3C_VIDCON1_IVDEN_INVERT; writel(cfg, ctrl->regs + S3C_VIDCON1); return 0; } int s3cfb_set_timing(struct s3cfb_global *ctrl) { struct s3cfb_lcd_timing *time; unsigned int cfg; time = &ctrl->lcd->timing; cfg = 0; cfg |= S3C_VIDTCON0_VBPDE(time->v_bpe - 1); cfg |= S3C_VIDTCON0_VBPD(time->v_bp - 1); cfg |= S3C_VIDTCON0_VFPD(time->v_fp - 1); cfg |= S3C_VIDTCON0_VSPW(time->v_sw - 1); writel(cfg, ctrl->regs + S3C_VIDTCON0); cfg = 0; cfg |= S3C_VIDTCON1_VFPDE(time->v_fpe - 1); cfg |= S3C_VIDTCON1_HBPD(time->h_bp - 1); cfg |= S3C_VIDTCON1_HFPD(time->h_fp - 1); cfg |= S3C_VIDTCON1_HSPW(time->h_sw - 1); writel(cfg, ctrl->regs + S3C_VIDTCON1); return 0; } int s3cfb_set_lcd_size(struct s3cfb_global *ctrl) { unsigned int cfg = 0; cfg |= S3C_VIDTCON2_HOZVAL(ctrl->lcd->width - 1); cfg |= S3C_VIDTCON2_LINEVAL(ctrl->lcd->height - 1); writel(cfg, ctrl->regs + S3C_VIDTCON2); return 0; } int s3cfb_set_global_interrupt(struct s3cfb_global *ctrl, int enable) { unsigned int cfg = 0; cfg = readl(ctrl->regs + S3C_VIDINTCON0); cfg &= ~(S3C_VIDINTCON0_INTFRMEN_ENABLE | S3C_VIDINTCON0_INT_ENABLE); if (enable) { dev_dbg(ctrl->dev, "video interrupt is on\n"); cfg |= (S3C_VIDINTCON0_INTFRMEN_ENABLE | S3C_VIDINTCON0_INT_ENABLE); } else { dev_dbg(ctrl->dev, "video interrupt is off\n"); cfg |= (S3C_VIDINTCON0_INTFRMEN_DISABLE | S3C_VIDINTCON0_INT_DISABLE); } writel(cfg, ctrl->regs + S3C_VIDINTCON0); return 0; } int s3cfb_set_vsync_interrupt(struct s3cfb_global *ctrl, int enable) { unsigned int cfg = 0; cfg = readl(ctrl->regs + S3C_VIDINTCON0); cfg &= ~S3C_VIDINTCON0_FRAMESEL0_MASK; if (enable) { dev_dbg(ctrl->dev, "vsync interrupt is on\n"); cfg |= S3C_VIDINTCON0_FRAMESEL0_VSYNC; } else { dev_dbg(ctrl->dev, "vsync interrupt is off\n"); cfg &= ~S3C_VIDINTCON0_FRAMESEL0_VSYNC; } writel(cfg, ctrl->regs + S3C_VIDINTCON0); return 0; } #ifdef CONFIG_FB_S3C_V2_TRACE_UNDERRUN int s3cfb_set_fifo_interrupt(struct s3cfb_global *ctrl, int enable) { unsigned int cfg = 0; cfg = readl(ctrl->regs + S3C_VIDINTCON0); cfg &= ~(S3C_VIDINTCON0_FIFOSEL_MASK | S3C_VIDINTCON0_FIFOLEVEL_MASK); cfg |= (S3C_VIDINTCON0_FIFOSEL_ALL | S3C_VIDINTCON0_FIFOLEVEL_EMPTY); if (enable) { dev_dbg(ctrl->dev, "fifo interrupt is on\n"); cfg |= (S3C_VIDINTCON0_INTFIFO_ENABLE | S3C_VIDINTCON0_INT_ENABLE); } else { dev_dbg(ctrl->dev, "fifo interrupt is off\n"); cfg &= ~(S3C_VIDINTCON0_INTFIFO_ENABLE | S3C_VIDINTCON0_INT_ENABLE); } writel(cfg, ctrl->regs + S3C_VIDINTCON0); return 0; } #endif int s3cfb_clear_interrupt(struct s3cfb_global *ctrl) { unsigned int cfg = 0; cfg = readl(ctrl->regs + S3C_VIDINTCON1); if (cfg & S3C_VIDINTCON1_INTFIFOPEND) info("fifo underrun occur\n"); cfg |= (S3C_VIDINTCON1_INTVPPEND | S3C_VIDINTCON1_INTI80PEND | S3C_VIDINTCON1_INTFRMPEND | S3C_VIDINTCON1_INTFIFOPEND); writel(cfg, ctrl->regs + S3C_VIDINTCON1); return 0; } int s3cfb_window_on(struct s3cfb_global *ctrl, int id) { unsigned int cfg; cfg = readl(ctrl->regs + S3C_WINCON(id)); cfg |= S3C_WINCON_ENWIN_ENABLE; writel(cfg, ctrl->regs + S3C_WINCON(id)); dev_dbg(ctrl->dev, "[fb%d] turn on\n", id); return 0; } int s3cfb_window_off(struct s3cfb_global *ctrl, int id) { unsigned int cfg; cfg = readl(ctrl->regs + S3C_WINCON(id)); cfg &= ~S3C_WINCON_ENWIN_ENABLE; writel(cfg, ctrl->regs + S3C_WINCON(id)); dev_dbg(ctrl->dev, "[fb%d] turn off\n", id); return 0; } int s3cfb_set_window_control(struct s3cfb_global *ctrl, int id) { struct s3c_platform_fb *pdata = 
to_fb_plat(ctrl->dev); struct fb_info *fb = ctrl->fb[id]; struct fb_var_screeninfo *var = &fb->var; struct s3cfb_window *win = fb->par; unsigned int cfg; cfg = readl(ctrl->regs + S3C_WINCON(id)); cfg &= ~(S3C_WINCON_BITSWP_ENABLE | S3C_WINCON_BYTESWP_ENABLE | S3C_WINCON_HAWSWP_ENABLE | S3C_WINCON_WSWP_ENABLE | S3C_WINCON_BURSTLEN_MASK | S3C_WINCON_BPPMODE_MASK | S3C_WINCON_INRGB_MASK | S3C_WINCON_DATAPATH_MASK); if (win->path == DATA_PATH_FIFO) { dev_dbg(ctrl->dev, "[fb%d] data path: fifo\n", id); cfg |= S3C_WINCON_DATAPATH_LOCAL; cfg |= S3C_WINCON_INRGB_RGB; cfg |= S3C_WINCON_BPPMODE_24BPP_888; if (id == 1) { cfg &= ~S3C_WINCON1_LOCALSEL_MASK; if (win->local_channel == 0) cfg |= S3C_WINCON1_LOCALSEL_TV; else cfg |= S3C_WINCON1_LOCALSEL_FIMC1; } } else { dev_dbg(ctrl->dev, "[fb%d] data path: dma\n", id); cfg |= S3C_WINCON_DATAPATH_DMA; if (fb->var.bits_per_pixel == 16 && pdata->swap & FB_SWAP_HWORD) cfg |= S3C_WINCON_HAWSWP_ENABLE; if (fb->var.bits_per_pixel == 32 && pdata->swap & FB_SWAP_WORD) cfg |= S3C_WINCON_WSWP_ENABLE; /* dma burst */ if (win->dma_burst == 4) cfg |= S3C_WINCON_BURSTLEN_4WORD; else if (win->dma_burst == 8) cfg |= S3C_WINCON_BURSTLEN_8WORD; else cfg |= S3C_WINCON_BURSTLEN_16WORD; /* bpp mode set */ switch (fb->var.bits_per_pixel) { case 16: if (var->transp.length == 1) { dev_dbg(ctrl->dev, "[fb%d] bpp mode: A1-R5-G5-B5\n", id); cfg |= S3C_WINCON_BPPMODE_16BPP_A555; } else if (var->transp.length == 4) { dev_dbg(ctrl->dev, "[fb%d] bpp mode: A4-R4-G4-B4\n", id); cfg |= S3C_WINCON_BPPMODE_16BPP_A444; } else { dev_dbg(ctrl->dev, "[fb%d] bpp mode: R5-G6-B5\n", id); cfg |= S3C_WINCON_BPPMODE_16BPP_565; } break; case 24: /* packed 24 bpp: nothing to do for 4.x fimd */ break; case 32: if (var->transp.length == 0) { dev_dbg(ctrl->dev, "[fb%d] bpp mode: R8-G8-B8\n", id); cfg |= S3C_WINCON_BPPMODE_24BPP_888; } else { dev_dbg(ctrl->dev, "[fb%d] bpp mode: A4-R8-G8-B8\n", id); cfg |= S3C_WINCON_BPPMODE_28BPP_A888; } break; } } writel(cfg, ctrl->regs + S3C_WINCON(id)); return 0; } int s3cfb_set_buffer_address(struct s3cfb_global *ctrl, int id) { struct fb_fix_screeninfo *fix = &ctrl->fb[id]->fix; struct fb_var_screeninfo *var = &ctrl->fb[id]->var; dma_addr_t start_addr = 0, end_addr = 0; if (fix->smem_start) { start_addr = fix->smem_start + (var->xres_virtual * (var->bits_per_pixel / 8) * var->yoffset); end_addr = start_addr + (var->xres_virtual * (var->bits_per_pixel / 8) * var->yres); } writel(start_addr, ctrl->regs + S3C_VIDADDR_START0(id)); writel(end_addr, ctrl->regs + S3C_VIDADDR_END0(id)); dev_dbg(ctrl->dev, "[fb%d] start_addr: 0x%08x, end_addr: 0x%08x\n", id, start_addr, end_addr); return 0; } int s3cfb_set_alpha_blending(struct s3cfb_global *ctrl, int id) { struct s3cfb_window *win = ctrl->fb[id]->par; struct s3cfb_alpha *alpha = &win->alpha; unsigned int avalue = 0, cfg; if (id == 0) { dev_err(ctrl->dev, "[fb%d] does not support alpha blending\n", id); return -EINVAL; } cfg = readl(ctrl->regs + S3C_WINCON(id)); cfg &= ~(S3C_WINCON_BLD_MASK | S3C_WINCON_ALPHA_SEL_MASK); if (alpha->mode == PIXEL_BLENDING) { dev_dbg(ctrl->dev, "[fb%d] alpha mode: pixel blending\n", id); /* fixing to DATA[31:24] for alpha value */ cfg |= (S3C_WINCON_BLD_PIXEL | S3C_WINCON_ALPHA1_SEL); } else { dev_dbg(ctrl->dev, "[fb%d] alpha mode: plane %d blending\n", id, alpha->channel); cfg |= S3C_WINCON_BLD_PLANE; if (alpha->channel == 0) { cfg |= S3C_WINCON_ALPHA0_SEL; avalue = (alpha->value << S3C_VIDOSD_ALPHA0_SHIFT); } else { cfg |= S3C_WINCON_ALPHA1_SEL; avalue = (alpha->value << 
S3C_VIDOSD_ALPHA1_SHIFT); } } writel(cfg, ctrl->regs + S3C_WINCON(id)); writel(avalue, ctrl->regs + S3C_VIDOSD_C(id)); return 0; } int s3cfb_set_window_position(struct s3cfb_global *ctrl, int id) { struct fb_var_screeninfo *var = &ctrl->fb[id]->var; struct s3cfb_window *win = ctrl->fb[id]->par; unsigned int cfg; cfg = S3C_VIDOSD_LEFT_X(win->x) | S3C_VIDOSD_TOP_Y(win->y); writel(cfg, ctrl->regs + S3C_VIDOSD_A(id)); cfg = S3C_VIDOSD_RIGHT_X(win->x + var->xres - 1) | S3C_VIDOSD_BOTTOM_Y(win->y + var->yres - 1); writel(cfg, ctrl->regs + S3C_VIDOSD_B(id)); dev_dbg(ctrl->dev, "[fb%d] offset: (%d, %d, %d, %d)\n", id, win->x, win->y, win->x + var->xres - 1, win->y + var->yres - 1); return 0; } int s3cfb_set_window_size(struct s3cfb_global *ctrl, int id) { struct fb_var_screeninfo *var = &ctrl->fb[id]->var; unsigned int cfg; if (id > 2) return 0; cfg = S3C_VIDOSD_SIZE(var->xres * var->yres); if (id == 0) writel(cfg, ctrl->regs + S3C_VIDOSD_C(id)); else writel(cfg, ctrl->regs + S3C_VIDOSD_D(id)); dev_dbg(ctrl->dev, "[fb%d] resolution: %d x %d\n", id, var->xres, var->yres); return 0; } int s3cfb_set_buffer_size(struct s3cfb_global *ctrl, int id) { struct fb_fix_screeninfo *fix = &ctrl->fb[id]->fix; unsigned int cfg = 0; cfg = S3C_VIDADDR_PAGEWIDTH(fix->line_length); writel(cfg, ctrl->regs + S3C_VIDADDR_SIZE(id)); return 0; } int s3cfb_set_chroma_key(struct s3cfb_global *ctrl, int id) { struct s3cfb_window *win = ctrl->fb[id]->par; struct s3cfb_chroma *chroma = &win->chroma; unsigned int cfg = 0; if (id == 0) { dev_err(ctrl->dev, "[fb%d] does not support chroma key\n", id); return -EINVAL; } cfg = (S3C_KEYCON0_KEYBLEN_DISABLE | S3C_KEYCON0_DIRCON_MATCH_FG); if (chroma->enabled) cfg |= S3C_KEYCON0_KEY_ENABLE; writel(cfg, ctrl->regs + S3C_KEYCON(id)); cfg = S3C_KEYCON1_COLVAL(chroma->key); writel(cfg, ctrl->regs + S3C_KEYVAL(id)); dev_dbg(ctrl->dev, "[fb%d] chroma key: 0x%08x, %s\n", id, cfg, chroma->enabled ? "enabled" : "disabled"); return 0; }
gpl-2.0
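A minimal standalone sketch (not part of the driver above) of the pan/line arithmetic that s3cfb_set_buffer_address() programs into the start/end address registers; the struct and sample values here are hypothetical stand-ins for the fb_var_screeninfo fields the driver reads.

#include <stdint.h>
#include <stdio.h>

struct fb_geometry {            /* illustrative subset of fb_var_screeninfo */
	uint32_t xres_virtual;  /* pixels per virtual line */
	uint32_t yres;          /* visible lines */
	uint32_t yoffset;       /* pan offset in lines */
	uint32_t bits_per_pixel;
};

static void fb_window_addresses(uint64_t smem_start,
				const struct fb_geometry *g,
				uint64_t *start, uint64_t *end)
{
	uint64_t line_bytes = (uint64_t)g->xres_virtual * (g->bits_per_pixel / 8);

	*start = smem_start + line_bytes * g->yoffset; /* first visible byte */
	*end   = *start + line_bytes * g->yres;        /* one past the last line */
}

int main(void)
{
	struct fb_geometry g = { 800, 480, 480, 32 };  /* double-buffered 800x480, made up */
	uint64_t s, e;

	fb_window_addresses(0x20000000ULL, &g, &s, &e);
	printf("start 0x%llx end 0x%llx\n",
	       (unsigned long long)s, (unsigned long long)e);
	return 0;
}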
LeeDroid-/Flyer-2.6.35
drivers/media/dvb/dm1105/dm1105.c
892
24869
/* * dm1105.c - driver for DVB cards based on SDMC DM1105 PCI chip * * Copyright (C) 2008 Igor M. Liplianin <liplianin@me.by> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/i2c.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/input.h> #include <linux/slab.h> #include <media/ir-core.h> #include "demux.h" #include "dmxdev.h" #include "dvb_demux.h" #include "dvb_frontend.h" #include "dvb_net.h" #include "dvbdev.h" #include "dvb-pll.h" #include "stv0299.h" #include "stv0288.h" #include "stb6000.h" #include "si21xx.h" #include "cx24116.h" #include "z0194a.h" #include "ds3000.h" #define MODULE_NAME "dm1105" #define UNSET (-1U) #define DM1105_BOARD_NOAUTO UNSET #define DM1105_BOARD_UNKNOWN 0 #define DM1105_BOARD_DVBWORLD_2002 1 #define DM1105_BOARD_DVBWORLD_2004 2 #define DM1105_BOARD_AXESS_DM05 3 /* ----------------------------------------------- */ /* * PCI ID's */ #ifndef PCI_VENDOR_ID_TRIGEM #define PCI_VENDOR_ID_TRIGEM 0x109f #endif #ifndef PCI_VENDOR_ID_AXESS #define PCI_VENDOR_ID_AXESS 0x195d #endif #ifndef PCI_DEVICE_ID_DM1105 #define PCI_DEVICE_ID_DM1105 0x036f #endif #ifndef PCI_DEVICE_ID_DW2002 #define PCI_DEVICE_ID_DW2002 0x2002 #endif #ifndef PCI_DEVICE_ID_DW2004 #define PCI_DEVICE_ID_DW2004 0x2004 #endif #ifndef PCI_DEVICE_ID_DM05 #define PCI_DEVICE_ID_DM05 0x1105 #endif /* ----------------------------------------------- */ /* sdmc dm1105 registers */ /* TS Control */ #define DM1105_TSCTR 0x00 #define DM1105_DTALENTH 0x04 /* GPIO Interface */ #define DM1105_GPIOVAL 0x08 #define DM1105_GPIOCTR 0x0c /* PID serial number */ #define DM1105_PIDN 0x10 /* Odd-even secret key select */ #define DM1105_CWSEL 0x14 /* Host Command Interface */ #define DM1105_HOST_CTR 0x18 #define DM1105_HOST_AD 0x1c /* PCI Interface */ #define DM1105_CR 0x30 #define DM1105_RST 0x34 #define DM1105_STADR 0x38 #define DM1105_RLEN 0x3c #define DM1105_WRP 0x40 #define DM1105_INTCNT 0x44 #define DM1105_INTMAK 0x48 #define DM1105_INTSTS 0x4c /* CW Value */ #define DM1105_ODD 0x50 #define DM1105_EVEN 0x58 /* PID Value */ #define DM1105_PID 0x60 /* IR Control */ #define DM1105_IRCTR 0x64 #define DM1105_IRMODE 0x68 #define DM1105_SYSTEMCODE 0x6c #define DM1105_IRCODE 0x70 /* Unknown Values */ #define DM1105_ENCRYPT 0x74 #define DM1105_VER 0x7c /* I2C Interface */ #define DM1105_I2CCTR 0x80 #define DM1105_I2CSTS 0x81 #define DM1105_I2CDAT 0x82 #define DM1105_I2C_RA 0x83 /* ----------------------------------------------- */ /* Interrupt Mask Bits */ #define INTMAK_TSIRQM 0x01 #define INTMAK_HIRQM 0x04 #define INTMAK_IRM 0x08 #define INTMAK_ALLMASK (INTMAK_TSIRQM | \ INTMAK_HIRQM | \ INTMAK_IRM) #define INTMAK_NONEMASK 0x00 /* Interrupt Status Bits */ #define INTSTS_TSIRQ 0x01 #define INTSTS_HIRQ 0x04 #define INTSTS_IR 0x08 /* IR Control Bits */ #define 
DM1105_IR_EN 0x01 #define DM1105_SYS_CHK 0x02 #define DM1105_REP_FLG 0x08 /* EEPROM addr */ #define IIC_24C01_addr 0xa0 /* Max board count */ #define DM1105_MAX 0x04 #define DRIVER_NAME "dm1105" #define DM1105_DMA_PACKETS 47 #define DM1105_DMA_PACKET_LENGTH (128*4) #define DM1105_DMA_BYTES (128 * 4 * DM1105_DMA_PACKETS) /* GPIO's for LNB power control */ #define DM1105_LNB_MASK 0x00000000 #define DM1105_LNB_OFF 0x00020000 #define DM1105_LNB_13V 0x00010100 #define DM1105_LNB_18V 0x00000100 /* GPIO's for LNB power control for Axess DM05 */ #define DM05_LNB_MASK 0x00000000 #define DM05_LNB_OFF 0x00020000/* actually 13v */ #define DM05_LNB_13V 0x00020000 #define DM05_LNB_18V 0x00030000 static unsigned int card[] = {[0 ... 3] = UNSET }; module_param_array(card, int, NULL, 0444); MODULE_PARM_DESC(card, "card type"); static int ir_debug; module_param(ir_debug, int, 0644); MODULE_PARM_DESC(ir_debug, "enable debugging information for IR decoding"); static unsigned int dm1105_devcount; DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); struct dm1105_board { char *name; }; struct dm1105_subid { u16 subvendor; u16 subdevice; u32 card; }; static const struct dm1105_board dm1105_boards[] = { [DM1105_BOARD_UNKNOWN] = { .name = "UNKNOWN/GENERIC", }, [DM1105_BOARD_DVBWORLD_2002] = { .name = "DVBWorld PCI 2002", }, [DM1105_BOARD_DVBWORLD_2004] = { .name = "DVBWorld PCI 2004", }, [DM1105_BOARD_AXESS_DM05] = { .name = "Axess/EasyTv DM05", }, }; static const struct dm1105_subid dm1105_subids[] = { { .subvendor = 0x0000, .subdevice = 0x2002, .card = DM1105_BOARD_DVBWORLD_2002, }, { .subvendor = 0x0001, .subdevice = 0x2002, .card = DM1105_BOARD_DVBWORLD_2002, }, { .subvendor = 0x0000, .subdevice = 0x2004, .card = DM1105_BOARD_DVBWORLD_2004, }, { .subvendor = 0x0001, .subdevice = 0x2004, .card = DM1105_BOARD_DVBWORLD_2004, }, { .subvendor = 0x195d, .subdevice = 0x1105, .card = DM1105_BOARD_AXESS_DM05, }, }; static void dm1105_card_list(struct pci_dev *pci) { int i; if (0 == pci->subsystem_vendor && 0 == pci->subsystem_device) { printk(KERN_ERR "dm1105: Your board has no valid PCI Subsystem ID\n" "dm1105: and thus can't be autodetected\n" "dm1105: Please pass card=<n> insmod option to\n" "dm1105: workaround that. Redirect complaints to\n" "dm1105: the vendor of the TV card. 
Best regards,\n" "dm1105: -- tux\n"); } else { printk(KERN_ERR "dm1105: Your board isn't known (yet) to the driver.\n" "dm1105: You can try to pick one of the existing\n" "dm1105: card configs via card=<n> insmod option.\n" "dm1105: Updating to the latest version might help\n" "dm1105: as well.\n"); } printk(KERN_ERR "Here is a list of valid choices for the card=<n> " "insmod option:\n"); for (i = 0; i < ARRAY_SIZE(dm1105_boards); i++) printk(KERN_ERR "dm1105: card=%d -> %s\n", i, dm1105_boards[i].name); } /* infrared remote control */ struct infrared { struct input_dev *input_dev; char input_phys[32]; struct work_struct work; u32 ir_command; }; struct dm1105_dev { /* pci */ struct pci_dev *pdev; u8 __iomem *io_mem; /* ir */ struct infrared ir; /* dvb */ struct dmx_frontend hw_frontend; struct dmx_frontend mem_frontend; struct dmxdev dmxdev; struct dvb_adapter dvb_adapter; struct dvb_demux demux; struct dvb_frontend *fe; struct dvb_net dvbnet; unsigned int full_ts_users; unsigned int boardnr; int nr; /* i2c */ struct i2c_adapter i2c_adap; /* irq */ struct work_struct work; struct workqueue_struct *wq; char wqn[16]; /* dma */ dma_addr_t dma_addr; unsigned char *ts_buf; u32 wrp; u32 nextwrp; u32 buffer_size; unsigned int PacketErrorCount; unsigned int dmarst; spinlock_t lock; }; #define dm_io_mem(reg) ((unsigned long)(&dev->io_mem[reg])) #define dm_readb(reg) inb(dm_io_mem(reg)) #define dm_writeb(reg, value) outb((value), (dm_io_mem(reg))) #define dm_readw(reg) inw(dm_io_mem(reg)) #define dm_writew(reg, value) outw((value), (dm_io_mem(reg))) #define dm_readl(reg) inl(dm_io_mem(reg)) #define dm_writel(reg, value) outl((value), (dm_io_mem(reg))) #define dm_andorl(reg, mask, value) \ outl((inl(dm_io_mem(reg)) & ~(mask)) |\ ((value) & (mask)), (dm_io_mem(reg))) #define dm_setl(reg, bit) dm_andorl((reg), (bit), (bit)) #define dm_clearl(reg, bit) dm_andorl((reg), (bit), 0) static int dm1105_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { struct dm1105_dev *dev ; int addr, rc, i, j, k, len, byte, data; u8 status; dev = i2c_adap->algo_data; for (i = 0; i < num; i++) { dm_writeb(DM1105_I2CCTR, 0x00); if (msgs[i].flags & I2C_M_RD) { /* read bytes */ addr = msgs[i].addr << 1; addr |= 1; dm_writeb(DM1105_I2CDAT, addr); for (byte = 0; byte < msgs[i].len; byte++) dm_writeb(DM1105_I2CDAT + byte + 1, 0); dm_writeb(DM1105_I2CCTR, 0x81 + msgs[i].len); for (j = 0; j < 55; j++) { mdelay(10); status = dm_readb(DM1105_I2CSTS); if ((status & 0xc0) == 0x40) break; } if (j >= 55) return -1; for (byte = 0; byte < msgs[i].len; byte++) { rc = dm_readb(DM1105_I2CDAT + byte + 1); if (rc < 0) goto err; msgs[i].buf[byte] = rc; } } else if ((msgs[i].buf[0] == 0xf7) && (msgs[i].addr == 0x55)) { /* prepaired for cx24116 firmware */ /* Write in small blocks */ len = msgs[i].len - 1; k = 1; do { dm_writeb(DM1105_I2CDAT, msgs[i].addr << 1); dm_writeb(DM1105_I2CDAT + 1, 0xf7); for (byte = 0; byte < (len > 48 ? 48 : len); byte++) { data = msgs[i].buf[k + byte]; dm_writeb(DM1105_I2CDAT + byte + 2, data); } dm_writeb(DM1105_I2CCTR, 0x82 + (len > 48 ? 
48 : len)); for (j = 0; j < 25; j++) { mdelay(10); status = dm_readb(DM1105_I2CSTS); if ((status & 0xc0) == 0x40) break; } if (j >= 25) return -1; k += 48; len -= 48; } while (len > 0); } else { /* write bytes */ dm_writeb(DM1105_I2CDAT, msgs[i].addr << 1); for (byte = 0; byte < msgs[i].len; byte++) { data = msgs[i].buf[byte]; dm_writeb(DM1105_I2CDAT + byte + 1, data); } dm_writeb(DM1105_I2CCTR, 0x81 + msgs[i].len); for (j = 0; j < 25; j++) { mdelay(10); status = dm_readb(DM1105_I2CSTS); if ((status & 0xc0) == 0x40) break; } if (j >= 25) return -1; } } return num; err: return rc; } static u32 functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C; } static struct i2c_algorithm dm1105_algo = { .master_xfer = dm1105_i2c_xfer, .functionality = functionality, }; static inline struct dm1105_dev *feed_to_dm1105_dev(struct dvb_demux_feed *feed) { return container_of(feed->demux, struct dm1105_dev, demux); } static inline struct dm1105_dev *frontend_to_dm1105_dev(struct dvb_frontend *fe) { return container_of(fe->dvb, struct dm1105_dev, dvb_adapter); } static int dm1105_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { struct dm1105_dev *dev = frontend_to_dm1105_dev(fe); u32 lnb_mask, lnb_13v, lnb_18v, lnb_off; switch (dev->boardnr) { case DM1105_BOARD_AXESS_DM05: lnb_mask = DM05_LNB_MASK; lnb_off = DM05_LNB_OFF; lnb_13v = DM05_LNB_13V; lnb_18v = DM05_LNB_18V; break; case DM1105_BOARD_DVBWORLD_2002: case DM1105_BOARD_DVBWORLD_2004: default: lnb_mask = DM1105_LNB_MASK; lnb_off = DM1105_LNB_OFF; lnb_13v = DM1105_LNB_13V; lnb_18v = DM1105_LNB_18V; } dm_writel(DM1105_GPIOCTR, lnb_mask); if (voltage == SEC_VOLTAGE_18) dm_writel(DM1105_GPIOVAL, lnb_18v); else if (voltage == SEC_VOLTAGE_13) dm_writel(DM1105_GPIOVAL, lnb_13v); else dm_writel(DM1105_GPIOVAL, lnb_off); return 0; } static void dm1105_set_dma_addr(struct dm1105_dev *dev) { dm_writel(DM1105_STADR, cpu_to_le32(dev->dma_addr)); } static int __devinit dm1105_dma_map(struct dm1105_dev *dev) { dev->ts_buf = pci_alloc_consistent(dev->pdev, 6 * DM1105_DMA_BYTES, &dev->dma_addr); return !dev->ts_buf; } static void dm1105_dma_unmap(struct dm1105_dev *dev) { pci_free_consistent(dev->pdev, 6 * DM1105_DMA_BYTES, dev->ts_buf, dev->dma_addr); } static void dm1105_enable_irqs(struct dm1105_dev *dev) { dm_writeb(DM1105_INTMAK, INTMAK_ALLMASK); dm_writeb(DM1105_CR, 1); } static void dm1105_disable_irqs(struct dm1105_dev *dev) { dm_writeb(DM1105_INTMAK, INTMAK_IRM); dm_writeb(DM1105_CR, 0); } static int dm1105_start_feed(struct dvb_demux_feed *f) { struct dm1105_dev *dev = feed_to_dm1105_dev(f); if (dev->full_ts_users++ == 0) dm1105_enable_irqs(dev); return 0; } static int dm1105_stop_feed(struct dvb_demux_feed *f) { struct dm1105_dev *dev = feed_to_dm1105_dev(f); if (--dev->full_ts_users == 0) dm1105_disable_irqs(dev); return 0; } /* ir work handler */ static void dm1105_emit_key(struct work_struct *work) { struct infrared *ir = container_of(work, struct infrared, work); u32 ircom = ir->ir_command; u8 data; if (ir_debug) printk(KERN_INFO "%s: received byte 0x%04x\n", __func__, ircom); data = (ircom >> 8) & 0x7f; ir_keydown(ir->input_dev, data, 0); } /* work handler */ static void dm1105_dmx_buffer(struct work_struct *work) { struct dm1105_dev *dev = container_of(work, struct dm1105_dev, work); unsigned int nbpackets; u32 oldwrp = dev->wrp; u32 nextwrp = dev->nextwrp; if (!((dev->ts_buf[oldwrp] == 0x47) && (dev->ts_buf[oldwrp + 188] == 0x47) && (dev->ts_buf[oldwrp + 188 * 2] == 0x47))) { dev->PacketErrorCount++; /* bad packet found */ if 
((dev->PacketErrorCount >= 2) && (dev->dmarst == 0)) { dm_writeb(DM1105_RST, 1); dev->wrp = 0; dev->PacketErrorCount = 0; dev->dmarst = 0; return; } } if (nextwrp < oldwrp) { memcpy(dev->ts_buf + dev->buffer_size, dev->ts_buf, nextwrp); nbpackets = ((dev->buffer_size - oldwrp) + nextwrp) / 188; } else nbpackets = (nextwrp - oldwrp) / 188; dev->wrp = nextwrp; dvb_dmx_swfilter_packets(&dev->demux, &dev->ts_buf[oldwrp], nbpackets); } static irqreturn_t dm1105_irq(int irq, void *dev_id) { struct dm1105_dev *dev = dev_id; /* Read-Write INSTS Ack's Interrupt for DM1105 chip 16.03.2008 */ unsigned int intsts = dm_readb(DM1105_INTSTS); dm_writeb(DM1105_INTSTS, intsts); switch (intsts) { case INTSTS_TSIRQ: case (INTSTS_TSIRQ | INTSTS_IR): dev->nextwrp = dm_readl(DM1105_WRP) - dm_readl(DM1105_STADR); queue_work(dev->wq, &dev->work); break; case INTSTS_IR: dev->ir.ir_command = dm_readl(DM1105_IRCODE); schedule_work(&dev->ir.work); break; } return IRQ_HANDLED; } int __devinit dm1105_ir_init(struct dm1105_dev *dm1105) { struct input_dev *input_dev; char *ir_codes = RC_MAP_DM1105_NEC; int err = -ENOMEM; input_dev = input_allocate_device(); if (!input_dev) return -ENOMEM; dm1105->ir.input_dev = input_dev; snprintf(dm1105->ir.input_phys, sizeof(dm1105->ir.input_phys), "pci-%s/ir0", pci_name(dm1105->pdev)); input_dev->name = "DVB on-card IR receiver"; input_dev->phys = dm1105->ir.input_phys; input_dev->id.bustype = BUS_PCI; input_dev->id.version = 1; if (dm1105->pdev->subsystem_vendor) { input_dev->id.vendor = dm1105->pdev->subsystem_vendor; input_dev->id.product = dm1105->pdev->subsystem_device; } else { input_dev->id.vendor = dm1105->pdev->vendor; input_dev->id.product = dm1105->pdev->device; } input_dev->dev.parent = &dm1105->pdev->dev; INIT_WORK(&dm1105->ir.work, dm1105_emit_key); err = ir_input_register(input_dev, ir_codes, NULL, MODULE_NAME); if (err < 0) { input_free_device(input_dev); return err; } return 0; } void __devexit dm1105_ir_exit(struct dm1105_dev *dm1105) { ir_input_unregister(dm1105->ir.input_dev); } static int __devinit dm1105_hw_init(struct dm1105_dev *dev) { dm1105_disable_irqs(dev); dm_writeb(DM1105_HOST_CTR, 0); /*DATALEN 188,*/ dm_writeb(DM1105_DTALENTH, 188); /*TS_STRT TS_VALP MSBFIRST TS_MODE ALPAS TSPES*/ dm_writew(DM1105_TSCTR, 0xc10a); /* map DMA and set address */ dm1105_dma_map(dev); dm1105_set_dma_addr(dev); /* big buffer */ dm_writel(DM1105_RLEN, 5 * DM1105_DMA_BYTES); dm_writeb(DM1105_INTCNT, 47); /* IR NEC mode enable */ dm_writeb(DM1105_IRCTR, (DM1105_IR_EN | DM1105_SYS_CHK)); dm_writeb(DM1105_IRMODE, 0); dm_writew(DM1105_SYSTEMCODE, 0); return 0; } static void dm1105_hw_exit(struct dm1105_dev *dev) { dm1105_disable_irqs(dev); /* IR disable */ dm_writeb(DM1105_IRCTR, 0); dm_writeb(DM1105_INTMAK, INTMAK_NONEMASK); dm1105_dma_unmap(dev); } static struct stv0299_config sharp_z0194a_config = { .demod_address = 0x68, .inittab = sharp_z0194a_inittab, .mclk = 88000000UL, .invert = 1, .skip_reinit = 0, .lock_output = STV0299_LOCKOUTPUT_1, .volt13_op0_op1 = STV0299_VOLT13_OP1, .min_delay_ms = 100, .set_symbol_rate = sharp_z0194a_set_symbol_rate, }; static struct stv0288_config earda_config = { .demod_address = 0x68, .min_delay_ms = 100, }; static struct si21xx_config serit_config = { .demod_address = 0x68, .min_delay_ms = 100, }; static struct cx24116_config serit_sp2633_config = { .demod_address = 0x55, }; static struct ds3000_config dvbworld_ds3000_config = { .demod_address = 0x68, }; static int __devinit frontend_init(struct dm1105_dev *dev) { int ret; switch (dev->boardnr) 
{ case DM1105_BOARD_DVBWORLD_2004: dev->fe = dvb_attach( cx24116_attach, &serit_sp2633_config, &dev->i2c_adap); if (dev->fe) { dev->fe->ops.set_voltage = dm1105_set_voltage; break; } dev->fe = dvb_attach( ds3000_attach, &dvbworld_ds3000_config, &dev->i2c_adap); if (dev->fe) dev->fe->ops.set_voltage = dm1105_set_voltage; break; case DM1105_BOARD_DVBWORLD_2002: case DM1105_BOARD_AXESS_DM05: default: dev->fe = dvb_attach( stv0299_attach, &sharp_z0194a_config, &dev->i2c_adap); if (dev->fe) { dev->fe->ops.set_voltage = dm1105_set_voltage; dvb_attach(dvb_pll_attach, dev->fe, 0x60, &dev->i2c_adap, DVB_PLL_OPERA1); break; } dev->fe = dvb_attach( stv0288_attach, &earda_config, &dev->i2c_adap); if (dev->fe) { dev->fe->ops.set_voltage = dm1105_set_voltage; dvb_attach(stb6000_attach, dev->fe, 0x61, &dev->i2c_adap); break; } dev->fe = dvb_attach( si21xx_attach, &serit_config, &dev->i2c_adap); if (dev->fe) dev->fe->ops.set_voltage = dm1105_set_voltage; } if (!dev->fe) { dev_err(&dev->pdev->dev, "could not attach frontend\n"); return -ENODEV; } ret = dvb_register_frontend(&dev->dvb_adapter, dev->fe); if (ret < 0) { if (dev->fe->ops.release) dev->fe->ops.release(dev->fe); dev->fe = NULL; return ret; } return 0; } static void __devinit dm1105_read_mac(struct dm1105_dev *dev, u8 *mac) { static u8 command[1] = { 0x28 }; struct i2c_msg msg[] = { { .addr = IIC_24C01_addr >> 1, .flags = 0, .buf = command, .len = 1 }, { .addr = IIC_24C01_addr >> 1, .flags = I2C_M_RD, .buf = mac, .len = 6 }, }; dm1105_i2c_xfer(&dev->i2c_adap, msg , 2); dev_info(&dev->pdev->dev, "MAC %pM\n", mac); } static int __devinit dm1105_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct dm1105_dev *dev; struct dvb_adapter *dvb_adapter; struct dvb_demux *dvbdemux; struct dmx_demux *dmx; int ret = -ENOMEM; int i; dev = kzalloc(sizeof(struct dm1105_dev), GFP_KERNEL); if (!dev) return -ENOMEM; /* board config */ dev->nr = dm1105_devcount; dev->boardnr = UNSET; if (card[dev->nr] < ARRAY_SIZE(dm1105_boards)) dev->boardnr = card[dev->nr]; for (i = 0; UNSET == dev->boardnr && i < ARRAY_SIZE(dm1105_subids); i++) if (pdev->subsystem_vendor == dm1105_subids[i].subvendor && pdev->subsystem_device == dm1105_subids[i].subdevice) dev->boardnr = dm1105_subids[i].card; if (UNSET == dev->boardnr) { dev->boardnr = DM1105_BOARD_UNKNOWN; dm1105_card_list(pdev); } dm1105_devcount++; dev->pdev = pdev; dev->buffer_size = 5 * DM1105_DMA_BYTES; dev->PacketErrorCount = 0; dev->dmarst = 0; ret = pci_enable_device(pdev); if (ret < 0) goto err_kfree; ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret < 0) goto err_pci_disable_device; pci_set_master(pdev); ret = pci_request_regions(pdev, DRIVER_NAME); if (ret < 0) goto err_pci_disable_device; dev->io_mem = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); if (!dev->io_mem) { ret = -EIO; goto err_pci_release_regions; } spin_lock_init(&dev->lock); pci_set_drvdata(pdev, dev); ret = dm1105_hw_init(dev); if (ret < 0) goto err_pci_iounmap; /* i2c */ i2c_set_adapdata(&dev->i2c_adap, dev); strcpy(dev->i2c_adap.name, DRIVER_NAME); dev->i2c_adap.owner = THIS_MODULE; dev->i2c_adap.class = I2C_CLASS_TV_DIGITAL; dev->i2c_adap.dev.parent = &pdev->dev; dev->i2c_adap.algo = &dm1105_algo; dev->i2c_adap.algo_data = dev; ret = i2c_add_adapter(&dev->i2c_adap); if (ret < 0) goto err_dm1105_hw_exit; /* dvb */ ret = dvb_register_adapter(&dev->dvb_adapter, DRIVER_NAME, THIS_MODULE, &pdev->dev, adapter_nr); if (ret < 0) goto err_i2c_del_adapter; dvb_adapter = &dev->dvb_adapter; dm1105_read_mac(dev, 
dvb_adapter->proposed_mac); dvbdemux = &dev->demux; dvbdemux->filternum = 256; dvbdemux->feednum = 256; dvbdemux->start_feed = dm1105_start_feed; dvbdemux->stop_feed = dm1105_stop_feed; dvbdemux->dmx.capabilities = (DMX_TS_FILTERING | DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING); ret = dvb_dmx_init(dvbdemux); if (ret < 0) goto err_dvb_unregister_adapter; dmx = &dvbdemux->dmx; dev->dmxdev.filternum = 256; dev->dmxdev.demux = dmx; dev->dmxdev.capabilities = 0; ret = dvb_dmxdev_init(&dev->dmxdev, dvb_adapter); if (ret < 0) goto err_dvb_dmx_release; dev->hw_frontend.source = DMX_FRONTEND_0; ret = dmx->add_frontend(dmx, &dev->hw_frontend); if (ret < 0) goto err_dvb_dmxdev_release; dev->mem_frontend.source = DMX_MEMORY_FE; ret = dmx->add_frontend(dmx, &dev->mem_frontend); if (ret < 0) goto err_remove_hw_frontend; ret = dmx->connect_frontend(dmx, &dev->hw_frontend); if (ret < 0) goto err_remove_mem_frontend; ret = frontend_init(dev); if (ret < 0) goto err_disconnect_frontend; dvb_net_init(dvb_adapter, &dev->dvbnet, dmx); dm1105_ir_init(dev); INIT_WORK(&dev->work, dm1105_dmx_buffer); sprintf(dev->wqn, "%s/%d", dvb_adapter->name, dvb_adapter->num); dev->wq = create_singlethread_workqueue(dev->wqn); if (!dev->wq) goto err_dvb_net; ret = request_irq(pdev->irq, dm1105_irq, IRQF_SHARED, DRIVER_NAME, dev); if (ret < 0) goto err_workqueue; return 0; err_workqueue: destroy_workqueue(dev->wq); err_dvb_net: dvb_net_release(&dev->dvbnet); err_disconnect_frontend: dmx->disconnect_frontend(dmx); err_remove_mem_frontend: dmx->remove_frontend(dmx, &dev->mem_frontend); err_remove_hw_frontend: dmx->remove_frontend(dmx, &dev->hw_frontend); err_dvb_dmxdev_release: dvb_dmxdev_release(&dev->dmxdev); err_dvb_dmx_release: dvb_dmx_release(dvbdemux); err_dvb_unregister_adapter: dvb_unregister_adapter(dvb_adapter); err_i2c_del_adapter: i2c_del_adapter(&dev->i2c_adap); err_dm1105_hw_exit: dm1105_hw_exit(dev); err_pci_iounmap: pci_iounmap(pdev, dev->io_mem); err_pci_release_regions: pci_release_regions(pdev); err_pci_disable_device: pci_disable_device(pdev); err_kfree: pci_set_drvdata(pdev, NULL); kfree(dev); return ret; } static void __devexit dm1105_remove(struct pci_dev *pdev) { struct dm1105_dev *dev = pci_get_drvdata(pdev); struct dvb_adapter *dvb_adapter = &dev->dvb_adapter; struct dvb_demux *dvbdemux = &dev->demux; struct dmx_demux *dmx = &dvbdemux->dmx; dm1105_ir_exit(dev); dmx->close(dmx); dvb_net_release(&dev->dvbnet); if (dev->fe) dvb_unregister_frontend(dev->fe); dmx->disconnect_frontend(dmx); dmx->remove_frontend(dmx, &dev->mem_frontend); dmx->remove_frontend(dmx, &dev->hw_frontend); dvb_dmxdev_release(&dev->dmxdev); dvb_dmx_release(dvbdemux); dvb_unregister_adapter(dvb_adapter); if (&dev->i2c_adap) i2c_del_adapter(&dev->i2c_adap); dm1105_hw_exit(dev); synchronize_irq(pdev->irq); free_irq(pdev->irq, dev); pci_iounmap(pdev, dev->io_mem); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); dm1105_devcount--; kfree(dev); } static struct pci_device_id dm1105_id_table[] __devinitdata = { { .vendor = PCI_VENDOR_ID_TRIGEM, .device = PCI_DEVICE_ID_DM1105, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .vendor = PCI_VENDOR_ID_AXESS, .device = PCI_DEVICE_ID_DM05, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { /* empty */ }, }; MODULE_DEVICE_TABLE(pci, dm1105_id_table); static struct pci_driver dm1105_driver = { .name = DRIVER_NAME, .id_table = dm1105_id_table, .probe = dm1105_probe, .remove = __devexit_p(dm1105_remove), }; static int __init dm1105_init(void) { return 
pci_register_driver(&dm1105_driver); } static void __exit dm1105_exit(void) { pci_unregister_driver(&dm1105_driver); } module_init(dm1105_init); module_exit(dm1105_exit); MODULE_AUTHOR("Igor M. Liplianin <liplianin@me.by>"); MODULE_DESCRIPTION("SDMC DM1105 DVB driver"); MODULE_LICENSE("GPL");
gpl-2.0
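A hedged sketch of the TS ring-buffer bookkeeping done in dm1105_dmx_buffer() above: on wrap-around the head of the ring is copied into spill space behind the buffer so the demux sees one contiguous run of 188-byte packets. The names and buffer sizes below are illustrative, not the driver's.

#include <stdio.h>
#include <string.h>

#define TS_PKT 188

/* buf must provide spill room behind the ring large enough to hold the
 * wrapped head (the real driver reserves an extra DMA block for this). */
static size_t ts_ring_packets(unsigned char *buf, size_t buf_size,
			      size_t oldwrp, size_t newwrp)
{
	if (newwrp < oldwrp) {
		/* wrapped: append the bytes before newwrp behind the ring */
		memcpy(buf + buf_size, buf, newwrp);
		return ((buf_size - oldwrp) + newwrp) / TS_PKT;
	}
	return (newwrp - oldwrp) / TS_PKT;
}

int main(void)
{
	static unsigned char ring[2 * TS_PKT * 10];   /* ring + spill space */

	/* write pointer wrapped from packet 8 back to packet 3 -> 5 packets */
	printf("%zu packets\n",
	       ts_ring_packets(ring, TS_PKT * 10, 8 * TS_PKT, 3 * TS_PKT));
	return 0;
}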
gnychis/skyrocket-i727-kernel-stock
net/bridge/netfilter/ebt_vlan.c
892
5484
/* * Description: EBTables 802.1Q match extension kernelspace module. * Authors: Nick Fedchik <nick@fedchik.org.ua> * Bart De Schuymer <bdschuym@pandora.be> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_vlan.h> #define MODULE_VERS "0.6" MODULE_AUTHOR("Nick Fedchik <nick@fedchik.org.ua>"); MODULE_DESCRIPTION("Ebtables: 802.1Q VLAN tag match"); MODULE_LICENSE("GPL"); #define GET_BITMASK(_BIT_MASK_) info->bitmask & _BIT_MASK_ #define EXIT_ON_MISMATCH(_MATCH_,_MASK_) {if (!((info->_MATCH_ == _MATCH_)^!!(info->invflags & _MASK_))) return false; } static bool ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct ebt_vlan_info *info = par->matchinfo; const struct vlan_hdr *fp; struct vlan_hdr _frame; unsigned short TCI; /* Whole TCI, given from parsed frame */ unsigned short id; /* VLAN ID, given from frame TCI */ unsigned char prio; /* user_priority, given from frame TCI */ /* VLAN encapsulated Type/Length field, given from orig frame */ __be16 encap; fp = skb_header_pointer(skb, 0, sizeof(_frame), &_frame); if (fp == NULL) return false; /* Tag Control Information (TCI) consists of the following elements: * - User_priority. The user_priority field is three bits in length, * interpreted as a binary number. * - Canonical Format Indicator (CFI). The Canonical Format Indicator * (CFI) is a single bit flag value. Currently ignored. * - VLAN Identifier (VID). The VID is encoded as * an unsigned binary number. */ TCI = ntohs(fp->h_vlan_TCI); id = TCI & VLAN_VID_MASK; prio = (TCI >> 13) & 0x7; encap = fp->h_vlan_encapsulated_proto; /* Checking VLAN Identifier (VID) */ if (GET_BITMASK(EBT_VLAN_ID)) EXIT_ON_MISMATCH(id, EBT_VLAN_ID); /* Checking user_priority */ if (GET_BITMASK(EBT_VLAN_PRIO)) EXIT_ON_MISMATCH(prio, EBT_VLAN_PRIO); /* Checking Encapsulated Proto (Length/Type) field */ if (GET_BITMASK(EBT_VLAN_ENCAP)) EXIT_ON_MISMATCH(encap, EBT_VLAN_ENCAP); return true; } static int ebt_vlan_mt_check(const struct xt_mtchk_param *par) { struct ebt_vlan_info *info = par->matchinfo; const struct ebt_entry *e = par->entryinfo; /* Is it 802.1Q frame checked? 
*/ if (e->ethproto != htons(ETH_P_8021Q)) { pr_debug("passed entry proto %2.4X is not 802.1Q (8100)\n", ntohs(e->ethproto)); return -EINVAL; } /* Check for bitmask range * True if even one bit is out of mask */ if (info->bitmask & ~EBT_VLAN_MASK) { pr_debug("bitmask %2X is out of mask (%2X)\n", info->bitmask, EBT_VLAN_MASK); return -EINVAL; } /* Check for inversion flags range */ if (info->invflags & ~EBT_VLAN_MASK) { pr_debug("inversion flags %2X is out of mask (%2X)\n", info->invflags, EBT_VLAN_MASK); return -EINVAL; } /* Reserved VLAN ID (VID) values * ----------------------------- * 0 - The null VLAN ID. * 1 - The default Port VID (PVID) * 0x0FFF - Reserved for implementation use. * if_vlan.h: VLAN_GROUP_ARRAY_LEN 4096. */ if (GET_BITMASK(EBT_VLAN_ID)) { if (!!info->id) { /* if id!=0 => check vid range */ if (info->id > VLAN_GROUP_ARRAY_LEN) { pr_debug("id %d is out of range (1-4096)\n", info->id); return -EINVAL; } /* Note: This is valid VLAN-tagged frame point. * Any value of user_priority are acceptable, * but should be ignored according to 802.1Q Std. * So we just drop the prio flag. */ info->bitmask &= ~EBT_VLAN_PRIO; } /* Else, id=0 (null VLAN ID) => user_priority range (any?) */ } if (GET_BITMASK(EBT_VLAN_PRIO)) { if ((unsigned char) info->prio > 7) { pr_debug("prio %d is out of range (0-7)\n", info->prio); return -EINVAL; } } /* Check for encapsulated proto range - it is possible to be * any value for u_short range. * if_ether.h: ETH_ZLEN 60 - Min. octets in frame sans FCS */ if (GET_BITMASK(EBT_VLAN_ENCAP)) { if ((unsigned short) ntohs(info->encap) < ETH_ZLEN) { pr_debug("encap frame length %d is less than " "minimal\n", ntohs(info->encap)); return -EINVAL; } } return 0; } static struct xt_match ebt_vlan_mt_reg __read_mostly = { .name = "vlan", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_vlan_mt, .checkentry = ebt_vlan_mt_check, .matchsize = sizeof(struct ebt_vlan_info), .me = THIS_MODULE, }; static int __init ebt_vlan_init(void) { pr_debug("ebtables 802.1Q extension module v" MODULE_VERS "\n"); return xt_register_match(&ebt_vlan_mt_reg); } static void __exit ebt_vlan_fini(void) { xt_unregister_match(&ebt_vlan_mt_reg); } module_init(ebt_vlan_init); module_exit(ebt_vlan_fini);
gpl-2.0
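For reference, a tiny standalone decode of the 802.1Q TCI exactly as ebt_vlan_mt() splits it above (3 priority bits, 1 CFI bit which is ignored, 12-bit VID); the sample TCI value is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t tci  = 0x6064;             /* example: prio 3, CFI 0, VID 100 */
	uint16_t vid  = tci & 0x0fff;       /* VLAN_VID_MASK */
	uint8_t  prio = (tci >> 13) & 0x7;  /* user_priority */

	printf("vid=%u prio=%u\n", vid, prio);   /* prints: vid=100 prio=3 */
	return 0;
}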
vathpela/linux-esrt
net/core/tso.c
1148
2108
#include <linux/export.h> #include <net/ip.h> #include <net/tso.h> #include <asm/unaligned.h> /* Calculate expected number of TX descriptors */ int tso_count_descs(struct sk_buff *skb) { /* The Marvell Way */ return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags; } EXPORT_SYMBOL(tso_count_descs); void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last) { struct iphdr *iph; struct tcphdr *tcph; int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); int mac_hdr_len = skb_network_offset(skb); memcpy(hdr, skb->data, hdr_len); iph = (struct iphdr *)(hdr + mac_hdr_len); iph->id = htons(tso->ip_id); iph->tot_len = htons(size + hdr_len - mac_hdr_len); tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); put_unaligned_be32(tso->tcp_seq, &tcph->seq); tso->ip_id++; if (!is_last) { /* Clear all special flags for not last packet */ tcph->psh = 0; tcph->fin = 0; tcph->rst = 0; } } EXPORT_SYMBOL(tso_build_hdr); void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size) { tso->tcp_seq += size; tso->size -= size; tso->data += size; if ((tso->size == 0) && (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; /* Move to next segment */ tso->size = frag->size; tso->data = page_address(frag->page.p) + frag->page_offset; tso->next_frag_idx++; } } EXPORT_SYMBOL(tso_build_data); void tso_start(struct sk_buff *skb, struct tso_t *tso) { int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); tso->ip_id = ntohs(ip_hdr(skb)->id); tso->tcp_seq = ntohl(tcp_hdr(skb)->seq); tso->next_frag_idx = 0; /* Build first data */ tso->size = skb_headlen(skb) - hdr_len; tso->data = skb->data + hdr_len; if ((tso->size == 0) && (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; /* Move to next segment */ tso->size = frag->size; tso->data = page_address(frag->page.p) + frag->page_offset; tso->next_frag_idx++; } } EXPORT_SYMBOL(tso_start);
gpl-2.0
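A small illustration of the descriptor estimate used by tso_count_descs() above: two descriptors per GSO segment (header plus first data chunk) and one per paged fragment. The segment and fragment counts below are hypothetical.

#include <stdio.h>

static int tso_desc_estimate(int gso_segs, int nr_frags)
{
	return gso_segs * 2 + nr_frags;
}

int main(void)
{
	/* e.g. a large TSO skb cut into 45 MSS-sized segments,
	 * carried in 16 page fragments */
	printf("descriptors: %d\n", tso_desc_estimate(45, 16));  /* 106 */
	return 0;
}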
anoane/Ultrakernel
net/netfilter/ipvs/ip_vs_nq.c
1660
3557
/* * IPVS: Never Queue scheduling module * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * */ /* * The NQ algorithm adopts a two-speed model. When there is an idle server * available, the job will be sent to the idle server, instead of waiting * for a fast one. When there is no idle server available, the job will be * sent to the server that minimize its expected delay (The Shortest * Expected Delay scheduling algorithm). * * See the following paper for more information: * A. Weinrib and S. Shenker, Greed is not enough: Adaptive load sharing * in large heterogeneous systems. In Proceedings IEEE INFOCOM'88, * pages 986-994, 1988. * * Thanks must go to Marko Buuri <marko@buuri.name> for talking NQ to me. * * The difference between NQ and SED is that NQ can improve overall * system utilization. * */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <net/ip_vs.h> static inline unsigned int ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) { /* * We only use the active connection number in the cost * calculation here. */ return atomic_read(&dest->activeconns) + 1; } /* * Weighted Least Connection scheduling */ static struct ip_vs_dest * ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) { struct ip_vs_dest *dest, *least = NULL; unsigned int loh = 0, doh; IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); /* * We calculate the load of each dest server as follows: * (server expected overhead) / dest->weight * * Remember -- no floats in kernel mode!!! * The comparison of h1*w2 > h2*w1 is equivalent to that of * h1/w1 > h2/w2 * if every weight is larger than zero. * * The server with weight=0 is quiesced and will not receive any * new connections. */ list_for_each_entry(dest, &svc->destinations, n_list) { if (dest->flags & IP_VS_DEST_F_OVERLOAD || !atomic_read(&dest->weight)) continue; doh = ip_vs_nq_dest_overhead(dest); /* return the server directly if it is idle */ if (atomic_read(&dest->activeconns) == 0) { least = dest; loh = doh; goto out; } if (!least || (loh * atomic_read(&dest->weight) > doh * atomic_read(&least->weight))) { least = dest; loh = doh; } } if (!least) { IP_VS_ERR_RL("NQ: no destination available\n"); return NULL; } out: IP_VS_DBG_BUF(6, "NQ: server %s:%u " "activeconns %d refcnt %d weight %d overhead %d\n", IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port), atomic_read(&least->activeconns), atomic_read(&least->refcnt), atomic_read(&least->weight), loh); return least; } static struct ip_vs_scheduler ip_vs_nq_scheduler = { .name = "nq", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list), .schedule = ip_vs_nq_schedule, }; static int __init ip_vs_nq_init(void) { return register_ip_vs_scheduler(&ip_vs_nq_scheduler); } static void __exit ip_vs_nq_cleanup(void) { unregister_ip_vs_scheduler(&ip_vs_nq_scheduler); } module_init(ip_vs_nq_init); module_exit(ip_vs_nq_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
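A userspace sketch of the selection rule in ip_vs_nq_schedule() above: idle servers are taken immediately, otherwise the cross-product comparison loh*w_new > doh*w_least picks the destination with the smallest overhead/weight ratio without floating point. The struct below is a stand-in, not the real ip_vs_dest.

#include <stddef.h>
#include <stdio.h>

struct dest { unsigned int active_conns; int weight; };

static struct dest *nq_pick(struct dest *d, size_t n)
{
	struct dest *least = NULL;
	unsigned int loh = 0, doh;
	size_t i;

	for (i = 0; i < n; i++) {
		if (d[i].weight <= 0)           /* weight 0 => quiesced server */
			continue;
		doh = d[i].active_conns + 1;    /* "expected overhead" */
		if (d[i].active_conns == 0)
			return &d[i];           /* idle server: take it now */
		if (!least ||
		    loh * (unsigned int)d[i].weight >
		    doh * (unsigned int)least->weight) {
			least = &d[i];
			loh = doh;
		}
	}
	return least;
}

int main(void)
{
	struct dest pool[3] = { { 4, 1 }, { 2, 2 }, { 5, 3 } };
	struct dest *d = nq_pick(pool, 3);

	/* ratios are 5/1, 3/2, 6/3 -> the second server wins */
	printf("picked server with %u active conns\n", d->active_conns);
	return 0;
}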
thanhphat11/Googy-Max-N4-TW511-Kernel
drivers/md/persistent-data/dm-space-map-common.c
1660
16313
/* * Copyright (C) 2011 Red Hat, Inc. * * This file is released under the GPL. */ #include "dm-space-map-common.h" #include "dm-transaction-manager.h" #include <linux/bitops.h> #include <linux/device-mapper.h> #define DM_MSG_PREFIX "space map common" /*----------------------------------------------------------------*/ /* * Index validator. */ #define INDEX_CSUM_XOR 160478 static void index_prepare_for_write(struct dm_block_validator *v, struct dm_block *b, size_t block_size) { struct disk_metadata_index *mi_le = dm_block_data(b); mi_le->blocknr = cpu_to_le64(dm_block_location(b)); mi_le->csum = cpu_to_le32(dm_bm_checksum(&mi_le->padding, block_size - sizeof(__le32), INDEX_CSUM_XOR)); } static int index_check(struct dm_block_validator *v, struct dm_block *b, size_t block_size) { struct disk_metadata_index *mi_le = dm_block_data(b); __le32 csum_disk; if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) { DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu", le64_to_cpu(mi_le->blocknr), dm_block_location(b)); return -ENOTBLK; } csum_disk = cpu_to_le32(dm_bm_checksum(&mi_le->padding, block_size - sizeof(__le32), INDEX_CSUM_XOR)); if (csum_disk != mi_le->csum) { DMERR_LIMIT("index_check failed: csum %u != wanted %u", le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum)); return -EILSEQ; } return 0; } static struct dm_block_validator index_validator = { .name = "index", .prepare_for_write = index_prepare_for_write, .check = index_check }; /*----------------------------------------------------------------*/ /* * Bitmap validator */ #define BITMAP_CSUM_XOR 240779 static void bitmap_prepare_for_write(struct dm_block_validator *v, struct dm_block *b, size_t block_size) { struct disk_bitmap_header *disk_header = dm_block_data(b); disk_header->blocknr = cpu_to_le64(dm_block_location(b)); disk_header->csum = cpu_to_le32(dm_bm_checksum(&disk_header->not_used, block_size - sizeof(__le32), BITMAP_CSUM_XOR)); } static int bitmap_check(struct dm_block_validator *v, struct dm_block *b, size_t block_size) { struct disk_bitmap_header *disk_header = dm_block_data(b); __le32 csum_disk; if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) { DMERR_LIMIT("bitmap check failed: blocknr %llu != wanted %llu", le64_to_cpu(disk_header->blocknr), dm_block_location(b)); return -ENOTBLK; } csum_disk = cpu_to_le32(dm_bm_checksum(&disk_header->not_used, block_size - sizeof(__le32), BITMAP_CSUM_XOR)); if (csum_disk != disk_header->csum) { DMERR_LIMIT("bitmap check failed: csum %u != wanted %u", le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum)); return -EILSEQ; } return 0; } static struct dm_block_validator dm_sm_bitmap_validator = { .name = "sm_bitmap", .prepare_for_write = bitmap_prepare_for_write, .check = bitmap_check }; /*----------------------------------------------------------------*/ #define ENTRIES_PER_WORD 32 #define ENTRIES_SHIFT 5 static void *dm_bitmap_data(struct dm_block *b) { return dm_block_data(b) + sizeof(struct disk_bitmap_header); } #define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL static unsigned bitmap_word_used(void *addr, unsigned b) { __le64 *words_le = addr; __le64 *w_le = words_le + (b >> ENTRIES_SHIFT); uint64_t bits = le64_to_cpu(*w_le); uint64_t mask = (bits + WORD_MASK_HIGH + 1) & WORD_MASK_HIGH; return !(~bits & mask); } static unsigned sm_lookup_bitmap(void *addr, unsigned b) { __le64 *words_le = addr; __le64 *w_le = words_le + (b >> ENTRIES_SHIFT); unsigned hi, lo; b = (b & (ENTRIES_PER_WORD - 1)) << 1; hi = !!test_bit_le(b, (void *) w_le); lo = !!test_bit_le(b + 1, (void 
*) w_le); return (hi << 1) | lo; } static void sm_set_bitmap(void *addr, unsigned b, unsigned val) { __le64 *words_le = addr; __le64 *w_le = words_le + (b >> ENTRIES_SHIFT); b = (b & (ENTRIES_PER_WORD - 1)) << 1; if (val & 2) __set_bit_le(b, (void *) w_le); else __clear_bit_le(b, (void *) w_le); if (val & 1) __set_bit_le(b + 1, (void *) w_le); else __clear_bit_le(b + 1, (void *) w_le); } static int sm_find_free(void *addr, unsigned begin, unsigned end, unsigned *result) { while (begin < end) { if (!(begin & (ENTRIES_PER_WORD - 1)) && bitmap_word_used(addr, begin)) { begin += ENTRIES_PER_WORD; continue; } if (!sm_lookup_bitmap(addr, begin)) { *result = begin; return 0; } begin++; } return -ENOSPC; } /*----------------------------------------------------------------*/ static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm) { ll->tm = tm; ll->bitmap_info.tm = tm; ll->bitmap_info.levels = 1; /* * Because the new bitmap blocks are created via a shadow * operation, the old entry has already had its reference count * decremented and we don't need the btree to do any bookkeeping. */ ll->bitmap_info.value_type.size = sizeof(struct disk_index_entry); ll->bitmap_info.value_type.inc = NULL; ll->bitmap_info.value_type.dec = NULL; ll->bitmap_info.value_type.equal = NULL; ll->ref_count_info.tm = tm; ll->ref_count_info.levels = 1; ll->ref_count_info.value_type.size = sizeof(uint32_t); ll->ref_count_info.value_type.inc = NULL; ll->ref_count_info.value_type.dec = NULL; ll->ref_count_info.value_type.equal = NULL; ll->block_size = dm_bm_block_size(dm_tm_get_bm(tm)); if (ll->block_size > (1 << 30)) { DMERR("block size too big to hold bitmaps"); return -EINVAL; } ll->entries_per_block = (ll->block_size - sizeof(struct disk_bitmap_header)) * ENTRIES_PER_BYTE; ll->nr_blocks = 0; ll->bitmap_root = 0; ll->ref_count_root = 0; ll->bitmap_index_changed = false; return 0; } int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks) { int r; dm_block_t i, nr_blocks, nr_indexes; unsigned old_blocks, blocks; nr_blocks = ll->nr_blocks + extra_blocks; old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block); blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block); nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block); if (nr_indexes > ll->max_entries(ll)) { DMERR("space map too large"); return -EINVAL; } /* * We need to set this before the dm_tm_new_block() call below. 
*/ ll->nr_blocks = nr_blocks; for (i = old_blocks; i < blocks; i++) { struct dm_block *b; struct disk_index_entry idx; r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b); if (r < 0) return r; idx.blocknr = cpu_to_le64(dm_block_location(b)); r = dm_tm_unlock(ll->tm, b); if (r < 0) return r; idx.nr_free = cpu_to_le32(ll->entries_per_block); idx.none_free_before = 0; r = ll->save_ie(ll, i, &idx); if (r < 0) return r; } return 0; } int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result) { int r; dm_block_t index = b; struct disk_index_entry ie_disk; struct dm_block *blk; b = do_div(index, ll->entries_per_block); r = ll->load_ie(ll, index, &ie_disk); if (r < 0) return r; r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr), &dm_sm_bitmap_validator, &blk); if (r < 0) return r; *result = sm_lookup_bitmap(dm_bitmap_data(blk), b); return dm_tm_unlock(ll->tm, blk); } int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result) { __le32 le_rc; int r = sm_ll_lookup_bitmap(ll, b, result); if (r) return r; if (*result != 3) return r; r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc); if (r < 0) return r; *result = le32_to_cpu(le_rc); return r; } int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, dm_block_t end, dm_block_t *result) { int r; struct disk_index_entry ie_disk; dm_block_t i, index_begin = begin; dm_block_t index_end = dm_sector_div_up(end, ll->entries_per_block); /* * FIXME: Use shifts */ begin = do_div(index_begin, ll->entries_per_block); end = do_div(end, ll->entries_per_block); for (i = index_begin; i < index_end; i++, begin = 0) { struct dm_block *blk; unsigned position; uint32_t bit_end; r = ll->load_ie(ll, i, &ie_disk); if (r < 0) return r; if (le32_to_cpu(ie_disk.nr_free) == 0) continue; r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr), &dm_sm_bitmap_validator, &blk); if (r < 0) return r; bit_end = (i == index_end - 1) ? end : ll->entries_per_block; r = sm_find_free(dm_bitmap_data(blk), max_t(unsigned, begin, le32_to_cpu(ie_disk.none_free_before)), bit_end, &position); if (r == -ENOSPC) { /* * This might happen because we started searching * part way through the bitmap. 
*/ dm_tm_unlock(ll->tm, blk); continue; } else if (r < 0) { dm_tm_unlock(ll->tm, blk); return r; } r = dm_tm_unlock(ll->tm, blk); if (r < 0) return r; *result = i * ll->entries_per_block + (dm_block_t) position; return 0; } return -ENOSPC; } int sm_ll_insert(struct ll_disk *ll, dm_block_t b, uint32_t ref_count, enum allocation_event *ev) { int r; uint32_t bit, old; struct dm_block *nb; dm_block_t index = b; struct disk_index_entry ie_disk; void *bm_le; int inc; bit = do_div(index, ll->entries_per_block); r = ll->load_ie(ll, index, &ie_disk); if (r < 0) return r; r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ie_disk.blocknr), &dm_sm_bitmap_validator, &nb, &inc); if (r < 0) { DMERR("dm_tm_shadow_block() failed"); return r; } ie_disk.blocknr = cpu_to_le64(dm_block_location(nb)); bm_le = dm_bitmap_data(nb); old = sm_lookup_bitmap(bm_le, bit); if (ref_count <= 2) { sm_set_bitmap(bm_le, bit, ref_count); r = dm_tm_unlock(ll->tm, nb); if (r < 0) return r; if (old > 2) { r = dm_btree_remove(&ll->ref_count_info, ll->ref_count_root, &b, &ll->ref_count_root); if (r) return r; } } else { __le32 le_rc = cpu_to_le32(ref_count); sm_set_bitmap(bm_le, bit, 3); r = dm_tm_unlock(ll->tm, nb); if (r < 0) return r; __dm_bless_for_disk(&le_rc); r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc, &ll->ref_count_root); if (r < 0) { DMERR("ref count insert failed"); return r; } } if (ref_count && !old) { *ev = SM_ALLOC; ll->nr_allocated++; le32_add_cpu(&ie_disk.nr_free, -1); if (le32_to_cpu(ie_disk.none_free_before) == bit) ie_disk.none_free_before = cpu_to_le32(bit + 1); } else if (old && !ref_count) { *ev = SM_FREE; ll->nr_allocated--; le32_add_cpu(&ie_disk.nr_free, 1); ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit)); } return ll->save_ie(ll, index, &ie_disk); } int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) { int r; uint32_t rc; r = sm_ll_lookup(ll, b, &rc); if (r) return r; return sm_ll_insert(ll, b, rc + 1, ev); } int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) { int r; uint32_t rc; r = sm_ll_lookup(ll, b, &rc); if (r) return r; if (!rc) return -EINVAL; return sm_ll_insert(ll, b, rc - 1, ev); } int sm_ll_commit(struct ll_disk *ll) { int r = 0; if (ll->bitmap_index_changed) { r = ll->commit(ll); if (!r) ll->bitmap_index_changed = false; } return r; } /*----------------------------------------------------------------*/ static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index, struct disk_index_entry *ie) { memcpy(ie, ll->mi_le.index + index, sizeof(*ie)); return 0; } static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index, struct disk_index_entry *ie) { ll->bitmap_index_changed = true; memcpy(ll->mi_le.index + index, ie, sizeof(*ie)); return 0; } static int metadata_ll_init_index(struct ll_disk *ll) { int r; struct dm_block *b; r = dm_tm_new_block(ll->tm, &index_validator, &b); if (r < 0) return r; memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le)); ll->bitmap_root = dm_block_location(b); return dm_tm_unlock(ll->tm, b); } static int metadata_ll_open(struct ll_disk *ll) { int r; struct dm_block *block; r = dm_tm_read_lock(ll->tm, ll->bitmap_root, &index_validator, &block); if (r) return r; memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le)); return dm_tm_unlock(ll->tm, block); } static dm_block_t metadata_ll_max_entries(struct ll_disk *ll) { return MAX_METADATA_BITMAPS; } static int metadata_ll_commit(struct ll_disk *ll) { int r, inc; struct dm_block *b; r = 
dm_tm_shadow_block(ll->tm, ll->bitmap_root, &index_validator, &b, &inc); if (r) return r; memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le)); ll->bitmap_root = dm_block_location(b); return dm_tm_unlock(ll->tm, b); } int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm) { int r; r = sm_ll_init(ll, tm); if (r < 0) return r; ll->load_ie = metadata_ll_load_ie; ll->save_ie = metadata_ll_save_ie; ll->init_index = metadata_ll_init_index; ll->open_index = metadata_ll_open; ll->max_entries = metadata_ll_max_entries; ll->commit = metadata_ll_commit; ll->nr_blocks = 0; ll->nr_allocated = 0; r = ll->init_index(ll); if (r < 0) return r; r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root); if (r < 0) return r; return 0; } int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm, void *root_le, size_t len) { int r; struct disk_sm_root *smr = root_le; if (len < sizeof(struct disk_sm_root)) { DMERR("sm_metadata root too small"); return -ENOMEM; } r = sm_ll_init(ll, tm); if (r < 0) return r; ll->load_ie = metadata_ll_load_ie; ll->save_ie = metadata_ll_save_ie; ll->init_index = metadata_ll_init_index; ll->open_index = metadata_ll_open; ll->max_entries = metadata_ll_max_entries; ll->commit = metadata_ll_commit; ll->nr_blocks = le64_to_cpu(smr->nr_blocks); ll->nr_allocated = le64_to_cpu(smr->nr_allocated); ll->bitmap_root = le64_to_cpu(smr->bitmap_root); ll->ref_count_root = le64_to_cpu(smr->ref_count_root); return ll->open_index(ll); } /*----------------------------------------------------------------*/ static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index, struct disk_index_entry *ie) { return dm_btree_lookup(&ll->bitmap_info, ll->bitmap_root, &index, ie); } static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index, struct disk_index_entry *ie) { __dm_bless_for_disk(ie); return dm_btree_insert(&ll->bitmap_info, ll->bitmap_root, &index, ie, &ll->bitmap_root); } static int disk_ll_init_index(struct ll_disk *ll) { return dm_btree_empty(&ll->bitmap_info, &ll->bitmap_root); } static int disk_ll_open(struct ll_disk *ll) { /* nothing to do */ return 0; } static dm_block_t disk_ll_max_entries(struct ll_disk *ll) { return -1ULL; } static int disk_ll_commit(struct ll_disk *ll) { return 0; } int sm_ll_new_disk(struct ll_disk *ll, struct dm_transaction_manager *tm) { int r; r = sm_ll_init(ll, tm); if (r < 0) return r; ll->load_ie = disk_ll_load_ie; ll->save_ie = disk_ll_save_ie; ll->init_index = disk_ll_init_index; ll->open_index = disk_ll_open; ll->max_entries = disk_ll_max_entries; ll->commit = disk_ll_commit; ll->nr_blocks = 0; ll->nr_allocated = 0; r = ll->init_index(ll); if (r < 0) return r; r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root); if (r < 0) return r; return 0; } int sm_ll_open_disk(struct ll_disk *ll, struct dm_transaction_manager *tm, void *root_le, size_t len) { int r; struct disk_sm_root *smr = root_le; if (len < sizeof(struct disk_sm_root)) { DMERR("sm_metadata root too small"); return -ENOMEM; } r = sm_ll_init(ll, tm); if (r < 0) return r; ll->load_ie = disk_ll_load_ie; ll->save_ie = disk_ll_save_ie; ll->init_index = disk_ll_init_index; ll->open_index = disk_ll_open; ll->max_entries = disk_ll_max_entries; ll->commit = disk_ll_commit; ll->nr_blocks = le64_to_cpu(smr->nr_blocks); ll->nr_allocated = le64_to_cpu(smr->nr_allocated); ll->bitmap_root = le64_to_cpu(smr->bitmap_root); ll->ref_count_root = le64_to_cpu(smr->ref_count_root); return ll->open_index(ll); } 
/*----------------------------------------------------------------*/
gpl-2.0
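A simplified sketch of the two-bit reference-count encoding used by sm_lookup_bitmap()/sm_set_bitmap() above: counts 0..2 live in the bitmap and the value 3 means the real count is in the overflow btree. The real code packs the two bits with little-endian bit operations in a different bit order; this only shows the packing idea against a plain word array.

#include <stdint.h>
#include <stdio.h>

static unsigned sm_get2(const uint64_t *words, unsigned b)
{
	const uint64_t *w = words + (b >> 5);     /* 32 entries per 64-bit word */
	unsigned shift = (b & 31) * 2;

	return (unsigned)((*w >> shift) & 3);
}

static void sm_set2(uint64_t *words, unsigned b, unsigned val)
{
	uint64_t *w = words + (b >> 5);
	unsigned shift = (b & 31) * 2;

	*w = (*w & ~((uint64_t)3 << shift)) | ((uint64_t)(val & 3) << shift);
}

int main(void)
{
	uint64_t words[2] = { 0, 0 };

	sm_set2(words, 5, 2);    /* block 5: ref count 2                 */
	sm_set2(words, 40, 3);   /* block 40: "real count in the btree"  */
	printf("%u %u %u\n", sm_get2(words, 5), sm_get2(words, 40),
	       sm_get2(words, 7));   /* prints: 2 3 0 */
	return 0;
}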
denzfarid/rndc-kernel
arch/mips/mm/sc-ip22.c
1916
3885
/* * sc-ip22.c: Indy cache management functions. * * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org), * derived from r4xx0.c by David S. Miller (dm@engr.sgi.com). */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <asm/bcache.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/bootinfo.h> #include <asm/sgi/ip22.h> #include <asm/sgi/mc.h> /* Secondary cache size in bytes, if present. */ static unsigned long scache_size; #undef DEBUG_CACHE #define SC_SIZE 0x00080000 #define SC_LINE 32 #define CI_MASK (SC_SIZE - SC_LINE) #define SC_INDEX(n) ((n) & CI_MASK) static inline void indy_sc_wipe(unsigned long first, unsigned long last) { unsigned long tmp; __asm__ __volatile__( ".set\tpush\t\t\t# indy_sc_wipe\n\t" ".set\tnoreorder\n\t" ".set\tmips3\n\t" ".set\tnoat\n\t" "mfc0\t%2, $12\n\t" "li\t$1, 0x80\t\t\t# Go 64 bit\n\t" "mtc0\t$1, $12\n\t" "dli\t$1, 0x9000000080000000\n\t" "or\t%0, $1\t\t\t# first line to flush\n\t" "or\t%1, $1\t\t\t# last line to flush\n\t" ".set\tat\n\t" "1:\tsw\t$0, 0(%0)\n\t" "bne\t%0, %1, 1b\n\t" " daddu\t%0, 32\n\t" "mtc0\t%2, $12\t\t\t# Back to 32 bit\n\t" "nop; nop; nop; nop;\n\t" ".set\tpop" : "=r" (first), "=r" (last), "=&r" (tmp) : "0" (first), "1" (last)); } static void indy_sc_wback_invalidate(unsigned long addr, unsigned long size) { unsigned long first_line, last_line; unsigned long flags; #ifdef DEBUG_CACHE printk("indy_sc_wback_invalidate[%08lx,%08lx]", addr, size); #endif /* Catch bad driver code */ BUG_ON(size == 0); /* Which lines to flush? */ first_line = SC_INDEX(addr); last_line = SC_INDEX(addr + size - 1); local_irq_save(flags); if (first_line <= last_line) { indy_sc_wipe(first_line, last_line); goto out; } indy_sc_wipe(first_line, SC_SIZE - SC_LINE); indy_sc_wipe(0, last_line); out: local_irq_restore(flags); } static void indy_sc_enable(void) { unsigned long addr, tmp1, tmp2; /* This is really cool... */ #ifdef DEBUG_CACHE printk("Enabling R4600 SCACHE\n"); #endif __asm__ __volatile__( ".set\tpush\n\t" ".set\tnoreorder\n\t" ".set\tmips3\n\t" "mfc0\t%2, $12\n\t" "nop; nop; nop; nop;\n\t" "li\t%1, 0x80\n\t" "mtc0\t%1, $12\n\t" "nop; nop; nop; nop;\n\t" "li\t%0, 0x1\n\t" "dsll\t%0, 31\n\t" "lui\t%1, 0x9000\n\t" "dsll32\t%1, 0\n\t" "or\t%0, %1, %0\n\t" "sb\t$0, 0(%0)\n\t" "mtc0\t$0, $12\n\t" "nop; nop; nop; nop;\n\t" "mtc0\t%2, $12\n\t" "nop; nop; nop; nop;\n\t" ".set\tpop" : "=r" (tmp1), "=r" (tmp2), "=r" (addr)); } static void indy_sc_disable(void) { unsigned long tmp1, tmp2, tmp3; #ifdef DEBUG_CACHE printk("Disabling R4600 SCACHE\n"); #endif __asm__ __volatile__( ".set\tpush\n\t" ".set\tnoreorder\n\t" ".set\tmips3\n\t" "li\t%0, 0x1\n\t" "dsll\t%0, 31\n\t" "lui\t%1, 0x9000\n\t" "dsll32\t%1, 0\n\t" "or\t%0, %1, %0\n\t" "mfc0\t%2, $12\n\t" "nop; nop; nop; nop\n\t" "li\t%1, 0x80\n\t" "mtc0\t%1, $12\n\t" "nop; nop; nop; nop\n\t" "sh\t$0, 0(%0)\n\t" "mtc0\t$0, $12\n\t" "nop; nop; nop; nop\n\t" "mtc0\t%2, $12\n\t" "nop; nop; nop; nop\n\t" ".set\tpop" : "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)); } static inline int __init indy_sc_probe(void) { unsigned int size = ip22_eeprom_read(&sgimc->eeprom, 17); if (size == 0) return 0; size <<= PAGE_SHIFT; printk(KERN_INFO "R4600/R5000 SCACHE size %dK, linesize 32 bytes.\n", size >> 10); scache_size = size; return 1; } /* XXX Check with wje if the Indy caches can differenciate between writeback + invalidate and just invalidate. 
*/ static struct bcache_ops indy_sc_ops = { .bc_enable = indy_sc_enable, .bc_disable = indy_sc_disable, .bc_wback_inv = indy_sc_wback_invalidate, .bc_inv = indy_sc_wback_invalidate }; void __cpuinit indy_sc_init(void) { if (indy_sc_probe()) { indy_sc_enable(); bcops = &indy_sc_ops; } }
gpl-2.0
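A standalone sketch of the index arithmetic behind indy_sc_wback_invalidate() above: the address range is reduced to secondary-cache line indexes, and because the index space wraps at the cache size the wipe may need two passes. The example address is arbitrary.

#include <stdio.h>

#define SC_SIZE  0x00080000UL           /* 512 KiB secondary cache */
#define SC_LINE  32UL
#define SC_INDEX(a) ((a) & (SC_SIZE - SC_LINE))

int main(void)
{
	unsigned long addr = 0x8807ffe0UL, size = 0x40UL;
	unsigned long first = SC_INDEX(addr);
	unsigned long last  = SC_INDEX(addr + size - 1);

	if (first <= last)
		printf("wipe [%#lx, %#lx]\n", first, last);
	else                                    /* range wraps the index space */
		printf("wipe [%#lx, %#lx] then [0, %#lx]\n",
		       first, SC_SIZE - SC_LINE, last);
	return 0;
}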
BorqsIndia/polaris-kernel
arch/arc/kernel/troubleshoot.c
1916
8922
/* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as */ #include <linux/ptrace.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/kdev_t.h> #include <linux/fs_struct.h> #include <linux/proc_fs.h> #include <linux/file.h> #include <asm/arcregs.h> /* * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25) * -Prints 3 regs per line and a CR. * -To continue, callee regs right after scratch, special handling of CR */ static noinline void print_reg_file(long *reg_rev, int start_num) { unsigned int i; char buf[512]; int n = 0, len = sizeof(buf); for (i = start_num; i < start_num + 13; i++) { n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t", i, (unsigned long)*reg_rev); if (((i + 1) % 3) == 0) n += scnprintf(buf + n, len - n, "\n"); /* because pt_regs has regs reversed: r12..r0, r25..r13 */ reg_rev--; } if (start_num != 0) n += scnprintf(buf + n, len - n, "\n\n"); /* To continue printing callee regs on same line as scratch regs */ if (start_num == 0) pr_info("%s", buf); else pr_cont("%s\n", buf); } static void show_callee_regs(struct callee_regs *cregs) { print_reg_file(&(cregs->r13), 13); } void print_task_path_n_nm(struct task_struct *tsk, char *buf) { struct path path; char *path_nm = NULL; struct mm_struct *mm; struct file *exe_file; mm = get_task_mm(tsk); if (!mm) goto done; exe_file = get_mm_exe_file(mm); mmput(mm); if (exe_file) { path = exe_file->f_path; path_get(&exe_file->f_path); fput(exe_file); path_nm = d_path(&path, buf, 255); path_put(&path); } done: pr_info("Path: %s\n", path_nm); } EXPORT_SYMBOL(print_task_path_n_nm); static void show_faulting_vma(unsigned long address, char *buf) { struct vm_area_struct *vma; struct inode *inode; unsigned long ino = 0; dev_t dev = 0; char *nm = buf; /* can't use print_vma_addr() yet as it doesn't check for * non-inclusive vma */ vma = find_vma(current->active_mm, address); /* check against the find_vma( ) behaviour which returns the next VMA * if the container VMA is not found */ if (vma && (vma->vm_start <= address)) { struct file *file = vma->vm_file; if (file) { struct path *path = &file->f_path; nm = d_path(path, buf, PAGE_SIZE - 1); inode = vma->vm_file->f_path.dentry->d_inode; dev = inode->i_sb->s_dev; ino = inode->i_ino; } pr_info(" @off 0x%lx in [%s]\n" " VMA: 0x%08lx to 0x%08lx\n", vma->vm_start < TASK_UNMAPPED_BASE ? address : address - vma->vm_start, nm, vma->vm_start, vma->vm_end); } else { pr_info(" @No matching VMA found\n"); } } static void show_ecr_verbose(struct pt_regs *regs) { unsigned int vec, cause_code, cause_reg; unsigned long address; cause_reg = current->thread.cause_code; pr_info("\n[ECR ]: 0x%08x => ", cause_reg); /* For Data fault, this is data address not instruction addr */ address = current->thread.fault_address; vec = cause_reg >> 16; cause_code = (cause_reg >> 8) & 0xFF; /* For DTLB Miss or ProtV, display the memory involved too */ if (vec == ECR_V_DTLB_MISS) { pr_cont("Invalid %s 0x%08lx by insn @ 0x%08lx\n", (cause_code == 0x01) ? "Read From" : ((cause_code == 0x02) ? "Write to" : "EX"), address, regs->ret); } else if (vec == ECR_V_ITLB_MISS) { pr_cont("Insn could not be fetched\n"); } else if (vec == ECR_V_MACH_CHK) { pr_cont("%s\n", (cause_code == 0x0) ? 
"Double Fault" : "Other Fatal Err"); } else if (vec == ECR_V_PROTV) { if (cause_code == ECR_C_PROTV_INST_FETCH) pr_cont("Execute from Non-exec Page\n"); else if (cause_code == ECR_C_PROTV_LOAD) pr_cont("Read from Non-readable Page\n"); else if (cause_code == ECR_C_PROTV_STORE) pr_cont("Write to Non-writable Page\n"); else if (cause_code == ECR_C_PROTV_XCHG) pr_cont("Data exchange protection violation\n"); else if (cause_code == ECR_C_PROTV_MISALIG_DATA) pr_cont("Misaligned r/w from 0x%08lx\n", address); } else if (vec == ECR_V_INSN_ERR) { pr_cont("Illegal Insn\n"); } else { pr_cont("Check Programmer's Manual\n"); } } /************************************************************************ * API called by rest of kernel ***********************************************************************/ void show_regs(struct pt_regs *regs) { struct task_struct *tsk = current; struct callee_regs *cregs; char *buf; buf = (char *)__get_free_page(GFP_TEMPORARY); if (!buf) return; print_task_path_n_nm(tsk, buf); show_regs_print_info(KERN_INFO); if (current->thread.cause_code) show_ecr_verbose(regs); pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n", current->thread.fault_address, (void *)regs->blink, (void *)regs->ret); if (user_mode(regs)) show_faulting_vma(regs->ret, buf); /* faulting code, not data */ pr_info("[STAT32]: 0x%08lx", regs->status32); #define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit : "" if (!user_mode(regs)) pr_cont(" : %2s %2s %2s %2s %2s\n", STS_BIT(regs, AE), STS_BIT(regs, A2), STS_BIT(regs, A1), STS_BIT(regs, E2), STS_BIT(regs, E1)); pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n", regs->bta, regs->sp, regs->fp); pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n", regs->lp_start, regs->lp_end, regs->lp_count); /* print regs->r0 thru regs->r12 * Sequential printing was generating horrible code */ print_reg_file(&(regs->r0), 0); /* If Callee regs were saved, display them too */ cregs = (struct callee_regs *)current->thread.callee_reg; if (cregs) show_callee_regs(cregs); free_page((unsigned long)buf); } void show_kernel_fault_diag(const char *str, struct pt_regs *regs, unsigned long address, unsigned long cause_reg) { current->thread.fault_address = address; current->thread.cause_code = cause_reg; /* Caller and Callee regs */ show_regs(regs); /* Show stack trace if this Fatality happened in kernel mode */ if (!user_mode(regs)) show_stacktrace(current, regs); } #ifdef CONFIG_DEBUG_FS #include <linux/module.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/namei.h> #include <linux/debugfs.h> static struct dentry *test_dentry; static struct dentry *test_dir; static struct dentry *test_u32_dentry; static u32 clr_on_read = 1; #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT u32 numitlb, numdtlb, num_pte_not_present; static int fill_display_data(char *kbuf) { size_t num = 0; num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb); num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb); num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present); if (clr_on_read) numitlb = numdtlb = num_pte_not_present = 0; return num; } static int tlb_stats_open(struct inode *inode, struct file *file) { file->private_data = (void *)__get_free_page(GFP_KERNEL); return 0; } /* called on user read(): display the couters */ static ssize_t tlb_stats_output(struct file *file, /* file descriptor */ char __user *user_buf, /* user buffer */ size_t len, /* length of buffer */ loff_t *offset) /* offset in the file */ { size_t 
num; char *kbuf = (char *)file->private_data; /* All of the data can he shoved in one iteration */ if (*offset != 0) return 0; num = fill_display_data(kbuf); /* simple_read_from_buffer() is helper for copy to user space It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset @3 (offset) into the user space address starting at @1 (user_buf). @5 (len) is max size of user buffer */ return simple_read_from_buffer(user_buf, num, offset, kbuf, len); } /* called on user write : clears the counters */ static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf, size_t length, loff_t *offset) { numitlb = numdtlb = num_pte_not_present = 0; return length; } static int tlb_stats_close(struct inode *inode, struct file *file) { free_page((unsigned long)(file->private_data)); return 0; } static const struct file_operations tlb_stats_file_ops = { .read = tlb_stats_output, .write = tlb_stats_clear, .open = tlb_stats_open, .release = tlb_stats_close }; #endif static int __init arc_debugfs_init(void) { test_dir = debugfs_create_dir("arc", NULL); #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL, &tlb_stats_file_ops); #endif test_u32_dentry = debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read); return 0; } module_init(arc_debugfs_init); static void __exit arc_debugfs_exit(void) { debugfs_remove(test_u32_dentry); debugfs_remove(test_dentry); debugfs_remove(test_dir); } module_exit(arc_debugfs_exit); #endif
gpl-2.0
prasanna08/android_kernel_yu_msm8916
drivers/tty/serial/apbuart.c
2172
16393
/* * Driver for GRLIB serial ports (APBUART) * * Based on linux/drivers/serial/amba.c * * Copyright (C) 2000 Deep Blue Solutions Ltd. * Copyright (C) 2003 Konrad Eisele <eiselekd@web.de> * Copyright (C) 2006 Daniel Hellstrom <daniel@gaisler.com>, Aeroflex Gaisler AB * Copyright (C) 2008 Gilead Kutnick <kutnickg@zin-tech.com> * Copyright (C) 2009 Kristoffer Glembo <kristoffer@gaisler.com>, Aeroflex Gaisler AB */ #if defined(CONFIG_SERIAL_GRLIB_GAISLER_APBUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/kthread.h> #include <linux/device.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/serial_core.h> #include <asm/irq.h> #include "apbuart.h" #define SERIAL_APBUART_MAJOR TTY_MAJOR #define SERIAL_APBUART_MINOR 64 #define UART_DUMMY_RSR_RX 0x8000 /* for ignore all read */ static void apbuart_tx_chars(struct uart_port *port); static void apbuart_stop_tx(struct uart_port *port) { unsigned int cr; cr = UART_GET_CTRL(port); cr &= ~UART_CTRL_TI; UART_PUT_CTRL(port, cr); } static void apbuart_start_tx(struct uart_port *port) { unsigned int cr; cr = UART_GET_CTRL(port); cr |= UART_CTRL_TI; UART_PUT_CTRL(port, cr); if (UART_GET_STATUS(port) & UART_STATUS_THE) apbuart_tx_chars(port); } static void apbuart_stop_rx(struct uart_port *port) { unsigned int cr; cr = UART_GET_CTRL(port); cr &= ~(UART_CTRL_RI); UART_PUT_CTRL(port, cr); } static void apbuart_enable_ms(struct uart_port *port) { /* No modem status change interrupts for APBUART */ } static void apbuart_rx_chars(struct uart_port *port) { unsigned int status, ch, rsr, flag; unsigned int max_chars = port->fifosize; status = UART_GET_STATUS(port); while (UART_RX_DATA(status) && (max_chars--)) { ch = UART_GET_CHAR(port); flag = TTY_NORMAL; port->icount.rx++; rsr = UART_GET_STATUS(port) | UART_DUMMY_RSR_RX; UART_PUT_STATUS(port, 0); if (rsr & UART_STATUS_ERR) { if (rsr & UART_STATUS_BR) { rsr &= ~(UART_STATUS_FE | UART_STATUS_PE); port->icount.brk++; if (uart_handle_break(port)) goto ignore_char; } else if (rsr & UART_STATUS_PE) { port->icount.parity++; } else if (rsr & UART_STATUS_FE) { port->icount.frame++; } if (rsr & UART_STATUS_OE) port->icount.overrun++; rsr &= port->read_status_mask; if (rsr & UART_STATUS_PE) flag = TTY_PARITY; else if (rsr & UART_STATUS_FE) flag = TTY_FRAME; } if (uart_handle_sysrq_char(port, ch)) goto ignore_char; uart_insert_char(port, rsr, UART_STATUS_OE, ch, flag); ignore_char: status = UART_GET_STATUS(port); } tty_flip_buffer_push(&port->state->port); } static void apbuart_tx_chars(struct uart_port *port) { struct circ_buf *xmit = &port->state->xmit; int count; if (port->x_char) { UART_PUT_CHAR(port, port->x_char); port->icount.tx++; port->x_char = 0; return; } if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { apbuart_stop_tx(port); return; } /* amba: fill FIFO */ count = port->fifosize >> 1; do { UART_PUT_CHAR(port, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; if (uart_circ_empty(xmit)) break; } while (--count > 0); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); if (uart_circ_empty(xmit)) apbuart_stop_tx(port); } static irqreturn_t apbuart_int(int irq, void *dev_id) { 
struct uart_port *port = dev_id; unsigned int status; spin_lock(&port->lock); status = UART_GET_STATUS(port); if (status & UART_STATUS_DR) apbuart_rx_chars(port); if (status & UART_STATUS_THE) apbuart_tx_chars(port); spin_unlock(&port->lock); return IRQ_HANDLED; } static unsigned int apbuart_tx_empty(struct uart_port *port) { unsigned int status = UART_GET_STATUS(port); return status & UART_STATUS_THE ? TIOCSER_TEMT : 0; } static unsigned int apbuart_get_mctrl(struct uart_port *port) { /* The GRLIB APBUART handles flow control in hardware */ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; } static void apbuart_set_mctrl(struct uart_port *port, unsigned int mctrl) { /* The GRLIB APBUART handles flow control in hardware */ } static void apbuart_break_ctl(struct uart_port *port, int break_state) { /* We don't support sending break */ } static int apbuart_startup(struct uart_port *port) { int retval; unsigned int cr; /* Allocate the IRQ */ retval = request_irq(port->irq, apbuart_int, 0, "apbuart", port); if (retval) return retval; /* Finally, enable interrupts */ cr = UART_GET_CTRL(port); UART_PUT_CTRL(port, cr | UART_CTRL_RE | UART_CTRL_TE | UART_CTRL_RI | UART_CTRL_TI); return 0; } static void apbuart_shutdown(struct uart_port *port) { unsigned int cr; /* disable all interrupts, disable the port */ cr = UART_GET_CTRL(port); UART_PUT_CTRL(port, cr & ~(UART_CTRL_RE | UART_CTRL_TE | UART_CTRL_RI | UART_CTRL_TI)); /* Free the interrupt */ free_irq(port->irq, port); } static void apbuart_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { unsigned int cr; unsigned long flags; unsigned int baud, quot; /* Ask the core to calculate the divisor for us. */ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); if (baud == 0) panic("invalid baudrate %i\n", port->uartclk / 16); /* uart_get_divisor calc a *16 uart freq, apbuart is *8 */ quot = (uart_get_divisor(port, baud)) * 2; cr = UART_GET_CTRL(port); cr &= ~(UART_CTRL_PE | UART_CTRL_PS); if (termios->c_cflag & PARENB) { cr |= UART_CTRL_PE; if ((termios->c_cflag & PARODD)) cr |= UART_CTRL_PS; } /* Enable flow control. */ if (termios->c_cflag & CRTSCTS) cr |= UART_CTRL_FL; spin_lock_irqsave(&port->lock, flags); /* Update the per-port timeout. */ uart_update_timeout(port, termios->c_cflag, baud); port->read_status_mask = UART_STATUS_OE; if (termios->c_iflag & INPCK) port->read_status_mask |= UART_STATUS_FE | UART_STATUS_PE; /* Characters to ignore */ port->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= UART_STATUS_FE | UART_STATUS_PE; /* Ignore all characters if CREAD is not set. */ if ((termios->c_cflag & CREAD) == 0) port->ignore_status_mask |= UART_DUMMY_RSR_RX; /* Set baud rate */ quot -= 1; UART_PUT_SCAL(port, quot); UART_PUT_CTRL(port, cr); spin_unlock_irqrestore(&port->lock, flags); } static const char *apbuart_type(struct uart_port *port) { return port->type == PORT_APBUART ? "GRLIB/APBUART" : NULL; } static void apbuart_release_port(struct uart_port *port) { release_mem_region(port->mapbase, 0x100); } static int apbuart_request_port(struct uart_port *port) { return request_mem_region(port->mapbase, 0x100, "grlib-apbuart") != NULL ? 
0 : -EBUSY; return 0; } /* Configure/autoconfigure the port */ static void apbuart_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE) { port->type = PORT_APBUART; apbuart_request_port(port); } } /* Verify the new serial_struct (for TIOCSSERIAL) */ static int apbuart_verify_port(struct uart_port *port, struct serial_struct *ser) { int ret = 0; if (ser->type != PORT_UNKNOWN && ser->type != PORT_APBUART) ret = -EINVAL; if (ser->irq < 0 || ser->irq >= NR_IRQS) ret = -EINVAL; if (ser->baud_base < 9600) ret = -EINVAL; return ret; } static struct uart_ops grlib_apbuart_ops = { .tx_empty = apbuart_tx_empty, .set_mctrl = apbuart_set_mctrl, .get_mctrl = apbuart_get_mctrl, .stop_tx = apbuart_stop_tx, .start_tx = apbuart_start_tx, .stop_rx = apbuart_stop_rx, .enable_ms = apbuart_enable_ms, .break_ctl = apbuart_break_ctl, .startup = apbuart_startup, .shutdown = apbuart_shutdown, .set_termios = apbuart_set_termios, .type = apbuart_type, .release_port = apbuart_release_port, .request_port = apbuart_request_port, .config_port = apbuart_config_port, .verify_port = apbuart_verify_port, }; static struct uart_port grlib_apbuart_ports[UART_NR]; static struct device_node *grlib_apbuart_nodes[UART_NR]; static int apbuart_scan_fifo_size(struct uart_port *port, int portnumber) { int ctrl, loop = 0; int status; int fifosize; unsigned long flags; ctrl = UART_GET_CTRL(port); /* * Enable the transceiver and wait for it to be ready to send data. * Clear interrupts so that this process will not be externally * interrupted in the middle (which can cause the transceiver to * drain prematurely). */ local_irq_save(flags); UART_PUT_CTRL(port, ctrl | UART_CTRL_TE); while (!UART_TX_READY(UART_GET_STATUS(port))) loop++; /* * Disable the transceiver so data isn't actually sent during the * actual test. */ UART_PUT_CTRL(port, ctrl & ~(UART_CTRL_TE)); fifosize = 1; UART_PUT_CHAR(port, 0); /* * So long as transmitting a character increments the tranceivier FIFO * length the FIFO must be at least that big. These bytes will * automatically drain off of the FIFO. 
*/ status = UART_GET_STATUS(port); while (((status >> 20) & 0x3F) == fifosize) { fifosize++; UART_PUT_CHAR(port, 0); status = UART_GET_STATUS(port); } fifosize--; UART_PUT_CTRL(port, ctrl); local_irq_restore(flags); if (fifosize == 0) fifosize = 1; return fifosize; } static void apbuart_flush_fifo(struct uart_port *port) { int i; for (i = 0; i < port->fifosize; i++) UART_GET_CHAR(port); } /* ======================================================================== */ /* Console driver, if enabled */ /* ======================================================================== */ #ifdef CONFIG_SERIAL_GRLIB_GAISLER_APBUART_CONSOLE static void apbuart_console_putchar(struct uart_port *port, int ch) { unsigned int status; do { status = UART_GET_STATUS(port); } while (!UART_TX_READY(status)); UART_PUT_CHAR(port, ch); } static void apbuart_console_write(struct console *co, const char *s, unsigned int count) { struct uart_port *port = &grlib_apbuart_ports[co->index]; unsigned int status, old_cr, new_cr; /* First save the CR then disable the interrupts */ old_cr = UART_GET_CTRL(port); new_cr = old_cr & ~(UART_CTRL_RI | UART_CTRL_TI); UART_PUT_CTRL(port, new_cr); uart_console_write(port, s, count, apbuart_console_putchar); /* * Finally, wait for transmitter to become empty * and restore the TCR */ do { status = UART_GET_STATUS(port); } while (!UART_TX_READY(status)); UART_PUT_CTRL(port, old_cr); } static void __init apbuart_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits) { if (UART_GET_CTRL(port) & (UART_CTRL_RE | UART_CTRL_TE)) { unsigned int quot, status; status = UART_GET_STATUS(port); *parity = 'n'; if (status & UART_CTRL_PE) { if ((status & UART_CTRL_PS) == 0) *parity = 'e'; else *parity = 'o'; } *bits = 8; quot = UART_GET_SCAL(port) / 8; *baud = port->uartclk / (16 * (quot + 1)); } } static int __init apbuart_console_setup(struct console *co, char *options) { struct uart_port *port; int baud = 38400; int bits = 8; int parity = 'n'; int flow = 'n'; pr_debug("apbuart_console_setup co=%p, co->index=%i, options=%s\n", co, co->index, options); /* * Check whether an invalid uart number has been specified, and * if so, search for the first available port that does have * console support. 
*/ if (co->index >= grlib_apbuart_port_nr) co->index = 0; port = &grlib_apbuart_ports[co->index]; spin_lock_init(&port->lock); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else apbuart_console_get_options(port, &baud, &parity, &bits); return uart_set_options(port, co, baud, parity, bits, flow); } static struct uart_driver grlib_apbuart_driver; static struct console grlib_apbuart_console = { .name = "ttyS", .write = apbuart_console_write, .device = uart_console_device, .setup = apbuart_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &grlib_apbuart_driver, }; static int grlib_apbuart_configure(void); static int __init apbuart_console_init(void) { if (grlib_apbuart_configure()) return -ENODEV; register_console(&grlib_apbuart_console); return 0; } console_initcall(apbuart_console_init); #define APBUART_CONSOLE (&grlib_apbuart_console) #else #define APBUART_CONSOLE NULL #endif static struct uart_driver grlib_apbuart_driver = { .owner = THIS_MODULE, .driver_name = "serial", .dev_name = "ttyS", .major = SERIAL_APBUART_MAJOR, .minor = SERIAL_APBUART_MINOR, .nr = UART_NR, .cons = APBUART_CONSOLE, }; /* ======================================================================== */ /* OF Platform Driver */ /* ======================================================================== */ static int apbuart_probe(struct platform_device *op) { int i; struct uart_port *port = NULL; for (i = 0; i < grlib_apbuart_port_nr; i++) { if (op->dev.of_node == grlib_apbuart_nodes[i]) break; } port = &grlib_apbuart_ports[i]; port->dev = &op->dev; port->irq = op->archdata.irqs[0]; uart_add_one_port(&grlib_apbuart_driver, (struct uart_port *) port); apbuart_flush_fifo((struct uart_port *) port); printk(KERN_INFO "grlib-apbuart at 0x%llx, irq %d\n", (unsigned long long) port->mapbase, port->irq); return 0; } static struct of_device_id apbuart_match[] = { { .name = "GAISLER_APBUART", }, { .name = "01_00c", }, {}, }; static struct platform_driver grlib_apbuart_of_driver = { .probe = apbuart_probe, .driver = { .owner = THIS_MODULE, .name = "grlib-apbuart", .of_match_table = apbuart_match, }, }; static int __init grlib_apbuart_configure(void) { struct device_node *np; int line = 0; for_each_matching_node(np, apbuart_match) { const int *ampopts; const u32 *freq_hz; const struct amba_prom_registers *regs; struct uart_port *port; unsigned long addr; ampopts = of_get_property(np, "ampopts", NULL); if (ampopts && (*ampopts == 0)) continue; /* Ignore if used by another OS instance */ regs = of_get_property(np, "reg", NULL); /* Frequency of APB Bus is frequency of UART */ freq_hz = of_get_property(np, "freq", NULL); if (!regs || !freq_hz || (*freq_hz == 0)) continue; grlib_apbuart_nodes[line] = np; addr = regs->phys_addr; port = &grlib_apbuart_ports[line]; port->mapbase = addr; port->membase = ioremap(addr, sizeof(struct grlib_apbuart_regs_map)); port->irq = 0; port->iotype = UPIO_MEM; port->ops = &grlib_apbuart_ops; port->flags = UPF_BOOT_AUTOCONF; port->line = line; port->uartclk = *freq_hz; port->fifosize = apbuart_scan_fifo_size((struct uart_port *) port, line); line++; /* We support maximum UART_NR uarts ... */ if (line == UART_NR) break; } grlib_apbuart_driver.nr = grlib_apbuart_port_nr = line; return line ? 
0 : -ENODEV; } static int __init grlib_apbuart_init(void) { int ret; /* Find all APBUARTS in device the tree and initialize their ports */ ret = grlib_apbuart_configure(); if (ret) return ret; printk(KERN_INFO "Serial: GRLIB APBUART driver\n"); ret = uart_register_driver(&grlib_apbuart_driver); if (ret) { printk(KERN_ERR "%s: uart_register_driver failed (%i)\n", __FILE__, ret); return ret; } ret = platform_driver_register(&grlib_apbuart_of_driver); if (ret) { printk(KERN_ERR "%s: platform_driver_register failed (%i)\n", __FILE__, ret); uart_unregister_driver(&grlib_apbuart_driver); return ret; } return ret; } static void __exit grlib_apbuart_exit(void) { int i; for (i = 0; i < grlib_apbuart_port_nr; i++) uart_remove_one_port(&grlib_apbuart_driver, &grlib_apbuart_ports[i]); uart_unregister_driver(&grlib_apbuart_driver); platform_driver_unregister(&grlib_apbuart_of_driver); } module_init(grlib_apbuart_init); module_exit(grlib_apbuart_exit); MODULE_AUTHOR("Aeroflex Gaisler AB"); MODULE_DESCRIPTION("GRLIB APBUART serial driver"); MODULE_VERSION("2.1"); MODULE_LICENSE("GPL");
gpl-2.0
jrfastab/Linux-Kernel-QOS
drivers/pinctrl/sh-pfc/pfc-sh7757.c
2172
70701
/* * SH7757 (B0 step) Pinmux * * Copyright (C) 2009-2010 Renesas Solutions Corp. * * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com> * * Based on SH7723 Pinmux * Copyright (C) 2008 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <cpu/sh7757.h> #include "sh_pfc.h" enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA, PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA, PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA, PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA, PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA, PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA, PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA, PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA, PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA, PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA, PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA, PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA, PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA, PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA, PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA, PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA, PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA, PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA, PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA, PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA, PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA, PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA, PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA, PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA, PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA, PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA, PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA, PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA, PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA, PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA, PINMUX_DATA_END, PINMUX_INPUT_BEGIN, PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN, PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN, PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN, PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN, PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN, PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN, PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN, PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN, PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN, PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN, PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN, PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN, PTG7_IN, PTG6_IN, PTG5_IN, PTG4_IN, PTG3_IN, PTG2_IN, PTG1_IN, PTG0_IN, PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN, PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN, PTI7_IN, PTI6_IN, PTI5_IN, PTI4_IN, PTI3_IN, PTI2_IN, PTI1_IN, PTI0_IN, PTJ6_IN, PTJ5_IN, PTJ4_IN, PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN, PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN, 
PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN, PTL6_IN, PTL5_IN, PTL4_IN, PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN, PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN, PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN, PTN6_IN, PTN5_IN, PTN4_IN, PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN, PTO7_IN, PTO6_IN, PTO5_IN, PTO4_IN, PTO3_IN, PTO2_IN, PTO1_IN, PTO0_IN, PTP7_IN, PTP6_IN, PTP5_IN, PTP4_IN, PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN, PTQ6_IN, PTQ5_IN, PTQ4_IN, PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN, PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN, PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN, PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN, PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN, PTT7_IN, PTT6_IN, PTT5_IN, PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN, PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN, PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN, PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN, PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN, PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN, PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN, PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN, PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN, PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN, PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN, PINMUX_INPUT_END, PINMUX_INPUT_PULLUP_BEGIN, PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU, PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU, PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU, PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU, PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU, PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU, PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU, PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU, PTG7_IN_PU, PTG6_IN_PU, PTG4_IN_PU, PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU, PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU, PTI7_IN_PU, PTI6_IN_PU, PTI4_IN_PU, PTI3_IN_PU, PTI2_IN_PU, PTI1_IN_PU, PTI0_IN_PU, PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU, PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU, PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU, PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU, PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU, PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU, PTN4_IN_PU, PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU, PTO7_IN_PU, PTO6_IN_PU, PTO5_IN_PU, PTO4_IN_PU, PTO3_IN_PU, PTO2_IN_PU, PTO1_IN_PU, PTO0_IN_PU, PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU, PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU, PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU, PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU, PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU, PTV3_IN_PU, PTV2_IN_PU, PTW1_IN_PU, PTW0_IN_PU, PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU, PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU, PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU, PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU, PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU, PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU, PINMUX_INPUT_PULLUP_END, PINMUX_OUTPUT_BEGIN, PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT, PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT, PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT, PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT, PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT, PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT, PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT, PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT, PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT, PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT, PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT, PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT, PTG7_OUT, PTG6_OUT, PTG5_OUT, PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT, PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT, PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT, PTI7_OUT, PTI6_OUT, PTI5_OUT, PTI4_OUT, PTI3_OUT, PTI2_OUT, PTI1_OUT, PTI0_OUT, PTJ6_OUT, 
PTJ5_OUT, PTJ4_OUT, PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT, PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT, PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT, PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT, PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT, PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT, PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT, PTO7_OUT, PTO6_OUT, PTO5_OUT, PTO4_OUT, PTO3_OUT, PTO2_OUT, PTO1_OUT, PTO0_OUT, PTP7_OUT, PTP6_OUT, PTP5_OUT, PTP4_OUT, PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT, PTQ6_OUT, PTQ5_OUT, PTQ4_OUT, PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT, PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT, PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT, PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT, PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT, PTT7_OUT, PTT6_OUT, PTT5_OUT, PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT, PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT, PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT, PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT, PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT, PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT, PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT, PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT, PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT, PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT, PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT, PINMUX_OUTPUT_END, PINMUX_FUNCTION_BEGIN, PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN, PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN, PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN, PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN, PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN, PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN, PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN, PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN, PTE7_FN, PTE6_FN, PTE5_FN, PTE4_FN, PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN, PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN, PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN, PTG7_FN, PTG6_FN, PTG5_FN, PTG4_FN, PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN, PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN, PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN, PTI7_FN, PTI6_FN, PTI5_FN, PTI4_FN, PTI3_FN, PTI2_FN, PTI1_FN, PTI0_FN, PTJ6_FN, PTJ5_FN, PTJ4_FN, PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN, PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN, PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN, PTL6_FN, PTL5_FN, PTL4_FN, PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN, PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN, PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN, PTN6_FN, PTN5_FN, PTN4_FN, PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN, PTO7_FN, PTO6_FN, PTO5_FN, PTO4_FN, PTO3_FN, PTO2_FN, PTO1_FN, PTO0_FN, PTP7_FN, PTP6_FN, PTP5_FN, PTP4_FN, PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN, PTQ6_FN, PTQ5_FN, PTQ4_FN, PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN, PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN, PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN, PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN, PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN, PTT7_FN, PTT6_FN, PTT5_FN, PTT4_FN, PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN, PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN, PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN, PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN, PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN, PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN, PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN, PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN, PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN, PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN, PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN, PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN, PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN, PS0_15_FN1, PS0_15_FN2, PS0_14_FN1, PS0_14_FN2, PS0_13_FN1, PS0_13_FN2, PS0_12_FN1, PS0_12_FN2, PS0_11_FN1, PS0_11_FN2, PS0_10_FN1, PS0_10_FN2, PS0_9_FN1, PS0_9_FN2, PS0_8_FN1, PS0_8_FN2, PS0_7_FN1, PS0_7_FN2, PS0_6_FN1, PS0_6_FN2, PS0_5_FN1, PS0_5_FN2, PS0_4_FN1, PS0_4_FN2, PS0_3_FN1, PS0_3_FN2, PS0_2_FN1, PS0_2_FN2, PS1_10_FN1, PS1_10_FN2, PS1_9_FN1, PS1_9_FN2, 
PS1_8_FN1, PS1_8_FN2, PS1_2_FN1, PS1_2_FN2, PS2_13_FN1, PS2_13_FN2, PS2_12_FN1, PS2_12_FN2, PS2_7_FN1, PS2_7_FN2, PS2_6_FN1, PS2_6_FN2, PS2_5_FN1, PS2_5_FN2, PS2_4_FN1, PS2_4_FN2, PS2_2_FN1, PS2_2_FN2, PS3_15_FN1, PS3_15_FN2, PS3_14_FN1, PS3_14_FN2, PS3_13_FN1, PS3_13_FN2, PS3_12_FN1, PS3_12_FN2, PS3_11_FN1, PS3_11_FN2, PS3_10_FN1, PS3_10_FN2, PS3_9_FN1, PS3_9_FN2, PS3_8_FN1, PS3_8_FN2, PS3_7_FN1, PS3_7_FN2, PS3_2_FN1, PS3_2_FN2, PS3_1_FN1, PS3_1_FN2, PS4_14_FN1, PS4_14_FN2, PS4_13_FN1, PS4_13_FN2, PS4_12_FN1, PS4_12_FN2, PS4_10_FN1, PS4_10_FN2, PS4_9_FN1, PS4_9_FN2, PS4_8_FN1, PS4_8_FN2, PS4_4_FN1, PS4_4_FN2, PS4_3_FN1, PS4_3_FN2, PS4_2_FN1, PS4_2_FN2, PS4_1_FN1, PS4_1_FN2, PS4_0_FN1, PS4_0_FN2, PS5_11_FN1, PS5_11_FN2, PS5_10_FN1, PS5_10_FN2, PS5_9_FN1, PS5_9_FN2, PS5_8_FN1, PS5_8_FN2, PS5_7_FN1, PS5_7_FN2, PS5_6_FN1, PS5_6_FN2, PS5_5_FN1, PS5_5_FN2, PS5_4_FN1, PS5_4_FN2, PS5_3_FN1, PS5_3_FN2, PS5_2_FN1, PS5_2_FN2, PS6_15_FN1, PS6_15_FN2, PS6_14_FN1, PS6_14_FN2, PS6_13_FN1, PS6_13_FN2, PS6_12_FN1, PS6_12_FN2, PS6_11_FN1, PS6_11_FN2, PS6_10_FN1, PS6_10_FN2, PS6_9_FN1, PS6_9_FN2, PS6_8_FN1, PS6_8_FN2, PS6_7_FN1, PS6_7_FN2, PS6_6_FN1, PS6_6_FN2, PS6_5_FN1, PS6_5_FN2, PS6_4_FN1, PS6_4_FN2, PS6_3_FN1, PS6_3_FN2, PS6_2_FN1, PS6_2_FN2, PS6_1_FN1, PS6_1_FN2, PS6_0_FN1, PS6_0_FN2, PS7_15_FN1, PS7_15_FN2, PS7_14_FN1, PS7_14_FN2, PS7_13_FN1, PS7_13_FN2, PS7_12_FN1, PS7_12_FN2, PS7_11_FN1, PS7_11_FN2, PS7_10_FN1, PS7_10_FN2, PS7_9_FN1, PS7_9_FN2, PS7_8_FN1, PS7_8_FN2, PS7_7_FN1, PS7_7_FN2, PS7_6_FN1, PS7_6_FN2, PS7_5_FN1, PS7_5_FN2, PS7_4_FN1, PS7_4_FN2, PS8_15_FN1, PS8_15_FN2, PS8_14_FN1, PS8_14_FN2, PS8_13_FN1, PS8_13_FN2, PS8_12_FN1, PS8_12_FN2, PS8_11_FN1, PS8_11_FN2, PS8_10_FN1, PS8_10_FN2, PS8_9_FN1, PS8_9_FN2, PS8_8_FN1, PS8_8_FN2, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, /* PTA (mobule: LBSC, RGMII) */ BS_MARK, RDWR_MARK, WE1_MARK, RDY_MARK, ET0_MDC_MARK, ET0_MDIO_MARK, ET1_MDC_MARK, ET1_MDIO_MARK, /* PTB (mobule: INTC, ONFI, TMU) */ IRQ15_MARK, IRQ14_MARK, IRQ13_MARK, IRQ12_MARK, IRQ11_MARK, IRQ10_MARK, IRQ9_MARK, IRQ8_MARK, ON_NRE_MARK, ON_NWE_MARK, ON_NWP_MARK, ON_NCE0_MARK, ON_R_B0_MARK, ON_ALE_MARK, ON_CLE_MARK, TCLK_MARK, /* PTC (mobule: IRQ, PWMU) */ IRQ7_MARK, IRQ6_MARK, IRQ5_MARK, IRQ4_MARK, IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK, PWMU0_MARK, PWMU1_MARK, PWMU2_MARK, PWMU3_MARK, PWMU4_MARK, PWMU5_MARK, /* PTD (mobule: SPI0, DMAC) */ SP0_MOSI_MARK, SP0_MISO_MARK, SP0_SCK_MARK, SP0_SCK_FB_MARK, SP0_SS0_MARK, SP0_SS1_MARK, SP0_SS2_MARK, SP0_SS3_MARK, DREQ0_MARK, DACK0_MARK, TEND0_MARK, /* PTE (mobule: RMII) */ RMII0_CRS_DV_MARK, RMII0_TXD1_MARK, RMII0_TXD0_MARK, RMII0_TXEN_MARK, RMII0_REFCLK_MARK, RMII0_RXD1_MARK, RMII0_RXD0_MARK, RMII0_RX_ER_MARK, /* PTF (mobule: RMII, SerMux) */ RMII1_CRS_DV_MARK, RMII1_TXD1_MARK, RMII1_TXD0_MARK, RMII1_TXEN_MARK, RMII1_REFCLK_MARK, RMII1_RXD1_MARK, RMII1_RXD0_MARK, RMII1_RX_ER_MARK, RAC_RI_MARK, /* PTG (mobule: system, LBSC, LPC, WDT, LPC, eMMC) */ BOOTFMS_MARK, BOOTWP_MARK, A25_MARK, A24_MARK, SERIRQ_MARK, WDTOVF_MARK, LPCPD_MARK, LDRQ_MARK, MMCCLK_MARK, MMCCMD_MARK, /* PTH (mobule: SPI1, LPC, DMAC, ADC) */ SP1_MOSI_MARK, SP1_MISO_MARK, SP1_SCK_MARK, SP1_SCK_FB_MARK, SP1_SS0_MARK, SP1_SS1_MARK, WP_MARK, FMS0_MARK, TEND1_MARK, DREQ1_MARK, DACK1_MARK, ADTRG1_MARK, ADTRG0_MARK, /* PTI (mobule: LBSC, SDHI) */ D15_MARK, D14_MARK, D13_MARK, D12_MARK, D11_MARK, D10_MARK, D9_MARK, D8_MARK, SD_WP_MARK, SD_CD_MARK, SD_CLK_MARK, SD_CMD_MARK, SD_D3_MARK, SD_D2_MARK, SD_D1_MARK, SD_D0_MARK, /* PTJ (mobule: SCIF234) */ RTS3_MARK, CTS3_MARK, TXD3_MARK, 
RXD3_MARK, RTS4_MARK, RXD4_MARK, TXD4_MARK, /* PTK (mobule: SERMUX, LBSC, SCIF) */ COM2_TXD_MARK, COM2_RXD_MARK, COM2_RTS_MARK, COM2_CTS_MARK, COM2_DTR_MARK, COM2_DSR_MARK, COM2_DCD_MARK, CLKOUT_MARK, SCK2_MARK, SCK4_MARK, SCK3_MARK, /* PTL (mobule: SERMUX, SCIF, LBSC, AUD) */ RAC_RXD_MARK, RAC_RTS_MARK, RAC_CTS_MARK, RAC_DTR_MARK, RAC_DSR_MARK, RAC_DCD_MARK, RAC_TXD_MARK, RXD2_MARK, CS5_MARK, CS6_MARK, AUDSYNC_MARK, AUDCK_MARK, TXD2_MARK, /* PTM (mobule: LBSC, IIC) */ CS4_MARK, RD_MARK, WE0_MARK, CS0_MARK, SDA6_MARK, SCL6_MARK, SDA7_MARK, SCL7_MARK, /* PTN (mobule: USB, JMC, SGPIO, WDT) */ VBUS_EN_MARK, VBUS_OC_MARK, JMCTCK_MARK, JMCTMS_MARK, JMCTDO_MARK, JMCTDI_MARK, JMCTRST_MARK, SGPIO1_CLK_MARK, SGPIO1_LOAD_MARK, SGPIO1_DI_MARK, SGPIO1_DO_MARK, SUB_CLKIN_MARK, /* PTO (mobule: SGPIO, SerMux) */ SGPIO0_CLK_MARK, SGPIO0_LOAD_MARK, SGPIO0_DI_MARK, SGPIO0_DO_MARK, SGPIO2_CLK_MARK, SGPIO2_LOAD_MARK, SGPIO2_DI_MARK, SGPIO2_DO_MARK, COM1_TXD_MARK, COM1_RXD_MARK, COM1_RTS_MARK, COM1_CTS_MARK, /* PTQ (mobule: LPC) */ LAD3_MARK, LAD2_MARK, LAD1_MARK, LAD0_MARK, LFRAME_MARK, LRESET_MARK, LCLK_MARK, /* PTR (mobule: GRA, IIC) */ DDC3_MARK, DDC2_MARK, SDA2_MARK, SCL2_MARK, SDA1_MARK, SCL1_MARK, SDA0_MARK, SCL0_MARK, SDA8_MARK, SCL8_MARK, /* PTS (mobule: GRA, IIC) */ DDC1_MARK, DDC0_MARK, SDA5_MARK, SCL5_MARK, SDA4_MARK, SCL4_MARK, SDA3_MARK, SCL3_MARK, SDA9_MARK, SCL9_MARK, /* PTT (mobule: PWMX, AUD) */ PWMX7_MARK, PWMX6_MARK, PWMX5_MARK, PWMX4_MARK, PWMX3_MARK, PWMX2_MARK, PWMX1_MARK, PWMX0_MARK, AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK, STATUS1_MARK, STATUS0_MARK, /* PTU (mobule: LPC, APM) */ LGPIO7_MARK, LGPIO6_MARK, LGPIO5_MARK, LGPIO4_MARK, LGPIO3_MARK, LGPIO2_MARK, LGPIO1_MARK, LGPIO0_MARK, APMONCTL_O_MARK, APMPWBTOUT_O_MARK, APMSCI_O_MARK, APMVDDON_MARK, APMSLPBTN_MARK, APMPWRBTN_MARK, APMS5N_MARK, APMS3N_MARK, /* PTV (mobule: LBSC, SerMux, R-SPI, EVC, GRA) */ A23_MARK, A22_MARK, A21_MARK, A20_MARK, A19_MARK, A18_MARK, A17_MARK, A16_MARK, COM2_RI_MARK, R_SPI_MOSI_MARK, R_SPI_MISO_MARK, R_SPI_RSPCK_MARK, R_SPI_SSL0_MARK, R_SPI_SSL1_MARK, EVENT7_MARK, EVENT6_MARK, VBIOS_DI_MARK, VBIOS_DO_MARK, VBIOS_CLK_MARK, VBIOS_CS_MARK, /* PTW (mobule: LBSC, EVC, SCIF) */ A15_MARK, A14_MARK, A13_MARK, A12_MARK, A11_MARK, A10_MARK, A9_MARK, A8_MARK, EVENT5_MARK, EVENT4_MARK, EVENT3_MARK, EVENT2_MARK, EVENT1_MARK, EVENT0_MARK, CTS4_MARK, CTS2_MARK, /* PTX (mobule: LBSC, SCIF, SIM) */ A7_MARK, A6_MARK, A5_MARK, A4_MARK, A3_MARK, A2_MARK, A1_MARK, A0_MARK, RTS2_MARK, SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK, /* PTY (mobule: LBSC) */ D7_MARK, D6_MARK, D5_MARK, D4_MARK, D3_MARK, D2_MARK, D1_MARK, D0_MARK, /* PTZ (mobule: eMMC, ONFI) */ MMCDAT7_MARK, MMCDAT6_MARK, MMCDAT5_MARK, MMCDAT4_MARK, MMCDAT3_MARK, MMCDAT2_MARK, MMCDAT1_MARK, MMCDAT0_MARK, ON_DQ7_MARK, ON_DQ6_MARK, ON_DQ5_MARK, ON_DQ4_MARK, ON_DQ3_MARK, ON_DQ2_MARK, ON_DQ1_MARK, ON_DQ0_MARK, PINMUX_MARK_END, }; static const pinmux_enum_t pinmux_data[] = { /* PTA GPIO */ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT), PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT), PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT), PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT), PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT), PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT), PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT), PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT), /* PTB GPIO */ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT), PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT), PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT), PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT), PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT), 
PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT), PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT), PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT), /* PTC GPIO */ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT), PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT), PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT), PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT), PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT), PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT), PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT), PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT), /* PTD GPIO */ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT), PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT), PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT), PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT), PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT), PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT), PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT), PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT), /* PTE GPIO */ PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT), PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT), PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT), PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT), PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT), PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT), PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT), PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT), /* PTF GPIO */ PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT), PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT), PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT), PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT), PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT), PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT), PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT), PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT), /* PTG GPIO */ PINMUX_DATA(PTG7_DATA, PTG7_IN, PTG7_OUT), PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT), PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT), PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT), PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT), PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT), PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT), PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT), /* PTH GPIO */ PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT), PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT), PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT), PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT), PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT), PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT), PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT), PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT), /* PTI GPIO */ PINMUX_DATA(PTI7_DATA, PTI7_IN, PTI7_OUT), PINMUX_DATA(PTI6_DATA, PTI6_IN, PTI6_OUT), PINMUX_DATA(PTI5_DATA, PTI5_IN, PTI5_OUT), PINMUX_DATA(PTI4_DATA, PTI4_IN, PTI4_OUT), PINMUX_DATA(PTI3_DATA, PTI3_IN, PTI3_OUT), PINMUX_DATA(PTI2_DATA, PTI2_IN, PTI2_OUT), PINMUX_DATA(PTI1_DATA, PTI1_IN, PTI1_OUT), PINMUX_DATA(PTI0_DATA, PTI0_IN, PTI0_OUT), /* PTJ GPIO */ PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT), PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT), PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT), PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT), PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT), PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT), PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT), /* PTK GPIO */ PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT), PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT), PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT), PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT), PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT), PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT), PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT), PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT), /* PTL GPIO */ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT), PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT), PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT), PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT), PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT), 
PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT), PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT), /* PTM GPIO */ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT), PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT), PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT), PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT), PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT), PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT), PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT), /* PTN GPIO */ PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT), PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT), PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT), PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT), PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT), PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT), PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT), /* PTO GPIO */ PINMUX_DATA(PTO7_DATA, PTO7_IN, PTO7_OUT), PINMUX_DATA(PTO6_DATA, PTO6_IN, PTO6_OUT), PINMUX_DATA(PTO5_DATA, PTO5_IN, PTO5_OUT), PINMUX_DATA(PTO4_DATA, PTO4_IN, PTO4_OUT), PINMUX_DATA(PTO3_DATA, PTO3_IN, PTO3_OUT), PINMUX_DATA(PTO2_DATA, PTO2_IN, PTO2_OUT), PINMUX_DATA(PTO1_DATA, PTO1_IN, PTO1_OUT), PINMUX_DATA(PTO0_DATA, PTO0_IN, PTO0_OUT), /* PTQ GPIO */ PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT), PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT), PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT), PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT), PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT), PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT), PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT), /* PTR GPIO */ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT), PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT), PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT), PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT), PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT), PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT), PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT), PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT), /* PTS GPIO */ PINMUX_DATA(PTS7_DATA, PTS7_IN, PTS7_OUT), PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT), PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT), PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT), PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT), PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT), PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT), PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT), /* PTT GPIO */ PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT), PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT), PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT), PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT), PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT), PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT), PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT), PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT), /* PTU GPIO */ PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT), PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT), PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT), PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT), PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT), PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT), PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT), PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT), /* PTV GPIO */ PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT), PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT), PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT), PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT), PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT), PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT), PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT), PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT), /* PTW GPIO */ PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT), PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT), PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT), PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT), PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT), PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT), PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT), PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT), /* PTX 
GPIO */ PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT), PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT), PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT), PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT), PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT), PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT), PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT), PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT), /* PTY GPIO */ PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT), PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT), PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT), PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT), PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT), PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT), PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT), PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT), /* PTZ GPIO */ PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT), PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT), PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT), PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT), PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT), PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT), PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT), PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT), /* PTA FN */ PINMUX_DATA(BS_MARK, PTA7_FN), PINMUX_DATA(RDWR_MARK, PTA6_FN), PINMUX_DATA(WE1_MARK, PTA5_FN), PINMUX_DATA(RDY_MARK, PTA4_FN), PINMUX_DATA(ET0_MDC_MARK, PTA3_FN), PINMUX_DATA(ET0_MDIO_MARK, PTA2_FN), PINMUX_DATA(ET1_MDC_MARK, PTA1_FN), PINMUX_DATA(ET1_MDIO_MARK, PTA0_FN), /* PTB FN */ PINMUX_DATA(IRQ15_MARK, PS0_15_FN1, PTB7_FN), PINMUX_DATA(ON_NRE_MARK, PS0_15_FN2, PTB7_FN), PINMUX_DATA(IRQ14_MARK, PS0_14_FN1, PTB6_FN), PINMUX_DATA(ON_NWE_MARK, PS0_14_FN2, PTB6_FN), PINMUX_DATA(IRQ13_MARK, PS0_13_FN1, PTB5_FN), PINMUX_DATA(ON_NWP_MARK, PS0_13_FN2, PTB5_FN), PINMUX_DATA(IRQ12_MARK, PS0_12_FN1, PTB4_FN), PINMUX_DATA(ON_NCE0_MARK, PS0_12_FN2, PTB4_FN), PINMUX_DATA(IRQ11_MARK, PS0_11_FN1, PTB3_FN), PINMUX_DATA(ON_R_B0_MARK, PS0_11_FN2, PTB3_FN), PINMUX_DATA(IRQ10_MARK, PS0_10_FN1, PTB2_FN), PINMUX_DATA(ON_ALE_MARK, PS0_10_FN2, PTB2_FN), PINMUX_DATA(IRQ9_MARK, PS0_9_FN1, PTB1_FN), PINMUX_DATA(ON_CLE_MARK, PS0_9_FN2, PTB1_FN), PINMUX_DATA(IRQ8_MARK, PS0_8_FN1, PTB0_FN), PINMUX_DATA(TCLK_MARK, PS0_8_FN2, PTB0_FN), /* PTC FN */ PINMUX_DATA(IRQ7_MARK, PS0_7_FN1, PTC7_FN), PINMUX_DATA(PWMU0_MARK, PS0_7_FN2, PTC7_FN), PINMUX_DATA(IRQ6_MARK, PS0_6_FN1, PTC6_FN), PINMUX_DATA(PWMU1_MARK, PS0_6_FN2, PTC6_FN), PINMUX_DATA(IRQ5_MARK, PS0_5_FN1, PTC5_FN), PINMUX_DATA(PWMU2_MARK, PS0_5_FN2, PTC5_FN), PINMUX_DATA(IRQ4_MARK, PS0_4_FN1, PTC5_FN), PINMUX_DATA(PWMU3_MARK, PS0_4_FN2, PTC4_FN), PINMUX_DATA(IRQ3_MARK, PS0_3_FN1, PTC3_FN), PINMUX_DATA(PWMU4_MARK, PS0_3_FN2, PTC3_FN), PINMUX_DATA(IRQ2_MARK, PS0_2_FN1, PTC2_FN), PINMUX_DATA(PWMU5_MARK, PS0_2_FN2, PTC2_FN), PINMUX_DATA(IRQ1_MARK, PTC1_FN), PINMUX_DATA(IRQ0_MARK, PTC0_FN), /* PTD FN */ PINMUX_DATA(SP0_MOSI_MARK, PTD7_FN), PINMUX_DATA(SP0_MISO_MARK, PTD6_FN), PINMUX_DATA(SP0_SCK_MARK, PTD5_FN), PINMUX_DATA(SP0_SCK_FB_MARK, PTD4_FN), PINMUX_DATA(SP0_SS0_MARK, PTD3_FN), PINMUX_DATA(SP0_SS1_MARK, PS1_10_FN1, PTD2_FN), PINMUX_DATA(DREQ0_MARK, PS1_10_FN2, PTD2_FN), PINMUX_DATA(SP0_SS2_MARK, PS1_9_FN1, PTD1_FN), PINMUX_DATA(DACK0_MARK, PS1_9_FN2, PTD1_FN), PINMUX_DATA(SP0_SS3_MARK, PS1_8_FN1, PTD0_FN), PINMUX_DATA(TEND0_MARK, PS1_8_FN2, PTD0_FN), /* PTE FN */ PINMUX_DATA(RMII0_CRS_DV_MARK, PTE7_FN), PINMUX_DATA(RMII0_TXD1_MARK, PTE6_FN), PINMUX_DATA(RMII0_TXD0_MARK, PTE5_FN), PINMUX_DATA(RMII0_TXEN_MARK, PTE4_FN), PINMUX_DATA(RMII0_REFCLK_MARK, PTE3_FN), PINMUX_DATA(RMII0_RXD1_MARK, PTE2_FN), PINMUX_DATA(RMII0_RXD0_MARK, PTE1_FN), PINMUX_DATA(RMII0_RX_ER_MARK, PTE0_FN), /* PTF FN */ 
PINMUX_DATA(RMII1_CRS_DV_MARK, PTF7_FN), PINMUX_DATA(RMII1_TXD1_MARK, PTF6_FN), PINMUX_DATA(RMII1_TXD0_MARK, PTF5_FN), PINMUX_DATA(RMII1_TXEN_MARK, PTF4_FN), PINMUX_DATA(RMII1_REFCLK_MARK, PTF3_FN), PINMUX_DATA(RMII1_RXD1_MARK, PS1_2_FN1, PTF2_FN), PINMUX_DATA(RAC_RI_MARK, PS1_2_FN2, PTF2_FN), PINMUX_DATA(RMII1_RXD0_MARK, PTF1_FN), PINMUX_DATA(RMII1_RX_ER_MARK, PTF0_FN), /* PTG FN */ PINMUX_DATA(BOOTFMS_MARK, PTG7_FN), PINMUX_DATA(BOOTWP_MARK, PTG6_FN), PINMUX_DATA(A25_MARK, PS2_13_FN1, PTG5_FN), PINMUX_DATA(MMCCLK_MARK, PS2_13_FN2, PTG5_FN), PINMUX_DATA(A24_MARK, PS2_12_FN1, PTG4_FN), PINMUX_DATA(MMCCMD_MARK, PS2_12_FN2, PTG4_FN), PINMUX_DATA(SERIRQ_MARK, PTG3_FN), PINMUX_DATA(WDTOVF_MARK, PTG2_FN), PINMUX_DATA(LPCPD_MARK, PTG1_FN), PINMUX_DATA(LDRQ_MARK, PTG0_FN), /* PTH FN */ PINMUX_DATA(SP1_MOSI_MARK, PS2_7_FN1, PTH7_FN), PINMUX_DATA(TEND1_MARK, PS2_7_FN2, PTH7_FN), PINMUX_DATA(SP1_MISO_MARK, PS2_6_FN1, PTH6_FN), PINMUX_DATA(DREQ1_MARK, PS2_6_FN2, PTH6_FN), PINMUX_DATA(SP1_SCK_MARK, PS2_5_FN1, PTH5_FN), PINMUX_DATA(DACK1_MARK, PS2_5_FN2, PTH5_FN), PINMUX_DATA(SP1_SCK_FB_MARK, PS2_4_FN1, PTH4_FN), PINMUX_DATA(ADTRG1_MARK, PS2_4_FN2, PTH4_FN), PINMUX_DATA(SP1_SS0_MARK, PTH3_FN), PINMUX_DATA(SP1_SS1_MARK, PS2_2_FN1, PTH2_FN), PINMUX_DATA(ADTRG0_MARK, PS2_2_FN2, PTH2_FN), PINMUX_DATA(WP_MARK, PTH1_FN), PINMUX_DATA(FMS0_MARK, PTH0_FN), /* PTI FN */ PINMUX_DATA(D15_MARK, PS3_15_FN1, PTI7_FN), PINMUX_DATA(SD_WP_MARK, PS3_15_FN2, PTI7_FN), PINMUX_DATA(D14_MARK, PS3_14_FN1, PTI6_FN), PINMUX_DATA(SD_CD_MARK, PS3_14_FN2, PTI6_FN), PINMUX_DATA(D13_MARK, PS3_13_FN1, PTI5_FN), PINMUX_DATA(SD_CLK_MARK, PS3_13_FN2, PTI5_FN), PINMUX_DATA(D12_MARK, PS3_12_FN1, PTI4_FN), PINMUX_DATA(SD_CMD_MARK, PS3_12_FN2, PTI4_FN), PINMUX_DATA(D11_MARK, PS3_11_FN1, PTI3_FN), PINMUX_DATA(SD_D3_MARK, PS3_11_FN2, PTI3_FN), PINMUX_DATA(D10_MARK, PS3_10_FN1, PTI2_FN), PINMUX_DATA(SD_D2_MARK, PS3_10_FN2, PTI2_FN), PINMUX_DATA(D9_MARK, PS3_9_FN1, PTI1_FN), PINMUX_DATA(SD_D1_MARK, PS3_9_FN2, PTI1_FN), PINMUX_DATA(D8_MARK, PS3_8_FN1, PTI0_FN), PINMUX_DATA(SD_D0_MARK, PS3_8_FN2, PTI0_FN), /* PTJ FN */ PINMUX_DATA(RTS3_MARK, PTJ6_FN), PINMUX_DATA(CTS3_MARK, PTJ5_FN), PINMUX_DATA(TXD3_MARK, PTJ4_FN), PINMUX_DATA(RXD3_MARK, PTJ3_FN), PINMUX_DATA(RTS4_MARK, PTJ2_FN), PINMUX_DATA(RXD4_MARK, PTJ1_FN), PINMUX_DATA(TXD4_MARK, PTJ0_FN), /* PTK FN */ PINMUX_DATA(COM2_TXD_MARK, PS3_7_FN1, PTK7_FN), PINMUX_DATA(SCK2_MARK, PS3_7_FN2, PTK7_FN), PINMUX_DATA(COM2_RXD_MARK, PTK6_FN), PINMUX_DATA(COM2_RTS_MARK, PTK5_FN), PINMUX_DATA(COM2_CTS_MARK, PTK4_FN), PINMUX_DATA(COM2_DTR_MARK, PTK3_FN), PINMUX_DATA(COM2_DSR_MARK, PS3_2_FN1, PTK2_FN), PINMUX_DATA(SCK4_MARK, PS3_2_FN2, PTK2_FN), PINMUX_DATA(COM2_DCD_MARK, PS3_1_FN1, PTK1_FN), PINMUX_DATA(SCK3_MARK, PS3_1_FN2, PTK1_FN), PINMUX_DATA(CLKOUT_MARK, PTK0_FN), /* PTL FN */ PINMUX_DATA(RAC_RXD_MARK, PS4_14_FN1, PTL6_FN), PINMUX_DATA(RXD2_MARK, PS4_14_FN2, PTL6_FN), PINMUX_DATA(RAC_RTS_MARK, PS4_13_FN1, PTL5_FN), PINMUX_DATA(CS5_MARK, PS4_13_FN2, PTL5_FN), PINMUX_DATA(RAC_CTS_MARK, PS4_12_FN1, PTL4_FN), PINMUX_DATA(CS6_MARK, PS4_12_FN2, PTL4_FN), PINMUX_DATA(RAC_DTR_MARK, PTL3_FN), PINMUX_DATA(RAC_DSR_MARK, PS4_10_FN1, PTL2_FN), PINMUX_DATA(AUDSYNC_MARK, PS4_10_FN2, PTL2_FN), PINMUX_DATA(RAC_DCD_MARK, PS4_9_FN1, PTL1_FN), PINMUX_DATA(AUDCK_MARK, PS4_9_FN2, PTL1_FN), PINMUX_DATA(RAC_TXD_MARK, PS4_8_FN1, PTL0_FN), PINMUX_DATA(TXD2_MARK, PS4_8_FN1, PTL0_FN), /* PTM FN */ PINMUX_DATA(CS4_MARK, PTM7_FN), PINMUX_DATA(RD_MARK, PTM6_FN), PINMUX_DATA(WE0_MARK, PTM7_FN), PINMUX_DATA(CS0_MARK, PTM4_FN), 
PINMUX_DATA(SDA6_MARK, PTM3_FN), PINMUX_DATA(SCL6_MARK, PTM2_FN), PINMUX_DATA(SDA7_MARK, PTM1_FN), PINMUX_DATA(SCL7_MARK, PTM0_FN), /* PTN FN */ PINMUX_DATA(VBUS_EN_MARK, PTN6_FN), PINMUX_DATA(VBUS_OC_MARK, PTN5_FN), PINMUX_DATA(JMCTCK_MARK, PS4_4_FN1, PTN4_FN), PINMUX_DATA(SGPIO1_CLK_MARK, PS4_4_FN2, PTN4_FN), PINMUX_DATA(JMCTMS_MARK, PS4_3_FN1, PTN5_FN), PINMUX_DATA(SGPIO1_LOAD_MARK, PS4_3_FN2, PTN5_FN), PINMUX_DATA(JMCTDO_MARK, PS4_2_FN1, PTN2_FN), PINMUX_DATA(SGPIO1_DO_MARK, PS4_2_FN2, PTN2_FN), PINMUX_DATA(JMCTDI_MARK, PS4_1_FN1, PTN1_FN), PINMUX_DATA(SGPIO1_DI_MARK, PS4_1_FN2, PTN1_FN), PINMUX_DATA(JMCTRST_MARK, PS4_0_FN1, PTN0_FN), PINMUX_DATA(SUB_CLKIN_MARK, PS4_0_FN2, PTN0_FN), /* PTO FN */ PINMUX_DATA(SGPIO0_CLK_MARK, PTO7_FN), PINMUX_DATA(SGPIO0_LOAD_MARK, PTO6_FN), PINMUX_DATA(SGPIO0_DI_MARK, PTO5_FN), PINMUX_DATA(SGPIO0_DO_MARK, PTO4_FN), PINMUX_DATA(SGPIO2_CLK_MARK, PS5_11_FN1, PTO3_FN), PINMUX_DATA(COM1_TXD_MARK, PS5_11_FN2, PTO3_FN), PINMUX_DATA(SGPIO2_LOAD_MARK, PS5_10_FN1, PTO2_FN), PINMUX_DATA(COM1_RXD_MARK, PS5_10_FN2, PTO2_FN), PINMUX_DATA(SGPIO2_DI_MARK, PS5_9_FN1, PTO1_FN), PINMUX_DATA(COM1_RTS_MARK, PS5_9_FN2, PTO1_FN), PINMUX_DATA(SGPIO2_DO_MARK, PS5_8_FN1, PTO0_FN), PINMUX_DATA(COM1_CTS_MARK, PS5_8_FN2, PTO0_FN), /* PTP FN */ /* PTQ FN */ PINMUX_DATA(LAD3_MARK, PTQ6_FN), PINMUX_DATA(LAD2_MARK, PTQ5_FN), PINMUX_DATA(LAD1_MARK, PTQ4_FN), PINMUX_DATA(LAD0_MARK, PTQ3_FN), PINMUX_DATA(LFRAME_MARK, PTQ2_FN), PINMUX_DATA(LRESET_MARK, PTQ1_FN), PINMUX_DATA(LCLK_MARK, PTQ0_FN), /* PTR FN */ PINMUX_DATA(SDA8_MARK, PTR7_FN), /* DDC3? */ PINMUX_DATA(SCL8_MARK, PTR6_FN), /* DDC2? */ PINMUX_DATA(SDA2_MARK, PTR5_FN), PINMUX_DATA(SCL2_MARK, PTR4_FN), PINMUX_DATA(SDA1_MARK, PTR3_FN), PINMUX_DATA(SCL1_MARK, PTR2_FN), PINMUX_DATA(SDA0_MARK, PTR1_FN), PINMUX_DATA(SCL0_MARK, PTR0_FN), /* PTS FN */ PINMUX_DATA(SDA9_MARK, PTS7_FN), /* DDC1? */ PINMUX_DATA(SCL9_MARK, PTS6_FN), /* DDC0? 
*/ PINMUX_DATA(SDA5_MARK, PTS5_FN), PINMUX_DATA(SCL5_MARK, PTS4_FN), PINMUX_DATA(SDA4_MARK, PTS3_FN), PINMUX_DATA(SCL4_MARK, PTS2_FN), PINMUX_DATA(SDA3_MARK, PTS1_FN), PINMUX_DATA(SCL3_MARK, PTS0_FN), /* PTT FN */ PINMUX_DATA(PWMX7_MARK, PS5_7_FN1, PTT7_FN), PINMUX_DATA(AUDATA3_MARK, PS5_7_FN2, PTT7_FN), PINMUX_DATA(PWMX6_MARK, PS5_6_FN1, PTT6_FN), PINMUX_DATA(AUDATA2_MARK, PS5_6_FN2, PTT6_FN), PINMUX_DATA(PWMX5_MARK, PS5_5_FN1, PTT5_FN), PINMUX_DATA(AUDATA1_MARK, PS5_5_FN2, PTT5_FN), PINMUX_DATA(PWMX4_MARK, PS5_4_FN1, PTT4_FN), PINMUX_DATA(AUDATA0_MARK, PS5_4_FN2, PTT4_FN), PINMUX_DATA(PWMX3_MARK, PS5_3_FN1, PTT3_FN), PINMUX_DATA(STATUS1_MARK, PS5_3_FN2, PTT3_FN), PINMUX_DATA(PWMX2_MARK, PS5_2_FN1, PTT2_FN), PINMUX_DATA(STATUS0_MARK, PS5_2_FN2, PTT2_FN), PINMUX_DATA(PWMX1_MARK, PTT1_FN), PINMUX_DATA(PWMX0_MARK, PTT0_FN), /* PTU FN */ PINMUX_DATA(LGPIO7_MARK, PS6_15_FN1, PTU7_FN), PINMUX_DATA(APMONCTL_O_MARK, PS6_15_FN2, PTU7_FN), PINMUX_DATA(LGPIO6_MARK, PS6_14_FN1, PTU6_FN), PINMUX_DATA(APMPWBTOUT_O_MARK, PS6_14_FN2, PTU6_FN), PINMUX_DATA(LGPIO5_MARK, PS6_13_FN1, PTU5_FN), PINMUX_DATA(APMSCI_O_MARK, PS6_13_FN2, PTU5_FN), PINMUX_DATA(LGPIO4_MARK, PS6_12_FN1, PTU4_FN), PINMUX_DATA(APMVDDON_MARK, PS6_12_FN2, PTU4_FN), PINMUX_DATA(LGPIO3_MARK, PS6_11_FN1, PTU3_FN), PINMUX_DATA(APMSLPBTN_MARK, PS6_11_FN2, PTU3_FN), PINMUX_DATA(LGPIO2_MARK, PS6_10_FN1, PTU2_FN), PINMUX_DATA(APMPWRBTN_MARK, PS6_10_FN2, PTU2_FN), PINMUX_DATA(LGPIO1_MARK, PS6_9_FN1, PTU1_FN), PINMUX_DATA(APMS5N_MARK, PS6_9_FN2, PTU1_FN), PINMUX_DATA(LGPIO0_MARK, PS6_8_FN1, PTU0_FN), PINMUX_DATA(APMS3N_MARK, PS6_8_FN2, PTU0_FN), /* PTV FN */ PINMUX_DATA(A23_MARK, PS6_7_FN1, PTV7_FN), PINMUX_DATA(COM2_RI_MARK, PS6_7_FN2, PTV7_FN), PINMUX_DATA(A22_MARK, PS6_6_FN1, PTV6_FN), PINMUX_DATA(R_SPI_MOSI_MARK, PS6_6_FN2, PTV6_FN), PINMUX_DATA(A21_MARK, PS6_5_FN1, PTV5_FN), PINMUX_DATA(R_SPI_MISO_MARK, PS6_5_FN2, PTV5_FN), PINMUX_DATA(A20_MARK, PS6_4_FN1, PTV4_FN), PINMUX_DATA(R_SPI_RSPCK_MARK, PS6_4_FN2, PTV4_FN), PINMUX_DATA(A19_MARK, PS6_3_FN1, PTV3_FN), PINMUX_DATA(R_SPI_SSL0_MARK, PS6_3_FN2, PTV3_FN), PINMUX_DATA(A18_MARK, PS6_2_FN1, PTV2_FN), PINMUX_DATA(R_SPI_SSL1_MARK, PS6_2_FN2, PTV2_FN), PINMUX_DATA(A17_MARK, PS6_1_FN1, PTV1_FN), PINMUX_DATA(EVENT7_MARK, PS6_1_FN2, PTV1_FN), PINMUX_DATA(A16_MARK, PS6_0_FN1, PTV0_FN), PINMUX_DATA(EVENT6_MARK, PS6_0_FN1, PTV0_FN), /* PTW FN */ PINMUX_DATA(A15_MARK, PS7_15_FN1, PTW7_FN), PINMUX_DATA(EVENT5_MARK, PS7_15_FN2, PTW7_FN), PINMUX_DATA(A14_MARK, PS7_14_FN1, PTW6_FN), PINMUX_DATA(EVENT4_MARK, PS7_14_FN2, PTW6_FN), PINMUX_DATA(A13_MARK, PS7_13_FN1, PTW5_FN), PINMUX_DATA(EVENT3_MARK, PS7_13_FN2, PTW5_FN), PINMUX_DATA(A12_MARK, PS7_12_FN1, PTW4_FN), PINMUX_DATA(EVENT2_MARK, PS7_12_FN2, PTW4_FN), PINMUX_DATA(A11_MARK, PS7_11_FN1, PTW3_FN), PINMUX_DATA(EVENT1_MARK, PS7_11_FN2, PTW3_FN), PINMUX_DATA(A10_MARK, PS7_10_FN1, PTW2_FN), PINMUX_DATA(EVENT0_MARK, PS7_10_FN2, PTW2_FN), PINMUX_DATA(A9_MARK, PS7_9_FN1, PTW1_FN), PINMUX_DATA(CTS4_MARK, PS7_9_FN2, PTW1_FN), PINMUX_DATA(A8_MARK, PS7_8_FN1, PTW0_FN), PINMUX_DATA(CTS2_MARK, PS7_8_FN2, PTW0_FN), /* PTX FN */ PINMUX_DATA(A7_MARK, PS7_7_FN1, PTX7_FN), PINMUX_DATA(RTS2_MARK, PS7_7_FN2, PTX7_FN), PINMUX_DATA(A6_MARK, PS7_6_FN1, PTX6_FN), PINMUX_DATA(SIM_D_MARK, PS7_6_FN2, PTX6_FN), PINMUX_DATA(A5_MARK, PS7_5_FN1, PTX5_FN), PINMUX_DATA(SIM_CLK_MARK, PS7_5_FN2, PTX5_FN), PINMUX_DATA(A4_MARK, PS7_4_FN1, PTX4_FN), PINMUX_DATA(SIM_RST_MARK, PS7_4_FN2, PTX4_FN), PINMUX_DATA(A3_MARK, PTX3_FN), PINMUX_DATA(A2_MARK, PTX2_FN), PINMUX_DATA(A1_MARK, PTX1_FN), 
PINMUX_DATA(A0_MARK, PTX0_FN), /* PTY FN */ PINMUX_DATA(D7_MARK, PTY7_FN), PINMUX_DATA(D6_MARK, PTY6_FN), PINMUX_DATA(D5_MARK, PTY5_FN), PINMUX_DATA(D4_MARK, PTY4_FN), PINMUX_DATA(D3_MARK, PTY3_FN), PINMUX_DATA(D2_MARK, PTY2_FN), PINMUX_DATA(D1_MARK, PTY1_FN), PINMUX_DATA(D0_MARK, PTY0_FN), /* PTZ FN */ PINMUX_DATA(MMCDAT7_MARK, PS8_15_FN1, PTZ7_FN), PINMUX_DATA(ON_DQ7_MARK, PS8_15_FN2, PTZ7_FN), PINMUX_DATA(MMCDAT6_MARK, PS8_14_FN1, PTZ6_FN), PINMUX_DATA(ON_DQ6_MARK, PS8_14_FN2, PTZ6_FN), PINMUX_DATA(MMCDAT5_MARK, PS8_13_FN1, PTZ5_FN), PINMUX_DATA(ON_DQ5_MARK, PS8_13_FN2, PTZ5_FN), PINMUX_DATA(MMCDAT4_MARK, PS8_12_FN1, PTZ4_FN), PINMUX_DATA(ON_DQ4_MARK, PS8_12_FN2, PTZ4_FN), PINMUX_DATA(MMCDAT3_MARK, PS8_11_FN1, PTZ3_FN), PINMUX_DATA(ON_DQ3_MARK, PS8_11_FN2, PTZ3_FN), PINMUX_DATA(MMCDAT2_MARK, PS8_10_FN1, PTZ2_FN), PINMUX_DATA(ON_DQ2_MARK, PS8_10_FN2, PTZ2_FN), PINMUX_DATA(MMCDAT1_MARK, PS8_9_FN1, PTZ1_FN), PINMUX_DATA(ON_DQ1_MARK, PS8_9_FN2, PTZ1_FN), PINMUX_DATA(MMCDAT0_MARK, PS8_8_FN1, PTZ0_FN), PINMUX_DATA(ON_DQ0_MARK, PS8_8_FN2, PTZ0_FN), }; static struct sh_pfc_pin pinmux_pins[] = { /* PTA */ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA), PINMUX_GPIO(GPIO_PTA6, PTA6_DATA), PINMUX_GPIO(GPIO_PTA5, PTA5_DATA), PINMUX_GPIO(GPIO_PTA4, PTA4_DATA), PINMUX_GPIO(GPIO_PTA3, PTA3_DATA), PINMUX_GPIO(GPIO_PTA2, PTA2_DATA), PINMUX_GPIO(GPIO_PTA1, PTA1_DATA), PINMUX_GPIO(GPIO_PTA0, PTA0_DATA), /* PTB */ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA), PINMUX_GPIO(GPIO_PTB6, PTB6_DATA), PINMUX_GPIO(GPIO_PTB5, PTB5_DATA), PINMUX_GPIO(GPIO_PTB4, PTB4_DATA), PINMUX_GPIO(GPIO_PTB3, PTB3_DATA), PINMUX_GPIO(GPIO_PTB2, PTB2_DATA), PINMUX_GPIO(GPIO_PTB1, PTB1_DATA), PINMUX_GPIO(GPIO_PTB0, PTB0_DATA), /* PTC */ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA), PINMUX_GPIO(GPIO_PTC6, PTC6_DATA), PINMUX_GPIO(GPIO_PTC5, PTC5_DATA), PINMUX_GPIO(GPIO_PTC4, PTC4_DATA), PINMUX_GPIO(GPIO_PTC3, PTC3_DATA), PINMUX_GPIO(GPIO_PTC2, PTC2_DATA), PINMUX_GPIO(GPIO_PTC1, PTC1_DATA), PINMUX_GPIO(GPIO_PTC0, PTC0_DATA), /* PTD */ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA), PINMUX_GPIO(GPIO_PTD6, PTD6_DATA), PINMUX_GPIO(GPIO_PTD5, PTD5_DATA), PINMUX_GPIO(GPIO_PTD4, PTD4_DATA), PINMUX_GPIO(GPIO_PTD3, PTD3_DATA), PINMUX_GPIO(GPIO_PTD2, PTD2_DATA), PINMUX_GPIO(GPIO_PTD1, PTD1_DATA), PINMUX_GPIO(GPIO_PTD0, PTD0_DATA), /* PTE */ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA), PINMUX_GPIO(GPIO_PTE6, PTE6_DATA), PINMUX_GPIO(GPIO_PTE5, PTE5_DATA), PINMUX_GPIO(GPIO_PTE4, PTE4_DATA), PINMUX_GPIO(GPIO_PTE3, PTE3_DATA), PINMUX_GPIO(GPIO_PTE2, PTE2_DATA), PINMUX_GPIO(GPIO_PTE1, PTE1_DATA), PINMUX_GPIO(GPIO_PTE0, PTE0_DATA), /* PTF */ PINMUX_GPIO(GPIO_PTF7, PTF7_DATA), PINMUX_GPIO(GPIO_PTF6, PTF6_DATA), PINMUX_GPIO(GPIO_PTF5, PTF5_DATA), PINMUX_GPIO(GPIO_PTF4, PTF4_DATA), PINMUX_GPIO(GPIO_PTF3, PTF3_DATA), PINMUX_GPIO(GPIO_PTF2, PTF2_DATA), PINMUX_GPIO(GPIO_PTF1, PTF1_DATA), PINMUX_GPIO(GPIO_PTF0, PTF0_DATA), /* PTG */ PINMUX_GPIO(GPIO_PTG7, PTG7_DATA), PINMUX_GPIO(GPIO_PTG6, PTG6_DATA), PINMUX_GPIO(GPIO_PTG5, PTG5_DATA), PINMUX_GPIO(GPIO_PTG4, PTG4_DATA), PINMUX_GPIO(GPIO_PTG3, PTG3_DATA), PINMUX_GPIO(GPIO_PTG2, PTG2_DATA), PINMUX_GPIO(GPIO_PTG1, PTG1_DATA), PINMUX_GPIO(GPIO_PTG0, PTG0_DATA), /* PTH */ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA), PINMUX_GPIO(GPIO_PTH6, PTH6_DATA), PINMUX_GPIO(GPIO_PTH5, PTH5_DATA), PINMUX_GPIO(GPIO_PTH4, PTH4_DATA), PINMUX_GPIO(GPIO_PTH3, PTH3_DATA), PINMUX_GPIO(GPIO_PTH2, PTH2_DATA), PINMUX_GPIO(GPIO_PTH1, PTH1_DATA), PINMUX_GPIO(GPIO_PTH0, PTH0_DATA), /* PTI */ PINMUX_GPIO(GPIO_PTI7, PTI7_DATA), PINMUX_GPIO(GPIO_PTI6, PTI6_DATA), PINMUX_GPIO(GPIO_PTI5, PTI5_DATA), 
PINMUX_GPIO(GPIO_PTI4, PTI4_DATA), PINMUX_GPIO(GPIO_PTI3, PTI3_DATA), PINMUX_GPIO(GPIO_PTI2, PTI2_DATA), PINMUX_GPIO(GPIO_PTI1, PTI1_DATA), PINMUX_GPIO(GPIO_PTI0, PTI0_DATA), /* PTJ */ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA), PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA), PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA), PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA), PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA), PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA), PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA), /* PTK */ PINMUX_GPIO(GPIO_PTK7, PTK7_DATA), PINMUX_GPIO(GPIO_PTK6, PTK6_DATA), PINMUX_GPIO(GPIO_PTK5, PTK5_DATA), PINMUX_GPIO(GPIO_PTK4, PTK4_DATA), PINMUX_GPIO(GPIO_PTK3, PTK3_DATA), PINMUX_GPIO(GPIO_PTK2, PTK2_DATA), PINMUX_GPIO(GPIO_PTK1, PTK1_DATA), PINMUX_GPIO(GPIO_PTK0, PTK0_DATA), /* PTL */ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA), PINMUX_GPIO(GPIO_PTL5, PTL5_DATA), PINMUX_GPIO(GPIO_PTL4, PTL4_DATA), PINMUX_GPIO(GPIO_PTL3, PTL3_DATA), PINMUX_GPIO(GPIO_PTL2, PTL2_DATA), PINMUX_GPIO(GPIO_PTL1, PTL1_DATA), PINMUX_GPIO(GPIO_PTL0, PTL0_DATA), /* PTM */ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA), PINMUX_GPIO(GPIO_PTM6, PTM6_DATA), PINMUX_GPIO(GPIO_PTM5, PTM5_DATA), PINMUX_GPIO(GPIO_PTM4, PTM4_DATA), PINMUX_GPIO(GPIO_PTM3, PTM3_DATA), PINMUX_GPIO(GPIO_PTM2, PTM2_DATA), PINMUX_GPIO(GPIO_PTM1, PTM1_DATA), PINMUX_GPIO(GPIO_PTM0, PTM0_DATA), /* PTN */ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA), PINMUX_GPIO(GPIO_PTN5, PTN5_DATA), PINMUX_GPIO(GPIO_PTN4, PTN4_DATA), PINMUX_GPIO(GPIO_PTN3, PTN3_DATA), PINMUX_GPIO(GPIO_PTN2, PTN2_DATA), PINMUX_GPIO(GPIO_PTN1, PTN1_DATA), PINMUX_GPIO(GPIO_PTN0, PTN0_DATA), /* PTO */ PINMUX_GPIO(GPIO_PTO7, PTO7_DATA), PINMUX_GPIO(GPIO_PTO6, PTO6_DATA), PINMUX_GPIO(GPIO_PTO5, PTO5_DATA), PINMUX_GPIO(GPIO_PTO4, PTO4_DATA), PINMUX_GPIO(GPIO_PTO3, PTO3_DATA), PINMUX_GPIO(GPIO_PTO2, PTO2_DATA), PINMUX_GPIO(GPIO_PTO1, PTO1_DATA), PINMUX_GPIO(GPIO_PTO0, PTO0_DATA), /* PTP */ PINMUX_GPIO(GPIO_PTP7, PTP7_DATA), PINMUX_GPIO(GPIO_PTP6, PTP6_DATA), PINMUX_GPIO(GPIO_PTP5, PTP5_DATA), PINMUX_GPIO(GPIO_PTP4, PTP4_DATA), PINMUX_GPIO(GPIO_PTP3, PTP3_DATA), PINMUX_GPIO(GPIO_PTP2, PTP2_DATA), PINMUX_GPIO(GPIO_PTP1, PTP1_DATA), PINMUX_GPIO(GPIO_PTP0, PTP0_DATA), /* PTQ */ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA), PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA), PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA), PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA), PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA), PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA), PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA), /* PTR */ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA), PINMUX_GPIO(GPIO_PTR6, PTR6_DATA), PINMUX_GPIO(GPIO_PTR5, PTR5_DATA), PINMUX_GPIO(GPIO_PTR4, PTR4_DATA), PINMUX_GPIO(GPIO_PTR3, PTR3_DATA), PINMUX_GPIO(GPIO_PTR2, PTR2_DATA), PINMUX_GPIO(GPIO_PTR1, PTR1_DATA), PINMUX_GPIO(GPIO_PTR0, PTR0_DATA), /* PTS */ PINMUX_GPIO(GPIO_PTS7, PTS7_DATA), PINMUX_GPIO(GPIO_PTS6, PTS6_DATA), PINMUX_GPIO(GPIO_PTS5, PTS5_DATA), PINMUX_GPIO(GPIO_PTS4, PTS4_DATA), PINMUX_GPIO(GPIO_PTS3, PTS3_DATA), PINMUX_GPIO(GPIO_PTS2, PTS2_DATA), PINMUX_GPIO(GPIO_PTS1, PTS1_DATA), PINMUX_GPIO(GPIO_PTS0, PTS0_DATA), /* PTT */ PINMUX_GPIO(GPIO_PTT7, PTT7_DATA), PINMUX_GPIO(GPIO_PTT6, PTT6_DATA), PINMUX_GPIO(GPIO_PTT5, PTT5_DATA), PINMUX_GPIO(GPIO_PTT4, PTT4_DATA), PINMUX_GPIO(GPIO_PTT3, PTT3_DATA), PINMUX_GPIO(GPIO_PTT2, PTT2_DATA), PINMUX_GPIO(GPIO_PTT1, PTT1_DATA), PINMUX_GPIO(GPIO_PTT0, PTT0_DATA), /* PTU */ PINMUX_GPIO(GPIO_PTU7, PTU7_DATA), PINMUX_GPIO(GPIO_PTU6, PTU6_DATA), PINMUX_GPIO(GPIO_PTU5, PTU5_DATA), PINMUX_GPIO(GPIO_PTU4, PTU4_DATA), PINMUX_GPIO(GPIO_PTU3, PTU3_DATA), PINMUX_GPIO(GPIO_PTU2, PTU2_DATA), PINMUX_GPIO(GPIO_PTU1, PTU1_DATA), PINMUX_GPIO(GPIO_PTU0, PTU0_DATA), /* PTV */ PINMUX_GPIO(GPIO_PTV7, 
PTV7_DATA), PINMUX_GPIO(GPIO_PTV6, PTV6_DATA), PINMUX_GPIO(GPIO_PTV5, PTV5_DATA), PINMUX_GPIO(GPIO_PTV4, PTV4_DATA), PINMUX_GPIO(GPIO_PTV3, PTV3_DATA), PINMUX_GPIO(GPIO_PTV2, PTV2_DATA), PINMUX_GPIO(GPIO_PTV1, PTV1_DATA), PINMUX_GPIO(GPIO_PTV0, PTV0_DATA), /* PTW */ PINMUX_GPIO(GPIO_PTW7, PTW7_DATA), PINMUX_GPIO(GPIO_PTW6, PTW6_DATA), PINMUX_GPIO(GPIO_PTW5, PTW5_DATA), PINMUX_GPIO(GPIO_PTW4, PTW4_DATA), PINMUX_GPIO(GPIO_PTW3, PTW3_DATA), PINMUX_GPIO(GPIO_PTW2, PTW2_DATA), PINMUX_GPIO(GPIO_PTW1, PTW1_DATA), PINMUX_GPIO(GPIO_PTW0, PTW0_DATA), /* PTX */ PINMUX_GPIO(GPIO_PTX7, PTX7_DATA), PINMUX_GPIO(GPIO_PTX6, PTX6_DATA), PINMUX_GPIO(GPIO_PTX5, PTX5_DATA), PINMUX_GPIO(GPIO_PTX4, PTX4_DATA), PINMUX_GPIO(GPIO_PTX3, PTX3_DATA), PINMUX_GPIO(GPIO_PTX2, PTX2_DATA), PINMUX_GPIO(GPIO_PTX1, PTX1_DATA), PINMUX_GPIO(GPIO_PTX0, PTX0_DATA), /* PTY */ PINMUX_GPIO(GPIO_PTY7, PTY7_DATA), PINMUX_GPIO(GPIO_PTY6, PTY6_DATA), PINMUX_GPIO(GPIO_PTY5, PTY5_DATA), PINMUX_GPIO(GPIO_PTY4, PTY4_DATA), PINMUX_GPIO(GPIO_PTY3, PTY3_DATA), PINMUX_GPIO(GPIO_PTY2, PTY2_DATA), PINMUX_GPIO(GPIO_PTY1, PTY1_DATA), PINMUX_GPIO(GPIO_PTY0, PTY0_DATA), /* PTZ */ PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA), PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA), PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA), PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA), PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA), PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA), PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA), PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA), }; #define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins) static const struct pinmux_func pinmux_func_gpios[] = { /* PTA (mobule: LBSC, RGMII) */ GPIO_FN(BS), GPIO_FN(RDWR), GPIO_FN(WE1), GPIO_FN(RDY), GPIO_FN(ET0_MDC), GPIO_FN(ET0_MDIO), GPIO_FN(ET1_MDC), GPIO_FN(ET1_MDIO), /* PTB (mobule: INTC, ONFI, TMU) */ GPIO_FN(IRQ15), GPIO_FN(IRQ14), GPIO_FN(IRQ13), GPIO_FN(IRQ12), GPIO_FN(IRQ11), GPIO_FN(IRQ10), GPIO_FN(IRQ9), GPIO_FN(IRQ8), GPIO_FN(ON_NRE), GPIO_FN(ON_NWE), GPIO_FN(ON_NWP), GPIO_FN(ON_NCE0), GPIO_FN(ON_R_B0), GPIO_FN(ON_ALE), GPIO_FN(ON_CLE), GPIO_FN(TCLK), /* PTC (mobule: IRQ, PWMU) */ GPIO_FN(IRQ7), GPIO_FN(IRQ6), GPIO_FN(IRQ5), GPIO_FN(IRQ4), GPIO_FN(IRQ3), GPIO_FN(IRQ2), GPIO_FN(IRQ1), GPIO_FN(IRQ0), GPIO_FN(PWMU0), GPIO_FN(PWMU1), GPIO_FN(PWMU2), GPIO_FN(PWMU3), GPIO_FN(PWMU4), GPIO_FN(PWMU5), /* PTD (mobule: SPI0, DMAC) */ GPIO_FN(SP0_MOSI), GPIO_FN(SP0_MISO), GPIO_FN(SP0_SCK), GPIO_FN(SP0_SCK_FB), GPIO_FN(SP0_SS0), GPIO_FN(SP0_SS1), GPIO_FN(SP0_SS2), GPIO_FN(SP0_SS3), GPIO_FN(DREQ0), GPIO_FN(DACK0), GPIO_FN(TEND0), /* PTE (mobule: RMII) */ GPIO_FN(RMII0_CRS_DV), GPIO_FN(RMII0_TXD1), GPIO_FN(RMII0_TXD0), GPIO_FN(RMII0_TXEN), GPIO_FN(RMII0_REFCLK), GPIO_FN(RMII0_RXD1), GPIO_FN(RMII0_RXD0), GPIO_FN(RMII0_RX_ER), /* PTF (mobule: RMII, SerMux) */ GPIO_FN(RMII1_CRS_DV), GPIO_FN(RMII1_TXD1), GPIO_FN(RMII1_TXD0), GPIO_FN(RMII1_TXEN), GPIO_FN(RMII1_REFCLK), GPIO_FN(RMII1_RXD1), GPIO_FN(RMII1_RXD0), GPIO_FN(RMII1_RX_ER), GPIO_FN(RAC_RI), /* PTG (mobule: system, LBSC, LPC, WDT, LPC, eMMC) */ GPIO_FN(BOOTFMS), GPIO_FN(BOOTWP), GPIO_FN(A25), GPIO_FN(A24), GPIO_FN(SERIRQ), GPIO_FN(WDTOVF), GPIO_FN(LPCPD), GPIO_FN(LDRQ), GPIO_FN(MMCCLK), GPIO_FN(MMCCMD), /* PTH (mobule: SPI1, LPC, DMAC, ADC) */ GPIO_FN(SP1_MOSI), GPIO_FN(SP1_MISO), GPIO_FN(SP1_SCK), GPIO_FN(SP1_SCK_FB), GPIO_FN(SP1_SS0), GPIO_FN(SP1_SS1), GPIO_FN(WP), GPIO_FN(FMS0), GPIO_FN(TEND1), GPIO_FN(DREQ1), GPIO_FN(DACK1), GPIO_FN(ADTRG1), GPIO_FN(ADTRG0), /* PTI (mobule: LBSC, SDHI) */ GPIO_FN(D15), GPIO_FN(D14), GPIO_FN(D13), GPIO_FN(D12), GPIO_FN(D11), GPIO_FN(D10), GPIO_FN(D9), GPIO_FN(D8), GPIO_FN(SD_WP), GPIO_FN(SD_CD), GPIO_FN(SD_CLK), 
GPIO_FN(SD_CMD), GPIO_FN(SD_D3), GPIO_FN(SD_D2), GPIO_FN(SD_D1), GPIO_FN(SD_D0), /* PTJ (mobule: SCIF234, SERMUX) */ GPIO_FN(RTS3), GPIO_FN(CTS3), GPIO_FN(TXD3), GPIO_FN(RXD3), GPIO_FN(RTS4), GPIO_FN(RXD4), GPIO_FN(TXD4), /* PTK (mobule: SERMUX, LBSC, SCIF) */ GPIO_FN(COM2_TXD), GPIO_FN(COM2_RXD), GPIO_FN(COM2_RTS), GPIO_FN(COM2_CTS), GPIO_FN(COM2_DTR), GPIO_FN(COM2_DSR), GPIO_FN(COM2_DCD), GPIO_FN(CLKOUT), GPIO_FN(SCK2), GPIO_FN(SCK4), GPIO_FN(SCK3), /* PTL (mobule: SERMUX, SCIF, LBSC, AUD) */ GPIO_FN(RAC_RXD), GPIO_FN(RAC_RTS), GPIO_FN(RAC_CTS), GPIO_FN(RAC_DTR), GPIO_FN(RAC_DSR), GPIO_FN(RAC_DCD), GPIO_FN(RAC_TXD), GPIO_FN(RXD2), GPIO_FN(CS5), GPIO_FN(CS6), GPIO_FN(AUDSYNC), GPIO_FN(AUDCK), GPIO_FN(TXD2), /* PTM (mobule: LBSC, IIC) */ GPIO_FN(CS4), GPIO_FN(RD), GPIO_FN(WE0), GPIO_FN(CS0), GPIO_FN(SDA6), GPIO_FN(SCL6), GPIO_FN(SDA7), GPIO_FN(SCL7), /* PTN (mobule: USB, JMC, SGPIO, WDT) */ GPIO_FN(VBUS_EN), GPIO_FN(VBUS_OC), GPIO_FN(JMCTCK), GPIO_FN(JMCTMS), GPIO_FN(JMCTDO), GPIO_FN(JMCTDI), GPIO_FN(JMCTRST), GPIO_FN(SGPIO1_CLK), GPIO_FN(SGPIO1_LOAD), GPIO_FN(SGPIO1_DI), GPIO_FN(SGPIO1_DO), GPIO_FN(SUB_CLKIN), /* PTO (mobule: SGPIO, SerMux) */ GPIO_FN(SGPIO0_CLK), GPIO_FN(SGPIO0_LOAD), GPIO_FN(SGPIO0_DI), GPIO_FN(SGPIO0_DO), GPIO_FN(SGPIO2_CLK), GPIO_FN(SGPIO2_LOAD), GPIO_FN(SGPIO2_DI), GPIO_FN(SGPIO2_DO), GPIO_FN(COM1_TXD), GPIO_FN(COM1_RXD), GPIO_FN(COM1_RTS), GPIO_FN(COM1_CTS), /* PTP (mobule: EVC, ADC) */ /* PTQ (mobule: LPC) */ GPIO_FN(LAD3), GPIO_FN(LAD2), GPIO_FN(LAD1), GPIO_FN(LAD0), GPIO_FN(LFRAME), GPIO_FN(LRESET), GPIO_FN(LCLK), /* PTR (mobule: GRA, IIC) */ GPIO_FN(DDC3), GPIO_FN(DDC2), GPIO_FN(SDA8), GPIO_FN(SCL8), GPIO_FN(SDA2), GPIO_FN(SCL2), GPIO_FN(SDA1), GPIO_FN(SCL1), GPIO_FN(SDA0), GPIO_FN(SCL0), /* PTS (mobule: GRA, IIC) */ GPIO_FN(DDC1), GPIO_FN(DDC0), GPIO_FN(SDA9), GPIO_FN(SCL9), GPIO_FN(SDA5), GPIO_FN(SCL5), GPIO_FN(SDA4), GPIO_FN(SCL4), GPIO_FN(SDA3), GPIO_FN(SCL3), /* PTT (mobule: PWMX, AUD) */ GPIO_FN(PWMX7), GPIO_FN(PWMX6), GPIO_FN(PWMX5), GPIO_FN(PWMX4), GPIO_FN(PWMX3), GPIO_FN(PWMX2), GPIO_FN(PWMX1), GPIO_FN(PWMX0), GPIO_FN(AUDATA3), GPIO_FN(AUDATA2), GPIO_FN(AUDATA1), GPIO_FN(AUDATA0), GPIO_FN(STATUS1), GPIO_FN(STATUS0), /* PTU (mobule: LPC, APM) */ GPIO_FN(LGPIO7), GPIO_FN(LGPIO6), GPIO_FN(LGPIO5), GPIO_FN(LGPIO4), GPIO_FN(LGPIO3), GPIO_FN(LGPIO2), GPIO_FN(LGPIO1), GPIO_FN(LGPIO0), GPIO_FN(APMONCTL_O), GPIO_FN(APMPWBTOUT_O), GPIO_FN(APMSCI_O), GPIO_FN(APMVDDON), GPIO_FN(APMSLPBTN), GPIO_FN(APMPWRBTN), GPIO_FN(APMS5N), GPIO_FN(APMS3N), /* PTV (mobule: LBSC, SerMux, R-SPI, EVC, GRA) */ GPIO_FN(A23), GPIO_FN(A22), GPIO_FN(A21), GPIO_FN(A20), GPIO_FN(A19), GPIO_FN(A18), GPIO_FN(A17), GPIO_FN(A16), GPIO_FN(COM2_RI), GPIO_FN(R_SPI_MOSI), GPIO_FN(R_SPI_MISO), GPIO_FN(R_SPI_RSPCK), GPIO_FN(R_SPI_SSL0), GPIO_FN(R_SPI_SSL1), GPIO_FN(EVENT7), GPIO_FN(EVENT6), GPIO_FN(VBIOS_DI), GPIO_FN(VBIOS_DO), GPIO_FN(VBIOS_CLK), GPIO_FN(VBIOS_CS), /* PTW (mobule: LBSC, EVC, SCIF) */ GPIO_FN(A16), GPIO_FN(A15), GPIO_FN(A14), GPIO_FN(A13), GPIO_FN(A12), GPIO_FN(A11), GPIO_FN(A10), GPIO_FN(A9), GPIO_FN(A8), GPIO_FN(EVENT5), GPIO_FN(EVENT4), GPIO_FN(EVENT3), GPIO_FN(EVENT2), GPIO_FN(EVENT1), GPIO_FN(EVENT0), GPIO_FN(CTS4), GPIO_FN(CTS2), /* PTX (mobule: LBSC) */ GPIO_FN(A7), GPIO_FN(A6), GPIO_FN(A5), GPIO_FN(A4), GPIO_FN(A3), GPIO_FN(A2), GPIO_FN(A1), GPIO_FN(A0), GPIO_FN(RTS2), GPIO_FN(SIM_D), GPIO_FN(SIM_CLK), GPIO_FN(SIM_RST), /* PTY (mobule: LBSC) */ GPIO_FN(D7), GPIO_FN(D6), GPIO_FN(D5), GPIO_FN(D4), GPIO_FN(D3), GPIO_FN(D2), GPIO_FN(D1), GPIO_FN(D0), /* PTZ (mobule: eMMC, ONFI) 
*/ GPIO_FN(MMCDAT7), GPIO_FN(MMCDAT6), GPIO_FN(MMCDAT5), GPIO_FN(MMCDAT4), GPIO_FN(MMCDAT3), GPIO_FN(MMCDAT2), GPIO_FN(MMCDAT1), GPIO_FN(MMCDAT0), GPIO_FN(ON_DQ7), GPIO_FN(ON_DQ6), GPIO_FN(ON_DQ5), GPIO_FN(ON_DQ4), GPIO_FN(ON_DQ3), GPIO_FN(ON_DQ2), GPIO_FN(ON_DQ1), GPIO_FN(ON_DQ0), }; static const struct pinmux_cfg_reg pinmux_config_regs[] = { { PINMUX_CFG_REG("PACR", 0xffec0000, 16, 2) { PTA7_FN, PTA7_OUT, PTA7_IN, PTA7_IN_PU, PTA6_FN, PTA6_OUT, PTA6_IN, PTA6_IN_PU, PTA5_FN, PTA5_OUT, PTA5_IN, PTA5_IN_PU, PTA4_FN, PTA4_OUT, PTA4_IN, PTA4_IN_PU, PTA3_FN, PTA3_OUT, PTA3_IN, PTA3_IN_PU, PTA2_FN, PTA2_OUT, PTA2_IN, PTA2_IN_PU, PTA1_FN, PTA1_OUT, PTA1_IN, PTA1_IN_PU, PTA0_FN, PTA0_OUT, PTA0_IN, PTA0_IN_PU } }, { PINMUX_CFG_REG("PBCR", 0xffec0002, 16, 2) { PTB7_FN, PTB7_OUT, PTB7_IN, 0, PTB6_FN, PTB6_OUT, PTB6_IN, 0, PTB5_FN, PTB5_OUT, PTB5_IN, 0, PTB4_FN, PTB4_OUT, PTB4_IN, 0, PTB3_FN, PTB3_OUT, PTB3_IN, 0, PTB2_FN, PTB2_OUT, PTB2_IN, 0, PTB1_FN, PTB1_OUT, PTB1_IN, 0, PTB0_FN, PTB0_OUT, PTB0_IN, 0 } }, { PINMUX_CFG_REG("PCCR", 0xffec0004, 16, 2) { PTC7_FN, PTC7_OUT, PTC7_IN, 0, PTC6_FN, PTC6_OUT, PTC6_IN, 0, PTC5_FN, PTC5_OUT, PTC5_IN, 0, PTC4_FN, PTC4_OUT, PTC4_IN, 0, PTC3_FN, PTC3_OUT, PTC3_IN, 0, PTC2_FN, PTC2_OUT, PTC2_IN, 0, PTC1_FN, PTC1_OUT, PTC1_IN, 0, PTC0_FN, PTC0_OUT, PTC0_IN, 0 } }, { PINMUX_CFG_REG("PDCR", 0xffec0006, 16, 2) { PTD7_FN, PTD7_OUT, PTD7_IN, PTD7_IN_PU, PTD6_FN, PTD6_OUT, PTD6_IN, PTD6_IN_PU, PTD5_FN, PTD5_OUT, PTD5_IN, PTD5_IN_PU, PTD4_FN, PTD4_OUT, PTD4_IN, PTD4_IN_PU, PTD3_FN, PTD3_OUT, PTD3_IN, PTD3_IN_PU, PTD2_FN, PTD2_OUT, PTD2_IN, PTD2_IN_PU, PTD1_FN, PTD1_OUT, PTD1_IN, PTD1_IN_PU, PTD0_FN, PTD0_OUT, PTD0_IN, PTD0_IN_PU } }, { PINMUX_CFG_REG("PECR", 0xffec0008, 16, 2) { PTE7_FN, PTE7_OUT, PTE7_IN, PTE7_IN_PU, PTE6_FN, PTE6_OUT, PTE6_IN, PTE6_IN_PU, PTE5_FN, PTE5_OUT, PTE5_IN, PTE5_IN_PU, PTE4_FN, PTE4_OUT, PTE4_IN, PTE4_IN_PU, PTE3_FN, PTE3_OUT, PTE3_IN, PTE3_IN_PU, PTE2_FN, PTE2_OUT, PTE2_IN, PTE2_IN_PU, PTE1_FN, PTE1_OUT, PTE1_IN, PTE1_IN_PU, PTE0_FN, PTE0_OUT, PTE0_IN, PTE0_IN_PU } }, { PINMUX_CFG_REG("PFCR", 0xffec000a, 16, 2) { PTF7_FN, PTF7_OUT, PTF7_IN, PTF7_IN_PU, PTF6_FN, PTF6_OUT, PTF6_IN, PTF6_IN_PU, PTF5_FN, PTF5_OUT, PTF5_IN, PTF5_IN_PU, PTF4_FN, PTF4_OUT, PTF4_IN, PTF4_IN_PU, PTF3_FN, PTF3_OUT, PTF3_IN, PTF3_IN_PU, PTF2_FN, PTF2_OUT, PTF2_IN, PTF2_IN_PU, PTF1_FN, PTF1_OUT, PTF1_IN, PTF1_IN_PU, PTF0_FN, PTF0_OUT, PTF0_IN, PTF0_IN_PU } }, { PINMUX_CFG_REG("PGCR", 0xffec000c, 16, 2) { PTG7_FN, PTG7_OUT, PTG7_IN, PTG7_IN_PU , PTG6_FN, PTG6_OUT, PTG6_IN, PTG6_IN_PU , PTG5_FN, PTG5_OUT, PTG5_IN, 0, PTG4_FN, PTG4_OUT, PTG4_IN, PTG4_IN_PU , PTG3_FN, PTG3_OUT, PTG3_IN, 0, PTG2_FN, PTG2_OUT, PTG2_IN, 0, PTG1_FN, PTG1_OUT, PTG1_IN, 0, PTG0_FN, PTG0_OUT, PTG0_IN, 0 } }, { PINMUX_CFG_REG("PHCR", 0xffec000e, 16, 2) { PTH7_FN, PTH7_OUT, PTH7_IN, PTH7_IN_PU, PTH6_FN, PTH6_OUT, PTH6_IN, PTH6_IN_PU, PTH5_FN, PTH5_OUT, PTH5_IN, PTH5_IN_PU, PTH4_FN, PTH4_OUT, PTH4_IN, PTH4_IN_PU, PTH3_FN, PTH3_OUT, PTH3_IN, PTH3_IN_PU, PTH2_FN, PTH2_OUT, PTH2_IN, PTH2_IN_PU, PTH1_FN, PTH1_OUT, PTH1_IN, PTH1_IN_PU, PTH0_FN, PTH0_OUT, PTH0_IN, PTH0_IN_PU } }, { PINMUX_CFG_REG("PICR", 0xffec0010, 16, 2) { PTI7_FN, PTI7_OUT, PTI7_IN, PTI7_IN_PU, PTI6_FN, PTI6_OUT, PTI6_IN, PTI6_IN_PU, PTI5_FN, PTI5_OUT, PTI5_IN, 0, PTI4_FN, PTI4_OUT, PTI4_IN, PTI4_IN_PU, PTI3_FN, PTI3_OUT, PTI3_IN, PTI3_IN_PU, PTI2_FN, PTI2_OUT, PTI2_IN, PTI2_IN_PU, PTI1_FN, PTI1_OUT, PTI1_IN, PTI1_IN_PU, PTI0_FN, PTI0_OUT, PTI0_IN, PTI0_IN_PU } }, { PINMUX_CFG_REG("PJCR", 0xffec0012, 16, 2) { 0, 0, 0, 0, /* reserved: 
always set 1 */ PTJ6_FN, PTJ6_OUT, PTJ6_IN, PTJ6_IN_PU, PTJ5_FN, PTJ5_OUT, PTJ5_IN, PTJ5_IN_PU, PTJ4_FN, PTJ4_OUT, PTJ4_IN, PTJ4_IN_PU, PTJ3_FN, PTJ3_OUT, PTJ3_IN, PTJ3_IN_PU, PTJ2_FN, PTJ2_OUT, PTJ2_IN, PTJ2_IN_PU, PTJ1_FN, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU, PTJ0_FN, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU } }, { PINMUX_CFG_REG("PKCR", 0xffec0014, 16, 2) { PTK7_FN, PTK7_OUT, PTK7_IN, PTK7_IN_PU, PTK6_FN, PTK6_OUT, PTK6_IN, PTK6_IN_PU, PTK5_FN, PTK5_OUT, PTK5_IN, PTK5_IN_PU, PTK4_FN, PTK4_OUT, PTK4_IN, PTK4_IN_PU, PTK3_FN, PTK3_OUT, PTK3_IN, PTK3_IN_PU, PTK2_FN, PTK2_OUT, PTK2_IN, PTK2_IN_PU, PTK1_FN, PTK1_OUT, PTK1_IN, PTK1_IN_PU, PTK0_FN, PTK0_OUT, PTK0_IN, PTK0_IN_PU } }, { PINMUX_CFG_REG("PLCR", 0xffec0016, 16, 2) { 0, 0, 0, 0, /* reserved: always set 1 */ PTL6_FN, PTL6_OUT, PTL6_IN, PTL6_IN_PU, PTL5_FN, PTL5_OUT, PTL5_IN, PTL5_IN_PU, PTL4_FN, PTL4_OUT, PTL4_IN, PTL4_IN_PU, PTL3_FN, PTL3_OUT, PTL3_IN, PTL3_IN_PU, PTL2_FN, PTL2_OUT, PTL2_IN, PTL2_IN_PU, PTL1_FN, PTL1_OUT, PTL1_IN, PTL1_IN_PU, PTL0_FN, PTL0_OUT, PTL0_IN, PTL0_IN_PU } }, { PINMUX_CFG_REG("PMCR", 0xffec0018, 16, 2) { PTM7_FN, PTM7_OUT, PTM7_IN, PTM7_IN_PU, PTM6_FN, PTM6_OUT, PTM6_IN, PTM6_IN_PU, PTM5_FN, PTM5_OUT, PTM5_IN, PTM5_IN_PU, PTM4_FN, PTM4_OUT, PTM4_IN, PTM4_IN_PU, PTM3_FN, PTM3_OUT, PTM3_IN, 0, PTM2_FN, PTM2_OUT, PTM2_IN, 0, PTM1_FN, PTM1_OUT, PTM1_IN, 0, PTM0_FN, PTM0_OUT, PTM0_IN, 0 } }, { PINMUX_CFG_REG("PNCR", 0xffec001a, 16, 2) { 0, 0, 0, 0, /* reserved: always set 1 */ PTN6_FN, PTN6_OUT, PTN6_IN, 0, PTN5_FN, PTN5_OUT, PTN5_IN, 0, PTN4_FN, PTN4_OUT, PTN4_IN, PTN4_IN_PU, PTN3_FN, PTN3_OUT, PTN3_IN, PTN3_IN_PU, PTN2_FN, PTN2_OUT, PTN2_IN, PTN2_IN_PU, PTN1_FN, PTN1_OUT, PTN1_IN, PTN1_IN_PU, PTN0_FN, PTN0_OUT, PTN0_IN, PTN0_IN_PU } }, { PINMUX_CFG_REG("POCR", 0xffec001c, 16, 2) { PTO7_FN, PTO7_OUT, PTO7_IN, PTO7_IN_PU, PTO6_FN, PTO6_OUT, PTO6_IN, PTO6_IN_PU, PTO5_FN, PTO5_OUT, PTO5_IN, PTO5_IN_PU, PTO4_FN, PTO4_OUT, PTO4_IN, PTO4_IN_PU, PTO3_FN, PTO3_OUT, PTO3_IN, PTO3_IN_PU, PTO2_FN, PTO2_OUT, PTO2_IN, PTO2_IN_PU, PTO1_FN, PTO1_OUT, PTO1_IN, PTO1_IN_PU, PTO0_FN, PTO0_OUT, PTO0_IN, PTO0_IN_PU } }, #if 0 /* FIXME: Remove it? 
*/ { PINMUX_CFG_REG("PPCR", 0xffec001e, 16, 2) { 0, 0, 0, 0, /* reserved: always set 1 */ PTP6_FN, PTP6_OUT, PTP6_IN, 0, PTP5_FN, PTP5_OUT, PTP5_IN, 0, PTP4_FN, PTP4_OUT, PTP4_IN, 0, PTP3_FN, PTP3_OUT, PTP3_IN, 0, PTP2_FN, PTP2_OUT, PTP2_IN, 0, PTP1_FN, PTP1_OUT, PTP1_IN, 0, PTP0_FN, PTP0_OUT, PTP0_IN, 0 } }, #endif { PINMUX_CFG_REG("PQCR", 0xffec0020, 16, 2) { 0, 0, 0, 0, /* reserved: always set 1 */ PTQ6_FN, PTQ6_OUT, PTQ6_IN, 0, PTQ5_FN, PTQ5_OUT, PTQ5_IN, 0, PTQ4_FN, PTQ4_OUT, PTQ4_IN, 0, PTQ3_FN, PTQ3_OUT, PTQ3_IN, 0, PTQ2_FN, PTQ2_OUT, PTQ2_IN, 0, PTQ1_FN, PTQ1_OUT, PTQ1_IN, 0, PTQ0_FN, PTQ0_OUT, PTQ0_IN, 0 } }, { PINMUX_CFG_REG("PRCR", 0xffec0022, 16, 2) { PTR7_FN, PTR7_OUT, PTR7_IN, 0, PTR6_FN, PTR6_OUT, PTR6_IN, 0, PTR5_FN, PTR5_OUT, PTR5_IN, 0, PTR4_FN, PTR4_OUT, PTR4_IN, 0, PTR3_FN, PTR3_OUT, PTR3_IN, 0, PTR2_FN, PTR2_OUT, PTR2_IN, 0, PTR1_FN, PTR1_OUT, PTR1_IN, 0, PTR0_FN, PTR0_OUT, PTR0_IN, 0 } }, { PINMUX_CFG_REG("PSCR", 0xffec0024, 16, 2) { PTS7_FN, PTS7_OUT, PTS7_IN, 0, PTS6_FN, PTS6_OUT, PTS6_IN, 0, PTS5_FN, PTS5_OUT, PTS5_IN, 0, PTS4_FN, PTS4_OUT, PTS4_IN, 0, PTS3_FN, PTS3_OUT, PTS3_IN, 0, PTS2_FN, PTS2_OUT, PTS2_IN, 0, PTS1_FN, PTS1_OUT, PTS1_IN, 0, PTS0_FN, PTS0_OUT, PTS0_IN, 0 } }, { PINMUX_CFG_REG("PTCR", 0xffec0026, 16, 2) { PTT7_FN, PTT7_OUT, PTT7_IN, PTO7_IN_PU, PTT6_FN, PTT6_OUT, PTT6_IN, PTO6_IN_PU, PTT5_FN, PTT5_OUT, PTT5_IN, PTO5_IN_PU, PTT4_FN, PTT4_OUT, PTT4_IN, PTO4_IN_PU, PTT3_FN, PTT3_OUT, PTT3_IN, PTO3_IN_PU, PTT2_FN, PTT2_OUT, PTT2_IN, PTO2_IN_PU, PTT1_FN, PTT1_OUT, PTT1_IN, PTO1_IN_PU, PTT0_FN, PTT0_OUT, PTT0_IN, PTO0_IN_PU } }, { PINMUX_CFG_REG("PUCR", 0xffec0028, 16, 2) { PTU7_FN, PTU7_OUT, PTU7_IN, PTU7_IN_PU, PTU6_FN, PTU6_OUT, PTU6_IN, PTU6_IN_PU, PTU5_FN, PTU5_OUT, PTU5_IN, PTU5_IN_PU, PTU4_FN, PTU4_OUT, PTU4_IN, PTU4_IN_PU, PTU3_FN, PTU3_OUT, PTU3_IN, PTU3_IN_PU, PTU2_FN, PTU2_OUT, PTU2_IN, PTU2_IN_PU, PTU1_FN, PTU1_OUT, PTU1_IN, PTU1_IN_PU, PTU0_FN, PTU0_OUT, PTU0_IN, PTU0_IN_PU } }, { PINMUX_CFG_REG("PVCR", 0xffec002a, 16, 2) { PTV7_FN, PTV7_OUT, PTV7_IN, PTV7_IN_PU, PTV6_FN, PTV6_OUT, PTV6_IN, PTV6_IN_PU, PTV5_FN, PTV5_OUT, PTV5_IN, PTV5_IN_PU, PTV4_FN, PTV4_OUT, PTV4_IN, PTV4_IN_PU, PTV3_FN, PTV3_OUT, PTV3_IN, PTV3_IN_PU, PTV2_FN, PTV2_OUT, PTV2_IN, PTV2_IN_PU, PTV1_FN, PTV1_OUT, PTV1_IN, 0, PTV0_FN, PTV0_OUT, PTV0_IN, 0 } }, { PINMUX_CFG_REG("PWCR", 0xffec002c, 16, 2) { PTW7_FN, PTW7_OUT, PTW7_IN, 0, PTW6_FN, PTW6_OUT, PTW6_IN, 0, PTW5_FN, PTW5_OUT, PTW5_IN, 0, PTW4_FN, PTW4_OUT, PTW4_IN, 0, PTW3_FN, PTW3_OUT, PTW3_IN, 0, PTW2_FN, PTW2_OUT, PTW2_IN, 0, PTW1_FN, PTW1_OUT, PTW1_IN, PTW1_IN_PU, PTW0_FN, PTW0_OUT, PTW0_IN, PTW0_IN_PU } }, { PINMUX_CFG_REG("PXCR", 0xffec002e, 16, 2) { PTX7_FN, PTX7_OUT, PTX7_IN, PTX7_IN_PU, PTX6_FN, PTX6_OUT, PTX6_IN, PTX6_IN_PU, PTX5_FN, PTX5_OUT, PTX5_IN, PTX5_IN_PU, PTX4_FN, PTX4_OUT, PTX4_IN, PTX4_IN_PU, PTX3_FN, PTX3_OUT, PTX3_IN, PTX3_IN_PU, PTX2_FN, PTX2_OUT, PTX2_IN, PTX2_IN_PU, PTX1_FN, PTX1_OUT, PTX1_IN, PTX1_IN_PU, PTX0_FN, PTX0_OUT, PTX0_IN, PTX0_IN_PU } }, { PINMUX_CFG_REG("PYCR", 0xffec0030, 16, 2) { PTY7_FN, PTY7_OUT, PTY7_IN, PTY7_IN_PU, PTY6_FN, PTY6_OUT, PTY6_IN, PTY6_IN_PU, PTY5_FN, PTY5_OUT, PTY5_IN, PTY5_IN_PU, PTY4_FN, PTY4_OUT, PTY4_IN, PTY4_IN_PU, PTY3_FN, PTY3_OUT, PTY3_IN, PTY3_IN_PU, PTY2_FN, PTY2_OUT, PTY2_IN, PTY2_IN_PU, PTY1_FN, PTY1_OUT, PTY1_IN, PTY1_IN_PU, PTY0_FN, PTY0_OUT, PTY0_IN, PTY0_IN_PU } }, { PINMUX_CFG_REG("PZCR", 0xffec0032, 16, 2) { PTZ7_FN, PTZ7_OUT, PTZ7_IN, 0, PTZ6_FN, PTZ6_OUT, PTZ6_IN, 0, PTZ5_FN, PTZ5_OUT, PTZ5_IN, 0, PTZ4_FN, PTZ4_OUT, PTZ4_IN, 0, PTZ3_FN, 
PTZ3_OUT, PTZ3_IN, 0, PTZ2_FN, PTZ2_OUT, PTZ2_IN, 0, PTZ1_FN, PTZ1_OUT, PTZ1_IN, 0, PTZ0_FN, PTZ0_OUT, PTZ0_IN, 0 } }, { PINMUX_CFG_REG("PSEL0", 0xffec0070, 16, 1) { PS0_15_FN1, PS0_15_FN2, PS0_14_FN1, PS0_14_FN2, PS0_13_FN1, PS0_13_FN2, PS0_12_FN1, PS0_12_FN2, PS0_11_FN1, PS0_11_FN2, PS0_10_FN1, PS0_10_FN2, PS0_9_FN1, PS0_9_FN2, PS0_8_FN1, PS0_8_FN2, PS0_7_FN1, PS0_7_FN2, PS0_6_FN1, PS0_6_FN2, PS0_5_FN1, PS0_5_FN2, PS0_4_FN1, PS0_4_FN2, PS0_3_FN1, PS0_3_FN2, PS0_2_FN1, PS0_2_FN2, 0, 0, 0, 0, } }, { PINMUX_CFG_REG("PSEL1", 0xffec0072, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PS1_10_FN1, PS1_10_FN2, PS1_9_FN1, PS1_9_FN2, PS1_8_FN1, PS1_8_FN2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PS1_2_FN1, PS1_2_FN2, 0, 0, 0, 0, } }, { PINMUX_CFG_REG("PSEL2", 0xffec0074, 16, 1) { 0, 0, 0, 0, PS2_13_FN1, PS2_13_FN2, PS2_12_FN1, PS2_12_FN2, 0, 0, 0, 0, 0, 0, 0, 0, PS2_7_FN1, PS2_7_FN2, PS2_6_FN1, PS2_6_FN2, PS2_5_FN1, PS2_5_FN2, PS2_4_FN1, PS2_4_FN2, 0, 0, PS2_2_FN1, PS2_2_FN2, 0, 0, 0, 0, } }, { PINMUX_CFG_REG("PSEL3", 0xffec0076, 16, 1) { PS3_15_FN1, PS3_15_FN2, PS3_14_FN1, PS3_14_FN2, PS3_13_FN1, PS3_13_FN2, PS3_12_FN1, PS3_12_FN2, PS3_11_FN1, PS3_11_FN2, PS3_10_FN1, PS3_10_FN2, PS3_9_FN1, PS3_9_FN2, PS3_8_FN1, PS3_8_FN2, PS3_7_FN1, PS3_7_FN2, 0, 0, 0, 0, 0, 0, 0, 0, PS3_2_FN1, PS3_2_FN2, PS3_1_FN1, PS3_1_FN2, 0, 0, } }, { PINMUX_CFG_REG("PSEL4", 0xffec0078, 16, 1) { 0, 0, PS4_14_FN1, PS4_14_FN2, PS4_13_FN1, PS4_13_FN2, PS4_12_FN1, PS4_12_FN2, 0, 0, PS4_10_FN1, PS4_10_FN2, PS4_9_FN1, PS4_9_FN2, PS4_8_FN1, PS4_8_FN2, 0, 0, 0, 0, 0, 0, PS4_4_FN1, PS4_4_FN2, PS4_3_FN1, PS4_3_FN2, PS4_2_FN1, PS4_2_FN2, PS4_1_FN1, PS4_1_FN2, PS4_0_FN1, PS4_0_FN2, } }, { PINMUX_CFG_REG("PSEL5", 0xffec007a, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, PS5_11_FN1, PS5_11_FN2, PS5_10_FN1, PS5_10_FN2, PS5_9_FN1, PS5_9_FN2, PS5_8_FN1, PS5_8_FN2, PS5_7_FN1, PS5_7_FN2, PS5_6_FN1, PS5_6_FN2, PS5_5_FN1, PS5_5_FN2, PS5_4_FN1, PS5_4_FN2, PS5_3_FN1, PS5_3_FN2, PS5_2_FN1, PS5_2_FN2, 0, 0, 0, 0, } }, { PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1) { PS6_15_FN1, PS6_15_FN2, PS6_14_FN1, PS6_14_FN2, PS6_13_FN1, PS6_13_FN2, PS6_12_FN1, PS6_12_FN2, PS6_11_FN1, PS6_11_FN2, PS6_10_FN1, PS6_10_FN2, PS6_9_FN1, PS6_9_FN2, PS6_8_FN1, PS6_8_FN2, PS6_7_FN1, PS6_7_FN2, PS6_6_FN1, PS6_6_FN2, PS6_5_FN1, PS6_5_FN2, PS6_4_FN1, PS6_4_FN2, PS6_3_FN1, PS6_3_FN2, PS6_2_FN1, PS6_2_FN2, PS6_1_FN1, PS6_1_FN2, PS6_0_FN1, PS6_0_FN2, } }, { PINMUX_CFG_REG("PSEL7", 0xffec0082, 16, 1) { PS7_15_FN1, PS7_15_FN2, PS7_14_FN1, PS7_14_FN2, PS7_13_FN1, PS7_13_FN2, PS7_12_FN1, PS7_12_FN2, PS7_11_FN1, PS7_11_FN2, PS7_10_FN1, PS7_10_FN2, PS7_9_FN1, PS7_9_FN2, PS7_8_FN1, PS7_8_FN2, PS7_7_FN1, PS7_7_FN2, PS7_6_FN1, PS7_6_FN2, PS7_5_FN1, PS7_5_FN2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } }, { PINMUX_CFG_REG("PSEL8", 0xffec0084, 16, 1) { PS8_15_FN1, PS8_15_FN2, PS8_14_FN1, PS8_14_FN2, PS8_13_FN1, PS8_13_FN2, PS8_12_FN1, PS8_12_FN2, PS8_11_FN1, PS8_11_FN2, PS8_10_FN1, PS8_10_FN2, PS8_9_FN1, PS8_9_FN2, PS8_8_FN1, PS8_8_FN2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } }, {} }; static const struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PADR", 0xffec0034, 8) { PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA } }, { PINMUX_DATA_REG("PBDR", 0xffec0036, 8) { PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA } }, { PINMUX_DATA_REG("PCDR", 0xffec0038, 8) { PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA } }, { PINMUX_DATA_REG("PDDR", 0xffec003a, 8) { PTD7_DATA, PTD6_DATA, PTD5_DATA, 
PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA } }, { PINMUX_DATA_REG("PEDR", 0xffec003c, 8) { PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA } }, { PINMUX_DATA_REG("PFDR", 0xffec003e, 8) { PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA } }, { PINMUX_DATA_REG("PGDR", 0xffec0040, 8) { PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA } }, { PINMUX_DATA_REG("PHDR", 0xffec0042, 8) { PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA } }, { PINMUX_DATA_REG("PIDR", 0xffec0044, 8) { PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA, PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA } }, { PINMUX_DATA_REG("PJDR", 0xffec0046, 8) { 0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA, PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA } }, { PINMUX_DATA_REG("PKDR", 0xffec0048, 8) { PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA } }, { PINMUX_DATA_REG("PLDR", 0xffec004a, 8) { 0, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA } }, { PINMUX_DATA_REG("PMDR", 0xffec004c, 8) { PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA } }, { PINMUX_DATA_REG("PNDR", 0xffec004e, 8) { 0, PTN6_DATA, PTN5_DATA, PTN4_DATA, PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA } }, { PINMUX_DATA_REG("PODR", 0xffec0050, 8) { PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA, PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA } }, { PINMUX_DATA_REG("PPDR", 0xffec0052, 8) { PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA, PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA } }, { PINMUX_DATA_REG("PQDR", 0xffec0054, 8) { 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA } }, { PINMUX_DATA_REG("PRDR", 0xffec0056, 8) { PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA } }, { PINMUX_DATA_REG("PSDR", 0xffec0058, 8) { PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA } }, { PINMUX_DATA_REG("PTDR", 0xffec005a, 8) { PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA } }, { PINMUX_DATA_REG("PUDR", 0xffec005c, 8) { PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA } }, { PINMUX_DATA_REG("PVDR", 0xffec005e, 8) { PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA } }, { PINMUX_DATA_REG("PWDR", 0xffec0060, 8) { PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA, PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA } }, { PINMUX_DATA_REG("PXDR", 0xffec0062, 8) { PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA, PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA } }, { PINMUX_DATA_REG("PYDR", 0xffec0064, 8) { PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA, PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA } }, { PINMUX_DATA_REG("PZDR", 0xffec0066, 8) { PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA } }, { }, }; const struct sh_pfc_soc_info sh7757_pinmux_info = { .name = "sh7757_pfc", .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .pins = pinmux_pins, .nr_pins = ARRAY_SIZE(pinmux_pins), .func_gpios = pinmux_func_gpios, .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios), .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size 
= ARRAY_SIZE(pinmux_data), };
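#if 0	/* illustration only: how a PxCR value from the tables above decodes */
/*
 * A minimal sketch, not part of the driver.  Each PINMUX_CFG_REG("PxCR", ...)
 * entry above describes a 16-bit register holding eight 2-bit fields,
 * assuming the fields listed first occupy the high bits: pin 7 in bits
 * [15:14] down to pin 0 in bits [1:0].  Within a field the four values
 * select, in the order listed, function, output, input, and input with
 * pull-up (where the table provides one; otherwise that slot is 0).  The
 * helpers below only restate that encoding and are hypothetical, not
 * kernel API.
 */
enum pxcr_mode { PXCR_FN = 0, PXCR_OUT = 1, PXCR_IN = 2, PXCR_IN_PU = 3 };

static unsigned int pxcr_shift(unsigned int pin)	/* pin 0..7 */
{
	return 2 * pin;					/* pin 7 -> bits [15:14] */
}

static unsigned short pxcr_set(unsigned short pxcr, unsigned int pin,
			       enum pxcr_mode mode)
{
	unsigned int shift = pxcr_shift(pin);

	return (unsigned short)((pxcr & ~(0x3 << shift)) | (mode << shift));
}
#endif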
gpl-2.0
muddy1/herckernels
arch/arm/mach-omap1/board-h2.c
2428
10998
/* * linux/arch/arm/mach-omap1/board-h2.c * * Board specific inits for OMAP-1610 H2 * * Copyright (C) 2001 RidgeRun, Inc. * Author: Greg Lonnon <glonnon@ridgerun.com> * * Copyright (C) 2002 MontaVista Software, Inc. * * Separated FPGA interrupts from innovator1510.c and cleaned up for 2.6 * Copyright (C) 2004 Nokia Corporation by Tony Lindrgen <tony@atomide.com> * * H2 specific changes and cleanup * Copyright (C) 2004 Nokia Corporation by Imre Deak <imre.deak@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/input.h> #include <linux/i2c/tps65010.h> #include <linux/smc91x.h> #include <mach/hardware.h> #include <asm/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <plat/mux.h> #include <plat/dma.h> #include <plat/tc.h> #include <plat/irda.h> #include <plat/usb.h> #include <plat/keypad.h> #include <plat/common.h> #include <plat/flash.h> #include "board-h2.h" /* At OMAP1610 Innovator the Ethernet is directly connected to CS1 */ #define OMAP1610_ETHR_START 0x04000300 static const unsigned int h2_keymap[] = { KEY(0, 0, KEY_LEFT), KEY(1, 0, KEY_RIGHT), KEY(2, 0, KEY_3), KEY(3, 0, KEY_F10), KEY(4, 0, KEY_F5), KEY(5, 0, KEY_9), KEY(0, 1, KEY_DOWN), KEY(1, 1, KEY_UP), KEY(2, 1, KEY_2), KEY(3, 1, KEY_F9), KEY(4, 1, KEY_F7), KEY(5, 1, KEY_0), KEY(0, 2, KEY_ENTER), KEY(1, 2, KEY_6), KEY(2, 2, KEY_1), KEY(3, 2, KEY_F2), KEY(4, 2, KEY_F6), KEY(5, 2, KEY_HOME), KEY(0, 3, KEY_8), KEY(1, 3, KEY_5), KEY(2, 3, KEY_F12), KEY(3, 3, KEY_F3), KEY(4, 3, KEY_F8), KEY(5, 3, KEY_END), KEY(0, 4, KEY_7), KEY(1, 4, KEY_4), KEY(2, 4, KEY_F11), KEY(3, 4, KEY_F1), KEY(4, 4, KEY_F4), KEY(5, 4, KEY_ESC), KEY(0, 5, KEY_F13), KEY(1, 5, KEY_F14), KEY(2, 5, KEY_F15), KEY(3, 5, KEY_F16), KEY(4, 5, KEY_SLEEP), }; static struct mtd_partition h2_nor_partitions[] = { /* bootloader (U-Boot, etc) in first sector */ { .name = "bootloader", .offset = 0, .size = SZ_128K, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, /* bootloader params in the next sector */ { .name = "params", .offset = MTDPART_OFS_APPEND, .size = SZ_128K, .mask_flags = 0, }, /* kernel */ { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = SZ_2M, .mask_flags = 0 }, /* file system */ { .name = "filesystem", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, .mask_flags = 0 } }; static struct physmap_flash_data h2_nor_data = { .width = 2, .set_vpp = omap1_set_vpp, .parts = h2_nor_partitions, .nr_parts = ARRAY_SIZE(h2_nor_partitions), }; static struct resource h2_nor_resource = { /* This is on CS3, wherever it's mapped */ .flags = IORESOURCE_MEM, }; static struct platform_device h2_nor_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &h2_nor_data, }, .num_resources = 1, .resource = &h2_nor_resource, }; static struct mtd_partition h2_nand_partitions[] = { #if 0 /* REVISIT: enable these partitions if you make NAND BOOT * work on your H2 (rev C or newer); published versions of * x-load only support P2 and H3. 
*/ { .name = "xloader", .offset = 0, .size = 64 * 1024, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "bootloader", .offset = MTDPART_OFS_APPEND, .size = 256 * 1024, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "params", .offset = MTDPART_OFS_APPEND, .size = 192 * 1024, }, { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = 2 * SZ_1M, }, #endif { .name = "filesystem", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, }, }; static void h2_nand_cmd_ctl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *this = mtd->priv; unsigned long mask; if (cmd == NAND_CMD_NONE) return; mask = (ctrl & NAND_CLE) ? 0x02 : 0; if (ctrl & NAND_ALE) mask |= 0x04; writeb(cmd, (unsigned long)this->IO_ADDR_W | mask); } #define H2_NAND_RB_GPIO_PIN 62 static int h2_nand_dev_ready(struct mtd_info *mtd) { return gpio_get_value(H2_NAND_RB_GPIO_PIN); } static const char *h2_part_probes[] = { "cmdlinepart", NULL }; static struct platform_nand_data h2_nand_platdata = { .chip = { .nr_chips = 1, .chip_offset = 0, .nr_partitions = ARRAY_SIZE(h2_nand_partitions), .partitions = h2_nand_partitions, .options = NAND_SAMSUNG_LP_OPTIONS, .part_probe_types = h2_part_probes, }, .ctrl = { .cmd_ctrl = h2_nand_cmd_ctl, .dev_ready = h2_nand_dev_ready, }, }; static struct resource h2_nand_resource = { .flags = IORESOURCE_MEM, }; static struct platform_device h2_nand_device = { .name = "gen_nand", .id = 0, .dev = { .platform_data = &h2_nand_platdata, }, .num_resources = 1, .resource = &h2_nand_resource, }; static struct smc91x_platdata h2_smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource h2_smc91x_resources[] = { [0] = { .start = OMAP1610_ETHR_START, /* Physical */ .end = OMAP1610_ETHR_START + 0xf, .flags = IORESOURCE_MEM, }, [1] = { .start = OMAP_GPIO_IRQ(0), .end = OMAP_GPIO_IRQ(0), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, }, }; static struct platform_device h2_smc91x_device = { .name = "smc91x", .id = 0, .dev = { .platform_data = &h2_smc91x_info, }, .num_resources = ARRAY_SIZE(h2_smc91x_resources), .resource = h2_smc91x_resources, }; static struct resource h2_kp_resources[] = { [0] = { .start = INT_KEYBOARD, .end = INT_KEYBOARD, .flags = IORESOURCE_IRQ, }, }; static const struct matrix_keymap_data h2_keymap_data = { .keymap = h2_keymap, .keymap_size = ARRAY_SIZE(h2_keymap), }; static struct omap_kp_platform_data h2_kp_data = { .rows = 8, .cols = 8, .keymap_data = &h2_keymap_data, .rep = true, .delay = 9, .dbounce = true, }; static struct platform_device h2_kp_device = { .name = "omap-keypad", .id = -1, .dev = { .platform_data = &h2_kp_data, }, .num_resources = ARRAY_SIZE(h2_kp_resources), .resource = h2_kp_resources, }; #define H2_IRDA_FIRSEL_GPIO_PIN 17 static struct omap_irda_config h2_irda_data = { .transceiver_cap = IR_SIRMODE | IR_MIRMODE | IR_FIRMODE, .rx_channel = OMAP_DMA_UART3_RX, .tx_channel = OMAP_DMA_UART3_TX, .dest_start = UART3_THR, .src_start = UART3_RHR, .tx_trigger = 0, .rx_trigger = 0, }; static struct resource h2_irda_resources[] = { [0] = { .start = INT_UART3, .end = INT_UART3, .flags = IORESOURCE_IRQ, }, }; static u64 irda_dmamask = 0xffffffff; static struct platform_device h2_irda_device = { .name = "omapirda", .id = 0, .dev = { .platform_data = &h2_irda_data, .dma_mask = &irda_dmamask, }, .num_resources = ARRAY_SIZE(h2_irda_resources), .resource = h2_irda_resources, }; static struct platform_device h2_lcd_device = { .name = "lcd_h2", .id = -1, }; static 
struct platform_device *h2_devices[] __initdata = { &h2_nor_device, &h2_nand_device, &h2_smc91x_device, &h2_irda_device, &h2_kp_device, &h2_lcd_device, }; static void __init h2_init_smc91x(void) { if (gpio_request(0, "SMC91x irq") < 0) { printk("Error requesting gpio 0 for smc91x irq\n"); return; } } static int tps_setup(struct i2c_client *client, void *context) { tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V | TPS_LDO1_ENABLE | TPS_VLDO1_3_0V); return 0; } static struct tps65010_board tps_board = { .base = H2_TPS_GPIO_BASE, .outmask = 0x0f, .setup = tps_setup, }; static struct i2c_board_info __initdata h2_i2c_board_info[] = { { I2C_BOARD_INFO("tps65010", 0x48), .irq = OMAP_GPIO_IRQ(58), .platform_data = &tps_board, }, { I2C_BOARD_INFO("isp1301_omap", 0x2d), .irq = OMAP_GPIO_IRQ(2), }, }; static void __init h2_init_irq(void) { omap1_init_common_hw(); omap_init_irq(); } static struct omap_usb_config h2_usb_config __initdata = { /* usb1 has a Mini-AB port and external isp1301 transceiver */ .otg = 2, #ifdef CONFIG_USB_GADGET_OMAP .hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */ /* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */ #elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) /* needs OTG cable, or NONSTANDARD (B-to-MiniB) */ .hmc_mode = 20, /* 1:dev|otg(off) 1:host 2:disabled */ #endif .pins[1] = 3, }; static struct omap_lcd_config h2_lcd_config __initdata = { .ctrl_name = "internal", }; static struct omap_board_config_kernel h2_config[] __initdata = { { OMAP_TAG_LCD, &h2_lcd_config }, }; static void __init h2_init(void) { h2_init_smc91x(); /* Here we assume the NOR boot config: NOR on CS3 (possibly swapped * to address 0 by a dip switch), NAND on CS2B. The NAND driver will * notice whether a NAND chip is enabled at probe time. * * FIXME revC boards (and H3) support NAND-boot, with a dip switch to * put NOR on CS2B and NAND (which on H2 may be 16bit) on CS3. Try * detecting that in code here, to avoid probing every possible flash * configuration... */ h2_nor_resource.end = h2_nor_resource.start = omap_cs3_phys(); h2_nor_resource.end += SZ_32M - 1; h2_nand_resource.end = h2_nand_resource.start = OMAP_CS2B_PHYS; h2_nand_resource.end += SZ_4K - 1; if (gpio_request(H2_NAND_RB_GPIO_PIN, "NAND ready") < 0) BUG(); gpio_direction_input(H2_NAND_RB_GPIO_PIN); omap_cfg_reg(L3_1610_FLASH_CS2B_OE); omap_cfg_reg(M8_1610_FLASH_CS2B_WE); /* MMC: card detect and WP */ /* omap_cfg_reg(U19_ARMIO1); */ /* CD */ omap_cfg_reg(BALLOUT_V8_ARMIO3); /* WP */ /* Mux pins for keypad */ omap_cfg_reg(F18_1610_KBC0); omap_cfg_reg(D20_1610_KBC1); omap_cfg_reg(D19_1610_KBC2); omap_cfg_reg(E18_1610_KBC3); omap_cfg_reg(C21_1610_KBC4); omap_cfg_reg(G18_1610_KBR0); omap_cfg_reg(F19_1610_KBR1); omap_cfg_reg(H14_1610_KBR2); omap_cfg_reg(E20_1610_KBR3); omap_cfg_reg(E19_1610_KBR4); omap_cfg_reg(N19_1610_KBR5); platform_add_devices(h2_devices, ARRAY_SIZE(h2_devices)); omap_board_config = h2_config; omap_board_config_size = ARRAY_SIZE(h2_config); omap_serial_init(); omap_register_i2c_bus(1, 100, h2_i2c_board_info, ARRAY_SIZE(h2_i2c_board_info)); omap1_usb_init(&h2_usb_config); h2_mmc_init(); } static void __init h2_map_io(void) { omap1_map_common_io(); } MACHINE_START(OMAP_H2, "TI-H2") /* Maintainer: Imre Deak <imre.deak@nokia.com> */ .boot_params = 0x10000100, .map_io = h2_map_io, .reserve = omap_reserve, .init_irq = h2_init_irq, .init_machine = h2_init, .timer = &omap_timer, MACHINE_END
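#if 0	/* illustration only: the address encoding used by h2_nand_cmd_ctl() */
/*
 * A minimal sketch, not wired into the board code.  h2_nand_cmd_ctl() above
 * folds the latch controls into the low address lines of the chip-select
 * window: command bytes (NAND_CLE) are written at IO_ADDR_W | 0x02 and
 * address bytes (NAND_ALE) at IO_ADDR_W | 0x04, while data cycles use
 * IO_ADDR_W itself.  The helper below just recomputes that offset so the
 * mapping is explicit; the function name is made up for the example.
 */
static unsigned long h2_nand_latch_offset(unsigned int ctrl)
{
	unsigned long mask = 0;

	if (ctrl & NAND_CLE)
		mask |= 0x02;	/* command latch -> address line A1 */
	if (ctrl & NAND_ALE)
		mask |= 0x04;	/* address latch -> address line A2 */

	return mask;
}
#endif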
gpl-2.0
htc-msm8660/android_kernel_htc_msm8660
drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_qmath.c
2428
7980
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/types.h> #include "wlc_phy_qmath.h" /* Description: This function make 16 bit unsigned multiplication. To fit the output into 16 bits the 32 bit multiplication result is right shifted by 16 bits. */ u16 qm_mulu16(u16 op1, u16 op2) { return (u16) (((u32) op1 * (u32) op2) >> 16); } /* Description: This function make 16 bit multiplication and return the result in 16 bits. To fit the multiplication result into 16 bits the multiplication result is right shifted by 15 bits. Right shifting 15 bits instead of 16 bits is done to remove the extra sign bit formed due to the multiplication. When both the 16bit inputs are 0x8000 then the output is saturated to 0x7fffffff. */ s16 qm_muls16(s16 op1, s16 op2) { s32 result; if (op1 == (s16) 0x8000 && op2 == (s16) 0x8000) { result = 0x7fffffff; } else { result = ((s32) (op1) * (s32) (op2)); } return (s16) (result >> 15); } /* Description: This function add two 32 bit numbers and return the 32bit result. If the result overflow 32 bits, the output will be saturated to 32bits. */ s32 qm_add32(s32 op1, s32 op2) { s32 result; result = op1 + op2; if (op1 < 0 && op2 < 0 && result > 0) { result = 0x80000000; } else if (op1 > 0 && op2 > 0 && result < 0) { result = 0x7fffffff; } return result; } /* Description: This function add two 16 bit numbers and return the 16bit result. If the result overflow 16 bits, the output will be saturated to 16bits. */ s16 qm_add16(s16 op1, s16 op2) { s16 result; s32 temp = (s32) op1 + (s32) op2; if (temp > (s32) 0x7fff) { result = (s16) 0x7fff; } else if (temp < (s32) 0xffff8000) { result = (s16) 0xffff8000; } else { result = (s16) temp; } return result; } /* Description: This function make 16 bit subtraction and return the 16bit result. If the result overflow 16 bits, the output will be saturated to 16bits. */ s16 qm_sub16(s16 op1, s16 op2) { s16 result; s32 temp = (s32) op1 - (s32) op2; if (temp > (s32) 0x7fff) { result = (s16) 0x7fff; } else if (temp < (s32) 0xffff8000) { result = (s16) 0xffff8000; } else { result = (s16) temp; } return result; } /* Description: This function make a 32 bit saturated left shift when the specified shift is +ve. This function will make a 32 bit right shift when the specified shift is -ve. This function return the result after shifting operation. */ s32 qm_shl32(s32 op, int shift) { int i; s32 result; result = op; if (shift > 31) shift = 31; else if (shift < -31) shift = -31; if (shift >= 0) { for (i = 0; i < shift; i++) { result = qm_add32(result, result); } } else { result = result >> (-shift); } return result; } /* Description: This function make a 16 bit saturated left shift when the specified shift is +ve. This function will make a 16 bit right shift when the specified shift is -ve. 
This function return the result after shifting operation. */ s16 qm_shl16(s16 op, int shift) { int i; s16 result; result = op; if (shift > 15) shift = 15; else if (shift < -15) shift = -15; if (shift > 0) { for (i = 0; i < shift; i++) { result = qm_add16(result, result); } } else { result = result >> (-shift); } return result; } /* Description: This function make a 16 bit right shift when shift is +ve. This function make a 16 bit saturated left shift when shift is -ve. This function return the result of the shift operation. */ s16 qm_shr16(s16 op, int shift) { return qm_shl16(op, -shift); } /* Description: This function return the number of redundant sign bits in a 32 bit number. Example: qm_norm32(0x00000080) = 23 */ s16 qm_norm32(s32 op) { u16 u16extraSignBits; if (op == 0) { return 31; } else { u16extraSignBits = 0; while ((op >> 31) == (op >> 30)) { u16extraSignBits++; op = op << 1; } } return u16extraSignBits; } /* This table is log2(1+(i/32)) where i=[0:1:31], in q.15 format */ static const s16 log_table[] = { 0, 1455, 2866, 4236, 5568, 6863, 8124, 9352, 10549, 11716, 12855, 13968, 15055, 16117, 17156, 18173, 19168, 20143, 21098, 22034, 22952, 23852, 24736, 25604, 26455, 27292, 28114, 28922, 29717, 30498, 31267, 32024 }; #define LOG_TABLE_SIZE 32 /* log_table size */ #define LOG2_LOG_TABLE_SIZE 5 /* log2(log_table size) */ #define Q_LOG_TABLE 15 /* qformat of log_table */ #define LOG10_2 19728 /* log10(2) in q.16 */ /* Description: This routine takes the input number N and its q format qN and compute the log10(N). This routine first normalizes the input no N. Then N is in mag*(2^x) format. mag is any number in the range 2^30-(2^31 - 1). Then log2(mag * 2^x) = log2(mag) + x is computed. From that log10(mag * 2^x) = log2(mag * 2^x) * log10(2) is computed. This routine looks the log2 value in the table considering LOG2_LOG_TABLE_SIZE+1 MSBs. As the MSB is always 1, only next LOG2_OF_LOG_TABLE_SIZE MSBs are used for table lookup. Next 16 MSBs are used for interpolation. Inputs: N - number to which log10 has to be found. qN - q format of N log10N - address where log10(N) will be written. qLog10N - address where log10N qformat will be written. Note/Problem: For accurate results input should be in normalized or near normalized form. */ void qm_log10(s32 N, s16 qN, s16 *log10N, s16 *qLog10N) { s16 s16norm, s16tableIndex, s16errorApproximation; u16 u16offset; s32 s32log; /* normalize the N. */ s16norm = qm_norm32(N); N = N << s16norm; /* The qformat of N after normalization. * -30 is added to treat the no as between 1.0 to 2.0 * i.e. after adding the -30 to the qformat the decimal point will be * just rigtht of the MSB. (i.e. after sign bit and 1st MSB). i.e. * at the right side of 30th bit. */ qN = qN + s16norm - 30; /* take the table index as the LOG2_OF_LOG_TABLE_SIZE bits right of the MSB */ s16tableIndex = (s16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE))); /* remove the MSB. the MSB is always 1 after normalization. */ s16tableIndex = s16tableIndex & (s16) ((1 << LOG2_LOG_TABLE_SIZE) - 1); /* remove the (1+LOG2_OF_LOG_TABLE_SIZE) MSBs in the N. */ N = N & ((1 << (32 - (2 + LOG2_LOG_TABLE_SIZE))) - 1); /* take the offset as the 16 MSBS after table index. */ u16offset = (u16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE + 16))); /* look the log value in the table. */ s32log = log_table[s16tableIndex]; /* q.15 format */ /* interpolate using the offset. 
*/ s16errorApproximation = (s16) qm_mulu16(u16offset, (u16) (log_table[s16tableIndex + 1] - log_table[s16tableIndex])); /* q.15 */ s32log = qm_add16((s16) s32log, s16errorApproximation); /* q.15 format */ /* adjust for the qformat of the N as * log2(mag * 2^x) = log2(mag) + x */ s32log = qm_add32(s32log, ((s32) -qN) << 15); /* q.15 format */ /* normalize the result. */ s16norm = qm_norm32(s32log); /* bring all the important bits into lower 16 bits */ s32log = qm_shl32(s32log, s16norm - 16); /* q.15+s16norm-16 format */ /* compute the log10(N) by multiplying log2(N) with log10(2). * as log10(mag * 2^x) = log2(mag * 2^x) * log10(2) * log10N in q.15+s16norm-16+1 (LOG10_2 is in q.16) */ *log10N = qm_muls16((s16) s32log, (s16) LOG10_2); /* write the q format of the result. */ *qLog10N = 15 + s16norm - 16 + 1; return; }
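#if 0	/* illustration only: interpreting the qm_log10() result */
/*
 * A minimal sketch, not called by the driver.  qm_log10() returns a
 * fixed-point mantissa together with its q-format, so the real value is
 * log10N / 2^qLog10N.  For N = 1000 given in q.0 format the routine should
 * yield roughly log10(1000) = 3; the rounding below recovers that integer.
 * The function name is made up for the example.
 */
static int qm_log10_example(void)
{
	s16 log10N, qLog10N;

	qm_log10(1000, 0, &log10N, &qLog10N);

	/* round log10N / 2^qLog10N to the nearest integer (expected: 3) */
	return (log10N + (1 << (qLog10N - 1))) >> qLog10N;
}
#endif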
gpl-2.0
davidmueller13/kcal
arch/arm/mach-imx/mach-mx31moboard.c
4732
16174
/* * Copyright (C) 2008 Valentin Longchamp, EPFL Mobots group * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/leds.h> #include <linux/memory.h> #include <linux/mtd/physmap.h> #include <linux/mtd/partitions.h> #include <linux/platform_device.h> #include <linux/regulator/machine.h> #include <linux/mfd/mc13783.h> #include <linux/spi/spi.h> #include <linux/types.h> #include <linux/memblock.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/err.h> #include <linux/input.h> #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/mach/map.h> #include <asm/memblock.h> #include <mach/board-mx31moboard.h> #include <mach/common.h> #include <mach/hardware.h> #include <mach/iomux-mx3.h> #include <mach/ulpi.h> #include "devices-imx31.h" static unsigned int moboard_pins[] = { /* UART0 */ MX31_PIN_TXD1__TXD1, MX31_PIN_RXD1__RXD1, MX31_PIN_CTS1__GPIO2_7, /* UART4 */ MX31_PIN_PC_RST__CTS5, MX31_PIN_PC_VS2__RTS5, MX31_PIN_PC_BVD2__TXD5, MX31_PIN_PC_BVD1__RXD5, /* I2C0 */ MX31_PIN_I2C_DAT__I2C1_SDA, MX31_PIN_I2C_CLK__I2C1_SCL, /* I2C1 */ MX31_PIN_DCD_DTE1__I2C2_SDA, MX31_PIN_RI_DTE1__I2C2_SCL, /* SDHC1 */ MX31_PIN_SD1_DATA3__SD1_DATA3, MX31_PIN_SD1_DATA2__SD1_DATA2, MX31_PIN_SD1_DATA1__SD1_DATA1, MX31_PIN_SD1_DATA0__SD1_DATA0, MX31_PIN_SD1_CLK__SD1_CLK, MX31_PIN_SD1_CMD__SD1_CMD, MX31_PIN_ATA_CS0__GPIO3_26, MX31_PIN_ATA_CS1__GPIO3_27, /* USB reset */ MX31_PIN_GPIO1_0__GPIO1_0, /* USB OTG */ MX31_PIN_USBOTG_DATA0__USBOTG_DATA0, MX31_PIN_USBOTG_DATA1__USBOTG_DATA1, MX31_PIN_USBOTG_DATA2__USBOTG_DATA2, MX31_PIN_USBOTG_DATA3__USBOTG_DATA3, MX31_PIN_USBOTG_DATA4__USBOTG_DATA4, MX31_PIN_USBOTG_DATA5__USBOTG_DATA5, MX31_PIN_USBOTG_DATA6__USBOTG_DATA6, MX31_PIN_USBOTG_DATA7__USBOTG_DATA7, MX31_PIN_USBOTG_CLK__USBOTG_CLK, MX31_PIN_USBOTG_DIR__USBOTG_DIR, MX31_PIN_USBOTG_NXT__USBOTG_NXT, MX31_PIN_USBOTG_STP__USBOTG_STP, MX31_PIN_USB_OC__GPIO1_30, /* USB H2 */ MX31_PIN_USBH2_DATA0__USBH2_DATA0, MX31_PIN_USBH2_DATA1__USBH2_DATA1, MX31_PIN_STXD3__USBH2_DATA2, MX31_PIN_SRXD3__USBH2_DATA3, MX31_PIN_SCK3__USBH2_DATA4, MX31_PIN_SFS3__USBH2_DATA5, MX31_PIN_STXD6__USBH2_DATA6, MX31_PIN_SRXD6__USBH2_DATA7, MX31_PIN_USBH2_CLK__USBH2_CLK, MX31_PIN_USBH2_DIR__USBH2_DIR, MX31_PIN_USBH2_NXT__USBH2_NXT, MX31_PIN_USBH2_STP__USBH2_STP, MX31_PIN_SCK6__GPIO1_25, /* LEDs */ MX31_PIN_SVEN0__GPIO2_0, MX31_PIN_STX0__GPIO2_1, MX31_PIN_SRX0__GPIO2_2, MX31_PIN_SIMPD0__GPIO2_3, /* SPI1 */ MX31_PIN_CSPI2_MOSI__MOSI, MX31_PIN_CSPI2_MISO__MISO, MX31_PIN_CSPI2_SCLK__SCLK, MX31_PIN_CSPI2_SPI_RDY__SPI_RDY, MX31_PIN_CSPI2_SS0__SS0, MX31_PIN_CSPI2_SS2__SS2, /* Atlas IRQ */ MX31_PIN_GPIO1_3__GPIO1_3, /* SPI2 */ MX31_PIN_CSPI3_MOSI__MOSI, MX31_PIN_CSPI3_MISO__MISO, MX31_PIN_CSPI3_SCLK__SCLK, MX31_PIN_CSPI3_SPI_RDY__SPI_RDY, MX31_PIN_CSPI2_SS1__CSPI3_SS1, }; static struct physmap_flash_data mx31moboard_flash_data 
= { .width = 2, }; static struct resource mx31moboard_flash_resource = { .start = 0xa0000000, .end = 0xa1ffffff, .flags = IORESOURCE_MEM, }; static struct platform_device mx31moboard_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &mx31moboard_flash_data, }, .resource = &mx31moboard_flash_resource, .num_resources = 1, }; static int moboard_uart0_init(struct platform_device *pdev) { int ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_CTS1), "uart0-cts-hack"); if (ret) return ret; ret = gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_CTS1), 0); if (ret) gpio_free(IOMUX_TO_GPIO(MX31_PIN_CTS1)); return ret; } static void moboard_uart0_exit(struct platform_device *pdev) { gpio_free(IOMUX_TO_GPIO(MX31_PIN_CTS1)); } static const struct imxuart_platform_data uart0_pdata __initconst = { .init = moboard_uart0_init, .exit = moboard_uart0_exit, }; static const struct imxuart_platform_data uart4_pdata __initconst = { .flags = IMXUART_HAVE_RTSCTS, }; static const struct imxi2c_platform_data moboard_i2c0_data __initconst = { .bitrate = 400000, }; static const struct imxi2c_platform_data moboard_i2c1_data __initconst = { .bitrate = 100000, }; static int moboard_spi1_cs[] = { MXC_SPI_CS(0), MXC_SPI_CS(2), }; static const struct spi_imx_master moboard_spi1_pdata __initconst = { .chipselect = moboard_spi1_cs, .num_chipselect = ARRAY_SIZE(moboard_spi1_cs), }; static struct regulator_consumer_supply sdhc_consumers[] = { { .dev_name = "mxc-mmc.0", .supply = "sdhc0_vcc", }, { .dev_name = "mxc-mmc.1", .supply = "sdhc1_vcc", }, }; static struct regulator_init_data sdhc_vreg_data = { .constraints = { .min_uV = 2700000, .max_uV = 3000000, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST, .always_on = 0, .boot_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(sdhc_consumers), .consumer_supplies = sdhc_consumers, }; static struct regulator_consumer_supply cam_consumers[] = { { .dev_name = "mx3_camera.0", .supply = "cam_vcc", }, }; static struct regulator_init_data cam_vreg_data = { .constraints = { .min_uV = 2700000, .max_uV = 3000000, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST, .always_on = 0, .boot_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(cam_consumers), .consumer_supplies = cam_consumers, }; static struct mc13xxx_regulator_init_data moboard_regulators[] = { { .id = MC13783_REG_VMMC1, .init_data = &sdhc_vreg_data, }, { .id = MC13783_REG_VCAM, .init_data = &cam_vreg_data, }, }; static struct mc13xxx_led_platform_data moboard_led[] = { { .id = MC13783_LED_R1, .name = "coreboard-led-4:red", .max_current = 2, }, { .id = MC13783_LED_G1, .name = "coreboard-led-4:green", .max_current = 2, }, { .id = MC13783_LED_B1, .name = "coreboard-led-4:blue", .max_current = 2, }, { .id = MC13783_LED_R2, .name = "coreboard-led-5:red", .max_current = 3, }, { .id = MC13783_LED_G2, .name = "coreboard-led-5:green", .max_current = 3, }, { .id = MC13783_LED_B2, .name = "coreboard-led-5:blue", .max_current = 3, }, }; static struct mc13xxx_leds_platform_data moboard_leds = { .num_leds = ARRAY_SIZE(moboard_led), .led = moboard_led, .flags = MC13783_LED_SLEWLIMTC, .abmode = MC13783_LED_AB_DISABLED, .tc1_period = MC13783_LED_PERIOD_10MS, .tc2_period = MC13783_LED_PERIOD_10MS, }; static struct mc13xxx_buttons_platform_data moboard_buttons = { .b1on_flags = MC13783_BUTTON_DBNC_750MS | MC13783_BUTTON_ENABLE | 
MC13783_BUTTON_POL_INVERT, .b1on_key = KEY_POWER, }; static struct mc13xxx_platform_data moboard_pmic = { .regulators = { .regulators = moboard_regulators, .num_regulators = ARRAY_SIZE(moboard_regulators), }, .leds = &moboard_leds, .buttons = &moboard_buttons, .flags = MC13XXX_USE_RTC | MC13XXX_USE_ADC, }; static struct spi_board_info moboard_spi_board_info[] __initdata = { { .modalias = "mc13783", .irq = IOMUX_TO_IRQ(MX31_PIN_GPIO1_3), .max_speed_hz = 300000, .bus_num = 1, .chip_select = 0, .platform_data = &moboard_pmic, .mode = SPI_CS_HIGH, }, }; static int moboard_spi2_cs[] = { MXC_SPI_CS(1), }; static const struct spi_imx_master moboard_spi2_pdata __initconst = { .chipselect = moboard_spi2_cs, .num_chipselect = ARRAY_SIZE(moboard_spi2_cs), }; #define SDHC1_CD IOMUX_TO_GPIO(MX31_PIN_ATA_CS0) #define SDHC1_WP IOMUX_TO_GPIO(MX31_PIN_ATA_CS1) static int moboard_sdhc1_get_ro(struct device *dev) { return !gpio_get_value(SDHC1_WP); } static int moboard_sdhc1_init(struct device *dev, irq_handler_t detect_irq, void *data) { int ret; ret = gpio_request(SDHC1_CD, "sdhc-detect"); if (ret) return ret; gpio_direction_input(SDHC1_CD); ret = gpio_request(SDHC1_WP, "sdhc-wp"); if (ret) goto err_gpio_free; gpio_direction_input(SDHC1_WP); ret = request_irq(gpio_to_irq(SDHC1_CD), detect_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "sdhc1-card-detect", data); if (ret) goto err_gpio_free_2; return 0; err_gpio_free_2: gpio_free(SDHC1_WP); err_gpio_free: gpio_free(SDHC1_CD); return ret; } static void moboard_sdhc1_exit(struct device *dev, void *data) { free_irq(gpio_to_irq(SDHC1_CD), data); gpio_free(SDHC1_WP); gpio_free(SDHC1_CD); } static const struct imxmmc_platform_data sdhc1_pdata __initconst = { .get_ro = moboard_sdhc1_get_ro, .init = moboard_sdhc1_init, .exit = moboard_sdhc1_exit, }; /* * this pin is dedicated for all mx31moboard systems, so we do it here */ #define USB_RESET_B IOMUX_TO_GPIO(MX31_PIN_GPIO1_0) #define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \ PAD_CTL_ODE_CMOS) #define OTG_EN_B IOMUX_TO_GPIO(MX31_PIN_USB_OC) #define USBH2_EN_B IOMUX_TO_GPIO(MX31_PIN_SCK6) static void usb_xcvr_reset(void) { mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA0, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA1, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA2, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA3, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA4, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA5, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA6, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA7, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_USBOTG_CLK, USB_PAD_CFG | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_USBOTG_DIR, USB_PAD_CFG | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_USBOTG_NXT, USB_PAD_CFG | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_USBOTG_STP, USB_PAD_CFG | PAD_CTL_100K_PU); mxc_iomux_set_gpr(MUX_PGP_UH2, true); mxc_iomux_set_pad(MX31_PIN_USBH2_CLK, USB_PAD_CFG | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_USBH2_DIR, USB_PAD_CFG | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_USBH2_NXT, USB_PAD_CFG | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_USBH2_STP, USB_PAD_CFG | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_USBH2_DATA0, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_USBH2_DATA1, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_SRXD6, USB_PAD_CFG | 
PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_STXD6, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_SFS3, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_SCK3, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_SRXD3, USB_PAD_CFG | PAD_CTL_100K_PD); mxc_iomux_set_pad(MX31_PIN_STXD3, USB_PAD_CFG | PAD_CTL_100K_PD); gpio_request(OTG_EN_B, "usb-udc-en"); gpio_direction_output(OTG_EN_B, 0); gpio_request(USBH2_EN_B, "usbh2-en"); gpio_direction_output(USBH2_EN_B, 0); gpio_request(USB_RESET_B, "usb-reset"); gpio_direction_output(USB_RESET_B, 0); mdelay(1); gpio_set_value(USB_RESET_B, 1); mdelay(1); } static int moboard_usbh2_init_hw(struct platform_device *pdev) { return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED); } static struct mxc_usbh_platform_data usbh2_pdata __initdata = { .init = moboard_usbh2_init_hw, .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT, }; static int __init moboard_usbh2_init(void) { struct platform_device *pdev; usbh2_pdata.otg = imx_otg_ulpi_create(ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT); if (!usbh2_pdata.otg) return -ENODEV; pdev = imx31_add_mxc_ehci_hs(2, &usbh2_pdata); if (IS_ERR(pdev)) return PTR_ERR(pdev); return 0; } static const struct gpio_led mx31moboard_leds[] __initconst = { { .name = "coreboard-led-0:red:running", .default_trigger = "heartbeat", .gpio = IOMUX_TO_GPIO(MX31_PIN_SVEN0), }, { .name = "coreboard-led-1:red", .gpio = IOMUX_TO_GPIO(MX31_PIN_STX0), }, { .name = "coreboard-led-2:red", .gpio = IOMUX_TO_GPIO(MX31_PIN_SRX0), }, { .name = "coreboard-led-3:red", .gpio = IOMUX_TO_GPIO(MX31_PIN_SIMPD0), }, }; static const struct gpio_led_platform_data mx31moboard_led_pdata __initconst = { .num_leds = ARRAY_SIZE(mx31moboard_leds), .leds = mx31moboard_leds, }; static const struct ipu_platform_data mx3_ipu_data __initconst = { .irq_base = MXC_IPU_IRQ_START, }; static struct platform_device *devices[] __initdata = { &mx31moboard_flash, }; static struct mx3_camera_pdata camera_pdata __initdata = { .flags = MX3_CAMERA_DATAWIDTH_8 | MX3_CAMERA_DATAWIDTH_10, .mclk_10khz = 4800, }; static phys_addr_t mx3_camera_base __initdata; #define MX3_CAMERA_BUF_SIZE SZ_4M static int __init mx31moboard_init_cam(void) { int dma, ret = -ENOMEM; struct platform_device *pdev; imx31_add_ipu_core(&mx3_ipu_data); pdev = imx31_alloc_mx3_camera(&camera_pdata); if (IS_ERR(pdev)) return PTR_ERR(pdev); dma = dma_declare_coherent_memory(&pdev->dev, mx3_camera_base, mx3_camera_base, MX3_CAMERA_BUF_SIZE, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); if (!(dma & DMA_MEMORY_MAP)) goto err; ret = platform_device_add(pdev); if (ret) err: platform_device_put(pdev); return ret; } static void mx31moboard_poweroff(void) { struct clk *clk = clk_get_sys("imx2-wdt.0", NULL); if (!IS_ERR(clk)) clk_prepare_enable(clk); mxc_iomux_mode(MX31_PIN_WATCHDOG_RST__WATCHDOG_RST); __raw_writew(1 << 6 | 1 << 2, MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR)); } static int mx31moboard_baseboard; core_param(mx31moboard_baseboard, mx31moboard_baseboard, int, 0444); /* * Board specific initialization. 
*/ static void __init mx31moboard_init(void) { imx31_soc_init(); mxc_iomux_setup_multiple_pins(moboard_pins, ARRAY_SIZE(moboard_pins), "moboard"); platform_add_devices(devices, ARRAY_SIZE(devices)); gpio_led_register_device(-1, &mx31moboard_led_pdata); imx31_add_imx2_wdt(NULL); imx31_add_imx_uart0(&uart0_pdata); imx31_add_imx_uart4(&uart4_pdata); imx31_add_imx_i2c0(&moboard_i2c0_data); imx31_add_imx_i2c1(&moboard_i2c1_data); imx31_add_spi_imx1(&moboard_spi1_pdata); imx31_add_spi_imx2(&moboard_spi2_pdata); gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3), "pmic-irq"); gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3)); spi_register_board_info(moboard_spi_board_info, ARRAY_SIZE(moboard_spi_board_info)); imx31_add_mxc_mmc(0, &sdhc1_pdata); mx31moboard_init_cam(); usb_xcvr_reset(); moboard_usbh2_init(); pm_power_off = mx31moboard_poweroff; switch (mx31moboard_baseboard) { case MX31NOBOARD: break; case MX31DEVBOARD: mx31moboard_devboard_init(); break; case MX31MARXBOT: mx31moboard_marxbot_init(); break; case MX31SMARTBOT: case MX31EYEBOT: mx31moboard_smartbot_init(mx31moboard_baseboard); break; default: printk(KERN_ERR "Illegal mx31moboard_baseboard type %d\n", mx31moboard_baseboard); } } static void __init mx31moboard_timer_init(void) { mx31_clocks_init(26000000); } struct sys_timer mx31moboard_timer = { .init = mx31moboard_timer_init, }; static void __init mx31moboard_reserve(void) { /* reserve 4 MiB for mx3-camera */ mx3_camera_base = arm_memblock_steal(MX3_CAMERA_BUF_SIZE, MX3_CAMERA_BUF_SIZE); } MACHINE_START(MX31MOBOARD, "EPFL Mobots mx31moboard") /* Maintainer: Philippe Retornaz, EPFL Mobots group */ .atag_offset = 0x100, .reserve = mx31moboard_reserve, .map_io = mx31_map_io, .init_early = imx31_init_early, .init_irq = mx31_init_irq, .handle_irq = imx31_handle_irq, .timer = &mx31moboard_timer, .init_machine = mx31moboard_init, .restart = mxc_restart, MACHINE_END
gpl-2.0
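The SDHC init/exit hooks in the mx31moboard board-setup code above acquire their resources in stages and unwind them in reverse order on failure; a minimal self-contained sketch of that idiom follows, with hypothetical acquire/release helpers standing in for gpio_request() and request_irq().

/*
 * Illustrative sketch only: the staged acquire / unwind-on-error pattern
 * used by moboard_sdhc1_init() (card-detect GPIO, write-protect GPIO,
 * then the card-detect IRQ), written as a standalone user-space program.
 * The acquire/release helpers are hypothetical stand-ins, not kernel APIs.
 */
#include <stdio.h>

static int acquire_cd(void)  { puts("cd acquired");  return 0; }
static int acquire_wp(void)  { puts("wp acquired");  return 0; }
static int acquire_irq(void) { puts("irq acquired"); return 0; }
static void release_wp(void) { puts("wp released"); }
static void release_cd(void) { puts("cd released"); }

static int sdhc_init_sketch(void)
{
	int ret;

	ret = acquire_cd();		/* first resource: nothing to undo on failure */
	if (ret)
		return ret;

	ret = acquire_wp();		/* second resource: undo the first on failure */
	if (ret)
		goto err_release_cd;

	ret = acquire_irq();		/* third resource: undo the first two on failure */
	if (ret)
		goto err_release_wp;

	return 0;

err_release_wp:
	release_wp();
err_release_cd:
	release_cd();
	return ret;
}

int main(void)
{
	return sdhc_init_sketch();
}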
ccotter/linux-deterministic
drivers/isdn/hisax/teleint.c
4988
7874
/* $Id: teleint.c,v 1.16.2.5 2004/01/19 15:31:50 keil Exp $ * * low level stuff for TeleInt isdn cards * * Author Karsten Keil * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include "hisax.h" #include "isac.h" #include "hfc_2bs0.h" #include "isdnl1.h" static const char *TeleInt_revision = "$Revision: 1.16.2.5 $"; #define byteout(addr, val) outb(val, addr) #define bytein(addr) inb(addr) static inline u_char readreg(unsigned int ale, unsigned int adr, u_char off) { register u_char ret; int max_delay = 2000; byteout(ale, off); ret = HFC_BUSY & bytein(ale); while (ret && --max_delay) ret = HFC_BUSY & bytein(ale); if (!max_delay) { printk(KERN_WARNING "TeleInt Busy not inactive\n"); return (0); } ret = bytein(adr); return (ret); } static inline void readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size) { register u_char ret; register int max_delay = 20000; register int i; byteout(ale, off); for (i = 0; i < size; i++) { ret = HFC_BUSY & bytein(ale); while (ret && --max_delay) ret = HFC_BUSY & bytein(ale); if (!max_delay) { printk(KERN_WARNING "TeleInt Busy not inactive\n"); return; } data[i] = bytein(adr); } } static inline void writereg(unsigned int ale, unsigned int adr, u_char off, u_char data) { register u_char ret; int max_delay = 2000; byteout(ale, off); ret = HFC_BUSY & bytein(ale); while (ret && --max_delay) ret = HFC_BUSY & bytein(ale); if (!max_delay) { printk(KERN_WARNING "TeleInt Busy not inactive\n"); return; } byteout(adr, data); } static inline void writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size) { register u_char ret; register int max_delay = 20000; register int i; byteout(ale, off); for (i = 0; i < size; i++) { ret = HFC_BUSY & bytein(ale); while (ret && --max_delay) ret = HFC_BUSY & bytein(ale); if (!max_delay) { printk(KERN_WARNING "TeleInt Busy not inactive\n"); return; } byteout(adr, data[i]); } } /* Interface functions */ static u_char ReadISAC(struct IsdnCardState *cs, u_char offset) { cs->hw.hfc.cip = offset; return (readreg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, offset)); } static void WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value) { cs->hw.hfc.cip = offset; writereg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, offset, value); } static void ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size) { cs->hw.hfc.cip = 0; readfifo(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, 0, data, size); } static void WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size) { cs->hw.hfc.cip = 0; writefifo(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, 0, data, size); } static u_char ReadHFC(struct IsdnCardState *cs, int data, u_char reg) { register u_char ret; if (data) { cs->hw.hfc.cip = reg; byteout(cs->hw.hfc.addr | 1, reg); ret = bytein(cs->hw.hfc.addr); if (cs->debug & L1_DEB_HSCX_FIFO && (data != 2)) debugl1(cs, "hfc RD %02x %02x", reg, ret); } else ret = bytein(cs->hw.hfc.addr | 1); return (ret); } static void WriteHFC(struct IsdnCardState *cs, int data, u_char reg, u_char value) { byteout(cs->hw.hfc.addr | 1, reg); cs->hw.hfc.cip = reg; if (data) byteout(cs->hw.hfc.addr, value); if (cs->debug & L1_DEB_HSCX_FIFO && (data != 2)) debugl1(cs, "hfc W%c %02x %02x", data ? 
'D' : 'C', reg, value); } static irqreturn_t TeleInt_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char val; u_long flags; spin_lock_irqsave(&cs->lock, flags); val = readreg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_ISTA); Start_ISAC: if (val) isac_interrupt(cs, val); val = readreg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_ISTA); if (val) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ISAC IntStat after IntRoutine"); goto Start_ISAC; } writereg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_MASK, 0xFF); writereg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_MASK, 0x0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void TeleInt_Timer(struct IsdnCardState *cs) { int stat = 0; u_long flags; spin_lock_irqsave(&cs->lock, flags); if (cs->bcs[0].mode) { stat |= 1; main_irq_hfc(&cs->bcs[0]); } if (cs->bcs[1].mode) { stat |= 2; main_irq_hfc(&cs->bcs[1]); } spin_unlock_irqrestore(&cs->lock, flags); stat = HZ / 100; if (!stat) stat = 1; cs->hw.hfc.timer.expires = jiffies + stat; add_timer(&cs->hw.hfc.timer); } static void release_io_TeleInt(struct IsdnCardState *cs) { del_timer(&cs->hw.hfc.timer); releasehfc(cs); if (cs->hw.hfc.addr) release_region(cs->hw.hfc.addr, 2); } static void reset_TeleInt(struct IsdnCardState *cs) { printk(KERN_INFO "TeleInt: resetting card\n"); cs->hw.hfc.cirm |= HFC_RESET; byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.cirm); /* Reset On */ mdelay(10); cs->hw.hfc.cirm &= ~HFC_RESET; byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.cirm); /* Reset Off */ mdelay(10); } static int TeleInt_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; int delay; switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_TeleInt(cs); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_RELEASE: release_io_TeleInt(cs); return (0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); reset_TeleInt(cs); inithfc(cs); clear_pending_isac_ints(cs); initisac(cs); /* Reenable all IRQ */ cs->writeisac(cs, ISAC_MASK, 0); cs->writeisac(cs, ISAC_CMDR, 0x41); spin_unlock_irqrestore(&cs->lock, flags); delay = HZ / 100; if (!delay) delay = 1; cs->hw.hfc.timer.expires = jiffies + delay; add_timer(&cs->hw.hfc.timer); return (0); case CARD_TEST: return (0); } return (0); } int __devinit setup_TeleInt(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; char tmp[64]; strcpy(tmp, TeleInt_revision); printk(KERN_INFO "HiSax: TeleInt driver Rev. 
%s\n", HiSax_getrev(tmp)); if (cs->typ != ISDN_CTYPE_TELEINT) return (0); cs->hw.hfc.addr = card->para[1] & 0x3fe; cs->irq = card->para[0]; cs->hw.hfc.cirm = HFC_CIRM; cs->hw.hfc.isac_spcr = 0x00; cs->hw.hfc.cip = 0; cs->hw.hfc.ctmt = HFC_CTMT | HFC_CLTIMER; cs->bcs[0].hw.hfc.send = NULL; cs->bcs[1].hw.hfc.send = NULL; cs->hw.hfc.fifosize = 7 * 1024 + 512; cs->hw.hfc.timer.function = (void *) TeleInt_Timer; cs->hw.hfc.timer.data = (long) cs; init_timer(&cs->hw.hfc.timer); if (!request_region(cs->hw.hfc.addr, 2, "TeleInt isdn")) { printk(KERN_WARNING "HiSax: TeleInt config port %x-%x already in use\n", cs->hw.hfc.addr, cs->hw.hfc.addr + 2); return (0); } /* HW IO = IO */ byteout(cs->hw.hfc.addr, cs->hw.hfc.addr & 0xff); byteout(cs->hw.hfc.addr | 1, ((cs->hw.hfc.addr & 0x300) >> 8) | 0x54); switch (cs->irq) { case 3: cs->hw.hfc.cirm |= HFC_INTA; break; case 4: cs->hw.hfc.cirm |= HFC_INTB; break; case 5: cs->hw.hfc.cirm |= HFC_INTC; break; case 7: cs->hw.hfc.cirm |= HFC_INTD; break; case 10: cs->hw.hfc.cirm |= HFC_INTE; break; case 11: cs->hw.hfc.cirm |= HFC_INTF; break; default: printk(KERN_WARNING "TeleInt: wrong IRQ\n"); release_io_TeleInt(cs); return (0); } byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.cirm); byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.ctmt); printk(KERN_INFO "TeleInt: defined at 0x%x IRQ %d\n", cs->hw.hfc.addr, cs->irq); setup_isac(cs); cs->readisac = &ReadISAC; cs->writeisac = &WriteISAC; cs->readisacfifo = &ReadISACfifo; cs->writeisacfifo = &WriteISACfifo; cs->BC_Read_Reg = &ReadHFC; cs->BC_Write_Reg = &WriteHFC; cs->cardmsg = &TeleInt_card_msg; cs->irq_func = &TeleInt_interrupt; ISACVersion(cs, "TeleInt:"); return (1); }
gpl-2.0
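The TeleInt register accessors above poll the HFC busy flag with a bounded countdown and print a warning instead of hanging when the bit never clears; a minimal standalone sketch of that bounded busy-wait follows, with a fake status reader standing in for the real bytein() port access.

/*
 * Sketch of the bounded busy-wait used by readreg()/writereg() above:
 * poll a status predicate up to a fixed iteration budget and report
 * failure rather than spinning forever. read_status_busy() is a fake
 * stand-in for "HFC_BUSY & bytein(ale)".
 */
#include <stdio.h>

static int busy_countdown = 3;		/* fake hardware: busy for three polls */

static int read_status_busy(void)
{
	return busy_countdown-- > 0;
}

static int wait_not_busy(int max_delay)
{
	while (read_status_busy() && --max_delay)
		;
	if (!max_delay) {
		fprintf(stderr, "busy bit never cleared\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	return wait_not_busy(2000) ? 1 : 0;
}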
UberSlim/KernelSanders_L90
drivers/isdn/hisax/teles0.c
4988
9226
/* $Id: teles0.c,v 2.15.2.4 2004/01/13 23:48:39 keil Exp $ * * low level stuff for Teles Memory IO isdn cards * * Author Karsten Keil * based on the teles driver from Jan den Ouden * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * Thanks to Jan den Ouden * Fritz Elfert * Beat Doebeli * */ #include <linux/init.h> #include "hisax.h" #include "isdnl1.h" #include "isac.h" #include "hscx.h" static const char *teles0_revision = "$Revision: 2.15.2.4 $"; #define TELES_IOMEM_SIZE 0x400 #define byteout(addr, val) outb(val, addr) #define bytein(addr) inb(addr) static inline u_char readisac(void __iomem *adr, u_char off) { return readb(adr + ((off & 1) ? 0x2ff : 0x100) + off); } static inline void writeisac(void __iomem *adr, u_char off, u_char data) { writeb(data, adr + ((off & 1) ? 0x2ff : 0x100) + off); mb(); } static inline u_char readhscx(void __iomem *adr, int hscx, u_char off) { return readb(adr + (hscx ? 0x1c0 : 0x180) + ((off & 1) ? 0x1ff : 0) + off); } static inline void writehscx(void __iomem *adr, int hscx, u_char off, u_char data) { writeb(data, adr + (hscx ? 0x1c0 : 0x180) + ((off & 1) ? 0x1ff : 0) + off); mb(); } static inline void read_fifo_isac(void __iomem *adr, u_char *data, int size) { register int i; register u_char __iomem *ad = adr + 0x100; for (i = 0; i < size; i++) data[i] = readb(ad); } static inline void write_fifo_isac(void __iomem *adr, u_char *data, int size) { register int i; register u_char __iomem *ad = adr + 0x100; for (i = 0; i < size; i++) { writeb(data[i], ad); mb(); } } static inline void read_fifo_hscx(void __iomem *adr, int hscx, u_char *data, int size) { register int i; register u_char __iomem *ad = adr + (hscx ? 0x1c0 : 0x180); for (i = 0; i < size; i++) data[i] = readb(ad); } static inline void write_fifo_hscx(void __iomem *adr, int hscx, u_char *data, int size) { int i; register u_char __iomem *ad = adr + (hscx ? 
0x1c0 : 0x180); for (i = 0; i < size; i++) { writeb(data[i], ad); mb(); } } /* Interface functions */ static u_char ReadISAC(struct IsdnCardState *cs, u_char offset) { return (readisac(cs->hw.teles0.membase, offset)); } static void WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value) { writeisac(cs->hw.teles0.membase, offset, value); } static void ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size) { read_fifo_isac(cs->hw.teles0.membase, data, size); } static void WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size) { write_fifo_isac(cs->hw.teles0.membase, data, size); } static u_char ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset) { return (readhscx(cs->hw.teles0.membase, hscx, offset)); } static void WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value) { writehscx(cs->hw.teles0.membase, hscx, offset, value); } /* * fast interrupt HSCX stuff goes here */ #define READHSCX(cs, nr, reg) readhscx(cs->hw.teles0.membase, nr, reg) #define WRITEHSCX(cs, nr, reg, data) writehscx(cs->hw.teles0.membase, nr, reg, data) #define READHSCXFIFO(cs, nr, ptr, cnt) read_fifo_hscx(cs->hw.teles0.membase, nr, ptr, cnt) #define WRITEHSCXFIFO(cs, nr, ptr, cnt) write_fifo_hscx(cs->hw.teles0.membase, nr, ptr, cnt) #include "hscx_irq.c" static irqreturn_t teles0_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char val; u_long flags; int count = 0; spin_lock_irqsave(&cs->lock, flags); val = readhscx(cs->hw.teles0.membase, 1, HSCX_ISTA); Start_HSCX: if (val) hscx_int_main(cs, val); val = readisac(cs->hw.teles0.membase, ISAC_ISTA); Start_ISAC: if (val) isac_interrupt(cs, val); count++; val = readhscx(cs->hw.teles0.membase, 1, HSCX_ISTA); if (val && count < 5) { if (cs->debug & L1_DEB_HSCX) debugl1(cs, "HSCX IntStat after IntRoutine"); goto Start_HSCX; } val = readisac(cs->hw.teles0.membase, ISAC_ISTA); if (val && count < 5) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ISAC IntStat after IntRoutine"); goto Start_ISAC; } writehscx(cs->hw.teles0.membase, 0, HSCX_MASK, 0xFF); writehscx(cs->hw.teles0.membase, 1, HSCX_MASK, 0xFF); writeisac(cs->hw.teles0.membase, ISAC_MASK, 0xFF); writeisac(cs->hw.teles0.membase, ISAC_MASK, 0x0); writehscx(cs->hw.teles0.membase, 0, HSCX_MASK, 0x0); writehscx(cs->hw.teles0.membase, 1, HSCX_MASK, 0x0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void release_io_teles0(struct IsdnCardState *cs) { if (cs->hw.teles0.cfg_reg) release_region(cs->hw.teles0.cfg_reg, 8); iounmap(cs->hw.teles0.membase); release_mem_region(cs->hw.teles0.phymem, TELES_IOMEM_SIZE); } static int reset_teles0(struct IsdnCardState *cs) { u_char cfval; if (cs->hw.teles0.cfg_reg) { switch (cs->irq) { case 2: case 9: cfval = 0x00; break; case 3: cfval = 0x02; break; case 4: cfval = 0x04; break; case 5: cfval = 0x06; break; case 10: cfval = 0x08; break; case 11: cfval = 0x0A; break; case 12: cfval = 0x0C; break; case 15: cfval = 0x0E; break; default: return (1); } cfval |= ((cs->hw.teles0.phymem >> 9) & 0xF0); byteout(cs->hw.teles0.cfg_reg + 4, cfval); HZDELAY(HZ / 10 + 1); byteout(cs->hw.teles0.cfg_reg + 4, cfval | 1); HZDELAY(HZ / 10 + 1); } writeb(0, cs->hw.teles0.membase + 0x80); mb(); HZDELAY(HZ / 5 + 1); writeb(1, cs->hw.teles0.membase + 0x80); mb(); HZDELAY(HZ / 5 + 1); return (0); } static int Teles_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_teles0(cs); spin_unlock_irqrestore(&cs->lock, flags); return 
(0); case CARD_RELEASE: release_io_teles0(cs); return (0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); inithscxisac(cs, 3); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_TEST: return (0); } return (0); } int __devinit setup_teles0(struct IsdnCard *card) { u_char val; struct IsdnCardState *cs = card->cs; char tmp[64]; strcpy(tmp, teles0_revision); printk(KERN_INFO "HiSax: Teles 8.0/16.0 driver Rev. %s\n", HiSax_getrev(tmp)); if ((cs->typ != ISDN_CTYPE_16_0) && (cs->typ != ISDN_CTYPE_8_0)) return (0); if (cs->typ == ISDN_CTYPE_16_0) cs->hw.teles0.cfg_reg = card->para[2]; else /* 8.0 */ cs->hw.teles0.cfg_reg = 0; if (card->para[1] < 0x10000) { card->para[1] <<= 4; printk(KERN_INFO "Teles0: membase configured DOSish, assuming 0x%lx\n", (unsigned long) card->para[1]); } cs->irq = card->para[0]; if (cs->hw.teles0.cfg_reg) { if (!request_region(cs->hw.teles0.cfg_reg, 8, "teles cfg")) { printk(KERN_WARNING "HiSax: %s config port %x-%x already in use\n", CardType[card->typ], cs->hw.teles0.cfg_reg, cs->hw.teles0.cfg_reg + 8); return (0); } } if (cs->hw.teles0.cfg_reg) { if ((val = bytein(cs->hw.teles0.cfg_reg + 0)) != 0x51) { printk(KERN_WARNING "Teles0: 16.0 Byte at %x is %x\n", cs->hw.teles0.cfg_reg + 0, val); release_region(cs->hw.teles0.cfg_reg, 8); return (0); } if ((val = bytein(cs->hw.teles0.cfg_reg + 1)) != 0x93) { printk(KERN_WARNING "Teles0: 16.0 Byte at %x is %x\n", cs->hw.teles0.cfg_reg + 1, val); release_region(cs->hw.teles0.cfg_reg, 8); return (0); } val = bytein(cs->hw.teles0.cfg_reg + 2); /* 0x1e=without AB * 0x1f=with AB * 0x1c 16.3 ??? */ if (val != 0x1e && val != 0x1f) { printk(KERN_WARNING "Teles0: 16.0 Byte at %x is %x\n", cs->hw.teles0.cfg_reg + 2, val); release_region(cs->hw.teles0.cfg_reg, 8); return (0); } } /* 16.0 and 8.0 designed for IOM1 */ test_and_set_bit(HW_IOM1, &cs->HW_Flags); cs->hw.teles0.phymem = card->para[1]; if (!request_mem_region(cs->hw.teles0.phymem, TELES_IOMEM_SIZE, "teles iomem")) { printk(KERN_WARNING "HiSax: %s memory region %lx-%lx already in use\n", CardType[card->typ], cs->hw.teles0.phymem, cs->hw.teles0.phymem + TELES_IOMEM_SIZE); if (cs->hw.teles0.cfg_reg) release_region(cs->hw.teles0.cfg_reg, 8); return (0); } cs->hw.teles0.membase = ioremap(cs->hw.teles0.phymem, TELES_IOMEM_SIZE); printk(KERN_INFO "HiSax: %s config irq:%d mem:%p cfg:0x%X\n", CardType[cs->typ], cs->irq, cs->hw.teles0.membase, cs->hw.teles0.cfg_reg); if (reset_teles0(cs)) { printk(KERN_WARNING "Teles0: wrong IRQ\n"); release_io_teles0(cs); return (0); } setup_isac(cs); cs->readisac = &ReadISAC; cs->writeisac = &WriteISAC; cs->readisacfifo = &ReadISACfifo; cs->writeisacfifo = &WriteISACfifo; cs->BC_Read_Reg = &ReadHSCX; cs->BC_Write_Reg = &WriteHSCX; cs->BC_Send_Data = &hscx_fill_fifo; cs->cardmsg = &Teles_card_msg; cs->irq_func = &teles0_interrupt; ISACVersion(cs, "Teles0:"); if (HscxVersion(cs, "Teles0:")) { printk(KERN_WARNING "Teles0: wrong HSCX versions check IO/MEM addresses\n"); release_io_teles0(cs); return (0); } return (1); }
gpl-2.0
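readisac()/writeisac() in teles0.c above derive each register's memory-mapped location from its number, switching between the 0x100 and 0x2ff windows on the low address bit; a small standalone sketch that reproduces only that offset arithmetic (the constants are copied from the driver, the surrounding hardware access is omitted).

/*
 * Sketch of the ISAC address computation used above:
 *   membase + ((off & 1) ? 0x2ff : 0x100) + off
 * Prints the resulting byte offsets for a few register numbers.
 */
#include <stdio.h>

static unsigned int isac_offset(unsigned int off)
{
	return ((off & 1) ? 0x2ff : 0x100) + off;
}

int main(void)
{
	unsigned int regs[] = { 0x00, 0x01, 0x20, 0x21 };
	unsigned int i;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		printf("ISAC reg 0x%02x -> membase + 0x%03x\n",
		       regs[i], isac_offset(regs[i]));
	return 0;
}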
civato/kernel_p900
drivers/net/ethernet/dec/tulip/media.c
4988
16713
/* drivers/net/ethernet/dec/tulip/media.c Copyright 2000,2001 The Linux Kernel Team Written/copyright 1994-2001 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Please submit bugs to http://bugzilla.kernel.org/ . */ #include <linux/kernel.h> #include <linux/mii.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pci.h> #include "tulip.h" /* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually met by back-to-back PCI I/O cycles, but we insert a delay to avoid "overclocking" issues or future 66Mhz PCI. */ #define mdio_delay() ioread32(mdio_addr) /* Read and write the MII registers using software-generated serial MDIO protocol. It is just different enough from the EEPROM protocol to not share code. The maxium data clock rate is 2.5 Mhz. */ #define MDIO_SHIFT_CLK 0x10000 #define MDIO_DATA_WRITE0 0x00000 #define MDIO_DATA_WRITE1 0x20000 #define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */ #define MDIO_ENB_IN 0x40000 #define MDIO_DATA_READ 0x80000 static const unsigned char comet_miireg2offset[32] = { 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0, 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, }; /* MII transceiver control section. Read and write the MII registers using software-generated serial MDIO protocol. See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management functions") or DP83840A data sheet for more details. */ int tulip_mdio_read(struct net_device *dev, int phy_id, int location) { struct tulip_private *tp = netdev_priv(dev); int i; int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location; int retval = 0; void __iomem *ioaddr = tp->base_addr; void __iomem *mdio_addr = ioaddr + CSR9; unsigned long flags; if (location & ~0x1f) return 0xffff; if (tp->chip_id == COMET && phy_id == 30) { if (comet_miireg2offset[location]) return ioread32(ioaddr + comet_miireg2offset[location]); return 0xffff; } spin_lock_irqsave(&tp->mii_lock, flags); if (tp->chip_id == LC82C168) { iowrite32(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0); ioread32(ioaddr + 0xA0); ioread32(ioaddr + 0xA0); for (i = 1000; i >= 0; --i) { barrier(); if ( ! ((retval = ioread32(ioaddr + 0xA0)) & 0x80000000)) break; } spin_unlock_irqrestore(&tp->mii_lock, flags); return retval & 0xffff; } /* Establish sync by sending at least 32 logic ones. */ for (i = 32; i >= 0; i--) { iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Shift the read command bits out. */ for (i = 15; i >= 0; i--) { int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; iowrite32(MDIO_ENB | dataval, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Read the two transition, 16 data, and wire-idle bits. */ for (i = 19; i > 0; i--) { iowrite32(MDIO_ENB_IN, mdio_addr); mdio_delay(); retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DATA_READ) ? 
1 : 0); iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } spin_unlock_irqrestore(&tp->mii_lock, flags); return (retval>>1) & 0xffff; } void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val) { struct tulip_private *tp = netdev_priv(dev); int i; int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff); void __iomem *ioaddr = tp->base_addr; void __iomem *mdio_addr = ioaddr + CSR9; unsigned long flags; if (location & ~0x1f) return; if (tp->chip_id == COMET && phy_id == 30) { if (comet_miireg2offset[location]) iowrite32(val, ioaddr + comet_miireg2offset[location]); return; } spin_lock_irqsave(&tp->mii_lock, flags); if (tp->chip_id == LC82C168) { iowrite32(cmd, ioaddr + 0xA0); for (i = 1000; i >= 0; --i) { barrier(); if ( ! (ioread32(ioaddr + 0xA0) & 0x80000000)) break; } spin_unlock_irqrestore(&tp->mii_lock, flags); return; } /* Establish sync by sending 32 logic ones. */ for (i = 32; i >= 0; i--) { iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Shift the command bits out. */ for (i = 31; i >= 0; i--) { int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; iowrite32(MDIO_ENB | dataval, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Clear out extra bits. */ for (i = 2; i > 0; i--) { iowrite32(MDIO_ENB_IN, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } spin_unlock_irqrestore(&tp->mii_lock, flags); } /* Set up the transceiver control registers for the selected media type. */ void tulip_select_media(struct net_device *dev, int startup) { struct tulip_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->base_addr; struct mediatable *mtable = tp->mtable; u32 new_csr6; int i; if (mtable) { struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index]; unsigned char *p = mleaf->leafdata; switch (mleaf->type) { case 0: /* 21140 non-MII xcvr. */ if (tulip_debug > 1) netdev_dbg(dev, "Using a 21140 non-MII transceiver with control setting %02x\n", p[1]); dev->if_port = p[0]; if (startup) iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); iowrite32(p[1], ioaddr + CSR12); new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18); break; case 2: case 4: { u16 setup[5]; u32 csr13val, csr14val, csr15dir, csr15val; for (i = 0; i < 5; i++) setup[i] = get_u16(&p[i*2 + 1]); dev->if_port = p[0] & MEDIA_MASK; if (tulip_media_cap[dev->if_port] & MediaAlwaysFD) tp->full_duplex = 1; if (startup && mtable->has_reset) { struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; unsigned char *rst = rleaf->leafdata; if (tulip_debug > 1) netdev_dbg(dev, "Resetting the transceiver\n"); for (i = 0; i < rst[0]; i++) iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); } if (tulip_debug > 1) netdev_dbg(dev, "21143 non-MII %s transceiver control %04x/%04x\n", medianame[dev->if_port], setup[0], setup[1]); if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. 
*/ csr13val = setup[0]; csr14val = setup[1]; csr15dir = (setup[3]<<16) | setup[2]; csr15val = (setup[4]<<16) | setup[2]; iowrite32(0, ioaddr + CSR13); iowrite32(csr14val, ioaddr + CSR14); iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ iowrite32(csr15val, ioaddr + CSR15); /* Data */ iowrite32(csr13val, ioaddr + CSR13); } else { csr13val = 1; csr14val = 0; csr15dir = (setup[0]<<16) | 0x0008; csr15val = (setup[1]<<16) | 0x0008; if (dev->if_port <= 4) csr14val = t21142_csr14[dev->if_port]; if (startup) { iowrite32(0, ioaddr + CSR13); iowrite32(csr14val, ioaddr + CSR14); } iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ iowrite32(csr15val, ioaddr + CSR15); /* Data */ if (startup) iowrite32(csr13val, ioaddr + CSR13); } if (tulip_debug > 1) netdev_dbg(dev, "Setting CSR15 to %08x/%08x\n", csr15dir, csr15val); if (mleaf->type == 4) new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18); else new_csr6 = 0x82420000; break; } case 1: case 3: { int phy_num = p[0]; int init_length = p[1]; u16 *misc_info, tmp_info; dev->if_port = 11; new_csr6 = 0x020E0000; if (mleaf->type == 3) { /* 21142 */ u16 *init_sequence = (u16*)(p+2); u16 *reset_sequence = &((u16*)(p+3))[init_length]; int reset_length = p[2 + init_length*2]; misc_info = reset_sequence + reset_length; if (startup) { int timeout = 10; /* max 1 ms */ for (i = 0; i < reset_length; i++) iowrite32(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15); /* flush posted writes */ ioread32(ioaddr + CSR15); /* Sect 3.10.3 in DP83840A.pdf (p39) */ udelay(500); /* Section 4.2 in DP83840A.pdf (p43) */ /* and IEEE 802.3 "22.2.4.1.1 Reset" */ while (timeout-- && (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) udelay(100); } for (i = 0; i < init_length; i++) iowrite32(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15); ioread32(ioaddr + CSR15); /* flush posted writes */ } else { u8 *init_sequence = p + 2; u8 *reset_sequence = p + 3 + init_length; int reset_length = p[2 + init_length]; misc_info = (u16*)(reset_sequence + reset_length); if (startup) { int timeout = 10; /* max 1 ms */ iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); for (i = 0; i < reset_length; i++) iowrite32(reset_sequence[i], ioaddr + CSR12); /* flush posted writes */ ioread32(ioaddr + CSR12); /* Sect 3.10.3 in DP83840A.pdf (p39) */ udelay(500); /* Section 4.2 in DP83840A.pdf (p43) */ /* and IEEE 802.3 "22.2.4.1.1 Reset" */ while (timeout-- && (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) udelay(100); } for (i = 0; i < init_length; i++) iowrite32(init_sequence[i], ioaddr + CSR12); ioread32(ioaddr + CSR12); /* flush posted writes */ } tmp_info = get_u16(&misc_info[1]); if (tmp_info) tp->advertising[phy_num] = tmp_info | 1; if (tmp_info && startup < 2) { if (tp->mii_advertise == 0) tp->mii_advertise = tp->advertising[phy_num]; if (tulip_debug > 1) netdev_dbg(dev, " Advertising %04x on MII %d\n", tp->mii_advertise, tp->phys[phy_num]); tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise); } break; } case 5: case 6: { u16 setup[5]; new_csr6 = 0; /* FIXME */ for (i = 0; i < 5; i++) setup[i] = get_u16(&p[i*2 + 1]); if (startup && mtable->has_reset) { struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; unsigned char *rst = rleaf->leafdata; if (tulip_debug > 1) netdev_dbg(dev, "Resetting the transceiver\n"); for (i = 0; i < rst[0]; i++) iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); } break; } default: netdev_dbg(dev, " Invalid media table selection %d\n", mleaf->type); new_csr6 = 0x020E0000; } if (tulip_debug > 1) netdev_dbg(dev, "Using media type 
%s, CSR12 is %02x\n", medianame[dev->if_port], ioread32(ioaddr + CSR12) & 0xff); } else if (tp->chip_id == LC82C168) { if (startup && ! tp->medialock) dev->if_port = tp->mii_cnt ? 11 : 0; if (tulip_debug > 1) netdev_dbg(dev, "PNIC PHY status is %3.3x, media %s\n", ioread32(ioaddr + 0xB8), medianame[dev->if_port]); if (tp->mii_cnt) { new_csr6 = 0x810C0000; iowrite32(0x0001, ioaddr + CSR15); iowrite32(0x0201B07A, ioaddr + 0xB8); } else if (startup) { /* Start with 10mbps to do autonegotiation. */ iowrite32(0x32, ioaddr + CSR12); new_csr6 = 0x00420000; iowrite32(0x0001B078, ioaddr + 0xB8); iowrite32(0x0201B078, ioaddr + 0xB8); } else if (dev->if_port == 3 || dev->if_port == 5) { iowrite32(0x33, ioaddr + CSR12); new_csr6 = 0x01860000; /* Trigger autonegotiation. */ iowrite32(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8); } else { iowrite32(0x32, ioaddr + CSR12); new_csr6 = 0x00420000; iowrite32(0x1F078, ioaddr + 0xB8); } } else { /* Unknown chip type with no media table. */ if (tp->default_port == 0) dev->if_port = tp->mii_cnt ? 11 : 3; if (tulip_media_cap[dev->if_port] & MediaIsMII) { new_csr6 = 0x020E0000; } else if (tulip_media_cap[dev->if_port] & MediaIsFx) { new_csr6 = 0x02860000; } else new_csr6 = 0x03860000; if (tulip_debug > 1) netdev_dbg(dev, "No media description table, assuming %s transceiver, CSR12 %02x\n", medianame[dev->if_port], ioread32(ioaddr + CSR12)); } tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0); mdelay(1); } /* Check the MII negotiated duplex and change the CSR6 setting if required. Return 0 if everything is OK. Return < 0 if the transceiver is missing or has no link beat. */ int tulip_check_duplex(struct net_device *dev) { struct tulip_private *tp = netdev_priv(dev); unsigned int bmsr, lpa, negotiated, new_csr6; bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA); if (tulip_debug > 1) dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n", bmsr, lpa); if (bmsr == 0xffff) return -2; if ((bmsr & BMSR_LSTATUS) == 0) { int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); if ((new_bmsr & BMSR_LSTATUS) == 0) { if (tulip_debug > 1) dev_info(&dev->dev, "No link beat on the MII interface, status %04x\n", new_bmsr); return -1; } } negotiated = lpa & tp->advertising[0]; tp->full_duplex = mii_duplex(tp->full_duplex_lock, negotiated); new_csr6 = tp->csr6; if (negotiated & LPA_100) new_csr6 &= ~TxThreshold; else new_csr6 |= TxThreshold; if (tp->full_duplex) new_csr6 |= FullDuplex; else new_csr6 &= ~FullDuplex; if (new_csr6 != tp->csr6) { tp->csr6 = new_csr6; tulip_restart_rxtx(tp); if (tulip_debug > 0) dev_info(&dev->dev, "Setting %s-duplex based on MII#%d link partner capability of %04x\n", tp->full_duplex ? "full" : "half", tp->phys[0], lpa); return 1; } return 0; } void __devinit tulip_find_mii (struct net_device *dev, int board_idx) { struct tulip_private *tp = netdev_priv(dev); int phyn, phy_idx = 0; int mii_reg0; int mii_advert; unsigned int to_advert, new_bmcr, ane_switch; /* Find the connected MII xcvrs. Doing this in open() would allow detecting external xcvrs later, but takes much time. 
*/ for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) { int phy = phyn & 0x1f; int mii_status = tulip_mdio_read (dev, phy, MII_BMSR); if ((mii_status & 0x8301) == 0x8001 || ((mii_status & BMSR_100BASE4) == 0 && (mii_status & 0x7800) != 0)) { /* preserve Becker logic, gain indentation level */ } else { continue; } mii_reg0 = tulip_mdio_read (dev, phy, MII_BMCR); mii_advert = tulip_mdio_read (dev, phy, MII_ADVERTISE); ane_switch = 0; /* if not advertising at all, gen an * advertising value from the capability * bits in BMSR */ if ((mii_advert & ADVERTISE_ALL) == 0) { unsigned int tmpadv = tulip_mdio_read (dev, phy, MII_BMSR); mii_advert = ((tmpadv >> 6) & 0x3e0) | 1; } if (tp->mii_advertise) { tp->advertising[phy_idx] = to_advert = tp->mii_advertise; } else if (tp->advertising[phy_idx]) { to_advert = tp->advertising[phy_idx]; } else { tp->advertising[phy_idx] = tp->mii_advertise = to_advert = mii_advert; } tp->phys[phy_idx++] = phy; pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n", board_idx, phy, mii_reg0, mii_status, mii_advert); /* Fixup for DLink with miswired PHY. */ if (mii_advert != to_advert) { pr_debug("tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n", board_idx, to_advert, phy, mii_advert); tulip_mdio_write (dev, phy, 4, to_advert); } /* Enable autonegotiation: some boards default to off. */ if (tp->default_port == 0) { new_bmcr = mii_reg0 | BMCR_ANENABLE; if (new_bmcr != mii_reg0) { new_bmcr |= BMCR_ANRESTART; ane_switch = 1; } } /* ...or disable nway, if forcing media */ else { new_bmcr = mii_reg0 & ~BMCR_ANENABLE; if (new_bmcr != mii_reg0) ane_switch = 1; } /* clear out bits we never want at this point */ new_bmcr &= ~(BMCR_CTST | BMCR_FULLDPLX | BMCR_ISOLATE | BMCR_PDOWN | BMCR_SPEED100 | BMCR_LOOPBACK | BMCR_RESET); if (tp->full_duplex) new_bmcr |= BMCR_FULLDPLX; if (tulip_media_cap[tp->default_port] & MediaIs100) new_bmcr |= BMCR_SPEED100; if (new_bmcr != mii_reg0) { /* some phys need the ANE switch to * happen before forced media settings * will "take." However, we write the * same value twice in order not to * confuse the sane phys. */ if (ane_switch) { tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr); udelay (10); } tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr); } } tp->mii_cnt = phy_idx; if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) { pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n", board_idx); tp->phys[0] = 1; } }
gpl-2.0
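tulip_mdio_read() above bit-bangs an MII management read frame through CSR9, packing the start and read-opcode bits together with the 5-bit PHY and register addresses into one command word and shifting its low 16 bits out MSB-first; a self-contained sketch of just that packing step follows (PHY 1 / register 1 are arbitrary example values).

/*
 * Sketch of how the bit-banged MDIO read command above is assembled:
 * the 0xf6 constant supplies the leading one bits plus the start (01)
 * and read-opcode (10) fields, followed by the 5-bit PHY address and
 * the 5-bit register address. Only the low 16 bits are clocked out by
 * the driver's shift loop, MSB first.
 */
#include <stdio.h>

static unsigned int mdio_read_cmd(unsigned int phy_id, unsigned int location)
{
	/* same packing as tulip_mdio_read() */
	return (0xf6 << 10) | ((phy_id & 0x1f) << 5) | (location & 0x1f);
}

int main(void)
{
	unsigned int cmd = mdio_read_cmd(1, 0x01);	/* example: PHY 1, register 1 */
	int i;

	printf("command word: 0x%05x\n", cmd);
	printf("bits shifted out (15..0): ");
	for (i = 15; i >= 0; i--)			/* MSB-first, like the driver */
		putchar((cmd & (1u << i)) ? '1' : '0');
	putchar('\n');
	return 0;
}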
DevSwift/Kernel-3.4-U8500
drivers/rtc/rtc-bq32k.c
4988
4731
/*
 * Driver for TI BQ32000 RTC.
 *
 * Copyright (C) 2009 Semihalf.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/bcd.h>

#define BQ32K_SECONDS		0x00	/* Seconds register address */
#define BQ32K_SECONDS_MASK	0x7F	/* Mask over seconds value */
#define BQ32K_STOP		0x80	/* Oscillator Stop flag */

#define BQ32K_MINUTES		0x01	/* Minutes register address */
#define BQ32K_MINUTES_MASK	0x7F	/* Mask over minutes value */
#define BQ32K_OF		0x80	/* Oscillator Failure flag */

#define BQ32K_HOURS_MASK	0x3F	/* Mask over hours value */
#define BQ32K_CENT		0x40	/* Century flag */
#define BQ32K_CENT_EN		0x80	/* Century flag enable bit */

struct bq32k_regs {
	uint8_t	seconds;
	uint8_t	minutes;
	uint8_t	cent_hours;
	uint8_t	day;
	uint8_t	date;
	uint8_t	month;
	uint8_t	years;
};

static struct i2c_driver bq32k_driver;

static int bq32k_read(struct device *dev, void *data, uint8_t off, uint8_t len)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct i2c_msg msgs[] = {
		{
			.addr = client->addr,
			.flags = 0,
			.len = 1,
			.buf = &off,
		}, {
			.addr = client->addr,
			.flags = I2C_M_RD,
			.len = len,
			.buf = data,
		}
	};

	if (i2c_transfer(client->adapter, msgs, 2) == 2)
		return 0;

	return -EIO;
}

static int bq32k_write(struct device *dev, void *data, uint8_t off, uint8_t len)
{
	struct i2c_client *client = to_i2c_client(dev);
	uint8_t buffer[len + 1];

	buffer[0] = off;
	memcpy(&buffer[1], data, len);

	if (i2c_master_send(client, buffer, len + 1) == len + 1)
		return 0;

	return -EIO;
}

static int bq32k_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct bq32k_regs regs;
	int error;

	error = bq32k_read(dev, &regs, 0, sizeof(regs));
	if (error)
		return error;

	tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK);
	tm->tm_min = bcd2bin(regs.minutes & BQ32K_SECONDS_MASK);
	tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK);
	tm->tm_mday = bcd2bin(regs.date);
	tm->tm_wday = bcd2bin(regs.day) - 1;
	tm->tm_mon = bcd2bin(regs.month) - 1;
	tm->tm_year = bcd2bin(regs.years) +
			((regs.cent_hours & BQ32K_CENT) ? 100 : 0);

	return rtc_valid_tm(tm);
}

static int bq32k_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct bq32k_regs regs;

	regs.seconds = bin2bcd(tm->tm_sec);
	regs.minutes = bin2bcd(tm->tm_min);
	regs.cent_hours = bin2bcd(tm->tm_hour) | BQ32K_CENT_EN;
	regs.day = bin2bcd(tm->tm_wday + 1);
	regs.date = bin2bcd(tm->tm_mday);
	regs.month = bin2bcd(tm->tm_mon + 1);

	if (tm->tm_year >= 100) {
		regs.cent_hours |= BQ32K_CENT;
		regs.years = bin2bcd(tm->tm_year - 100);
	} else
		regs.years = bin2bcd(tm->tm_year);

	return bq32k_write(dev, &regs, 0, sizeof(regs));
}

static const struct rtc_class_ops bq32k_rtc_ops = {
	.read_time	= bq32k_rtc_read_time,
	.set_time	= bq32k_rtc_set_time,
};

static int bq32k_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct rtc_device *rtc;
	uint8_t reg;
	int error;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
		return -ENODEV;

	/* Check Oscillator Stop flag */
	error = bq32k_read(dev, &reg, BQ32K_SECONDS, 1);
	if (!error && (reg & BQ32K_STOP)) {
		dev_warn(dev, "Oscillator was halted. Restarting...\n");
		reg &= ~BQ32K_STOP;
		error = bq32k_write(dev, &reg, BQ32K_SECONDS, 1);
	}
	if (error)
		return error;

	/* Check Oscillator Failure flag */
	error = bq32k_read(dev, &reg, BQ32K_MINUTES, 1);
	if (!error && (reg & BQ32K_OF)) {
		dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
		reg &= ~BQ32K_OF;
		error = bq32k_write(dev, &reg, BQ32K_MINUTES, 1);
	}
	if (error)
		return error;

	rtc = rtc_device_register(bq32k_driver.driver.name, &client->dev,
					&bq32k_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	i2c_set_clientdata(client, rtc);

	return 0;
}

static int __devexit bq32k_remove(struct i2c_client *client)
{
	struct rtc_device *rtc = i2c_get_clientdata(client);

	rtc_device_unregister(rtc);
	return 0;
}

static const struct i2c_device_id bq32k_id[] = {
	{ "bq32000", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, bq32k_id);

static struct i2c_driver bq32k_driver = {
	.driver = {
		.name	= "bq32k",
		.owner	= THIS_MODULE,
	},
	.probe		= bq32k_probe,
	.remove		= __devexit_p(bq32k_remove),
	.id_table	= bq32k_id,
};

module_i2c_driver(bq32k_driver);

MODULE_AUTHOR("Semihalf, Piotr Ziecik <kosmo@semihalf.com>");
MODULE_DESCRIPTION("TI BQ32000 I2C RTC driver");
MODULE_LICENSE("GPL");
gpl-2.0
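rtc-bq32k.c above converts every time field between binary and the BCD encoding the chip registers use via bcd2bin()/bin2bcd() from <linux/bcd.h>; a standalone sketch follows with local re-implementations of those helpers for illustration, round-tripping the seconds register as an example.

/*
 * Standalone sketch of the BCD conversions the RTC driver above relies on:
 * each register stores a two-digit decimal value with one hex digit per
 * decimal digit, so 59 seconds is kept as 0x59. These helpers are local
 * re-implementations for illustration, not the kernel's.
 */
#include <stdio.h>

static unsigned int bcd2bin_sketch(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd_sketch(unsigned int val)
{
	return (unsigned char)(((val / 10) << 4) | (val % 10));
}

int main(void)
{
	unsigned char seconds_reg = 0x59;	/* as read back from BQ32K_SECONDS */

	printf("register 0x%02x -> %u seconds\n",
	       seconds_reg, bcd2bin_sketch(seconds_reg & 0x7f));
	printf("%u seconds -> register 0x%02x\n",
	       59u, bin2bcd_sketch(59));
	return 0;
}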
DJSteve/G800F-LL_Kernel
drivers/isdn/hardware/eicon/os_4bri.c
9596
28951
/* $Id: os_4bri.c,v 1.28.4.4 2005/02/11 19:40:25 armin Exp $ */ #include "platform.h" #include "debuglib.h" #include "cardtype.h" #include "pc.h" #include "pr_pc.h" #include "di_defs.h" #include "dsp_defs.h" #include "di.h" #include "io.h" #include "xdi_msg.h" #include "xdi_adapter.h" #include "os_4bri.h" #include "diva_pci.h" #include "mi_pc.h" #include "dsrv4bri.h" #include "helpers.h" static void *diva_xdiLoadFileFile = NULL; static dword diva_xdiLoadFileLength = 0; /* ** IMPORTS */ extern void prepare_qBri_functions(PISDN_ADAPTER IoAdapter); extern void prepare_qBri2_functions(PISDN_ADAPTER IoAdapter); extern void diva_xdi_display_adapter_features(int card); extern void diva_add_slave_adapter(diva_os_xdi_adapter_t *a); extern int qBri_FPGA_download(PISDN_ADAPTER IoAdapter); extern void start_qBri_hardware(PISDN_ADAPTER IoAdapter); extern int diva_card_read_xlog(diva_os_xdi_adapter_t *a); /* ** LOCALS */ static unsigned long _4bri_bar_length[4] = { 0x100, 0x100, /* I/O */ MQ_MEMORY_SIZE, 0x2000 }; static unsigned long _4bri_v2_bar_length[4] = { 0x100, 0x100, /* I/O */ MQ2_MEMORY_SIZE, 0x10000 }; static unsigned long _4bri_v2_bri_bar_length[4] = { 0x100, 0x100, /* I/O */ BRI2_MEMORY_SIZE, 0x10000 }; static int diva_4bri_cleanup_adapter(diva_os_xdi_adapter_t *a); static int _4bri_get_serial_number(diva_os_xdi_adapter_t *a); static int diva_4bri_cmd_card_proc(struct _diva_os_xdi_adapter *a, diva_xdi_um_cfg_cmd_t *cmd, int length); static int diva_4bri_cleanup_slave_adapters(diva_os_xdi_adapter_t *a); static int diva_4bri_write_fpga_image(diva_os_xdi_adapter_t *a, byte *data, dword length); static int diva_4bri_reset_adapter(PISDN_ADAPTER IoAdapter); static int diva_4bri_write_sdram_block(PISDN_ADAPTER IoAdapter, dword address, const byte *data, dword length, dword limit); static int diva_4bri_start_adapter(PISDN_ADAPTER IoAdapter, dword start_address, dword features); static int check_qBri_interrupt(PISDN_ADAPTER IoAdapter); static int diva_4bri_stop_adapter(diva_os_xdi_adapter_t *a); static int _4bri_is_rev_2_card(int card_ordinal) { switch (card_ordinal) { case CARDTYPE_DIVASRV_Q_8M_V2_PCI: case CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI: case CARDTYPE_DIVASRV_B_2M_V2_PCI: case CARDTYPE_DIVASRV_B_2F_PCI: case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI: return (1); } return (0); } static int _4bri_is_rev_2_bri_card(int card_ordinal) { switch (card_ordinal) { case CARDTYPE_DIVASRV_B_2M_V2_PCI: case CARDTYPE_DIVASRV_B_2F_PCI: case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI: return (1); } return (0); } static void diva_4bri_set_addresses(diva_os_xdi_adapter_t *a) { dword offset = a->resources.pci.qoffset; dword c_offset = offset * a->xdi_adapter.ControllerNumber; a->resources.pci.mem_type_id[MEM_TYPE_RAM] = 2; a->resources.pci.mem_type_id[MEM_TYPE_ADDRESS] = 2; a->resources.pci.mem_type_id[MEM_TYPE_CONTROL] = 2; a->resources.pci.mem_type_id[MEM_TYPE_RESET] = 0; a->resources.pci.mem_type_id[MEM_TYPE_CTLREG] = 3; a->resources.pci.mem_type_id[MEM_TYPE_PROM] = 0; /* Set up hardware related pointers */ a->xdi_adapter.Address = a->resources.pci.addr[2]; /* BAR2 SDRAM */ a->xdi_adapter.Address += c_offset; a->xdi_adapter.Control = a->resources.pci.addr[2]; /* BAR2 SDRAM */ a->xdi_adapter.ram = a->resources.pci.addr[2]; /* BAR2 SDRAM */ a->xdi_adapter.ram += c_offset + (offset - MQ_SHARED_RAM_SIZE); a->xdi_adapter.reset = a->resources.pci.addr[0]; /* BAR0 CONFIG */ /* ctlReg contains the register address for the MIPS CPU reset control */ a->xdi_adapter.ctlReg = a->resources.pci.addr[3]; /* BAR3 CNTRL */ /* prom contains 
the register address for FPGA and EEPROM programming */ a->xdi_adapter.prom = &a->xdi_adapter.reset[0x6E]; } /* ** BAR0 - MEM - 0x100 - CONFIG MEM ** BAR1 - I/O - 0x100 - UNUSED ** BAR2 - MEM - MQ_MEMORY_SIZE (MQ2_MEMORY_SIZE on Rev.2) - SDRAM ** BAR3 - MEM - 0x2000 (0x10000 on Rev.2) - CNTRL ** ** Called by master adapter, that will initialize and add slave adapters */ int diva_4bri_init_card(diva_os_xdi_adapter_t *a) { int bar, i; byte __iomem *p; PADAPTER_LIST_ENTRY quadro_list; diva_os_xdi_adapter_t *diva_current; diva_os_xdi_adapter_t *adapter_list[4]; PISDN_ADAPTER Slave; unsigned long bar_length[ARRAY_SIZE(_4bri_bar_length)]; int v2 = _4bri_is_rev_2_card(a->CardOrdinal); int tasks = _4bri_is_rev_2_bri_card(a->CardOrdinal) ? 1 : MQ_INSTANCE_COUNT; int factor = (tasks == 1) ? 1 : 2; if (v2) { if (_4bri_is_rev_2_bri_card(a->CardOrdinal)) { memcpy(bar_length, _4bri_v2_bri_bar_length, sizeof(bar_length)); } else { memcpy(bar_length, _4bri_v2_bar_length, sizeof(bar_length)); } } else { memcpy(bar_length, _4bri_bar_length, sizeof(bar_length)); } DBG_TRC(("SDRAM_LENGTH=%08x, tasks=%d, factor=%d", bar_length[2], tasks, factor)) /* Get Serial Number The serial number of 4BRI is accessible in accordance with PCI spec via command register located in configuration space, also we do not have to map any BAR before we can access it */ if (!_4bri_get_serial_number(a)) { DBG_ERR(("A: 4BRI can't get Serial Number")) diva_4bri_cleanup_adapter(a); return (-1); } /* Set properties */ a->xdi_adapter.Properties = CardProperties[a->CardOrdinal]; DBG_LOG(("Load %s, SN:%ld, bus:%02x, func:%02x", a->xdi_adapter.Properties.Name, a->xdi_adapter.serialNo, a->resources.pci.bus, a->resources.pci.func)) /* First initialization step: get and check hardware resoures. Do not map resources and do not access card at this step */ for (bar = 0; bar < 4; bar++) { a->resources.pci.bar[bar] = divasa_get_pci_bar(a->resources.pci.bus, a->resources.pci.func, bar, a->resources.pci.hdev); if (!a->resources.pci.bar[bar] || (a->resources.pci.bar[bar] == 0xFFFFFFF0)) { DBG_ERR( ("A: invalid bar[%d]=%08x", bar, a->resources.pci.bar[bar])) return (-1); } } a->resources.pci.irq = (byte) divasa_get_pci_irq(a->resources.pci.bus, a->resources.pci.func, a->resources.pci.hdev); if (!a->resources.pci.irq) { DBG_ERR(("A: invalid irq")); return (-1); } a->xdi_adapter.sdram_bar = a->resources.pci.bar[2]; /* Map all MEMORY BAR's */ for (bar = 0; bar < 4; bar++) { if (bar != 1) { /* ignore I/O */ a->resources.pci.addr[bar] = divasa_remap_pci_bar(a, bar, a->resources.pci.bar[bar], bar_length[bar]); if (!a->resources.pci.addr[bar]) { DBG_ERR(("A: 4BRI: can't map bar[%d]", bar)) diva_4bri_cleanup_adapter(a); return (-1); } } } /* Register I/O port */ sprintf(&a->port_name[0], "DIVA 4BRI %ld", (long) a->xdi_adapter.serialNo); if (diva_os_register_io_port(a, 1, a->resources.pci.bar[1], bar_length[1], &a->port_name[0], 1)) { DBG_ERR(("A: 4BRI: can't register bar[1]")) diva_4bri_cleanup_adapter(a); return (-1); } a->resources.pci.addr[1] = (void *) (unsigned long) a->resources.pci.bar[1]; /* Set cleanup pointer for base adapter only, so slave adapter will be unable to get cleanup */ a->interface.cleanup_adapter_proc = diva_4bri_cleanup_adapter; /* Create slave adapters */ if (tasks > 1) { if (!(a->slave_adapters[0] = (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a)))) { diva_4bri_cleanup_adapter(a); return (-1); } if (!(a->slave_adapters[1] = (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a)))) { diva_os_free(0, a->slave_adapters[0]); 
a->slave_adapters[0] = NULL; diva_4bri_cleanup_adapter(a); return (-1); } if (!(a->slave_adapters[2] = (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a)))) { diva_os_free(0, a->slave_adapters[0]); diva_os_free(0, a->slave_adapters[1]); a->slave_adapters[0] = NULL; a->slave_adapters[1] = NULL; diva_4bri_cleanup_adapter(a); return (-1); } memset(a->slave_adapters[0], 0x00, sizeof(*a)); memset(a->slave_adapters[1], 0x00, sizeof(*a)); memset(a->slave_adapters[2], 0x00, sizeof(*a)); } adapter_list[0] = a; adapter_list[1] = a->slave_adapters[0]; adapter_list[2] = a->slave_adapters[1]; adapter_list[3] = a->slave_adapters[2]; /* Allocate slave list */ quadro_list = (PADAPTER_LIST_ENTRY) diva_os_malloc(0, sizeof(*quadro_list)); if (!(a->slave_list = quadro_list)) { for (i = 0; i < (tasks - 1); i++) { diva_os_free(0, a->slave_adapters[i]); a->slave_adapters[i] = NULL; } diva_4bri_cleanup_adapter(a); return (-1); } memset(quadro_list, 0x00, sizeof(*quadro_list)); /* Set interfaces */ a->xdi_adapter.QuadroList = quadro_list; for (i = 0; i < tasks; i++) { adapter_list[i]->xdi_adapter.ControllerNumber = i; adapter_list[i]->xdi_adapter.tasks = tasks; quadro_list->QuadroAdapter[i] = &adapter_list[i]->xdi_adapter; } for (i = 0; i < tasks; i++) { diva_current = adapter_list[i]; diva_current->dsp_mask = 0x00000003; diva_current->xdi_adapter.a.io = &diva_current->xdi_adapter; diva_current->xdi_adapter.DIRequest = request; diva_current->interface.cmd_proc = diva_4bri_cmd_card_proc; diva_current->xdi_adapter.Properties = CardProperties[a->CardOrdinal]; diva_current->CardOrdinal = a->CardOrdinal; diva_current->xdi_adapter.Channels = CardProperties[a->CardOrdinal].Channels; diva_current->xdi_adapter.e_max = CardProperties[a->CardOrdinal].E_info; diva_current->xdi_adapter.e_tbl = diva_os_malloc(0, diva_current->xdi_adapter.e_max * sizeof(E_INFO)); if (!diva_current->xdi_adapter.e_tbl) { diva_4bri_cleanup_slave_adapters(a); diva_4bri_cleanup_adapter(a); for (i = 1; i < (tasks - 1); i++) { diva_os_free(0, adapter_list[i]); } return (-1); } memset(diva_current->xdi_adapter.e_tbl, 0x00, diva_current->xdi_adapter.e_max * sizeof(E_INFO)); if (diva_os_initialize_spin_lock(&diva_current->xdi_adapter.isr_spin_lock, "isr")) { diva_4bri_cleanup_slave_adapters(a); diva_4bri_cleanup_adapter(a); for (i = 1; i < (tasks - 1); i++) { diva_os_free(0, adapter_list[i]); } return (-1); } if (diva_os_initialize_spin_lock(&diva_current->xdi_adapter.data_spin_lock, "data")) { diva_4bri_cleanup_slave_adapters(a); diva_4bri_cleanup_adapter(a); for (i = 1; i < (tasks - 1); i++) { diva_os_free(0, adapter_list[i]); } return (-1); } strcpy(diva_current->xdi_adapter.req_soft_isr. 
dpc_thread_name, "kdivas4brid"); if (diva_os_initialize_soft_isr(&diva_current->xdi_adapter.req_soft_isr, DIDpcRoutine, &diva_current->xdi_adapter)) { diva_4bri_cleanup_slave_adapters(a); diva_4bri_cleanup_adapter(a); for (i = 1; i < (tasks - 1); i++) { diva_os_free(0, adapter_list[i]); } return (-1); } /* Do not initialize second DPC - only one thread will be created */ diva_current->xdi_adapter.isr_soft_isr.object = diva_current->xdi_adapter.req_soft_isr.object; } if (v2) { prepare_qBri2_functions(&a->xdi_adapter); } else { prepare_qBri_functions(&a->xdi_adapter); } for (i = 0; i < tasks; i++) { diva_current = adapter_list[i]; if (i) memcpy(&diva_current->resources, &a->resources, sizeof(divas_card_resources_t)); diva_current->resources.pci.qoffset = (a->xdi_adapter.MemorySize >> factor); } /* Set up hardware related pointers */ a->xdi_adapter.cfg = (void *) (unsigned long) a->resources.pci.bar[0]; /* BAR0 CONFIG */ a->xdi_adapter.port = (void *) (unsigned long) a->resources.pci.bar[1]; /* BAR1 */ a->xdi_adapter.ctlReg = (void *) (unsigned long) a->resources.pci.bar[3]; /* BAR3 CNTRL */ for (i = 0; i < tasks; i++) { diva_current = adapter_list[i]; diva_4bri_set_addresses(diva_current); Slave = a->xdi_adapter.QuadroList->QuadroAdapter[i]; Slave->MultiMaster = &a->xdi_adapter; Slave->sdram_bar = a->xdi_adapter.sdram_bar; if (i) { Slave->serialNo = ((dword) (Slave->ControllerNumber << 24)) | a->xdi_adapter.serialNo; Slave->cardType = a->xdi_adapter.cardType; } } /* reset contains the base address for the PLX 9054 register set */ p = DIVA_OS_MEM_ATTACH_RESET(&a->xdi_adapter); WRITE_BYTE(&p[PLX9054_INTCSR], 0x00); /* disable PCI interrupts */ DIVA_OS_MEM_DETACH_RESET(&a->xdi_adapter, p); /* Set IRQ handler */ a->xdi_adapter.irq_info.irq_nr = a->resources.pci.irq; sprintf(a->xdi_adapter.irq_info.irq_name, "DIVA 4BRI %ld", (long) a->xdi_adapter.serialNo); if (diva_os_register_irq(a, a->xdi_adapter.irq_info.irq_nr, a->xdi_adapter.irq_info.irq_name)) { diva_4bri_cleanup_slave_adapters(a); diva_4bri_cleanup_adapter(a); for (i = 1; i < (tasks - 1); i++) { diva_os_free(0, adapter_list[i]); } return (-1); } a->xdi_adapter.irq_info.registered = 1; /* Add three slave adapters */ if (tasks > 1) { diva_add_slave_adapter(adapter_list[1]); diva_add_slave_adapter(adapter_list[2]); diva_add_slave_adapter(adapter_list[3]); } diva_log_info("%s IRQ:%d SerNo:%d", a->xdi_adapter.Properties.Name, a->resources.pci.irq, a->xdi_adapter.serialNo); return (0); } /* ** Cleanup function will be called for master adapter only ** this is guaranteed by design: cleanup callback is set ** by master adapter only */ static int diva_4bri_cleanup_adapter(diva_os_xdi_adapter_t *a) { int bar; /* Stop adapter if running */ if (a->xdi_adapter.Initialized) { diva_4bri_stop_adapter(a); } /* Remove IRQ handler */ if (a->xdi_adapter.irq_info.registered) { diva_os_remove_irq(a, a->xdi_adapter.irq_info.irq_nr); } a->xdi_adapter.irq_info.registered = 0; /* Free DPC's and spin locks on all adapters */ diva_4bri_cleanup_slave_adapters(a); /* Unmap all BARS */ for (bar = 0; bar < 4; bar++) { if (bar != 1) { if (a->resources.pci.bar[bar] && a->resources.pci.addr[bar]) { divasa_unmap_pci_bar(a->resources.pci.addr[bar]); a->resources.pci.bar[bar] = 0; a->resources.pci.addr[bar] = NULL; } } } /* Unregister I/O */ if (a->resources.pci.bar[1] && a->resources.pci.addr[1]) { diva_os_register_io_port(a, 0, a->resources.pci.bar[1], _4bri_is_rev_2_card(a-> CardOrdinal) ? 
_4bri_v2_bar_length[1] : _4bri_bar_length[1], &a->port_name[0], 1); a->resources.pci.bar[1] = 0; a->resources.pci.addr[1] = NULL; } if (a->slave_list) { diva_os_free(0, a->slave_list); a->slave_list = NULL; } return (0); } static int _4bri_get_serial_number(diva_os_xdi_adapter_t *a) { dword data[64]; dword serNo; word addr, status, i, j; byte Bus, Slot; void *hdev; Bus = a->resources.pci.bus; Slot = a->resources.pci.func; hdev = a->resources.pci.hdev; for (i = 0; i < 64; ++i) { addr = i * 4; for (j = 0; j < 5; ++j) { PCIwrite(Bus, Slot, 0x4E, &addr, sizeof(addr), hdev); diva_os_wait(1); PCIread(Bus, Slot, 0x4E, &status, sizeof(status), hdev); if (status & 0x8000) break; } if (j >= 5) { DBG_ERR(("EEPROM[%d] read failed (0x%x)", i * 4, addr)) return (0); } PCIread(Bus, Slot, 0x50, &data[i], sizeof(data[i]), hdev); } DBG_BLK(((char *) &data[0], sizeof(data))) serNo = data[32]; if (serNo == 0 || serNo == 0xffffffff) serNo = data[63]; if (!serNo) { DBG_LOG(("W: Serial Number == 0, create one serial number")); serNo = a->resources.pci.bar[1] & 0xffff0000; serNo |= a->resources.pci.bus << 8; serNo |= a->resources.pci.func; } a->xdi_adapter.serialNo = serNo; DBG_REG(("Serial No. : %ld", a->xdi_adapter.serialNo)) return (serNo); } /* ** Release resources of slave adapters */ static int diva_4bri_cleanup_slave_adapters(diva_os_xdi_adapter_t *a) { diva_os_xdi_adapter_t *adapter_list[4]; diva_os_xdi_adapter_t *diva_current; int i; adapter_list[0] = a; adapter_list[1] = a->slave_adapters[0]; adapter_list[2] = a->slave_adapters[1]; adapter_list[3] = a->slave_adapters[2]; for (i = 0; i < a->xdi_adapter.tasks; i++) { diva_current = adapter_list[i]; if (diva_current) { diva_os_destroy_spin_lock(&diva_current-> xdi_adapter. isr_spin_lock, "unload"); diva_os_destroy_spin_lock(&diva_current-> xdi_adapter. data_spin_lock, "unload"); diva_os_cancel_soft_isr(&diva_current->xdi_adapter. req_soft_isr); diva_os_cancel_soft_isr(&diva_current->xdi_adapter. isr_soft_isr); diva_os_remove_soft_isr(&diva_current->xdi_adapter. req_soft_isr); diva_current->xdi_adapter.isr_soft_isr.object = NULL; if (diva_current->xdi_adapter.e_tbl) { diva_os_free(0, diva_current->xdi_adapter. 
e_tbl); } diva_current->xdi_adapter.e_tbl = NULL; diva_current->xdi_adapter.e_max = 0; diva_current->xdi_adapter.e_count = 0; } } return (0); } static int diva_4bri_cmd_card_proc(struct _diva_os_xdi_adapter *a, diva_xdi_um_cfg_cmd_t *cmd, int length) { int ret = -1; if (cmd->adapter != a->controller) { DBG_ERR(("A: 4bri_cmd, invalid controller=%d != %d", cmd->adapter, a->controller)) return (-1); } switch (cmd->command) { case DIVA_XDI_UM_CMD_GET_CARD_ORDINAL: a->xdi_mbox.data_length = sizeof(dword); a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox.data_length); if (a->xdi_mbox.data) { *(dword *) a->xdi_mbox.data = (dword) a->CardOrdinal; a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } break; case DIVA_XDI_UM_CMD_GET_SERIAL_NR: a->xdi_mbox.data_length = sizeof(dword); a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox.data_length); if (a->xdi_mbox.data) { *(dword *) a->xdi_mbox.data = (dword) a->xdi_adapter.serialNo; a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } break; case DIVA_XDI_UM_CMD_GET_PCI_HW_CONFIG: if (!a->xdi_adapter.ControllerNumber) { /* Only master adapter can access hardware config */ a->xdi_mbox.data_length = sizeof(dword) * 9; a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox.data_length); if (a->xdi_mbox.data) { int i; dword *data = (dword *) a->xdi_mbox.data; for (i = 0; i < 8; i++) { *data++ = a->resources.pci.bar[i]; } *data++ = (dword) a->resources.pci.irq; a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } } break; case DIVA_XDI_UM_CMD_GET_CARD_STATE: if (!a->xdi_adapter.ControllerNumber) { a->xdi_mbox.data_length = sizeof(dword); a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox.data_length); if (a->xdi_mbox.data) { dword *data = (dword *) a->xdi_mbox.data; if (!a->xdi_adapter.ram || !a->xdi_adapter.reset || !a->xdi_adapter.cfg) { *data = 3; } else if (a->xdi_adapter.trapped) { *data = 2; } else if (a->xdi_adapter.Initialized) { *data = 1; } else { *data = 0; } a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } } break; case DIVA_XDI_UM_CMD_WRITE_FPGA: if (!a->xdi_adapter.ControllerNumber) { ret = diva_4bri_write_fpga_image(a, (byte *)&cmd[1], cmd->command_data. write_fpga. image_length); } break; case DIVA_XDI_UM_CMD_RESET_ADAPTER: if (!a->xdi_adapter.ControllerNumber) { ret = diva_4bri_reset_adapter(&a->xdi_adapter); } break; case DIVA_XDI_UM_CMD_WRITE_SDRAM_BLOCK: if (!a->xdi_adapter.ControllerNumber) { ret = diva_4bri_write_sdram_block(&a->xdi_adapter, cmd-> command_data. write_sdram. offset, (byte *) & cmd[1], cmd-> command_data. write_sdram. length, a->xdi_adapter. MemorySize); } break; case DIVA_XDI_UM_CMD_START_ADAPTER: if (!a->xdi_adapter.ControllerNumber) { ret = diva_4bri_start_adapter(&a->xdi_adapter, cmd->command_data. start.offset, cmd->command_data. 
start.features); } break; case DIVA_XDI_UM_CMD_SET_PROTOCOL_FEATURES: if (!a->xdi_adapter.ControllerNumber) { a->xdi_adapter.features = cmd->command_data.features.features; a->xdi_adapter.a.protocol_capabilities = a->xdi_adapter.features; DBG_TRC(("Set raw protocol features (%08x)", a->xdi_adapter.features)) ret = 0; } break; case DIVA_XDI_UM_CMD_STOP_ADAPTER: if (!a->xdi_adapter.ControllerNumber) { ret = diva_4bri_stop_adapter(a); } break; case DIVA_XDI_UM_CMD_READ_XLOG_ENTRY: ret = diva_card_read_xlog(a); break; case DIVA_XDI_UM_CMD_READ_SDRAM: if (!a->xdi_adapter.ControllerNumber && a->xdi_adapter.Address) { if ( (a->xdi_mbox.data_length = cmd->command_data.read_sdram.length)) { if ( (a->xdi_mbox.data_length + cmd->command_data.read_sdram.offset) < a->xdi_adapter.MemorySize) { a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox. data_length); if (a->xdi_mbox.data) { byte __iomem *p = DIVA_OS_MEM_ATTACH_ADDRESS(&a->xdi_adapter); byte __iomem *src = p; byte *dst = a->xdi_mbox.data; dword len = a->xdi_mbox.data_length; src += cmd->command_data.read_sdram.offset; while (len--) { *dst++ = READ_BYTE(src++); } DIVA_OS_MEM_DETACH_ADDRESS(&a->xdi_adapter, p); a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } } } } break; default: DBG_ERR(("A: A(%d) invalid cmd=%d", a->controller, cmd->command)) } return (ret); } void *xdiLoadFile(char *FileName, dword *FileLength, unsigned long lim) { void *ret = diva_xdiLoadFileFile; if (FileLength) { *FileLength = diva_xdiLoadFileLength; } diva_xdiLoadFileFile = NULL; diva_xdiLoadFileLength = 0; return (ret); } void diva_os_set_qBri_functions(PISDN_ADAPTER IoAdapter) { } void diva_os_set_qBri2_functions(PISDN_ADAPTER IoAdapter) { } static int diva_4bri_write_fpga_image(diva_os_xdi_adapter_t *a, byte *data, dword length) { int ret; diva_xdiLoadFileFile = data; diva_xdiLoadFileLength = length; ret = qBri_FPGA_download(&a->xdi_adapter); diva_xdiLoadFileFile = NULL; diva_xdiLoadFileLength = 0; return (ret ? 
0 : -1); } static int diva_4bri_reset_adapter(PISDN_ADAPTER IoAdapter) { PISDN_ADAPTER Slave; int i; if (!IoAdapter->Address || !IoAdapter->reset) { return (-1); } if (IoAdapter->Initialized) { DBG_ERR(("A: A(%d) can't reset 4BRI adapter - please stop first", IoAdapter->ANum)) return (-1); } /* Forget all entities on all adapters */ for (i = 0; ((i < IoAdapter->tasks) && IoAdapter->QuadroList); i++) { Slave = IoAdapter->QuadroList->QuadroAdapter[i]; Slave->e_count = 0; if (Slave->e_tbl) { memset(Slave->e_tbl, 0x00, Slave->e_max * sizeof(E_INFO)); } Slave->head = 0; Slave->tail = 0; Slave->assign = 0; Slave->trapped = 0; memset(&Slave->a.IdTable[0], 0x00, sizeof(Slave->a.IdTable)); memset(&Slave->a.IdTypeTable[0], 0x00, sizeof(Slave->a.IdTypeTable)); memset(&Slave->a.FlowControlIdTable[0], 0x00, sizeof(Slave->a.FlowControlIdTable)); memset(&Slave->a.FlowControlSkipTable[0], 0x00, sizeof(Slave->a.FlowControlSkipTable)); memset(&Slave->a.misc_flags_table[0], 0x00, sizeof(Slave->a.misc_flags_table)); memset(&Slave->a.rx_stream[0], 0x00, sizeof(Slave->a.rx_stream)); memset(&Slave->a.tx_stream[0], 0x00, sizeof(Slave->a.tx_stream)); memset(&Slave->a.tx_pos[0], 0x00, sizeof(Slave->a.tx_pos)); memset(&Slave->a.rx_pos[0], 0x00, sizeof(Slave->a.rx_pos)); } return (0); } static int diva_4bri_write_sdram_block(PISDN_ADAPTER IoAdapter, dword address, const byte *data, dword length, dword limit) { byte __iomem *p = DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter); byte __iomem *mem = p; if (((address + length) >= limit) || !mem) { DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, p); DBG_ERR(("A: A(%d) write 4BRI address=0x%08lx", IoAdapter->ANum, address + length)) return (-1); } mem += address; while (length--) { WRITE_BYTE(mem++, *data++); } DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, p); return (0); } static int diva_4bri_start_adapter(PISDN_ADAPTER IoAdapter, dword start_address, dword features) { volatile word __iomem *signature; int started = 0; int i; byte __iomem *p; /* start adapter */ start_qBri_hardware(IoAdapter); p = DIVA_OS_MEM_ATTACH_RAM(IoAdapter); /* wait for signature in shared memory (max. 3 seconds) */ signature = (volatile word __iomem *) (&p[0x1E]); for (i = 0; i < 300; ++i) { diva_os_wait(10); if (READ_WORD(&signature[0]) == 0x4447) { DBG_TRC(("Protocol startup time %d.%02d seconds", (i / 100), (i % 100))) started = 1; break; } } for (i = 1; i < IoAdapter->tasks; i++) { IoAdapter->QuadroList->QuadroAdapter[i]->features = IoAdapter->features; IoAdapter->QuadroList->QuadroAdapter[i]->a. protocol_capabilities = IoAdapter->features; } if (!started) { DBG_FTL(("%s: Adapter selftest failed, signature=%04x", IoAdapter->Properties.Name, READ_WORD(&signature[0]))) DIVA_OS_MEM_DETACH_RAM(IoAdapter, p); (*(IoAdapter->trapFnc)) (IoAdapter); IoAdapter->stop(IoAdapter); return (-1); } DIVA_OS_MEM_DETACH_RAM(IoAdapter, p); for (i = 0; i < IoAdapter->tasks; i++) { IoAdapter->QuadroList->QuadroAdapter[i]->Initialized = 1; IoAdapter->QuadroList->QuadroAdapter[i]->IrqCount = 0; } if (check_qBri_interrupt(IoAdapter)) { DBG_ERR(("A: A(%d) interrupt test failed", IoAdapter->ANum)) for (i = 0; i < IoAdapter->tasks; i++) { IoAdapter->QuadroList->QuadroAdapter[i]->Initialized = 0; } IoAdapter->stop(IoAdapter); return (-1); } IoAdapter->Properties.Features = (word) features; diva_xdi_display_adapter_features(IoAdapter->ANum); for (i = 0; i < IoAdapter->tasks; i++) { DBG_LOG(("A(%d) %s adapter successfully started", IoAdapter->QuadroList->QuadroAdapter[i]->ANum, (IoAdapter->tasks == 1) ? 
"BRI 2.0" : "4BRI")) diva_xdi_didd_register_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum); IoAdapter->QuadroList->QuadroAdapter[i]->Properties.Features = (word) features; } return (0); } static int check_qBri_interrupt(PISDN_ADAPTER IoAdapter) { #ifdef SUPPORT_INTERRUPT_TEST_ON_4BRI int i; ADAPTER *a = &IoAdapter->a; byte __iomem *p; IoAdapter->IrqCount = 0; if (IoAdapter->ControllerNumber > 0) return (-1); p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter); WRITE_BYTE(&p[PLX9054_INTCSR], PLX9054_INT_ENABLE); DIVA_OS_MEM_DETACH_RESET(IoAdapter, p); /* interrupt test */ a->ReadyInt = 1; a->ram_out(a, &PR_RAM->ReadyInt, 1); for (i = 100; !IoAdapter->IrqCount && (i-- > 0); diva_os_wait(10)); return ((IoAdapter->IrqCount > 0) ? 0 : -1); #else dword volatile __iomem *qBriIrq; byte __iomem *p; /* Reset on-board interrupt register */ IoAdapter->IrqCount = 0; p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter); qBriIrq = (dword volatile __iomem *) (&p[_4bri_is_rev_2_card (IoAdapter-> cardType) ? (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST)]); WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF); DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p); p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter); WRITE_BYTE(&p[PLX9054_INTCSR], PLX9054_INT_ENABLE); DIVA_OS_MEM_DETACH_RESET(IoAdapter, p); diva_os_wait(100); return (0); #endif /* SUPPORT_INTERRUPT_TEST_ON_4BRI */ } static void diva_4bri_clear_interrupts(diva_os_xdi_adapter_t *a) { PISDN_ADAPTER IoAdapter = &a->xdi_adapter; /* clear any pending interrupt */ IoAdapter->disIrq(IoAdapter); IoAdapter->tst_irq(&IoAdapter->a); IoAdapter->clr_irq(&IoAdapter->a); IoAdapter->tst_irq(&IoAdapter->a); /* kill pending dpcs */ diva_os_cancel_soft_isr(&IoAdapter->req_soft_isr); diva_os_cancel_soft_isr(&IoAdapter->isr_soft_isr); } static int diva_4bri_stop_adapter(diva_os_xdi_adapter_t *a) { PISDN_ADAPTER IoAdapter = &a->xdi_adapter; int i; if (!IoAdapter->ram) { return (-1); } if (!IoAdapter->Initialized) { DBG_ERR(("A: A(%d) can't stop PRI adapter - not running", IoAdapter->ANum)) return (-1); /* nothing to stop */ } for (i = 0; i < IoAdapter->tasks; i++) { IoAdapter->QuadroList->QuadroAdapter[i]->Initialized = 0; } /* Disconnect Adapters from DIDD */ for (i = 0; i < IoAdapter->tasks; i++) { diva_xdi_didd_remove_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum); } i = 100; /* Stop interrupts */ a->clear_interrupts_proc = diva_4bri_clear_interrupts; IoAdapter->a.ReadyInt = 1; IoAdapter->a.ram_inc(&IoAdapter->a, &PR_RAM->ReadyInt); do { diva_os_sleep(10); } while (i-- && a->clear_interrupts_proc); if (a->clear_interrupts_proc) { diva_4bri_clear_interrupts(a); a->clear_interrupts_proc = NULL; DBG_ERR(("A: A(%d) no final interrupt from 4BRI adapter", IoAdapter->ANum)) } IoAdapter->a.ReadyInt = 0; /* Stop and reset adapter */ IoAdapter->stop(IoAdapter); return (0); }
gpl-2.0
major91/Zeta_Chromium-L
sound/soc/fsl/efika-audio-fabric.c
9596
2150
/*
 * Efika driver for the PSC of the Freescale MPC52xx
 * configured as AC97 interface
 *
 * Copyright 2008 Jon Smirl, Digispeaker
 * Author: Jon Smirl <jonsmirl@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>

#include "mpc5200_dma.h"
#include "mpc5200_psc_ac97.h"
#include "../codecs/stac9766.h"

#define DRV_NAME "efika-audio-fabric"

static struct snd_soc_dai_link efika_fabric_dai[] = {
{
	.name = "AC97",
	.stream_name = "AC97 Analog",
	.codec_dai_name = "stac9766-hifi-analog",
	.cpu_dai_name = "mpc5200-psc-ac97.0",
	.platform_name = "mpc5200-pcm-audio",
	.codec_name = "stac9766-codec",
},
{
	.name = "AC97",
	.stream_name = "AC97 IEC958",
	.codec_dai_name = "stac9766-hifi-IEC958",
	.cpu_dai_name = "mpc5200-psc-ac97.1",
	.platform_name = "mpc5200-pcm-audio",
	.codec_name = "stac9766-codec",
},
};

static struct snd_soc_card card = {
	.name = "Efika",
	.owner = THIS_MODULE,
	.dai_link = efika_fabric_dai,
	.num_links = ARRAY_SIZE(efika_fabric_dai),
};

static __init int efika_fabric_init(void)
{
	struct platform_device *pdev;
	int rc;

	if (!of_machine_is_compatible("bplan,efika"))
		return -ENODEV;

	pdev = platform_device_alloc("soc-audio", 1);
	if (!pdev) {
		pr_err("efika_fabric_init: platform_device_alloc() failed\n");
		return -ENODEV;
	}

	platform_set_drvdata(pdev, &card);

	rc = platform_device_add(pdev);
	if (rc) {
		pr_err("efika_fabric_init: platform_device_add() failed\n");
		platform_device_put(pdev);
		return -ENODEV;
	}

	return 0;
}

module_init(efika_fabric_init);

MODULE_AUTHOR("Jon Smirl <jonsmirl@gmail.com>");
MODULE_DESCRIPTION(DRV_NAME ": mpc5200 Efika fabric driver");
MODULE_LICENSE("GPL");
gpl-2.0
vredniiy/Jiayu-G4
arch/alpha/lib/udelay.c
13180
1119
/*
 * Copyright (C) 1993, 2000 Linus Torvalds
 *
 * Delay routines, using a pre-computed "loops_per_jiffy" value.
 */

#include <linux/module.h>
#include <linux/sched.h> /* for udelay's use of smp_processor_id */
#include <asm/param.h>
#include <asm/smp.h>
#include <linux/delay.h>

/*
 * Use only for very small delays (< 1 msec).
 *
 * The active part of our cycle counter is only 32-bits wide, and
 * we're treating the difference between two marks as signed.  On
 * a 1GHz box, that's about 2 seconds.
 */

void __delay(int loops)
{
	int tmp;

	__asm__ __volatile__(
		" rpcc %0\n"
		" addl %1,%0,%1\n"
		"1: rpcc %0\n"
		" subl %1,%0,%0\n"
		" bgt %0,1b"
		: "=&r" (tmp), "=r" (loops) : "1"(loops));
}

#ifdef CONFIG_SMP
#define LPJ	cpu_data[smp_processor_id()].loops_per_jiffy
#else
#define LPJ	loops_per_jiffy
#endif

void udelay(unsigned long usecs)
{
	usecs *= (((unsigned long)HZ << 32) / 1000000) * LPJ;
	__delay((long)usecs >> 32);
}
EXPORT_SYMBOL(udelay);

void ndelay(unsigned long nsecs)
{
	nsecs *= (((unsigned long)HZ << 32) / 1000000000) * LPJ;
	__delay((long)nsecs >> 32);
}
EXPORT_SYMBOL(ndelay);
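/*
 * Illustrative sketch of the fixed-point scaling used by udelay()/ndelay()
 * above; the example values below are assumptions chosen for illustration
 * and are not taken from this file or any particular machine.
 *
 * (HZ << 32) / 1000000 is a 32.32 fixed-point constant for "jiffies per
 * microsecond"; multiplying by LPJ (loops per jiffy) and the requested
 * delay, then shifting right by 32, gives roughly
 * usecs * HZ * LPJ / 10^6 loops without a runtime division.
 *
 * Assumed example: HZ = 1024, LPJ = 4000000, usecs = 100
 *   (1024 << 32) / 1000000          = 4398046   (truncated)
 *   4398046 * 4000000 * 100        ~= 1.76e15   (fits in a 64-bit long)
 *   ... >> 32                       = 409599 loops
 * versus the ideal 100 * 1024 * 4000000 / 10^6 = 409600 loops.
 */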
gpl-2.0
uoaerg/linux-dccp
drivers/pinctrl/spear/pinctrl-spear1310.c
893
77749
/* * Driver for the ST Microelectronics SPEAr1310 pinmux * * Copyright (C) 2012 ST Microelectronics * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/err.h> #include <linux/init.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include "pinctrl-spear.h" #define DRIVER_NAME "spear1310-pinmux" /* pins */ static const struct pinctrl_pin_desc spear1310_pins[] = { SPEAR_PIN_0_TO_101, SPEAR_PIN_102_TO_245, }; /* registers */ #define PERIP_CFG 0x3B0 #define MCIF_SEL_SHIFT 5 #define MCIF_SEL_SD (0x1 << MCIF_SEL_SHIFT) #define MCIF_SEL_CF (0x2 << MCIF_SEL_SHIFT) #define MCIF_SEL_XD (0x3 << MCIF_SEL_SHIFT) #define MCIF_SEL_MASK (0x3 << MCIF_SEL_SHIFT) #define PCIE_SATA_CFG 0x3A4 #define PCIE_SATA2_SEL_PCIE (0 << 31) #define PCIE_SATA1_SEL_PCIE (0 << 30) #define PCIE_SATA0_SEL_PCIE (0 << 29) #define PCIE_SATA2_SEL_SATA (1 << 31) #define PCIE_SATA1_SEL_SATA (1 << 30) #define PCIE_SATA0_SEL_SATA (1 << 29) #define SATA2_CFG_TX_CLK_EN (1 << 27) #define SATA2_CFG_RX_CLK_EN (1 << 26) #define SATA2_CFG_POWERUP_RESET (1 << 25) #define SATA2_CFG_PM_CLK_EN (1 << 24) #define SATA1_CFG_TX_CLK_EN (1 << 23) #define SATA1_CFG_RX_CLK_EN (1 << 22) #define SATA1_CFG_POWERUP_RESET (1 << 21) #define SATA1_CFG_PM_CLK_EN (1 << 20) #define SATA0_CFG_TX_CLK_EN (1 << 19) #define SATA0_CFG_RX_CLK_EN (1 << 18) #define SATA0_CFG_POWERUP_RESET (1 << 17) #define SATA0_CFG_PM_CLK_EN (1 << 16) #define PCIE2_CFG_DEVICE_PRESENT (1 << 11) #define PCIE2_CFG_POWERUP_RESET (1 << 10) #define PCIE2_CFG_CORE_CLK_EN (1 << 9) #define PCIE2_CFG_AUX_CLK_EN (1 << 8) #define PCIE1_CFG_DEVICE_PRESENT (1 << 7) #define PCIE1_CFG_POWERUP_RESET (1 << 6) #define PCIE1_CFG_CORE_CLK_EN (1 << 5) #define PCIE1_CFG_AUX_CLK_EN (1 << 4) #define PCIE0_CFG_DEVICE_PRESENT (1 << 3) #define PCIE0_CFG_POWERUP_RESET (1 << 2) #define PCIE0_CFG_CORE_CLK_EN (1 << 1) #define PCIE0_CFG_AUX_CLK_EN (1 << 0) #define PAD_FUNCTION_EN_0 0x650 #define PMX_UART0_MASK (1 << 1) #define PMX_I2C0_MASK (1 << 2) #define PMX_I2S0_MASK (1 << 3) #define PMX_SSP0_MASK (1 << 4) #define PMX_CLCD1_MASK (1 << 5) #define PMX_EGPIO00_MASK (1 << 6) #define PMX_EGPIO01_MASK (1 << 7) #define PMX_EGPIO02_MASK (1 << 8) #define PMX_EGPIO03_MASK (1 << 9) #define PMX_EGPIO04_MASK (1 << 10) #define PMX_EGPIO05_MASK (1 << 11) #define PMX_EGPIO06_MASK (1 << 12) #define PMX_EGPIO07_MASK (1 << 13) #define PMX_EGPIO08_MASK (1 << 14) #define PMX_EGPIO09_MASK (1 << 15) #define PMX_SMI_MASK (1 << 16) #define PMX_NAND8_MASK (1 << 17) #define PMX_GMIICLK_MASK (1 << 18) #define PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK (1 << 19) #define PMX_RXCLK_RDV_TXEN_D03_MASK (1 << 20) #define PMX_GMIID47_MASK (1 << 21) #define PMX_MDC_MDIO_MASK (1 << 22) #define PMX_MCI_DATA8_15_MASK (1 << 23) #define PMX_NFAD23_MASK (1 << 24) #define PMX_NFAD24_MASK (1 << 25) #define PMX_NFAD25_MASK (1 << 26) #define PMX_NFCE3_MASK (1 << 27) #define PMX_NFWPRT3_MASK (1 << 28) #define PMX_NFRSTPWDWN0_MASK (1 << 29) #define PMX_NFRSTPWDWN1_MASK (1 << 30) #define PMX_NFRSTPWDWN2_MASK (1 << 31) #define PAD_FUNCTION_EN_1 0x654 #define PMX_NFRSTPWDWN3_MASK (1 << 0) #define PMX_SMINCS2_MASK (1 << 1) #define PMX_SMINCS3_MASK (1 << 2) #define PMX_CLCD2_MASK (1 << 3) #define PMX_KBD_ROWCOL68_MASK (1 << 4) #define PMX_EGPIO10_MASK (1 << 5) #define PMX_EGPIO11_MASK (1 << 6) #define PMX_EGPIO12_MASK (1 << 7) #define PMX_EGPIO13_MASK (1 << 8) #define 
PMX_EGPIO14_MASK (1 << 9) #define PMX_EGPIO15_MASK (1 << 10) #define PMX_UART0_MODEM_MASK (1 << 11) #define PMX_GPT0_TMR0_MASK (1 << 12) #define PMX_GPT0_TMR1_MASK (1 << 13) #define PMX_GPT1_TMR0_MASK (1 << 14) #define PMX_GPT1_TMR1_MASK (1 << 15) #define PMX_I2S1_MASK (1 << 16) #define PMX_KBD_ROWCOL25_MASK (1 << 17) #define PMX_NFIO8_15_MASK (1 << 18) #define PMX_KBD_COL1_MASK (1 << 19) #define PMX_NFCE1_MASK (1 << 20) #define PMX_KBD_COL0_MASK (1 << 21) #define PMX_NFCE2_MASK (1 << 22) #define PMX_KBD_ROW1_MASK (1 << 23) #define PMX_NFWPRT1_MASK (1 << 24) #define PMX_KBD_ROW0_MASK (1 << 25) #define PMX_NFWPRT2_MASK (1 << 26) #define PMX_MCIDATA0_MASK (1 << 27) #define PMX_MCIDATA1_MASK (1 << 28) #define PMX_MCIDATA2_MASK (1 << 29) #define PMX_MCIDATA3_MASK (1 << 30) #define PMX_MCIDATA4_MASK (1 << 31) #define PAD_FUNCTION_EN_2 0x658 #define PMX_MCIDATA5_MASK (1 << 0) #define PMX_MCIDATA6_MASK (1 << 1) #define PMX_MCIDATA7_MASK (1 << 2) #define PMX_MCIDATA1SD_MASK (1 << 3) #define PMX_MCIDATA2SD_MASK (1 << 4) #define PMX_MCIDATA3SD_MASK (1 << 5) #define PMX_MCIADDR0ALE_MASK (1 << 6) #define PMX_MCIADDR1CLECLK_MASK (1 << 7) #define PMX_MCIADDR2_MASK (1 << 8) #define PMX_MCICECF_MASK (1 << 9) #define PMX_MCICEXD_MASK (1 << 10) #define PMX_MCICESDMMC_MASK (1 << 11) #define PMX_MCICDCF1_MASK (1 << 12) #define PMX_MCICDCF2_MASK (1 << 13) #define PMX_MCICDXD_MASK (1 << 14) #define PMX_MCICDSDMMC_MASK (1 << 15) #define PMX_MCIDATADIR_MASK (1 << 16) #define PMX_MCIDMARQWP_MASK (1 << 17) #define PMX_MCIIORDRE_MASK (1 << 18) #define PMX_MCIIOWRWE_MASK (1 << 19) #define PMX_MCIRESETCF_MASK (1 << 20) #define PMX_MCICS0CE_MASK (1 << 21) #define PMX_MCICFINTR_MASK (1 << 22) #define PMX_MCIIORDY_MASK (1 << 23) #define PMX_MCICS1_MASK (1 << 24) #define PMX_MCIDMAACK_MASK (1 << 25) #define PMX_MCISDCMD_MASK (1 << 26) #define PMX_MCILEDS_MASK (1 << 27) #define PMX_TOUCH_XY_MASK (1 << 28) #define PMX_SSP0_CS0_MASK (1 << 29) #define PMX_SSP0_CS1_2_MASK (1 << 30) #define PAD_DIRECTION_SEL_0 0x65C #define PAD_DIRECTION_SEL_1 0x660 #define PAD_DIRECTION_SEL_2 0x664 /* combined macros */ #define PMX_GMII_MASK (PMX_GMIICLK_MASK | \ PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \ PMX_RXCLK_RDV_TXEN_D03_MASK | \ PMX_GMIID47_MASK | PMX_MDC_MDIO_MASK) #define PMX_EGPIO_0_GRP_MASK (PMX_EGPIO00_MASK | PMX_EGPIO01_MASK | \ PMX_EGPIO02_MASK | \ PMX_EGPIO03_MASK | PMX_EGPIO04_MASK | \ PMX_EGPIO05_MASK | PMX_EGPIO06_MASK | \ PMX_EGPIO07_MASK | PMX_EGPIO08_MASK | \ PMX_EGPIO09_MASK) #define PMX_EGPIO_1_GRP_MASK (PMX_EGPIO10_MASK | PMX_EGPIO11_MASK | \ PMX_EGPIO12_MASK | PMX_EGPIO13_MASK | \ PMX_EGPIO14_MASK | PMX_EGPIO15_MASK) #define PMX_KEYBOARD_6X6_MASK (PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK | \ PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL0_MASK | \ PMX_KBD_COL1_MASK) #define PMX_NAND8BIT_0_MASK (PMX_NAND8_MASK | PMX_NFAD23_MASK | \ PMX_NFAD24_MASK | PMX_NFAD25_MASK | \ PMX_NFWPRT3_MASK | PMX_NFRSTPWDWN0_MASK | \ PMX_NFRSTPWDWN1_MASK | PMX_NFRSTPWDWN2_MASK | \ PMX_NFCE3_MASK) #define PMX_NAND8BIT_1_MASK PMX_NFRSTPWDWN3_MASK #define PMX_NAND16BIT_1_MASK (PMX_KBD_ROWCOL25_MASK | PMX_NFIO8_15_MASK) #define PMX_NAND_4CHIPS_MASK (PMX_NFCE1_MASK | PMX_NFCE2_MASK | \ PMX_NFWPRT1_MASK | PMX_NFWPRT2_MASK | \ PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK | \ PMX_KBD_COL0_MASK | PMX_KBD_COL1_MASK) #define PMX_MCIFALL_1_MASK 0xF8000000 #define PMX_MCIFALL_2_MASK 0x0FFFFFFF #define PMX_PCI_REG1_MASK (PMX_SMINCS2_MASK | PMX_SMINCS3_MASK | \ PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK | \ PMX_EGPIO_1_GRP_MASK | PMX_GPT0_TMR0_MASK | \ PMX_GPT0_TMR1_MASK | 
PMX_GPT1_TMR0_MASK | \ PMX_GPT1_TMR1_MASK | PMX_I2S1_MASK | \ PMX_NFCE2_MASK) #define PMX_PCI_REG2_MASK (PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \ PMX_SSP0_CS1_2_MASK) #define PMX_SMII_0_1_2_MASK (PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK) #define PMX_RGMII_REG0_MASK (PMX_MCI_DATA8_15_MASK | \ PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \ PMX_GMIID47_MASK) #define PMX_RGMII_REG1_MASK (PMX_KBD_ROWCOL68_MASK | PMX_EGPIO_1_GRP_MASK |\ PMX_KBD_ROW1_MASK | PMX_NFWPRT1_MASK | \ PMX_KBD_ROW0_MASK | PMX_NFWPRT2_MASK) #define PMX_RGMII_REG2_MASK (PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \ PMX_SSP0_CS1_2_MASK) #define PCIE_CFG_VAL(x) (PCIE_SATA##x##_SEL_PCIE | \ PCIE##x##_CFG_AUX_CLK_EN | \ PCIE##x##_CFG_CORE_CLK_EN | \ PCIE##x##_CFG_POWERUP_RESET | \ PCIE##x##_CFG_DEVICE_PRESENT) #define SATA_CFG_VAL(x) (PCIE_SATA##x##_SEL_SATA | \ SATA##x##_CFG_PM_CLK_EN | \ SATA##x##_CFG_POWERUP_RESET | \ SATA##x##_CFG_RX_CLK_EN | \ SATA##x##_CFG_TX_CLK_EN) /* Pad multiplexing for i2c0 device */ static const unsigned i2c0_pins[] = { 102, 103 }; static struct spear_muxreg i2c0_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_I2C0_MASK, .val = PMX_I2C0_MASK, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_I2C0_MASK, .val = PMX_I2C0_MASK, }, }; static struct spear_modemux i2c0_modemux[] = { { .muxregs = i2c0_muxreg, .nmuxregs = ARRAY_SIZE(i2c0_muxreg), }, }; static struct spear_pingroup i2c0_pingroup = { .name = "i2c0_grp", .pins = i2c0_pins, .npins = ARRAY_SIZE(i2c0_pins), .modemuxs = i2c0_modemux, .nmodemuxs = ARRAY_SIZE(i2c0_modemux), }; static const char *const i2c0_grps[] = { "i2c0_grp" }; static struct spear_function i2c0_function = { .name = "i2c0", .groups = i2c0_grps, .ngroups = ARRAY_SIZE(i2c0_grps), }; /* Pad multiplexing for ssp0 device */ static const unsigned ssp0_pins[] = { 109, 110, 111, 112 }; static struct spear_muxreg ssp0_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_SSP0_MASK, .val = PMX_SSP0_MASK, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_SSP0_MASK, .val = PMX_SSP0_MASK, }, }; static struct spear_modemux ssp0_modemux[] = { { .muxregs = ssp0_muxreg, .nmuxregs = ARRAY_SIZE(ssp0_muxreg), }, }; static struct spear_pingroup ssp0_pingroup = { .name = "ssp0_grp", .pins = ssp0_pins, .npins = ARRAY_SIZE(ssp0_pins), .modemuxs = ssp0_modemux, .nmodemuxs = ARRAY_SIZE(ssp0_modemux), }; /* Pad multiplexing for ssp0_cs0 device */ static const unsigned ssp0_cs0_pins[] = { 96 }; static struct spear_muxreg ssp0_cs0_muxreg[] = { { .reg = PAD_FUNCTION_EN_2, .mask = PMX_SSP0_CS0_MASK, .val = PMX_SSP0_CS0_MASK, }, { .reg = PAD_DIRECTION_SEL_2, .mask = PMX_SSP0_CS0_MASK, .val = PMX_SSP0_CS0_MASK, }, }; static struct spear_modemux ssp0_cs0_modemux[] = { { .muxregs = ssp0_cs0_muxreg, .nmuxregs = ARRAY_SIZE(ssp0_cs0_muxreg), }, }; static struct spear_pingroup ssp0_cs0_pingroup = { .name = "ssp0_cs0_grp", .pins = ssp0_cs0_pins, .npins = ARRAY_SIZE(ssp0_cs0_pins), .modemuxs = ssp0_cs0_modemux, .nmodemuxs = ARRAY_SIZE(ssp0_cs0_modemux), }; /* ssp0_cs1_2 device */ static const unsigned ssp0_cs1_2_pins[] = { 94, 95 }; static struct spear_muxreg ssp0_cs1_2_muxreg[] = { { .reg = PAD_FUNCTION_EN_2, .mask = PMX_SSP0_CS1_2_MASK, .val = PMX_SSP0_CS1_2_MASK, }, { .reg = PAD_DIRECTION_SEL_2, .mask = PMX_SSP0_CS1_2_MASK, .val = PMX_SSP0_CS1_2_MASK, }, }; static struct spear_modemux ssp0_cs1_2_modemux[] = { { .muxregs = ssp0_cs1_2_muxreg, .nmuxregs = ARRAY_SIZE(ssp0_cs1_2_muxreg), }, }; static struct spear_pingroup ssp0_cs1_2_pingroup = { .name = "ssp0_cs1_2_grp", .pins = ssp0_cs1_2_pins, .npins = ARRAY_SIZE(ssp0_cs1_2_pins), 
.modemuxs = ssp0_cs1_2_modemux, .nmodemuxs = ARRAY_SIZE(ssp0_cs1_2_modemux), }; static const char *const ssp0_grps[] = { "ssp0_grp", "ssp0_cs0_grp", "ssp0_cs1_2_grp" }; static struct spear_function ssp0_function = { .name = "ssp0", .groups = ssp0_grps, .ngroups = ARRAY_SIZE(ssp0_grps), }; /* Pad multiplexing for i2s0 device */ static const unsigned i2s0_pins[] = { 104, 105, 106, 107, 108 }; static struct spear_muxreg i2s0_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_I2S0_MASK, .val = PMX_I2S0_MASK, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_I2S0_MASK, .val = PMX_I2S0_MASK, }, }; static struct spear_modemux i2s0_modemux[] = { { .muxregs = i2s0_muxreg, .nmuxregs = ARRAY_SIZE(i2s0_muxreg), }, }; static struct spear_pingroup i2s0_pingroup = { .name = "i2s0_grp", .pins = i2s0_pins, .npins = ARRAY_SIZE(i2s0_pins), .modemuxs = i2s0_modemux, .nmodemuxs = ARRAY_SIZE(i2s0_modemux), }; static const char *const i2s0_grps[] = { "i2s0_grp" }; static struct spear_function i2s0_function = { .name = "i2s0", .groups = i2s0_grps, .ngroups = ARRAY_SIZE(i2s0_grps), }; /* Pad multiplexing for i2s1 device */ static const unsigned i2s1_pins[] = { 0, 1, 2, 3 }; static struct spear_muxreg i2s1_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_I2S1_MASK, .val = PMX_I2S1_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_I2S1_MASK, .val = PMX_I2S1_MASK, }, }; static struct spear_modemux i2s1_modemux[] = { { .muxregs = i2s1_muxreg, .nmuxregs = ARRAY_SIZE(i2s1_muxreg), }, }; static struct spear_pingroup i2s1_pingroup = { .name = "i2s1_grp", .pins = i2s1_pins, .npins = ARRAY_SIZE(i2s1_pins), .modemuxs = i2s1_modemux, .nmodemuxs = ARRAY_SIZE(i2s1_modemux), }; static const char *const i2s1_grps[] = { "i2s1_grp" }; static struct spear_function i2s1_function = { .name = "i2s1", .groups = i2s1_grps, .ngroups = ARRAY_SIZE(i2s1_grps), }; /* Pad multiplexing for clcd device */ static const unsigned clcd_pins[] = { 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142 }; static struct spear_muxreg clcd_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_CLCD1_MASK, .val = PMX_CLCD1_MASK, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_CLCD1_MASK, .val = PMX_CLCD1_MASK, }, }; static struct spear_modemux clcd_modemux[] = { { .muxregs = clcd_muxreg, .nmuxregs = ARRAY_SIZE(clcd_muxreg), }, }; static struct spear_pingroup clcd_pingroup = { .name = "clcd_grp", .pins = clcd_pins, .npins = ARRAY_SIZE(clcd_pins), .modemuxs = clcd_modemux, .nmodemuxs = ARRAY_SIZE(clcd_modemux), }; static const unsigned clcd_high_res_pins[] = { 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53 }; static struct spear_muxreg clcd_high_res_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_CLCD2_MASK, .val = PMX_CLCD2_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_CLCD2_MASK, .val = PMX_CLCD2_MASK, }, }; static struct spear_modemux clcd_high_res_modemux[] = { { .muxregs = clcd_high_res_muxreg, .nmuxregs = ARRAY_SIZE(clcd_high_res_muxreg), }, }; static struct spear_pingroup clcd_high_res_pingroup = { .name = "clcd_high_res_grp", .pins = clcd_high_res_pins, .npins = ARRAY_SIZE(clcd_high_res_pins), .modemuxs = clcd_high_res_modemux, .nmodemuxs = ARRAY_SIZE(clcd_high_res_modemux), }; static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res_grp" }; static struct spear_function clcd_function = { .name = "clcd", .groups = clcd_grps, .ngroups = ARRAY_SIZE(clcd_grps), }; static const unsigned 
arm_gpio_pins[] = { 18, 19, 20, 21, 22, 23, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152 }; static struct spear_muxreg arm_gpio_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_EGPIO_0_GRP_MASK, .val = PMX_EGPIO_0_GRP_MASK, }, { .reg = PAD_FUNCTION_EN_1, .mask = PMX_EGPIO_1_GRP_MASK, .val = PMX_EGPIO_1_GRP_MASK, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_EGPIO_0_GRP_MASK, .val = PMX_EGPIO_0_GRP_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_EGPIO_1_GRP_MASK, .val = PMX_EGPIO_1_GRP_MASK, }, }; static struct spear_modemux arm_gpio_modemux[] = { { .muxregs = arm_gpio_muxreg, .nmuxregs = ARRAY_SIZE(arm_gpio_muxreg), }, }; static struct spear_pingroup arm_gpio_pingroup = { .name = "arm_gpio_grp", .pins = arm_gpio_pins, .npins = ARRAY_SIZE(arm_gpio_pins), .modemuxs = arm_gpio_modemux, .nmodemuxs = ARRAY_SIZE(arm_gpio_modemux), }; static const char *const arm_gpio_grps[] = { "arm_gpio_grp" }; static struct spear_function arm_gpio_function = { .name = "arm_gpio", .groups = arm_gpio_grps, .ngroups = ARRAY_SIZE(arm_gpio_grps), }; /* Pad multiplexing for smi 2 chips device */ static const unsigned smi_2_chips_pins[] = { 153, 154, 155, 156, 157 }; static struct spear_muxreg smi_2_chips_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_SMI_MASK, .val = PMX_SMI_MASK, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_SMI_MASK, .val = PMX_SMI_MASK, }, }; static struct spear_modemux smi_2_chips_modemux[] = { { .muxregs = smi_2_chips_muxreg, .nmuxregs = ARRAY_SIZE(smi_2_chips_muxreg), }, }; static struct spear_pingroup smi_2_chips_pingroup = { .name = "smi_2_chips_grp", .pins = smi_2_chips_pins, .npins = ARRAY_SIZE(smi_2_chips_pins), .modemuxs = smi_2_chips_modemux, .nmodemuxs = ARRAY_SIZE(smi_2_chips_modemux), }; static const unsigned smi_4_chips_pins[] = { 54, 55 }; static struct spear_muxreg smi_4_chips_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_SMI_MASK, .val = PMX_SMI_MASK, }, { .reg = PAD_FUNCTION_EN_1, .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK, .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_SMI_MASK, .val = PMX_SMI_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK, .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK, }, }; static struct spear_modemux smi_4_chips_modemux[] = { { .muxregs = smi_4_chips_muxreg, .nmuxregs = ARRAY_SIZE(smi_4_chips_muxreg), }, }; static struct spear_pingroup smi_4_chips_pingroup = { .name = "smi_4_chips_grp", .pins = smi_4_chips_pins, .npins = ARRAY_SIZE(smi_4_chips_pins), .modemuxs = smi_4_chips_modemux, .nmodemuxs = ARRAY_SIZE(smi_4_chips_modemux), }; static const char *const smi_grps[] = { "smi_2_chips_grp", "smi_4_chips_grp" }; static struct spear_function smi_function = { .name = "smi", .groups = smi_grps, .ngroups = ARRAY_SIZE(smi_grps), }; /* Pad multiplexing for gmii device */ static const unsigned gmii_pins[] = { 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200 }; static struct spear_muxreg gmii_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_GMII_MASK, .val = PMX_GMII_MASK, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_GMII_MASK, .val = PMX_GMII_MASK, }, }; static struct spear_modemux gmii_modemux[] = { { .muxregs = gmii_muxreg, .nmuxregs = ARRAY_SIZE(gmii_muxreg), }, }; static struct spear_pingroup gmii_pingroup = { .name = "gmii_grp", .pins = gmii_pins, .npins = ARRAY_SIZE(gmii_pins), .modemuxs = gmii_modemux, .nmodemuxs = ARRAY_SIZE(gmii_modemux), }; static const char *const 
gmii_grps[] = { "gmii_grp" }; static struct spear_function gmii_function = { .name = "gmii", .groups = gmii_grps, .ngroups = ARRAY_SIZE(gmii_grps), }; /* Pad multiplexing for rgmii device */ static const unsigned rgmii_pins[] = { 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 175, 180, 181, 182, 183, 185, 188, 193, 194, 195, 196, 197, 198, 211, 212 }; static struct spear_muxreg rgmii_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_RGMII_REG0_MASK, .val = 0, }, { .reg = PAD_FUNCTION_EN_1, .mask = PMX_RGMII_REG1_MASK, .val = 0, }, { .reg = PAD_FUNCTION_EN_2, .mask = PMX_RGMII_REG2_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_RGMII_REG0_MASK, .val = PMX_RGMII_REG0_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_RGMII_REG1_MASK, .val = PMX_RGMII_REG1_MASK, }, { .reg = PAD_DIRECTION_SEL_2, .mask = PMX_RGMII_REG2_MASK, .val = PMX_RGMII_REG2_MASK, }, }; static struct spear_modemux rgmii_modemux[] = { { .muxregs = rgmii_muxreg, .nmuxregs = ARRAY_SIZE(rgmii_muxreg), }, }; static struct spear_pingroup rgmii_pingroup = { .name = "rgmii_grp", .pins = rgmii_pins, .npins = ARRAY_SIZE(rgmii_pins), .modemuxs = rgmii_modemux, .nmodemuxs = ARRAY_SIZE(rgmii_modemux), }; static const char *const rgmii_grps[] = { "rgmii_grp" }; static struct spear_function rgmii_function = { .name = "rgmii", .groups = rgmii_grps, .ngroups = ARRAY_SIZE(rgmii_grps), }; /* Pad multiplexing for smii_0_1_2 device */ static const unsigned smii_0_1_2_pins[] = { 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55 }; static struct spear_muxreg smii_0_1_2_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_SMII_0_1_2_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_SMII_0_1_2_MASK, .val = PMX_SMII_0_1_2_MASK, }, }; static struct spear_modemux smii_0_1_2_modemux[] = { { .muxregs = smii_0_1_2_muxreg, .nmuxregs = ARRAY_SIZE(smii_0_1_2_muxreg), }, }; static struct spear_pingroup smii_0_1_2_pingroup = { .name = "smii_0_1_2_grp", .pins = smii_0_1_2_pins, .npins = ARRAY_SIZE(smii_0_1_2_pins), .modemuxs = smii_0_1_2_modemux, .nmodemuxs = ARRAY_SIZE(smii_0_1_2_modemux), }; static const char *const smii_0_1_2_grps[] = { "smii_0_1_2_grp" }; static struct spear_function smii_0_1_2_function = { .name = "smii_0_1_2", .groups = smii_0_1_2_grps, .ngroups = ARRAY_SIZE(smii_0_1_2_grps), }; /* Pad multiplexing for ras_mii_txclk device */ static const unsigned ras_mii_txclk_pins[] = { 98, 99 }; static struct spear_muxreg ras_mii_txclk_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_NFCE2_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_NFCE2_MASK, .val = PMX_NFCE2_MASK, }, }; static struct spear_modemux ras_mii_txclk_modemux[] = { { .muxregs = ras_mii_txclk_muxreg, .nmuxregs = ARRAY_SIZE(ras_mii_txclk_muxreg), }, }; static struct spear_pingroup ras_mii_txclk_pingroup = { .name = "ras_mii_txclk_grp", .pins = ras_mii_txclk_pins, .npins = ARRAY_SIZE(ras_mii_txclk_pins), .modemuxs = ras_mii_txclk_modemux, .nmodemuxs = ARRAY_SIZE(ras_mii_txclk_modemux), }; static const char *const ras_mii_txclk_grps[] = { "ras_mii_txclk_grp" }; static struct spear_function ras_mii_txclk_function = { .name = "ras_mii_txclk", .groups = ras_mii_txclk_grps, .ngroups = ARRAY_SIZE(ras_mii_txclk_grps), }; /* Pad multiplexing for nand 8bit device (cs0 only) */ static const unsigned nand_8bit_pins[] = { 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 
81, 82, 83, 84, 85, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212 }; static struct spear_muxreg nand_8bit_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_NAND8BIT_0_MASK, .val = PMX_NAND8BIT_0_MASK, }, { .reg = PAD_FUNCTION_EN_1, .mask = PMX_NAND8BIT_1_MASK, .val = PMX_NAND8BIT_1_MASK, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_NAND8BIT_0_MASK, .val = PMX_NAND8BIT_0_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_NAND8BIT_1_MASK, .val = PMX_NAND8BIT_1_MASK, }, }; static struct spear_modemux nand_8bit_modemux[] = { { .muxregs = nand_8bit_muxreg, .nmuxregs = ARRAY_SIZE(nand_8bit_muxreg), }, }; static struct spear_pingroup nand_8bit_pingroup = { .name = "nand_8bit_grp", .pins = nand_8bit_pins, .npins = ARRAY_SIZE(nand_8bit_pins), .modemuxs = nand_8bit_modemux, .nmodemuxs = ARRAY_SIZE(nand_8bit_modemux), }; /* Pad multiplexing for nand 16bit device */ static const unsigned nand_16bit_pins[] = { 201, 202, 203, 204, 207, 208, 209, 210 }; static struct spear_muxreg nand_16bit_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_NAND16BIT_1_MASK, .val = PMX_NAND16BIT_1_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_NAND16BIT_1_MASK, .val = PMX_NAND16BIT_1_MASK, }, }; static struct spear_modemux nand_16bit_modemux[] = { { .muxregs = nand_16bit_muxreg, .nmuxregs = ARRAY_SIZE(nand_16bit_muxreg), }, }; static struct spear_pingroup nand_16bit_pingroup = { .name = "nand_16bit_grp", .pins = nand_16bit_pins, .npins = ARRAY_SIZE(nand_16bit_pins), .modemuxs = nand_16bit_modemux, .nmodemuxs = ARRAY_SIZE(nand_16bit_modemux), }; /* Pad multiplexing for nand 4 chips */ static const unsigned nand_4_chips_pins[] = { 205, 206, 211, 212 }; static struct spear_muxreg nand_4_chips_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_NAND_4CHIPS_MASK, .val = PMX_NAND_4CHIPS_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_NAND_4CHIPS_MASK, .val = PMX_NAND_4CHIPS_MASK, }, }; static struct spear_modemux nand_4_chips_modemux[] = { { .muxregs = nand_4_chips_muxreg, .nmuxregs = ARRAY_SIZE(nand_4_chips_muxreg), }, }; static struct spear_pingroup nand_4_chips_pingroup = { .name = "nand_4_chips_grp", .pins = nand_4_chips_pins, .npins = ARRAY_SIZE(nand_4_chips_pins), .modemuxs = nand_4_chips_modemux, .nmodemuxs = ARRAY_SIZE(nand_4_chips_modemux), }; static const char *const nand_grps[] = { "nand_8bit_grp", "nand_16bit_grp", "nand_4_chips_grp" }; static struct spear_function nand_function = { .name = "nand", .groups = nand_grps, .ngroups = ARRAY_SIZE(nand_grps), }; /* Pad multiplexing for keyboard_6x6 device */ static const unsigned keyboard_6x6_pins[] = { 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212 }; static struct spear_muxreg keyboard_6x6_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_KEYBOARD_6X6_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK | PMX_NFCE2_MASK | PMX_NFWPRT1_MASK | PMX_NFWPRT2_MASK, .val = PMX_KEYBOARD_6X6_MASK, }, }; static struct spear_modemux keyboard_6x6_modemux[] = { { .muxregs = keyboard_6x6_muxreg, .nmuxregs = ARRAY_SIZE(keyboard_6x6_muxreg), }, }; static struct spear_pingroup keyboard_6x6_pingroup = { .name = "keyboard_6x6_grp", .pins = keyboard_6x6_pins, .npins = ARRAY_SIZE(keyboard_6x6_pins), .modemuxs = keyboard_6x6_modemux, .nmodemuxs = ARRAY_SIZE(keyboard_6x6_modemux), }; /* Pad multiplexing for keyboard_rowcol6_8 device */ static const unsigned keyboard_rowcol6_8_pins[] = { 24, 25, 26, 27, 28, 29 }; static struct spear_muxreg keyboard_rowcol6_8_muxreg[] = { { .reg = 
PAD_FUNCTION_EN_1, .mask = PMX_KBD_ROWCOL68_MASK, .val = PMX_KBD_ROWCOL68_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_KBD_ROWCOL68_MASK, .val = PMX_KBD_ROWCOL68_MASK, }, }; static struct spear_modemux keyboard_rowcol6_8_modemux[] = { { .muxregs = keyboard_rowcol6_8_muxreg, .nmuxregs = ARRAY_SIZE(keyboard_rowcol6_8_muxreg), }, }; static struct spear_pingroup keyboard_rowcol6_8_pingroup = { .name = "keyboard_rowcol6_8_grp", .pins = keyboard_rowcol6_8_pins, .npins = ARRAY_SIZE(keyboard_rowcol6_8_pins), .modemuxs = keyboard_rowcol6_8_modemux, .nmodemuxs = ARRAY_SIZE(keyboard_rowcol6_8_modemux), }; static const char *const keyboard_grps[] = { "keyboard_6x6_grp", "keyboard_rowcol6_8_grp" }; static struct spear_function keyboard_function = { .name = "keyboard", .groups = keyboard_grps, .ngroups = ARRAY_SIZE(keyboard_grps), }; /* Pad multiplexing for uart0 device */ static const unsigned uart0_pins[] = { 100, 101 }; static struct spear_muxreg uart0_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_UART0_MASK, .val = PMX_UART0_MASK, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_UART0_MASK, .val = PMX_UART0_MASK, }, }; static struct spear_modemux uart0_modemux[] = { { .muxregs = uart0_muxreg, .nmuxregs = ARRAY_SIZE(uart0_muxreg), }, }; static struct spear_pingroup uart0_pingroup = { .name = "uart0_grp", .pins = uart0_pins, .npins = ARRAY_SIZE(uart0_pins), .modemuxs = uart0_modemux, .nmodemuxs = ARRAY_SIZE(uart0_modemux), }; /* Pad multiplexing for uart0_modem device */ static const unsigned uart0_modem_pins[] = { 12, 13, 14, 15, 16, 17 }; static struct spear_muxreg uart0_modem_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_UART0_MODEM_MASK, .val = PMX_UART0_MODEM_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_UART0_MODEM_MASK, .val = PMX_UART0_MODEM_MASK, }, }; static struct spear_modemux uart0_modem_modemux[] = { { .muxregs = uart0_modem_muxreg, .nmuxregs = ARRAY_SIZE(uart0_modem_muxreg), }, }; static struct spear_pingroup uart0_modem_pingroup = { .name = "uart0_modem_grp", .pins = uart0_modem_pins, .npins = ARRAY_SIZE(uart0_modem_pins), .modemuxs = uart0_modem_modemux, .nmodemuxs = ARRAY_SIZE(uart0_modem_modemux), }; static const char *const uart0_grps[] = { "uart0_grp", "uart0_modem_grp" }; static struct spear_function uart0_function = { .name = "uart0", .groups = uart0_grps, .ngroups = ARRAY_SIZE(uart0_grps), }; /* Pad multiplexing for gpt0_tmr0 device */ static const unsigned gpt0_tmr0_pins[] = { 10, 11 }; static struct spear_muxreg gpt0_tmr0_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_GPT0_TMR0_MASK, .val = PMX_GPT0_TMR0_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_GPT0_TMR0_MASK, .val = PMX_GPT0_TMR0_MASK, }, }; static struct spear_modemux gpt0_tmr0_modemux[] = { { .muxregs = gpt0_tmr0_muxreg, .nmuxregs = ARRAY_SIZE(gpt0_tmr0_muxreg), }, }; static struct spear_pingroup gpt0_tmr0_pingroup = { .name = "gpt0_tmr0_grp", .pins = gpt0_tmr0_pins, .npins = ARRAY_SIZE(gpt0_tmr0_pins), .modemuxs = gpt0_tmr0_modemux, .nmodemuxs = ARRAY_SIZE(gpt0_tmr0_modemux), }; /* Pad multiplexing for gpt0_tmr1 device */ static const unsigned gpt0_tmr1_pins[] = { 8, 9 }; static struct spear_muxreg gpt0_tmr1_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_GPT0_TMR1_MASK, .val = PMX_GPT0_TMR1_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_GPT0_TMR1_MASK, .val = PMX_GPT0_TMR1_MASK, }, }; static struct spear_modemux gpt0_tmr1_modemux[] = { { .muxregs = gpt0_tmr1_muxreg, .nmuxregs = ARRAY_SIZE(gpt0_tmr1_muxreg), }, }; static struct spear_pingroup gpt0_tmr1_pingroup = { .name = 
"gpt0_tmr1_grp", .pins = gpt0_tmr1_pins, .npins = ARRAY_SIZE(gpt0_tmr1_pins), .modemuxs = gpt0_tmr1_modemux, .nmodemuxs = ARRAY_SIZE(gpt0_tmr1_modemux), }; static const char *const gpt0_grps[] = { "gpt0_tmr0_grp", "gpt0_tmr1_grp" }; static struct spear_function gpt0_function = { .name = "gpt0", .groups = gpt0_grps, .ngroups = ARRAY_SIZE(gpt0_grps), }; /* Pad multiplexing for gpt1_tmr0 device */ static const unsigned gpt1_tmr0_pins[] = { 6, 7 }; static struct spear_muxreg gpt1_tmr0_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_GPT1_TMR0_MASK, .val = PMX_GPT1_TMR0_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_GPT1_TMR0_MASK, .val = PMX_GPT1_TMR0_MASK, }, }; static struct spear_modemux gpt1_tmr0_modemux[] = { { .muxregs = gpt1_tmr0_muxreg, .nmuxregs = ARRAY_SIZE(gpt1_tmr0_muxreg), }, }; static struct spear_pingroup gpt1_tmr0_pingroup = { .name = "gpt1_tmr0_grp", .pins = gpt1_tmr0_pins, .npins = ARRAY_SIZE(gpt1_tmr0_pins), .modemuxs = gpt1_tmr0_modemux, .nmodemuxs = ARRAY_SIZE(gpt1_tmr0_modemux), }; /* Pad multiplexing for gpt1_tmr1 device */ static const unsigned gpt1_tmr1_pins[] = { 4, 5 }; static struct spear_muxreg gpt1_tmr1_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_GPT1_TMR1_MASK, .val = PMX_GPT1_TMR1_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_GPT1_TMR1_MASK, .val = PMX_GPT1_TMR1_MASK, }, }; static struct spear_modemux gpt1_tmr1_modemux[] = { { .muxregs = gpt1_tmr1_muxreg, .nmuxregs = ARRAY_SIZE(gpt1_tmr1_muxreg), }, }; static struct spear_pingroup gpt1_tmr1_pingroup = { .name = "gpt1_tmr1_grp", .pins = gpt1_tmr1_pins, .npins = ARRAY_SIZE(gpt1_tmr1_pins), .modemuxs = gpt1_tmr1_modemux, .nmodemuxs = ARRAY_SIZE(gpt1_tmr1_modemux), }; static const char *const gpt1_grps[] = { "gpt1_tmr1_grp", "gpt1_tmr0_grp" }; static struct spear_function gpt1_function = { .name = "gpt1", .groups = gpt1_grps, .ngroups = ARRAY_SIZE(gpt1_grps), }; /* Pad multiplexing for mcif device */ static const unsigned mcif_pins[] = { 86, 87, 88, 89, 90, 91, 92, 93, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245 }; #define MCIF_MUXREG \ { \ .reg = PAD_FUNCTION_EN_0, \ .mask = PMX_MCI_DATA8_15_MASK, \ .val = PMX_MCI_DATA8_15_MASK, \ }, { \ .reg = PAD_FUNCTION_EN_1, \ .mask = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \ PMX_NFWPRT2_MASK, \ .val = PMX_MCIFALL_1_MASK, \ }, { \ .reg = PAD_FUNCTION_EN_2, \ .mask = PMX_MCIFALL_2_MASK, \ .val = PMX_MCIFALL_2_MASK, \ }, { \ .reg = PAD_DIRECTION_SEL_0, \ .mask = PMX_MCI_DATA8_15_MASK, \ .val = PMX_MCI_DATA8_15_MASK, \ }, { \ .reg = PAD_DIRECTION_SEL_1, \ .mask = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \ PMX_NFWPRT2_MASK, \ .val = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \ PMX_NFWPRT2_MASK, \ }, { \ .reg = PAD_DIRECTION_SEL_2, \ .mask = PMX_MCIFALL_2_MASK, \ .val = PMX_MCIFALL_2_MASK, \ } /* sdhci device */ static struct spear_muxreg sdhci_muxreg[] = { MCIF_MUXREG, { .reg = PERIP_CFG, .mask = MCIF_SEL_MASK, .val = MCIF_SEL_SD, }, }; static struct spear_modemux sdhci_modemux[] = { { .muxregs = sdhci_muxreg, .nmuxregs = ARRAY_SIZE(sdhci_muxreg), }, }; static struct spear_pingroup sdhci_pingroup = { .name = "sdhci_grp", .pins = mcif_pins, .npins = ARRAY_SIZE(mcif_pins), .modemuxs = sdhci_modemux, .nmodemuxs = ARRAY_SIZE(sdhci_modemux), }; static const char *const sdhci_grps[] = { "sdhci_grp" }; static struct spear_function sdhci_function = { .name = "sdhci", .groups = sdhci_grps, .ngroups = ARRAY_SIZE(sdhci_grps), }; /* cf device */ static 
struct spear_muxreg cf_muxreg[] = { MCIF_MUXREG, { .reg = PERIP_CFG, .mask = MCIF_SEL_MASK, .val = MCIF_SEL_CF, }, }; static struct spear_modemux cf_modemux[] = { { .muxregs = cf_muxreg, .nmuxregs = ARRAY_SIZE(cf_muxreg), }, }; static struct spear_pingroup cf_pingroup = { .name = "cf_grp", .pins = mcif_pins, .npins = ARRAY_SIZE(mcif_pins), .modemuxs = cf_modemux, .nmodemuxs = ARRAY_SIZE(cf_modemux), }; static const char *const cf_grps[] = { "cf_grp" }; static struct spear_function cf_function = { .name = "cf", .groups = cf_grps, .ngroups = ARRAY_SIZE(cf_grps), }; /* xd device */ static struct spear_muxreg xd_muxreg[] = { MCIF_MUXREG, { .reg = PERIP_CFG, .mask = MCIF_SEL_MASK, .val = MCIF_SEL_XD, }, }; static struct spear_modemux xd_modemux[] = { { .muxregs = xd_muxreg, .nmuxregs = ARRAY_SIZE(xd_muxreg), }, }; static struct spear_pingroup xd_pingroup = { .name = "xd_grp", .pins = mcif_pins, .npins = ARRAY_SIZE(mcif_pins), .modemuxs = xd_modemux, .nmodemuxs = ARRAY_SIZE(xd_modemux), }; static const char *const xd_grps[] = { "xd_grp" }; static struct spear_function xd_function = { .name = "xd", .groups = xd_grps, .ngroups = ARRAY_SIZE(xd_grps), }; /* Pad multiplexing for touch_xy device */ static const unsigned touch_xy_pins[] = { 97 }; static struct spear_muxreg touch_xy_muxreg[] = { { .reg = PAD_FUNCTION_EN_2, .mask = PMX_TOUCH_XY_MASK, .val = PMX_TOUCH_XY_MASK, }, { .reg = PAD_DIRECTION_SEL_2, .mask = PMX_TOUCH_XY_MASK, .val = PMX_TOUCH_XY_MASK, }, }; static struct spear_modemux touch_xy_modemux[] = { { .muxregs = touch_xy_muxreg, .nmuxregs = ARRAY_SIZE(touch_xy_muxreg), }, }; static struct spear_pingroup touch_xy_pingroup = { .name = "touch_xy_grp", .pins = touch_xy_pins, .npins = ARRAY_SIZE(touch_xy_pins), .modemuxs = touch_xy_modemux, .nmodemuxs = ARRAY_SIZE(touch_xy_modemux), }; static const char *const touch_xy_grps[] = { "touch_xy_grp" }; static struct spear_function touch_xy_function = { .name = "touchscreen", .groups = touch_xy_grps, .ngroups = ARRAY_SIZE(touch_xy_grps), }; /* Pad multiplexing for uart1 device */ /* Muxed with I2C */ static const unsigned uart1_dis_i2c_pins[] = { 102, 103 }; static struct spear_muxreg uart1_dis_i2c_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_I2C0_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_I2C0_MASK, .val = PMX_I2C0_MASK, }, }; static struct spear_modemux uart1_dis_i2c_modemux[] = { { .muxregs = uart1_dis_i2c_muxreg, .nmuxregs = ARRAY_SIZE(uart1_dis_i2c_muxreg), }, }; static struct spear_pingroup uart_1_dis_i2c_pingroup = { .name = "uart1_disable_i2c_grp", .pins = uart1_dis_i2c_pins, .npins = ARRAY_SIZE(uart1_dis_i2c_pins), .modemuxs = uart1_dis_i2c_modemux, .nmodemuxs = ARRAY_SIZE(uart1_dis_i2c_modemux), }; /* Muxed with SD/MMC */ static const unsigned uart1_dis_sd_pins[] = { 214, 215 }; static struct spear_muxreg uart1_dis_sd_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_MCIDATA1_MASK | PMX_MCIDATA2_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_MCIDATA1_MASK | PMX_MCIDATA2_MASK, .val = PMX_MCIDATA1_MASK | PMX_MCIDATA2_MASK, }, }; static struct spear_modemux uart1_dis_sd_modemux[] = { { .muxregs = uart1_dis_sd_muxreg, .nmuxregs = ARRAY_SIZE(uart1_dis_sd_muxreg), }, }; static struct spear_pingroup uart_1_dis_sd_pingroup = { .name = "uart1_disable_sd_grp", .pins = uart1_dis_sd_pins, .npins = ARRAY_SIZE(uart1_dis_sd_pins), .modemuxs = uart1_dis_sd_modemux, .nmodemuxs = ARRAY_SIZE(uart1_dis_sd_modemux), }; static const char *const uart1_grps[] = { "uart1_disable_i2c_grp", "uart1_disable_sd_grp" }; static 
struct spear_function uart1_function = { .name = "uart1", .groups = uart1_grps, .ngroups = ARRAY_SIZE(uart1_grps), }; /* Pad multiplexing for uart2_3 device */ static const unsigned uart2_3_pins[] = { 104, 105, 106, 107 }; static struct spear_muxreg uart2_3_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_I2S0_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_I2S0_MASK, .val = PMX_I2S0_MASK, }, }; static struct spear_modemux uart2_3_modemux[] = { { .muxregs = uart2_3_muxreg, .nmuxregs = ARRAY_SIZE(uart2_3_muxreg), }, }; static struct spear_pingroup uart_2_3_pingroup = { .name = "uart2_3_grp", .pins = uart2_3_pins, .npins = ARRAY_SIZE(uart2_3_pins), .modemuxs = uart2_3_modemux, .nmodemuxs = ARRAY_SIZE(uart2_3_modemux), }; static const char *const uart2_3_grps[] = { "uart2_3_grp" }; static struct spear_function uart2_3_function = { .name = "uart2_3", .groups = uart2_3_grps, .ngroups = ARRAY_SIZE(uart2_3_grps), }; /* Pad multiplexing for uart4 device */ static const unsigned uart4_pins[] = { 108, 113 }; static struct spear_muxreg uart4_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK, .val = PMX_I2S0_MASK | PMX_CLCD1_MASK, }, }; static struct spear_modemux uart4_modemux[] = { { .muxregs = uart4_muxreg, .nmuxregs = ARRAY_SIZE(uart4_muxreg), }, }; static struct spear_pingroup uart_4_pingroup = { .name = "uart4_grp", .pins = uart4_pins, .npins = ARRAY_SIZE(uart4_pins), .modemuxs = uart4_modemux, .nmodemuxs = ARRAY_SIZE(uart4_modemux), }; static const char *const uart4_grps[] = { "uart4_grp" }; static struct spear_function uart4_function = { .name = "uart4", .groups = uart4_grps, .ngroups = ARRAY_SIZE(uart4_grps), }; /* Pad multiplexing for uart5 device */ static const unsigned uart5_pins[] = { 114, 115 }; static struct spear_muxreg uart5_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_CLCD1_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_CLCD1_MASK, .val = PMX_CLCD1_MASK, }, }; static struct spear_modemux uart5_modemux[] = { { .muxregs = uart5_muxreg, .nmuxregs = ARRAY_SIZE(uart5_muxreg), }, }; static struct spear_pingroup uart_5_pingroup = { .name = "uart5_grp", .pins = uart5_pins, .npins = ARRAY_SIZE(uart5_pins), .modemuxs = uart5_modemux, .nmodemuxs = ARRAY_SIZE(uart5_modemux), }; static const char *const uart5_grps[] = { "uart5_grp" }; static struct spear_function uart5_function = { .name = "uart5", .groups = uart5_grps, .ngroups = ARRAY_SIZE(uart5_grps), }; /* Pad multiplexing for rs485_0_1_tdm_0_1 device */ static const unsigned rs485_0_1_tdm_0_1_pins[] = { 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137 }; static struct spear_muxreg rs485_0_1_tdm_0_1_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_CLCD1_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_CLCD1_MASK, .val = PMX_CLCD1_MASK, }, }; static struct spear_modemux rs485_0_1_tdm_0_1_modemux[] = { { .muxregs = rs485_0_1_tdm_0_1_muxreg, .nmuxregs = ARRAY_SIZE(rs485_0_1_tdm_0_1_muxreg), }, }; static struct spear_pingroup rs485_0_1_tdm_0_1_pingroup = { .name = "rs485_0_1_tdm_0_1_grp", .pins = rs485_0_1_tdm_0_1_pins, .npins = ARRAY_SIZE(rs485_0_1_tdm_0_1_pins), .modemuxs = rs485_0_1_tdm_0_1_modemux, .nmodemuxs = ARRAY_SIZE(rs485_0_1_tdm_0_1_modemux), }; static const char *const rs485_0_1_tdm_0_1_grps[] = { "rs485_0_1_tdm_0_1_grp" }; static struct spear_function rs485_0_1_tdm_0_1_function = { .name = 
"rs485_0_1_tdm_0_1", .groups = rs485_0_1_tdm_0_1_grps, .ngroups = ARRAY_SIZE(rs485_0_1_tdm_0_1_grps), }; /* Pad multiplexing for i2c_1_2 device */ static const unsigned i2c_1_2_pins[] = { 138, 139, 140, 141 }; static struct spear_muxreg i2c_1_2_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_CLCD1_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_CLCD1_MASK, .val = PMX_CLCD1_MASK, }, }; static struct spear_modemux i2c_1_2_modemux[] = { { .muxregs = i2c_1_2_muxreg, .nmuxregs = ARRAY_SIZE(i2c_1_2_muxreg), }, }; static struct spear_pingroup i2c_1_2_pingroup = { .name = "i2c_1_2_grp", .pins = i2c_1_2_pins, .npins = ARRAY_SIZE(i2c_1_2_pins), .modemuxs = i2c_1_2_modemux, .nmodemuxs = ARRAY_SIZE(i2c_1_2_modemux), }; static const char *const i2c_1_2_grps[] = { "i2c_1_2_grp" }; static struct spear_function i2c_1_2_function = { .name = "i2c_1_2", .groups = i2c_1_2_grps, .ngroups = ARRAY_SIZE(i2c_1_2_grps), }; /* Pad multiplexing for i2c3_dis_smi_clcd device */ /* Muxed with SMI & CLCD */ static const unsigned i2c3_dis_smi_clcd_pins[] = { 142, 153 }; static struct spear_muxreg i2c3_dis_smi_clcd_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_CLCD1_MASK | PMX_SMI_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_CLCD1_MASK | PMX_SMI_MASK, .val = PMX_CLCD1_MASK | PMX_SMI_MASK, }, }; static struct spear_modemux i2c3_dis_smi_clcd_modemux[] = { { .muxregs = i2c3_dis_smi_clcd_muxreg, .nmuxregs = ARRAY_SIZE(i2c3_dis_smi_clcd_muxreg), }, }; static struct spear_pingroup i2c3_dis_smi_clcd_pingroup = { .name = "i2c3_dis_smi_clcd_grp", .pins = i2c3_dis_smi_clcd_pins, .npins = ARRAY_SIZE(i2c3_dis_smi_clcd_pins), .modemuxs = i2c3_dis_smi_clcd_modemux, .nmodemuxs = ARRAY_SIZE(i2c3_dis_smi_clcd_modemux), }; /* Pad multiplexing for i2c3_dis_sd_i2s0 device */ /* Muxed with SD/MMC & I2S1 */ static const unsigned i2c3_dis_sd_i2s0_pins[] = { 0, 216 }; static struct spear_muxreg i2c3_dis_sd_i2s0_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK, .val = PMX_I2S1_MASK | PMX_MCIDATA3_MASK, }, }; static struct spear_modemux i2c3_dis_sd_i2s0_modemux[] = { { .muxregs = i2c3_dis_sd_i2s0_muxreg, .nmuxregs = ARRAY_SIZE(i2c3_dis_sd_i2s0_muxreg), }, }; static struct spear_pingroup i2c3_dis_sd_i2s0_pingroup = { .name = "i2c3_dis_sd_i2s0_grp", .pins = i2c3_dis_sd_i2s0_pins, .npins = ARRAY_SIZE(i2c3_dis_sd_i2s0_pins), .modemuxs = i2c3_dis_sd_i2s0_modemux, .nmodemuxs = ARRAY_SIZE(i2c3_dis_sd_i2s0_modemux), }; static const char *const i2c3_grps[] = { "i2c3_dis_smi_clcd_grp", "i2c3_dis_sd_i2s0_grp" }; static struct spear_function i2c3_unction = { .name = "i2c3_i2s1", .groups = i2c3_grps, .ngroups = ARRAY_SIZE(i2c3_grps), }; /* Pad multiplexing for i2c_4_5_dis_smi device */ /* Muxed with SMI */ static const unsigned i2c_4_5_dis_smi_pins[] = { 154, 155, 156, 157 }; static struct spear_muxreg i2c_4_5_dis_smi_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_SMI_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_SMI_MASK, .val = PMX_SMI_MASK, }, }; static struct spear_modemux i2c_4_5_dis_smi_modemux[] = { { .muxregs = i2c_4_5_dis_smi_muxreg, .nmuxregs = ARRAY_SIZE(i2c_4_5_dis_smi_muxreg), }, }; static struct spear_pingroup i2c_4_5_dis_smi_pingroup = { .name = "i2c_4_5_dis_smi_grp", .pins = i2c_4_5_dis_smi_pins, .npins = ARRAY_SIZE(i2c_4_5_dis_smi_pins), .modemuxs = i2c_4_5_dis_smi_modemux, .nmodemuxs = ARRAY_SIZE(i2c_4_5_dis_smi_modemux), }; /* Pad multiplexing for i2c4_dis_sd 
device */ /* Muxed with SD/MMC */ static const unsigned i2c4_dis_sd_pins[] = { 217, 218 }; static struct spear_muxreg i2c4_dis_sd_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_MCIDATA4_MASK, .val = 0, }, { .reg = PAD_FUNCTION_EN_2, .mask = PMX_MCIDATA5_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_MCIDATA4_MASK, .val = PMX_MCIDATA4_MASK, }, { .reg = PAD_DIRECTION_SEL_2, .mask = PMX_MCIDATA5_MASK, .val = PMX_MCIDATA5_MASK, }, }; static struct spear_modemux i2c4_dis_sd_modemux[] = { { .muxregs = i2c4_dis_sd_muxreg, .nmuxregs = ARRAY_SIZE(i2c4_dis_sd_muxreg), }, }; static struct spear_pingroup i2c4_dis_sd_pingroup = { .name = "i2c4_dis_sd_grp", .pins = i2c4_dis_sd_pins, .npins = ARRAY_SIZE(i2c4_dis_sd_pins), .modemuxs = i2c4_dis_sd_modemux, .nmodemuxs = ARRAY_SIZE(i2c4_dis_sd_modemux), }; /* Pad multiplexing for i2c5_dis_sd device */ /* Muxed with SD/MMC */ static const unsigned i2c5_dis_sd_pins[] = { 219, 220 }; static struct spear_muxreg i2c5_dis_sd_muxreg[] = { { .reg = PAD_FUNCTION_EN_2, .mask = PMX_MCIDATA6_MASK | PMX_MCIDATA7_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_2, .mask = PMX_MCIDATA6_MASK | PMX_MCIDATA7_MASK, .val = PMX_MCIDATA6_MASK | PMX_MCIDATA7_MASK, }, }; static struct spear_modemux i2c5_dis_sd_modemux[] = { { .muxregs = i2c5_dis_sd_muxreg, .nmuxregs = ARRAY_SIZE(i2c5_dis_sd_muxreg), }, }; static struct spear_pingroup i2c5_dis_sd_pingroup = { .name = "i2c5_dis_sd_grp", .pins = i2c5_dis_sd_pins, .npins = ARRAY_SIZE(i2c5_dis_sd_pins), .modemuxs = i2c5_dis_sd_modemux, .nmodemuxs = ARRAY_SIZE(i2c5_dis_sd_modemux), }; static const char *const i2c_4_5_grps[] = { "i2c5_dis_sd_grp", "i2c4_dis_sd_grp", "i2c_4_5_dis_smi_grp" }; static struct spear_function i2c_4_5_function = { .name = "i2c_4_5", .groups = i2c_4_5_grps, .ngroups = ARRAY_SIZE(i2c_4_5_grps), }; /* Pad multiplexing for i2c_6_7_dis_kbd device */ /* Muxed with KBD */ static const unsigned i2c_6_7_dis_kbd_pins[] = { 207, 208, 209, 210 }; static struct spear_muxreg i2c_6_7_dis_kbd_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_KBD_ROWCOL25_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_KBD_ROWCOL25_MASK, .val = PMX_KBD_ROWCOL25_MASK, }, }; static struct spear_modemux i2c_6_7_dis_kbd_modemux[] = { { .muxregs = i2c_6_7_dis_kbd_muxreg, .nmuxregs = ARRAY_SIZE(i2c_6_7_dis_kbd_muxreg), }, }; static struct spear_pingroup i2c_6_7_dis_kbd_pingroup = { .name = "i2c_6_7_dis_kbd_grp", .pins = i2c_6_7_dis_kbd_pins, .npins = ARRAY_SIZE(i2c_6_7_dis_kbd_pins), .modemuxs = i2c_6_7_dis_kbd_modemux, .nmodemuxs = ARRAY_SIZE(i2c_6_7_dis_kbd_modemux), }; /* Pad multiplexing for i2c6_dis_sd device */ /* Muxed with SD/MMC */ static const unsigned i2c6_dis_sd_pins[] = { 236, 237 }; static struct spear_muxreg i2c6_dis_sd_muxreg[] = { { .reg = PAD_FUNCTION_EN_2, .mask = PMX_MCIIORDRE_MASK | PMX_MCIIOWRWE_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_2, .mask = PMX_MCIIORDRE_MASK | PMX_MCIIOWRWE_MASK, .val = PMX_MCIIORDRE_MASK | PMX_MCIIOWRWE_MASK, }, }; static struct spear_modemux i2c6_dis_sd_modemux[] = { { .muxregs = i2c6_dis_sd_muxreg, .nmuxregs = ARRAY_SIZE(i2c6_dis_sd_muxreg), }, }; static struct spear_pingroup i2c6_dis_sd_pingroup = { .name = "i2c6_dis_sd_grp", .pins = i2c6_dis_sd_pins, .npins = ARRAY_SIZE(i2c6_dis_sd_pins), .modemuxs = i2c6_dis_sd_modemux, .nmodemuxs = ARRAY_SIZE(i2c6_dis_sd_modemux), }; /* Pad multiplexing for i2c7_dis_sd device */ static const unsigned i2c7_dis_sd_pins[] = { 238, 239 }; static struct spear_muxreg i2c7_dis_sd_muxreg[] = { { .reg = PAD_FUNCTION_EN_2, .mask = 
PMX_MCIRESETCF_MASK | PMX_MCICS0CE_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_2, .mask = PMX_MCIRESETCF_MASK | PMX_MCICS0CE_MASK, .val = PMX_MCIRESETCF_MASK | PMX_MCICS0CE_MASK, }, }; static struct spear_modemux i2c7_dis_sd_modemux[] = { { .muxregs = i2c7_dis_sd_muxreg, .nmuxregs = ARRAY_SIZE(i2c7_dis_sd_muxreg), }, }; static struct spear_pingroup i2c7_dis_sd_pingroup = { .name = "i2c7_dis_sd_grp", .pins = i2c7_dis_sd_pins, .npins = ARRAY_SIZE(i2c7_dis_sd_pins), .modemuxs = i2c7_dis_sd_modemux, .nmodemuxs = ARRAY_SIZE(i2c7_dis_sd_modemux), }; static const char *const i2c_6_7_grps[] = { "i2c6_dis_sd_grp", "i2c7_dis_sd_grp", "i2c_6_7_dis_kbd_grp" }; static struct spear_function i2c_6_7_function = { .name = "i2c_6_7", .groups = i2c_6_7_grps, .ngroups = ARRAY_SIZE(i2c_6_7_grps), }; /* Pad multiplexing for can0_dis_nor device */ /* Muxed with NOR */ static const unsigned can0_dis_nor_pins[] = { 56, 57 }; static struct spear_muxreg can0_dis_nor_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_NFRSTPWDWN2_MASK, .val = 0, }, { .reg = PAD_FUNCTION_EN_1, .mask = PMX_NFRSTPWDWN3_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_NFRSTPWDWN2_MASK, .val = PMX_NFRSTPWDWN2_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_NFRSTPWDWN3_MASK, .val = PMX_NFRSTPWDWN3_MASK, }, }; static struct spear_modemux can0_dis_nor_modemux[] = { { .muxregs = can0_dis_nor_muxreg, .nmuxregs = ARRAY_SIZE(can0_dis_nor_muxreg), }, }; static struct spear_pingroup can0_dis_nor_pingroup = { .name = "can0_dis_nor_grp", .pins = can0_dis_nor_pins, .npins = ARRAY_SIZE(can0_dis_nor_pins), .modemuxs = can0_dis_nor_modemux, .nmodemuxs = ARRAY_SIZE(can0_dis_nor_modemux), }; /* Pad multiplexing for can0_dis_sd device */ /* Muxed with SD/MMC */ static const unsigned can0_dis_sd_pins[] = { 240, 241 }; static struct spear_muxreg can0_dis_sd_muxreg[] = { { .reg = PAD_FUNCTION_EN_2, .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_2, .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK, .val = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK, }, }; static struct spear_modemux can0_dis_sd_modemux[] = { { .muxregs = can0_dis_sd_muxreg, .nmuxregs = ARRAY_SIZE(can0_dis_sd_muxreg), }, }; static struct spear_pingroup can0_dis_sd_pingroup = { .name = "can0_dis_sd_grp", .pins = can0_dis_sd_pins, .npins = ARRAY_SIZE(can0_dis_sd_pins), .modemuxs = can0_dis_sd_modemux, .nmodemuxs = ARRAY_SIZE(can0_dis_sd_modemux), }; static const char *const can0_grps[] = { "can0_dis_nor_grp", "can0_dis_sd_grp" }; static struct spear_function can0_function = { .name = "can0", .groups = can0_grps, .ngroups = ARRAY_SIZE(can0_grps), }; /* Pad multiplexing for can1_dis_sd device */ /* Muxed with SD/MMC */ static const unsigned can1_dis_sd_pins[] = { 242, 243 }; static struct spear_muxreg can1_dis_sd_muxreg[] = { { .reg = PAD_FUNCTION_EN_2, .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_2, .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK, .val = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK, }, }; static struct spear_modemux can1_dis_sd_modemux[] = { { .muxregs = can1_dis_sd_muxreg, .nmuxregs = ARRAY_SIZE(can1_dis_sd_muxreg), }, }; static struct spear_pingroup can1_dis_sd_pingroup = { .name = "can1_dis_sd_grp", .pins = can1_dis_sd_pins, .npins = ARRAY_SIZE(can1_dis_sd_pins), .modemuxs = can1_dis_sd_modemux, .nmodemuxs = ARRAY_SIZE(can1_dis_sd_modemux), }; /* Pad multiplexing for can1_dis_kbd device */ /* Muxed with KBD */ static const unsigned can1_dis_kbd_pins[] = { 201, 202 }; static struct spear_muxreg 
can1_dis_kbd_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_KBD_ROWCOL25_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_KBD_ROWCOL25_MASK, .val = PMX_KBD_ROWCOL25_MASK, }, }; static struct spear_modemux can1_dis_kbd_modemux[] = { { .muxregs = can1_dis_kbd_muxreg, .nmuxregs = ARRAY_SIZE(can1_dis_kbd_muxreg), }, }; static struct spear_pingroup can1_dis_kbd_pingroup = { .name = "can1_dis_kbd_grp", .pins = can1_dis_kbd_pins, .npins = ARRAY_SIZE(can1_dis_kbd_pins), .modemuxs = can1_dis_kbd_modemux, .nmodemuxs = ARRAY_SIZE(can1_dis_kbd_modemux), }; static const char *const can1_grps[] = { "can1_dis_sd_grp", "can1_dis_kbd_grp" }; static struct spear_function can1_function = { .name = "can1", .groups = can1_grps, .ngroups = ARRAY_SIZE(can1_grps), }; /* Pad multiplexing for (ras-ip) pci device */ static const unsigned pci_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 }; static struct spear_muxreg pci_muxreg[] = { { .reg = PAD_FUNCTION_EN_0, .mask = PMX_MCI_DATA8_15_MASK, .val = 0, }, { .reg = PAD_FUNCTION_EN_1, .mask = PMX_PCI_REG1_MASK, .val = 0, }, { .reg = PAD_FUNCTION_EN_2, .mask = PMX_PCI_REG2_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_0, .mask = PMX_MCI_DATA8_15_MASK, .val = PMX_MCI_DATA8_15_MASK, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_PCI_REG1_MASK, .val = PMX_PCI_REG1_MASK, }, { .reg = PAD_DIRECTION_SEL_2, .mask = PMX_PCI_REG2_MASK, .val = PMX_PCI_REG2_MASK, }, }; static struct spear_modemux pci_modemux[] = { { .muxregs = pci_muxreg, .nmuxregs = ARRAY_SIZE(pci_muxreg), }, }; static struct spear_pingroup pci_pingroup = { .name = "pci_grp", .pins = pci_pins, .npins = ARRAY_SIZE(pci_pins), .modemuxs = pci_modemux, .nmodemuxs = ARRAY_SIZE(pci_modemux), }; static const char *const pci_grps[] = { "pci_grp" }; static struct spear_function pci_function = { .name = "pci", .groups = pci_grps, .ngroups = ARRAY_SIZE(pci_grps), }; /* pad multiplexing for (fix-part) pcie0 device */ static struct spear_muxreg pcie0_muxreg[] = { { .reg = PCIE_SATA_CFG, .mask = PCIE_CFG_VAL(0), .val = PCIE_CFG_VAL(0), }, }; static struct spear_modemux pcie0_modemux[] = { { .muxregs = pcie0_muxreg, .nmuxregs = ARRAY_SIZE(pcie0_muxreg), }, }; static struct spear_pingroup pcie0_pingroup = { .name = "pcie0_grp", .modemuxs = pcie0_modemux, .nmodemuxs = ARRAY_SIZE(pcie0_modemux), }; /* pad multiplexing for (fix-part) pcie1 device */ static struct spear_muxreg pcie1_muxreg[] = { { .reg = PCIE_SATA_CFG, .mask = PCIE_CFG_VAL(1), .val = PCIE_CFG_VAL(1), }, }; static struct spear_modemux pcie1_modemux[] = { { .muxregs = pcie1_muxreg, .nmuxregs = ARRAY_SIZE(pcie1_muxreg), }, }; static struct spear_pingroup pcie1_pingroup = { .name = "pcie1_grp", .modemuxs = pcie1_modemux, .nmodemuxs = ARRAY_SIZE(pcie1_modemux), }; /* pad multiplexing for (fix-part) pcie2 device */ static struct spear_muxreg pcie2_muxreg[] = { { .reg = PCIE_SATA_CFG, .mask = PCIE_CFG_VAL(2), .val = PCIE_CFG_VAL(2), }, }; static struct spear_modemux pcie2_modemux[] = { { .muxregs = pcie2_muxreg, .nmuxregs = ARRAY_SIZE(pcie2_muxreg), }, }; static struct spear_pingroup pcie2_pingroup = { .name = "pcie2_grp", .modemuxs = pcie2_modemux, .nmodemuxs = ARRAY_SIZE(pcie2_modemux), }; static const char *const pcie_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" }; static struct spear_function pcie_function = { .name = "pci_express", .groups = 
pcie_grps, .ngroups = ARRAY_SIZE(pcie_grps), }; /* pad multiplexing for sata0 device */ static struct spear_muxreg sata0_muxreg[] = { { .reg = PCIE_SATA_CFG, .mask = SATA_CFG_VAL(0), .val = SATA_CFG_VAL(0), }, }; static struct spear_modemux sata0_modemux[] = { { .muxregs = sata0_muxreg, .nmuxregs = ARRAY_SIZE(sata0_muxreg), }, }; static struct spear_pingroup sata0_pingroup = { .name = "sata0_grp", .modemuxs = sata0_modemux, .nmodemuxs = ARRAY_SIZE(sata0_modemux), }; /* pad multiplexing for sata1 device */ static struct spear_muxreg sata1_muxreg[] = { { .reg = PCIE_SATA_CFG, .mask = SATA_CFG_VAL(1), .val = SATA_CFG_VAL(1), }, }; static struct spear_modemux sata1_modemux[] = { { .muxregs = sata1_muxreg, .nmuxregs = ARRAY_SIZE(sata1_muxreg), }, }; static struct spear_pingroup sata1_pingroup = { .name = "sata1_grp", .modemuxs = sata1_modemux, .nmodemuxs = ARRAY_SIZE(sata1_modemux), }; /* pad multiplexing for sata2 device */ static struct spear_muxreg sata2_muxreg[] = { { .reg = PCIE_SATA_CFG, .mask = SATA_CFG_VAL(2), .val = SATA_CFG_VAL(2), }, }; static struct spear_modemux sata2_modemux[] = { { .muxregs = sata2_muxreg, .nmuxregs = ARRAY_SIZE(sata2_muxreg), }, }; static struct spear_pingroup sata2_pingroup = { .name = "sata2_grp", .modemuxs = sata2_modemux, .nmodemuxs = ARRAY_SIZE(sata2_modemux), }; static const char *const sata_grps[] = { "sata0_grp", "sata1_grp", "sata2_grp" }; static struct spear_function sata_function = { .name = "sata", .groups = sata_grps, .ngroups = ARRAY_SIZE(sata_grps), }; /* Pad multiplexing for ssp1_dis_kbd device */ static const unsigned ssp1_dis_kbd_pins[] = { 203, 204, 205, 206 }; static struct spear_muxreg ssp1_dis_kbd_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK | PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK | PMX_NFCE2_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_1, .mask = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK | PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK | PMX_NFCE2_MASK, .val = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK | PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK | PMX_NFCE2_MASK, }, }; static struct spear_modemux ssp1_dis_kbd_modemux[] = { { .muxregs = ssp1_dis_kbd_muxreg, .nmuxregs = ARRAY_SIZE(ssp1_dis_kbd_muxreg), }, }; static struct spear_pingroup ssp1_dis_kbd_pingroup = { .name = "ssp1_dis_kbd_grp", .pins = ssp1_dis_kbd_pins, .npins = ARRAY_SIZE(ssp1_dis_kbd_pins), .modemuxs = ssp1_dis_kbd_modemux, .nmodemuxs = ARRAY_SIZE(ssp1_dis_kbd_modemux), }; /* Pad multiplexing for ssp1_dis_sd device */ static const unsigned ssp1_dis_sd_pins[] = { 224, 226, 227, 228 }; static struct spear_muxreg ssp1_dis_sd_muxreg[] = { { .reg = PAD_FUNCTION_EN_2, .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK | PMX_MCICECF_MASK | PMX_MCICEXD_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_2, .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK | PMX_MCICECF_MASK | PMX_MCICEXD_MASK, .val = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK | PMX_MCICECF_MASK | PMX_MCICEXD_MASK, }, }; static struct spear_modemux ssp1_dis_sd_modemux[] = { { .muxregs = ssp1_dis_sd_muxreg, .nmuxregs = ARRAY_SIZE(ssp1_dis_sd_muxreg), }, }; static struct spear_pingroup ssp1_dis_sd_pingroup = { .name = "ssp1_dis_sd_grp", .pins = ssp1_dis_sd_pins, .npins = ARRAY_SIZE(ssp1_dis_sd_pins), .modemuxs = ssp1_dis_sd_modemux, .nmodemuxs = ARRAY_SIZE(ssp1_dis_sd_modemux), }; static const char *const ssp1_grps[] = { "ssp1_dis_kbd_grp", "ssp1_dis_sd_grp" }; static struct spear_function ssp1_function = { .name = "ssp1", .groups = ssp1_grps, 
.ngroups = ARRAY_SIZE(ssp1_grps), }; /* Pad multiplexing for gpt64 device */ static const unsigned gpt64_pins[] = { 230, 231, 232, 245 }; static struct spear_muxreg gpt64_muxreg[] = { { .reg = PAD_FUNCTION_EN_2, .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK | PMX_MCILEDS_MASK, .val = 0, }, { .reg = PAD_DIRECTION_SEL_2, .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK | PMX_MCILEDS_MASK, .val = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK | PMX_MCILEDS_MASK, }, }; static struct spear_modemux gpt64_modemux[] = { { .muxregs = gpt64_muxreg, .nmuxregs = ARRAY_SIZE(gpt64_muxreg), }, }; static struct spear_pingroup gpt64_pingroup = { .name = "gpt64_grp", .pins = gpt64_pins, .npins = ARRAY_SIZE(gpt64_pins), .modemuxs = gpt64_modemux, .nmodemuxs = ARRAY_SIZE(gpt64_modemux), }; static const char *const gpt64_grps[] = { "gpt64_grp" }; static struct spear_function gpt64_function = { .name = "gpt64", .groups = gpt64_grps, .ngroups = ARRAY_SIZE(gpt64_grps), }; /* pingroups */ static struct spear_pingroup *spear1310_pingroups[] = { &i2c0_pingroup, &ssp0_pingroup, &i2s0_pingroup, &i2s1_pingroup, &clcd_pingroup, &clcd_high_res_pingroup, &arm_gpio_pingroup, &smi_2_chips_pingroup, &smi_4_chips_pingroup, &gmii_pingroup, &rgmii_pingroup, &smii_0_1_2_pingroup, &ras_mii_txclk_pingroup, &nand_8bit_pingroup, &nand_16bit_pingroup, &nand_4_chips_pingroup, &keyboard_6x6_pingroup, &keyboard_rowcol6_8_pingroup, &uart0_pingroup, &uart0_modem_pingroup, &gpt0_tmr0_pingroup, &gpt0_tmr1_pingroup, &gpt1_tmr0_pingroup, &gpt1_tmr1_pingroup, &sdhci_pingroup, &cf_pingroup, &xd_pingroup, &touch_xy_pingroup, &ssp0_cs0_pingroup, &ssp0_cs1_2_pingroup, &uart_1_dis_i2c_pingroup, &uart_1_dis_sd_pingroup, &uart_2_3_pingroup, &uart_4_pingroup, &uart_5_pingroup, &rs485_0_1_tdm_0_1_pingroup, &i2c_1_2_pingroup, &i2c3_dis_smi_clcd_pingroup, &i2c3_dis_sd_i2s0_pingroup, &i2c_4_5_dis_smi_pingroup, &i2c4_dis_sd_pingroup, &i2c5_dis_sd_pingroup, &i2c_6_7_dis_kbd_pingroup, &i2c6_dis_sd_pingroup, &i2c7_dis_sd_pingroup, &can0_dis_nor_pingroup, &can0_dis_sd_pingroup, &can1_dis_sd_pingroup, &can1_dis_kbd_pingroup, &pci_pingroup, &pcie0_pingroup, &pcie1_pingroup, &pcie2_pingroup, &sata0_pingroup, &sata1_pingroup, &sata2_pingroup, &ssp1_dis_kbd_pingroup, &ssp1_dis_sd_pingroup, &gpt64_pingroup, }; /* functions */ static struct spear_function *spear1310_functions[] = { &i2c0_function, &ssp0_function, &i2s0_function, &i2s1_function, &clcd_function, &arm_gpio_function, &smi_function, &gmii_function, &rgmii_function, &smii_0_1_2_function, &ras_mii_txclk_function, &nand_function, &keyboard_function, &uart0_function, &gpt0_function, &gpt1_function, &sdhci_function, &cf_function, &xd_function, &touch_xy_function, &uart1_function, &uart2_3_function, &uart4_function, &uart5_function, &rs485_0_1_tdm_0_1_function, &i2c_1_2_function, &i2c3_unction, &i2c_4_5_function, &i2c_6_7_function, &can0_function, &can1_function, &pci_function, &pcie_function, &sata_function, &ssp1_function, &gpt64_function, }; static const unsigned pin18[] = { 18, }; static const unsigned pin19[] = { 19, }; static const unsigned pin20[] = { 20, }; static const unsigned pin21[] = { 21, }; static const unsigned pin22[] = { 22, }; static const unsigned pin23[] = { 23, }; static const unsigned pin54[] = { 54, }; static const unsigned pin55[] = { 55, }; static const unsigned pin56[] = { 56, }; static const unsigned pin57[] = { 57, }; static const unsigned pin58[] = { 58, }; static const unsigned pin59[] = { 59, }; static const unsigned pin60[] = { 60, }; 
static const unsigned pin61[] = { 61, }; static const unsigned pin62[] = { 62, }; static const unsigned pin63[] = { 63, }; static const unsigned pin143[] = { 143, }; static const unsigned pin144[] = { 144, }; static const unsigned pin145[] = { 145, }; static const unsigned pin146[] = { 146, }; static const unsigned pin147[] = { 147, }; static const unsigned pin148[] = { 148, }; static const unsigned pin149[] = { 149, }; static const unsigned pin150[] = { 150, }; static const unsigned pin151[] = { 151, }; static const unsigned pin152[] = { 152, }; static const unsigned pin205[] = { 205, }; static const unsigned pin206[] = { 206, }; static const unsigned pin211[] = { 211, }; static const unsigned pin212[] = { 212, }; static const unsigned pin213[] = { 213, }; static const unsigned pin214[] = { 214, }; static const unsigned pin215[] = { 215, }; static const unsigned pin216[] = { 216, }; static const unsigned pin217[] = { 217, }; static const unsigned pin218[] = { 218, }; static const unsigned pin219[] = { 219, }; static const unsigned pin220[] = { 220, }; static const unsigned pin221[] = { 221, }; static const unsigned pin222[] = { 222, }; static const unsigned pin223[] = { 223, }; static const unsigned pin224[] = { 224, }; static const unsigned pin225[] = { 225, }; static const unsigned pin226[] = { 226, }; static const unsigned pin227[] = { 227, }; static const unsigned pin228[] = { 228, }; static const unsigned pin229[] = { 229, }; static const unsigned pin230[] = { 230, }; static const unsigned pin231[] = { 231, }; static const unsigned pin232[] = { 232, }; static const unsigned pin233[] = { 233, }; static const unsigned pin234[] = { 234, }; static const unsigned pin235[] = { 235, }; static const unsigned pin236[] = { 236, }; static const unsigned pin237[] = { 237, }; static const unsigned pin238[] = { 238, }; static const unsigned pin239[] = { 239, }; static const unsigned pin240[] = { 240, }; static const unsigned pin241[] = { 241, }; static const unsigned pin242[] = { 242, }; static const unsigned pin243[] = { 243, }; static const unsigned pin244[] = { 244, }; static const unsigned pin245[] = { 245, }; static const unsigned pin_grp0[] = { 173, 174, }; static const unsigned pin_grp1[] = { 175, 185, 188, 197, 198, }; static const unsigned pin_grp2[] = { 176, 177, 178, 179, 184, 186, 187, 189, 190, 191, 192, }; static const unsigned pin_grp3[] = { 180, 181, 182, 183, 193, 194, 195, 196, }; static const unsigned pin_grp4[] = { 199, 200, }; static const unsigned pin_grp5[] = { 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, }; static const unsigned pin_grp6[] = { 86, 87, 88, 89, 90, 91, 92, 93, }; static const unsigned pin_grp7[] = { 98, 99, }; static const unsigned pin_grp8[] = { 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, }; /* Define muxreg arrays */ DEFINE_2_MUXREG(i2c0_pins, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_I2C0_MASK, 0, 1); DEFINE_2_MUXREG(ssp0_pins, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_SSP0_MASK, 0, 1); DEFINE_2_MUXREG(ssp0_cs0_pins, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_SSP0_CS0_MASK, 0, 1); DEFINE_2_MUXREG(ssp0_cs1_2_pins, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_SSP0_CS1_2_MASK, 0, 1); DEFINE_2_MUXREG(i2s0_pins, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_I2S0_MASK, 0, 1); DEFINE_2_MUXREG(i2s1_pins, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_I2S1_MASK, 0, 1); DEFINE_2_MUXREG(clcd_pins, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_CLCD1_MASK, 0, 1); DEFINE_2_MUXREG(clcd_high_res_pins, 
PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_CLCD2_MASK, 0, 1); DEFINE_2_MUXREG(pin18, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO15_MASK, 0, 1); DEFINE_2_MUXREG(pin19, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO14_MASK, 0, 1); DEFINE_2_MUXREG(pin20, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO13_MASK, 0, 1); DEFINE_2_MUXREG(pin21, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO12_MASK, 0, 1); DEFINE_2_MUXREG(pin22, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO11_MASK, 0, 1); DEFINE_2_MUXREG(pin23, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO10_MASK, 0, 1); DEFINE_2_MUXREG(pin143, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO00_MASK, 0, 1); DEFINE_2_MUXREG(pin144, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO01_MASK, 0, 1); DEFINE_2_MUXREG(pin145, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO02_MASK, 0, 1); DEFINE_2_MUXREG(pin146, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO03_MASK, 0, 1); DEFINE_2_MUXREG(pin147, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO04_MASK, 0, 1); DEFINE_2_MUXREG(pin148, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO05_MASK, 0, 1); DEFINE_2_MUXREG(pin149, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO06_MASK, 0, 1); DEFINE_2_MUXREG(pin150, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO07_MASK, 0, 1); DEFINE_2_MUXREG(pin151, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO08_MASK, 0, 1); DEFINE_2_MUXREG(pin152, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_EGPIO09_MASK, 0, 1); DEFINE_2_MUXREG(smi_2_chips_pins, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_SMI_MASK, 0, 1); DEFINE_2_MUXREG(pin54, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_SMINCS3_MASK, 0, 1); DEFINE_2_MUXREG(pin55, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_SMINCS2_MASK, 0, 1); DEFINE_2_MUXREG(pin56, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_NFRSTPWDWN3_MASK, 0, 1); DEFINE_2_MUXREG(pin57, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_NFRSTPWDWN2_MASK, 0, 1); DEFINE_2_MUXREG(pin58, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_NFRSTPWDWN1_MASK, 0, 1); DEFINE_2_MUXREG(pin59, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_NFRSTPWDWN0_MASK, 0, 1); DEFINE_2_MUXREG(pin60, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_NFWPRT3_MASK, 0, 1); DEFINE_2_MUXREG(pin61, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_NFCE3_MASK, 0, 1); DEFINE_2_MUXREG(pin62, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_NFAD25_MASK, 0, 1); DEFINE_2_MUXREG(pin63, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_NFAD24_MASK, 0, 1); DEFINE_2_MUXREG(pin_grp0, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_GMIICLK_MASK, 0, 1); DEFINE_2_MUXREG(pin_grp1, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK, 0, 1); DEFINE_2_MUXREG(pin_grp2, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_RXCLK_RDV_TXEN_D03_MASK, 0, 1); DEFINE_2_MUXREG(pin_grp3, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_GMIID47_MASK, 0, 1); DEFINE_2_MUXREG(pin_grp4, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_MDC_MDIO_MASK, 0, 1); DEFINE_2_MUXREG(pin_grp5, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_NFAD23_MASK, 0, 1); DEFINE_2_MUXREG(pin_grp6, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_MCI_DATA8_15_MASK, 0, 1); DEFINE_2_MUXREG(pin_grp7, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_NFCE2_MASK, 0, 1); DEFINE_2_MUXREG(pin_grp8, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_NAND8_MASK, 0, 1); DEFINE_2_MUXREG(nand_16bit_pins, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_NAND16BIT_1_MASK, 0, 1); DEFINE_2_MUXREG(pin205, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_KBD_COL1_MASK | PMX_NFCE1_MASK, 0, 1); DEFINE_2_MUXREG(pin206, 
PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_KBD_COL0_MASK | PMX_NFCE2_MASK, 0, 1); DEFINE_2_MUXREG(pin211, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_KBD_ROW1_MASK | PMX_NFWPRT1_MASK, 0, 1); DEFINE_2_MUXREG(pin212, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_KBD_ROW0_MASK | PMX_NFWPRT2_MASK, 0, 1); DEFINE_2_MUXREG(pin213, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_MCIDATA0_MASK, 0, 1); DEFINE_2_MUXREG(pin214, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_MCIDATA1_MASK, 0, 1); DEFINE_2_MUXREG(pin215, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_MCIDATA2_MASK, 0, 1); DEFINE_2_MUXREG(pin216, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_MCIDATA3_MASK, 0, 1); DEFINE_2_MUXREG(pin217, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_MCIDATA4_MASK, 0, 1); DEFINE_2_MUXREG(pin218, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIDATA5_MASK, 0, 1); DEFINE_2_MUXREG(pin219, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIDATA6_MASK, 0, 1); DEFINE_2_MUXREG(pin220, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIDATA7_MASK, 0, 1); DEFINE_2_MUXREG(pin221, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIDATA1SD_MASK, 0, 1); DEFINE_2_MUXREG(pin222, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIDATA2SD_MASK, 0, 1); DEFINE_2_MUXREG(pin223, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIDATA3SD_MASK, 0, 1); DEFINE_2_MUXREG(pin224, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIADDR0ALE_MASK, 0, 1); DEFINE_2_MUXREG(pin225, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIADDR1CLECLK_MASK, 0, 1); DEFINE_2_MUXREG(pin226, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIADDR2_MASK, 0, 1); DEFINE_2_MUXREG(pin227, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCICECF_MASK, 0, 1); DEFINE_2_MUXREG(pin228, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCICEXD_MASK, 0, 1); DEFINE_2_MUXREG(pin229, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCICESDMMC_MASK, 0, 1); DEFINE_2_MUXREG(pin230, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCICDCF1_MASK, 0, 1); DEFINE_2_MUXREG(pin231, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCICDCF2_MASK, 0, 1); DEFINE_2_MUXREG(pin232, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCICDXD_MASK, 0, 1); DEFINE_2_MUXREG(pin233, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCICDSDMMC_MASK, 0, 1); DEFINE_2_MUXREG(pin234, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIDATADIR_MASK, 0, 1); DEFINE_2_MUXREG(pin235, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIDMARQWP_MASK, 0, 1); DEFINE_2_MUXREG(pin236, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIIORDRE_MASK, 0, 1); DEFINE_2_MUXREG(pin237, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIIOWRWE_MASK, 0, 1); DEFINE_2_MUXREG(pin238, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIRESETCF_MASK, 0, 1); DEFINE_2_MUXREG(pin239, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCICS0CE_MASK, 0, 1); DEFINE_2_MUXREG(pin240, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCICFINTR_MASK, 0, 1); DEFINE_2_MUXREG(pin241, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIIORDY_MASK, 0, 1); DEFINE_2_MUXREG(pin242, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCICS1_MASK, 0, 1); DEFINE_2_MUXREG(pin243, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCIDMAACK_MASK, 0, 1); DEFINE_2_MUXREG(pin244, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCISDCMD_MASK, 0, 1); DEFINE_2_MUXREG(pin245, PAD_FUNCTION_EN_2, PAD_DIRECTION_SEL_2, PMX_MCILEDS_MASK, 0, 1); DEFINE_2_MUXREG(keyboard_rowcol6_8_pins, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_KBD_ROWCOL68_MASK, 0, 1); DEFINE_2_MUXREG(uart0_pins, PAD_FUNCTION_EN_0, PAD_DIRECTION_SEL_0, PMX_UART0_MASK, 0, 1); DEFINE_2_MUXREG(uart0_modem_pins, PAD_FUNCTION_EN_1, 
PAD_DIRECTION_SEL_1, PMX_UART0_MODEM_MASK, 0, 1); DEFINE_2_MUXREG(gpt0_tmr0_pins, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_GPT0_TMR0_MASK, 0, 1); DEFINE_2_MUXREG(gpt0_tmr1_pins, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_GPT0_TMR1_MASK, 0, 1); DEFINE_2_MUXREG(gpt1_tmr0_pins, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_GPT1_TMR0_MASK, 0, 1); DEFINE_2_MUXREG(gpt1_tmr1_pins, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_GPT1_TMR1_MASK, 0, 1); DEFINE_2_MUXREG(touch_xy_pins, PAD_FUNCTION_EN_1, PAD_DIRECTION_SEL_1, PMX_TOUCH_XY_MASK, 0, 1); static struct spear_gpio_pingroup spear1310_gpio_pingroup[] = { GPIO_PINGROUP(i2c0_pins), GPIO_PINGROUP(ssp0_pins), GPIO_PINGROUP(ssp0_cs0_pins), GPIO_PINGROUP(ssp0_cs1_2_pins), GPIO_PINGROUP(i2s0_pins), GPIO_PINGROUP(i2s1_pins), GPIO_PINGROUP(clcd_pins), GPIO_PINGROUP(clcd_high_res_pins), GPIO_PINGROUP(pin18), GPIO_PINGROUP(pin19), GPIO_PINGROUP(pin20), GPIO_PINGROUP(pin21), GPIO_PINGROUP(pin22), GPIO_PINGROUP(pin23), GPIO_PINGROUP(pin143), GPIO_PINGROUP(pin144), GPIO_PINGROUP(pin145), GPIO_PINGROUP(pin146), GPIO_PINGROUP(pin147), GPIO_PINGROUP(pin148), GPIO_PINGROUP(pin149), GPIO_PINGROUP(pin150), GPIO_PINGROUP(pin151), GPIO_PINGROUP(pin152), GPIO_PINGROUP(smi_2_chips_pins), GPIO_PINGROUP(pin54), GPIO_PINGROUP(pin55), GPIO_PINGROUP(pin56), GPIO_PINGROUP(pin57), GPIO_PINGROUP(pin58), GPIO_PINGROUP(pin59), GPIO_PINGROUP(pin60), GPIO_PINGROUP(pin61), GPIO_PINGROUP(pin62), GPIO_PINGROUP(pin63), GPIO_PINGROUP(pin_grp0), GPIO_PINGROUP(pin_grp1), GPIO_PINGROUP(pin_grp2), GPIO_PINGROUP(pin_grp3), GPIO_PINGROUP(pin_grp4), GPIO_PINGROUP(pin_grp5), GPIO_PINGROUP(pin_grp6), GPIO_PINGROUP(pin_grp7), GPIO_PINGROUP(pin_grp8), GPIO_PINGROUP(nand_16bit_pins), GPIO_PINGROUP(pin205), GPIO_PINGROUP(pin206), GPIO_PINGROUP(pin211), GPIO_PINGROUP(pin212), GPIO_PINGROUP(pin213), GPIO_PINGROUP(pin214), GPIO_PINGROUP(pin215), GPIO_PINGROUP(pin216), GPIO_PINGROUP(pin217), GPIO_PINGROUP(pin218), GPIO_PINGROUP(pin219), GPIO_PINGROUP(pin220), GPIO_PINGROUP(pin221), GPIO_PINGROUP(pin222), GPIO_PINGROUP(pin223), GPIO_PINGROUP(pin224), GPIO_PINGROUP(pin225), GPIO_PINGROUP(pin226), GPIO_PINGROUP(pin227), GPIO_PINGROUP(pin228), GPIO_PINGROUP(pin229), GPIO_PINGROUP(pin230), GPIO_PINGROUP(pin231), GPIO_PINGROUP(pin232), GPIO_PINGROUP(pin233), GPIO_PINGROUP(pin234), GPIO_PINGROUP(pin235), GPIO_PINGROUP(pin236), GPIO_PINGROUP(pin237), GPIO_PINGROUP(pin238), GPIO_PINGROUP(pin239), GPIO_PINGROUP(pin240), GPIO_PINGROUP(pin241), GPIO_PINGROUP(pin242), GPIO_PINGROUP(pin243), GPIO_PINGROUP(pin244), GPIO_PINGROUP(pin245), GPIO_PINGROUP(keyboard_rowcol6_8_pins), GPIO_PINGROUP(uart0_pins), GPIO_PINGROUP(uart0_modem_pins), GPIO_PINGROUP(gpt0_tmr0_pins), GPIO_PINGROUP(gpt0_tmr1_pins), GPIO_PINGROUP(gpt1_tmr0_pins), GPIO_PINGROUP(gpt1_tmr1_pins), GPIO_PINGROUP(touch_xy_pins), }; static struct spear_pinctrl_machdata spear1310_machdata = { .pins = spear1310_pins, .npins = ARRAY_SIZE(spear1310_pins), .groups = spear1310_pingroups, .ngroups = ARRAY_SIZE(spear1310_pingroups), .functions = spear1310_functions, .nfunctions = ARRAY_SIZE(spear1310_functions), .gpio_pingroups = spear1310_gpio_pingroup, .ngpio_pingroups = ARRAY_SIZE(spear1310_gpio_pingroup), .modes_supported = false, }; static const struct of_device_id spear1310_pinctrl_of_match[] = { { .compatible = "st,spear1310-pinmux", }, {}, }; static int spear1310_pinctrl_probe(struct platform_device *pdev) { return spear_pinctrl_probe(pdev, &spear1310_machdata); } static struct platform_driver spear1310_pinctrl_driver = { .driver = { .name = DRIVER_NAME, 
	.of_match_table = spear1310_pinctrl_of_match,
	},
	.probe = spear1310_pinctrl_probe,
};

static int __init spear1310_pinctrl_init(void)
{
	return platform_driver_register(&spear1310_pinctrl_driver);
}
arch_initcall(spear1310_pinctrl_init);
gpl-2.0
nunogia/Z7Max_NX505J_H129_kernel
drivers/bif/qpnp-bsi.c
1405
45792
/* Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/atomic.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/spmi.h> #include <linux/workqueue.h> #include <linux/bif/driver.h> #include <linux/qpnp/qpnp-adc.h> enum qpnp_bsi_irq { QPNP_BSI_IRQ_ERR, QPNP_BSI_IRQ_RX, QPNP_BSI_IRQ_TX, QPNP_BSI_IRQ_COUNT, }; enum qpnp_bsi_com_mode { QPNP_BSI_COM_MODE_IRQ, QPNP_BSI_COM_MODE_POLL, }; struct qpnp_bsi_chip { struct bif_ctrl_desc bdesc; struct spmi_device *spmi_dev; struct bif_ctrl_dev *bdev; struct work_struct slave_irq_work; u16 base_addr; u16 batt_id_stat_addr; int r_pullup_ohm; int vid_ref_uV; int tau_index; int tau_sampling_mask; enum bif_bus_state state; enum qpnp_bsi_com_mode com_mode; int irq[QPNP_BSI_IRQ_COUNT]; atomic_t irq_flag[QPNP_BSI_IRQ_COUNT]; int batt_present_irq; enum qpnp_vadc_channels batt_id_adc_channel; struct qpnp_vadc_chip *vadc_dev; }; #define QPNP_BSI_DRIVER_NAME "qcom,qpnp-bsi" enum qpnp_bsi_registers { QPNP_BSI_REG_TYPE = 0x04, QPNP_BSI_REG_SUBTYPE = 0x05, QPNP_BSI_REG_STATUS = 0x08, QPNP_BSI_REG_ENABLE = 0x46, QPNP_BSI_REG_CLEAR_ERROR = 0x4F, QPNP_BSI_REG_FORCE_BCL_LOW = 0x51, QPNP_BSI_REG_TAU_CONFIG = 0x52, QPNP_BSI_REG_MODE = 0x53, QPNP_BSI_REG_RX_TX_ENABLE = 0x54, QPNP_BSI_REG_TX_DATA_LOW = 0x5A, QPNP_BSI_REG_TX_DATA_HIGH = 0x5B, QPNP_BSI_REG_TX_CTRL = 0x5D, QPNP_BSI_REG_RX_DATA_LOW = 0x60, QPNP_BSI_REG_RX_DATA_HIGH = 0x61, QPNP_BSI_REG_RX_SOURCE = 0x62, QPNP_BSI_REG_BSI_ERROR = 0x70, }; #define QPNP_BSI_TYPE 0x02 #define QPNP_BSI_SUBTYPE 0x10 #define QPNP_BSI_STATUS_ERROR 0x10 #define QPNP_BSI_STATUS_TX_BUSY 0x08 #define QPNP_BSI_STATUS_RX_BUSY 0x04 #define QPNP_BSI_STATUS_TX_GO_BUSY 0x02 #define QPNP_BSI_STATUS_RX_DATA_READY 0x01 #define QPNP_BSI_ENABLE_MASK 0x80 #define QPNP_BSI_ENABLE 0x80 #define QPNP_BSI_DISABLE 0x00 #define QPNP_BSI_TAU_CONFIG_SAMPLE_MASK 0x10 #define QPNP_BSI_TAU_CONFIG_SAMPLE_8X 0x10 #define QPNP_BSI_TAU_CONFIG_SAMPLE_4X 0x00 #define QPNP_BSI_TAU_CONFIG_SPEED_MASK 0x07 #define QPNP_BSI_MODE_TX_PULSE_MASK 0x10 #define QPNP_BSI_MODE_TX_PULSE_INT 0x10 #define QPNP_BSI_MODE_TX_PULSE_DATA 0x00 #define QPNP_BSI_MODE_RX_PULSE_MASK 0x08 #define QPNP_BSI_MODE_RX_PULSE_INT 0x08 #define QPNP_BSI_MODE_RX_PULSE_DATA 0x00 #define QPNP_BSI_MODE_TX_PULSE_T_MASK 0x04 #define QPNP_BSI_MODE_TX_PULSE_T_WAKE 0x04 #define QPNP_BSI_MODE_TX_PULSE_T_1_TAU 0x00 #define QPNP_BSI_MODE_RX_FORMAT_MASK 0x02 #define QPNP_BSI_MODE_RX_FORMAT_17_BIT 0x02 #define QPNP_BSI_MODE_RX_FORMAT_11_BIT 0x00 #define QPNP_BSI_MODE_TX_FORMAT_MASK 0x01 #define QPNP_BSI_MODE_TX_FORMAT_17_BIT 0x01 #define QPNP_BSI_MODE_TX_FORMAT_11_BIT 0x00 #define QPNP_BSI_TX_ENABLE_MASK 0x80 #define QPNP_BSI_TX_ENABLE 0x80 #define QPNP_BSI_TX_DISABLE 0x00 #define QPNP_BSI_RX_ENABLE_MASK 0x40 #define QPNP_BSI_RX_ENABLE 0x40 #define QPNP_BSI_RX_DISABLE 0x00 #define 
QPNP_BSI_TX_DATA_HIGH_MASK 0x07 #define QPNP_BSI_TX_CTRL_GO 0x01 #define QPNP_BSI_RX_DATA_HIGH_MASK 0x07 #define QPNP_BSI_RX_SRC_LOOPBACK_FLAG 0x10 #define QPNP_BSI_BSI_ERROR_CLEAR 0x80 #define QPNP_SMBB_BAT_IF_BATT_PRES_MASK 0x80 #define QPNP_SMBB_BAT_IF_BATT_ID_MASK 0x01 #define QPNP_BSI_NUM_CLOCK_PERIODS 8 struct qpnp_bsi_tau { int period_4x_ns[QPNP_BSI_NUM_CLOCK_PERIODS]; int period_8x_ns[QPNP_BSI_NUM_CLOCK_PERIODS]; int period_4x_us[QPNP_BSI_NUM_CLOCK_PERIODS]; int period_8x_us[QPNP_BSI_NUM_CLOCK_PERIODS]; }; /* Tau BIF clock periods in ns supported by BSI for either 4x or 8x sampling. */ static const struct qpnp_bsi_tau qpnp_bsi_tau_period = { .period_4x_ns = { 150420, 122080, 61040, 31670, 15830, 7920, 3960, 2080 }, .period_8x_ns = { 150420, 122080, 63330, 31670, 15830, 7920, 4170, 2080 }, .period_4x_us = { 151, 122, 61, 32, 16, 8, 4, 2 }, .period_8x_us = { 151, 122, 64, 32, 16, 8, 4, 2 }, }; #define QPNP_BSI_MIN_CLOCK_SPEED_NS 2080 #define QPNP_BSI_MAX_CLOCK_SPEED_NS 150420 #define QPNP_BSI_MIN_PULLUP_OHM 1000 #define QPNP_BSI_MAX_PULLUP_OHM 500000 #define QPNP_BSI_DEFAULT_PULLUP_OHM 100000 #define QPNP_BSI_MIN_VID_REF_UV 500000 #define QPNP_BSI_MAX_VID_REF_UV 5000000 #define QPNP_BSI_DEFAULT_VID_REF_UV 1800000 /* These have units of tau_bif. */ #define QPNP_BSI_MAX_TRANSMIT_CYCLES 46 #define QPNP_BSI_MIN_RECEIVE_CYCLES 24 #define QPNP_BSI_MAX_BUS_QUERY_CYCLES 17 /* * Maximum time in microseconds for a slave to transition from suspend to active * state. */ #define QPNP_BSI_MAX_SLAVE_ACTIVIATION_DELAY_US 50 /* * Maximum time in milliseconds for a slave to transition from power down to * active state. */ #define QPNP_BSI_MAX_SLAVE_POWER_UP_DELAY_MS 10 #define QPNP_BSI_POWER_UP_LOW_DELAY_US 240 /* * Latencies that are used when determining if polling or interrupts should be * used for a given transaction. */ #define QPNP_BSI_MAX_IRQ_LATENCY_US 170 #define QPNP_BSI_MAX_BSI_DATA_READ_LATENCY_US 16 static int qpnp_bsi_set_bus_state(struct bif_ctrl_dev *bdev, int state); static inline int qpnp_bsi_read(struct qpnp_bsi_chip *chip, u16 addr, u8 *buf, int len) { int rc; rc = spmi_ext_register_readl(chip->spmi_dev->ctrl, chip->spmi_dev->sid, chip->base_addr + addr, buf, len); if (rc) dev_err(&chip->spmi_dev->dev, "%s: spmi_ext_register_readl() failed. sid=%d, addr=%04X, len=%d, rc=%d\n", __func__, chip->spmi_dev->sid, chip->base_addr + addr, len, rc); return rc; } static inline int qpnp_bsi_write(struct qpnp_bsi_chip *chip, u16 addr, u8 *buf, int len) { int rc; rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid, chip->base_addr + addr, buf, len); if (rc) dev_err(&chip->spmi_dev->dev, "%s: spmi_ext_register_writel() failed. 
sid=%d, addr=%04X, len=%d, rc=%d\n", __func__, chip->spmi_dev->sid, chip->base_addr + addr, len, rc); return rc; } enum qpnp_bsi_rx_tx_state { QPNP_BSI_RX_TX_STATE_RX_OFF_TX_OFF, QPNP_BSI_RX_TX_STATE_RX_OFF_TX_DATA, QPNP_BSI_RX_TX_STATE_RX_OFF_TX_INT, QPNP_BSI_RX_TX_STATE_RX_INT_TX_DATA, QPNP_BSI_RX_TX_STATE_RX_DATA_TX_DATA, QPNP_BSI_RX_TX_STATE_RX_INT_TX_OFF, }; static int qpnp_bsi_rx_tx_config(struct qpnp_bsi_chip *chip, enum qpnp_bsi_rx_tx_state state) { u8 buf[2] = {0, 0}; int rc; buf[0] = QPNP_BSI_MODE_TX_FORMAT_11_BIT | QPNP_BSI_MODE_RX_FORMAT_11_BIT; switch (state) { case QPNP_BSI_RX_TX_STATE_RX_OFF_TX_OFF: buf[0] |= QPNP_BSI_MODE_TX_PULSE_DATA | QPNP_BSI_MODE_RX_PULSE_DATA; buf[1] = QPNP_BSI_TX_DISABLE | QPNP_BSI_RX_DISABLE; break; case QPNP_BSI_RX_TX_STATE_RX_OFF_TX_DATA: buf[0] |= QPNP_BSI_MODE_TX_PULSE_DATA | QPNP_BSI_MODE_RX_PULSE_DATA; buf[1] = QPNP_BSI_TX_ENABLE | QPNP_BSI_RX_DISABLE; break; case QPNP_BSI_RX_TX_STATE_RX_OFF_TX_INT: buf[0] |= QPNP_BSI_MODE_TX_PULSE_INT | QPNP_BSI_MODE_RX_PULSE_DATA; buf[1] = QPNP_BSI_TX_ENABLE | QPNP_BSI_RX_DISABLE; break; case QPNP_BSI_RX_TX_STATE_RX_INT_TX_DATA: buf[0] |= QPNP_BSI_MODE_TX_PULSE_DATA | QPNP_BSI_MODE_RX_PULSE_INT; buf[1] = QPNP_BSI_TX_ENABLE | QPNP_BSI_RX_ENABLE; break; case QPNP_BSI_RX_TX_STATE_RX_DATA_TX_DATA: buf[0] |= QPNP_BSI_MODE_TX_PULSE_DATA | QPNP_BSI_MODE_RX_PULSE_DATA; buf[1] = QPNP_BSI_TX_ENABLE | QPNP_BSI_RX_ENABLE; break; case QPNP_BSI_RX_TX_STATE_RX_INT_TX_OFF: buf[0] |= QPNP_BSI_MODE_TX_PULSE_DATA | QPNP_BSI_MODE_RX_PULSE_INT; buf[1] = QPNP_BSI_TX_DISABLE | QPNP_BSI_RX_DISABLE; break; default: dev_err(&chip->spmi_dev->dev, "%s: invalid state=%d\n", __func__, state); return -EINVAL; } rc = qpnp_bsi_write(chip, QPNP_BSI_REG_MODE, buf, 2); if (rc) dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_write() failed, rc=%d\n", __func__, rc); return rc; } static void qpnp_bsi_slave_irq_work(struct work_struct *work) { struct qpnp_bsi_chip *chip = container_of(work, struct qpnp_bsi_chip, slave_irq_work); int rc; rc = bif_ctrl_notify_slave_irq(chip->bdev); if (rc) pr_err("Could not notify BIF core about slave interrupt, rc=%d\n", rc); } static irqreturn_t qpnp_bsi_isr(int irq, void *data) { struct qpnp_bsi_chip *chip = data; bool found = false; int i; for (i = 0; i < QPNP_BSI_IRQ_COUNT; i++) { if (irq == chip->irq[i]) { found = true; atomic_cmpxchg(&chip->irq_flag[i], 0, 1); /* Check if this is a slave interrupt. */ if (i == QPNP_BSI_IRQ_RX && chip->state == BIF_BUS_STATE_INTERRUPT) { /* Slave IRQ makes the bus active. 
*/ qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_OFF_TX_OFF); chip->state = BIF_BUS_STATE_ACTIVE; schedule_work(&chip->slave_irq_work); } } } if (!found) pr_err("Unknown interrupt: %d\n", irq); return IRQ_HANDLED; } static irqreturn_t qpnp_bsi_batt_present_isr(int irq, void *data) { struct qpnp_bsi_chip *chip = data; int rc; if (!chip->bdev) return IRQ_HANDLED; rc = bif_ctrl_notify_battery_changed(chip->bdev); if (rc) pr_err("Could not notify about battery state change, rc=%d\n", rc); return IRQ_HANDLED; } static void qpnp_bsi_set_com_mode(struct qpnp_bsi_chip *chip, enum qpnp_bsi_com_mode mode) { int i; if (chip->com_mode == mode) return; if (mode == QPNP_BSI_COM_MODE_IRQ) for (i = 0; i < QPNP_BSI_IRQ_COUNT; i++) enable_irq(chip->irq[i]); else for (i = 0; i < QPNP_BSI_IRQ_COUNT; i++) disable_irq(chip->irq[i]); chip->com_mode = mode; } static inline bool qpnp_bsi_check_irq(struct qpnp_bsi_chip *chip, int irq) { return atomic_cmpxchg(&chip->irq_flag[irq], 1, 0); } static void qpnp_bsi_clear_irq_flags(struct qpnp_bsi_chip *chip) { int i; for (i = 0; i < QPNP_BSI_IRQ_COUNT; i++) atomic_set(&chip->irq_flag[i], 0); } static inline int qpnp_bsi_get_tau_ns(struct qpnp_bsi_chip *chip) { if (chip->tau_sampling_mask == QPNP_BSI_TAU_CONFIG_SAMPLE_4X) return qpnp_bsi_tau_period.period_4x_ns[chip->tau_index]; else return qpnp_bsi_tau_period.period_8x_ns[chip->tau_index]; } static inline int qpnp_bsi_get_tau_us(struct qpnp_bsi_chip *chip) { if (chip->tau_sampling_mask == QPNP_BSI_TAU_CONFIG_SAMPLE_4X) return qpnp_bsi_tau_period.period_4x_us[chip->tau_index]; else return qpnp_bsi_tau_period.period_8x_us[chip->tau_index]; } /* Checks if BSI is in an error state and clears the error if it is. */ static int qpnp_bsi_clear_bsi_error(struct qpnp_bsi_chip *chip) { int rc, delay_us; u8 reg; rc = qpnp_bsi_read(chip, QPNP_BSI_REG_BSI_ERROR, &reg, 1); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_read() failed, rc=%d\n", __func__, rc); return rc; } if (reg > 0) { /* * Delay before clearing the BSI error in case a transaction is * still in flight. */ delay_us = QPNP_BSI_MAX_TRANSMIT_CYCLES * qpnp_bsi_get_tau_us(chip); udelay(delay_us); pr_info("PMIC BSI module in error state, error=%d\n", reg); reg = QPNP_BSI_BSI_ERROR_CLEAR; rc = qpnp_bsi_write(chip, QPNP_BSI_REG_CLEAR_ERROR, &reg, 1); if (rc) dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_write() failed, rc=%d\n", __func__, rc); } return rc; } static int qpnp_bsi_get_bsi_error(struct qpnp_bsi_chip *chip) { int rc; u8 reg; rc = qpnp_bsi_read(chip, QPNP_BSI_REG_BSI_ERROR, &reg, 1); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_read() failed, rc=%d\n", __func__, rc); return rc; } return reg; } static int qpnp_bsi_wait_for_tx(struct qpnp_bsi_chip *chip, int timeout) { int rc = 0; /* Wait for TX or ERR IRQ. 
*/ while (timeout > 0) { if (qpnp_bsi_check_irq(chip, QPNP_BSI_IRQ_ERR)) { dev_err(&chip->spmi_dev->dev, "%s: transaction error occurred, BSI error=%d\n", __func__, qpnp_bsi_get_bsi_error(chip)); return -EIO; } if (qpnp_bsi_check_irq(chip, QPNP_BSI_IRQ_TX)) break; udelay(1); timeout--; } if (timeout == 0) { rc = -ETIMEDOUT; dev_err(&chip->spmi_dev->dev, "%s: transaction timed out, no interrupts received, rc=%d\n", __func__, rc); return rc; } return rc; } static int qpnp_bsi_issue_transaction(struct qpnp_bsi_chip *chip, int transaction, u8 data) { int rc; u8 buf[4]; /* MIPI_BIF_DATA_TX_0 = BIF word bits 7 to 0 */ buf[0] = data; /* MIPI_BIF_DATA_TX_1 = BIF word BCF, bits 9 to 8 */ buf[1] = transaction & QPNP_BSI_TX_DATA_HIGH_MASK; /* MIPI_BIF_DATA_TX_2 ignored */ buf[2] = 0x00; /* MIPI_BIF_TX_CTL bit 0 written to start the transaction. */ buf[3] = QPNP_BSI_TX_CTRL_GO; /* Write the TX_DATA bytes and initiate the transaction. */ rc = qpnp_bsi_write(chip, QPNP_BSI_REG_TX_DATA_LOW, buf, 4); if (rc) dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_write() failed, rc=%d\n", __func__, rc); return rc; } static int qpnp_bsi_issue_transaction_wait_for_tx(struct qpnp_bsi_chip *chip, int transaction, u8 data) { int rc, timeout; rc = qpnp_bsi_issue_transaction(chip, transaction, data); if (rc) return rc; timeout = QPNP_BSI_MAX_TRANSMIT_CYCLES * qpnp_bsi_get_tau_us(chip) + QPNP_BSI_MAX_IRQ_LATENCY_US; rc = qpnp_bsi_wait_for_tx(chip, timeout); return rc; } static int qpnp_bsi_wait_for_rx(struct qpnp_bsi_chip *chip, int timeout) { int rc = 0; /* Wait for RX IRQ to indicate that data is ready to read. */ while (timeout > 0) { if (qpnp_bsi_check_irq(chip, QPNP_BSI_IRQ_ERR)) { dev_err(&chip->spmi_dev->dev, "%s: transaction error occurred, BSI error=%d\n", __func__, qpnp_bsi_get_bsi_error(chip)); return -EIO; } if (qpnp_bsi_check_irq(chip, QPNP_BSI_IRQ_RX)) break; udelay(1); timeout--; } if (timeout == 0) rc = -ETIMEDOUT; return rc; } static int qpnp_bsi_bus_transaction(struct bif_ctrl_dev *bdev, int transaction, u8 data) { struct qpnp_bsi_chip *chip = bdev_get_drvdata(bdev); int rc; qpnp_bsi_set_com_mode(chip, QPNP_BSI_COM_MODE_IRQ); rc = qpnp_bsi_set_bus_state(bdev, BIF_BUS_STATE_ACTIVE); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: failed to set bus state, rc=%d\n", __func__, rc); return rc; } rc = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_OFF_TX_DATA); if (rc) return rc; rc = qpnp_bsi_clear_bsi_error(chip); if (rc) return rc; qpnp_bsi_clear_irq_flags(chip); rc = qpnp_bsi_issue_transaction_wait_for_tx(chip, transaction, data); if (rc) return rc; rc = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_OFF_TX_OFF); return rc; } static int qpnp_bsi_bus_transaction_query(struct bif_ctrl_dev *bdev, int transaction, u8 data, bool *query_response) { struct qpnp_bsi_chip *chip = bdev_get_drvdata(bdev); int rc, timeout; qpnp_bsi_set_com_mode(chip, QPNP_BSI_COM_MODE_IRQ); rc = qpnp_bsi_set_bus_state(bdev, BIF_BUS_STATE_ACTIVE); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: failed to set bus state, rc=%d\n", __func__, rc); return rc; } rc = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_INT_TX_DATA); if (rc) return rc; rc = qpnp_bsi_clear_bsi_error(chip); if (rc) return rc; qpnp_bsi_clear_irq_flags(chip); rc = qpnp_bsi_issue_transaction_wait_for_tx(chip, transaction, data); if (rc) return rc; timeout = QPNP_BSI_MAX_BUS_QUERY_CYCLES * qpnp_bsi_get_tau_us(chip) + QPNP_BSI_MAX_IRQ_LATENCY_US; rc = qpnp_bsi_wait_for_rx(chip, timeout); if (rc == 0) { *query_response = true; } else if (rc == -ETIMEDOUT) { 
*query_response = false; rc = 0; } rc = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_OFF_TX_OFF); return rc; } static int qpnp_bsi_bus_transaction_read(struct bif_ctrl_dev *bdev, int transaction, u8 data, int *response) { struct qpnp_bsi_chip *chip = bdev_get_drvdata(bdev); int rc, timeout; u8 buf[3]; qpnp_bsi_set_com_mode(chip, QPNP_BSI_COM_MODE_IRQ); rc = qpnp_bsi_set_bus_state(bdev, BIF_BUS_STATE_ACTIVE); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: failed to set bus state, rc=%d\n", __func__, rc); return rc; } rc = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_DATA_TX_DATA); if (rc) return rc; rc = qpnp_bsi_clear_bsi_error(chip); if (rc) return rc; qpnp_bsi_clear_irq_flags(chip); rc = qpnp_bsi_issue_transaction_wait_for_tx(chip, transaction, data); if (rc) return rc; timeout = QPNP_BSI_MAX_TRANSMIT_CYCLES * qpnp_bsi_get_tau_us(chip) + QPNP_BSI_MAX_IRQ_LATENCY_US; rc = qpnp_bsi_wait_for_rx(chip, timeout); if (rc) { if (rc == -ETIMEDOUT) { /* * No error message is printed in this case in order * to provide silent operation when checking if a slave * is selected using the transaction query bus command. */ dev_dbg(&chip->spmi_dev->dev, "%s: transaction timed out, no interrupts received, rc=%d\n", __func__, rc); } return rc; } /* Read the RX_DATA bytes. */ rc = qpnp_bsi_read(chip, QPNP_BSI_REG_RX_DATA_LOW, buf, 3); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_read() failed, rc=%d\n", __func__, rc); return rc; } if (buf[2] & QPNP_BSI_RX_SRC_LOOPBACK_FLAG) { rc = -EIO; dev_err(&chip->spmi_dev->dev, "%s: unexpected loopback data read, rc=%d\n", __func__, rc); return rc; } *response = ((int)(buf[1] & QPNP_BSI_RX_DATA_HIGH_MASK) << 8) | buf[0]; rc = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_OFF_TX_OFF); return 0; } /* * Wait for RX_FLOW_STATUS to be set to 1 which indicates that another BIF word * can be read from PMIC registers. */ static int qpnp_bsi_wait_for_rx_data(struct qpnp_bsi_chip *chip) { int rc = 0; int timeout; u8 reg; timeout = QPNP_BSI_MAX_TRANSMIT_CYCLES * qpnp_bsi_get_tau_us(chip); /* Wait for RX_FLOW_STATUS == 1 or ERR_FLAG == 1. */ while (timeout > 0) { rc = qpnp_bsi_read(chip, QPNP_BSI_REG_STATUS, &reg, 1); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_write() failed, rc=%d\n", __func__, rc); return rc; } if (reg & QPNP_BSI_STATUS_ERROR) { dev_err(&chip->spmi_dev->dev, "%s: transaction error occurred, BSI error=%d\n", __func__, qpnp_bsi_get_bsi_error(chip)); return -EIO; } if (reg & QPNP_BSI_STATUS_RX_DATA_READY) { /* BSI RX has data word latched. */ return 0; } udelay(1); timeout--; } rc = -ETIMEDOUT; dev_err(&chip->spmi_dev->dev, "%s: transaction timed out, RX_FLOW_STATUS never set to 1, rc=%d\n", __func__, rc); return rc; } /* * Wait for TX_GO_STATUS to be set to 0 which indicates that another BIF word * can be enqueued. */ static int qpnp_bsi_wait_for_tx_go(struct qpnp_bsi_chip *chip) { int rc = 0; int timeout; u8 reg; timeout = QPNP_BSI_MAX_TRANSMIT_CYCLES * qpnp_bsi_get_tau_us(chip); /* Wait for TX_GO_STATUS == 0 or ERR_FLAG == 1. */ while (timeout > 0) { rc = qpnp_bsi_read(chip, QPNP_BSI_REG_STATUS, &reg, 1); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_write() failed, rc=%d\n", __func__, rc); return rc; } if (reg & QPNP_BSI_STATUS_ERROR) { dev_err(&chip->spmi_dev->dev, "%s: transaction error occurred, BSI error=%d\n", __func__, qpnp_bsi_get_bsi_error(chip)); return -EIO; } if (!(reg & QPNP_BSI_STATUS_TX_GO_BUSY)) { /* BSI TX is ready to accept the next word. 
*/ return 0; } udelay(1); timeout--; } rc = -ETIMEDOUT; dev_err(&chip->spmi_dev->dev, "%s: transaction timed out, TX_GO_STATUS never set to 0, rc=%d\n", __func__, rc); return rc; } /* * Wait for TX_BUSY to be set to 0 which indicates that the TX data has been * successfully transmitted. */ static int qpnp_bsi_wait_for_tx_idle(struct qpnp_bsi_chip *chip) { int rc = 0; int timeout; u8 reg; timeout = QPNP_BSI_MAX_TRANSMIT_CYCLES * qpnp_bsi_get_tau_us(chip); /* Wait for TX_BUSY == 0 or ERR_FLAG == 1. */ while (timeout > 0) { rc = qpnp_bsi_read(chip, QPNP_BSI_REG_STATUS, &reg, 1); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_write() failed, rc=%d\n", __func__, rc); return rc; } if (reg & QPNP_BSI_STATUS_ERROR) { dev_err(&chip->spmi_dev->dev, "%s: transaction error occurred, BSI error=%d\n", __func__, qpnp_bsi_get_bsi_error(chip)); return -EIO; } if (!(reg & QPNP_BSI_STATUS_TX_BUSY)) { /* BSI TX is idle. */ return 0; } udelay(1); timeout--; } rc = -ETIMEDOUT; dev_err(&chip->spmi_dev->dev, "%s: transaction timed out, TX_BUSY never set to 0, rc=%d\n", __func__, rc); return rc; } /* * For burst read length greater than 1, send necessary RBL and RBE BIF bus * commands. */ static int qpnp_bsi_send_burst_length(struct qpnp_bsi_chip *chip, int burst_len) { int rc = 0; /* * Send burst read length bus commands according to the following: * * 1 --> No RBE or RBL * 2 - 15 = x --> RBLx * 16 - 255 = 16 * y + x --> RBEy and RBLx (RBL0 not sent) * 256 --> RBL0 */ if (burst_len == 256) { rc = qpnp_bsi_issue_transaction(chip, BIF_TRANS_BC, BIF_CMD_RBL); if (rc) return rc; rc = qpnp_bsi_wait_for_tx_go(chip); if (rc) return rc; } else if (burst_len >= 16) { rc = qpnp_bsi_issue_transaction(chip, BIF_TRANS_BC, BIF_CMD_RBE + (burst_len / 16)); if (rc) return rc; rc = qpnp_bsi_wait_for_tx_go(chip); if (rc) return rc; } if (burst_len % 16 && burst_len > 1) { rc = qpnp_bsi_issue_transaction(chip, BIF_TRANS_BC, BIF_CMD_RBL + (burst_len % 16)); if (rc) return rc; rc = qpnp_bsi_wait_for_tx_go(chip); if (rc) return rc; } return rc; } /* Perform validation steps on received BIF data. */ static int qpnp_bsi_validate_rx_data(struct qpnp_bsi_chip *chip, int response, u8 rx2_data, bool last_word) { int err = -EIO; if (rx2_data & QPNP_BSI_RX_SRC_LOOPBACK_FLAG) { dev_err(&chip->spmi_dev->dev, "%s: unexpected loopback data read, rc=%d\n", __func__, err); return err; } if (!(response & BIF_SLAVE_RD_ACK)) { dev_err(&chip->spmi_dev->dev, "%s: BIF register read error=0x%02X\n", __func__, response & BIF_SLAVE_RD_ERR); return err; } if (last_word && !(response & BIF_SLAVE_RD_EOT)) { dev_err(&chip->spmi_dev->dev, "%s: BIF register read error, last RD packet has EOT=0\n", __func__); return err; } else if (!last_word && (response & BIF_SLAVE_RD_EOT)) { dev_err(&chip->spmi_dev->dev, "%s: BIF register read error, RD packet other than last has EOT=1\n", __func__); return err; } return 0; } /* Performs all BIF transactions in order to utilize burst reads. 
*/ static int qpnp_bsi_read_slave_registers(struct bif_ctrl_dev *bdev, u16 addr, u8 *data, int len) { struct qpnp_bsi_chip *chip = bdev_get_drvdata(bdev); int response = 0; unsigned long flags; int rc, rc2, i, burst_len; u8 buf[3]; qpnp_bsi_set_com_mode(chip, QPNP_BSI_COM_MODE_POLL); rc = qpnp_bsi_set_bus_state(bdev, BIF_BUS_STATE_ACTIVE); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: failed to set bus state, rc=%d\n", __func__, rc); return rc; } rc = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_DATA_TX_DATA); if (rc) return rc; rc = qpnp_bsi_clear_bsi_error(chip); if (rc) return rc; qpnp_bsi_clear_irq_flags(chip); while (len > 0) { burst_len = min(len, 256); rc = qpnp_bsi_send_burst_length(chip, burst_len); if (rc) return rc; rc = qpnp_bsi_issue_transaction(chip, BIF_TRANS_ERA, addr >> 8); if (rc) return rc; rc = qpnp_bsi_wait_for_tx_go(chip); if (rc) return rc; /* Perform burst read in atomic context. */ local_irq_save(flags); rc = qpnp_bsi_issue_transaction(chip, BIF_TRANS_RRA, addr & 0xFF); if (rc) goto burst_err; for (i = 0; i < burst_len; i++) { rc = qpnp_bsi_wait_for_rx_data(chip); if (rc) goto burst_err; /* Read the RX_DATA bytes. */ rc = qpnp_bsi_read(chip, QPNP_BSI_REG_RX_DATA_LOW, buf, 3); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_read() failed, rc=%d\n", __func__, rc); goto burst_err; } response = ((buf[1] & QPNP_BSI_RX_DATA_HIGH_MASK) << 8) | buf[0]; rc = qpnp_bsi_validate_rx_data(chip, response, buf[2], i == burst_len - 1); if (rc) goto burst_err; data[i] = buf[0]; } local_irq_restore(flags); addr += burst_len; data += burst_len; len -= burst_len; } rc = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_OFF_TX_OFF); return rc; burst_err: local_irq_restore(flags); rc2 = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_OFF_TX_OFF); if (rc2 < 0) rc = rc2; return rc; } /* Performs all BIF transactions in order to utilize burst writes. */ static int qpnp_bsi_write_slave_registers(struct bif_ctrl_dev *bdev, u16 addr, const u8 *data, int len) { struct qpnp_bsi_chip *chip = bdev_get_drvdata(bdev); unsigned long flags; int rc, rc2, i; qpnp_bsi_set_com_mode(chip, QPNP_BSI_COM_MODE_POLL); rc = qpnp_bsi_set_bus_state(bdev, BIF_BUS_STATE_ACTIVE); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: failed to set bus state, rc=%d\n", __func__, rc); return rc; } rc = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_OFF_TX_DATA); if (rc) return rc; rc = qpnp_bsi_clear_bsi_error(chip); if (rc) return rc; qpnp_bsi_clear_irq_flags(chip); rc = qpnp_bsi_issue_transaction(chip, BIF_TRANS_ERA, addr >> 8); if (rc) return rc; rc = qpnp_bsi_wait_for_tx_go(chip); if (rc) return rc; rc = qpnp_bsi_issue_transaction(chip, BIF_TRANS_WRA, addr & 0xFF); if (rc) return rc; rc = qpnp_bsi_wait_for_tx_go(chip); if (rc) return rc; /* Perform burst write in atomic context. 
*/ local_irq_save(flags); for (i = 0; i < len; i++) { rc = qpnp_bsi_issue_transaction(chip, BIF_TRANS_WD, data[i]); if (rc) goto burst_err; rc = qpnp_bsi_wait_for_tx_go(chip); if (rc) goto burst_err; } rc = qpnp_bsi_wait_for_tx_idle(chip); if (rc) goto burst_err; local_irq_restore(flags); rc = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_OFF_TX_OFF); return rc; burst_err: local_irq_restore(flags); rc2 = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_OFF_TX_OFF); if (rc2 < 0) rc = rc2; return rc; } static int qpnp_bsi_bus_set_interrupt_mode(struct bif_ctrl_dev *bdev) { struct qpnp_bsi_chip *chip = bdev_get_drvdata(bdev); int rc; qpnp_bsi_set_com_mode(chip, QPNP_BSI_COM_MODE_IRQ); /* * Temporarily change the bus to active state so that the EINT command * can be issued. */ rc = qpnp_bsi_set_bus_state(bdev, BIF_BUS_STATE_ACTIVE); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: failed to set bus state, rc=%d\n", __func__, rc); return rc; } rc = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_INT_TX_DATA); if (rc) return rc; /* * Set the bus state to interrupt mode so that an RX interrupt which * occurs immediately after issuing the EINT command is handled * properly. */ chip->state = BIF_BUS_STATE_INTERRUPT; rc = qpnp_bsi_clear_bsi_error(chip); if (rc) return rc; qpnp_bsi_clear_irq_flags(chip); /* Send EINT bus command. */ rc = qpnp_bsi_issue_transaction_wait_for_tx(chip, BIF_TRANS_BC, BIF_CMD_EINT); if (rc) return rc; rc = qpnp_bsi_rx_tx_config(chip, QPNP_BSI_RX_TX_STATE_RX_INT_TX_OFF); return rc; } static int qpnp_bsi_bus_set_active_mode(struct bif_ctrl_dev *bdev, int prev_state) { struct qpnp_bsi_chip *chip = bdev_get_drvdata(bdev); int rc; u8 buf[2]; rc = qpnp_bsi_clear_bsi_error(chip); if (rc) return rc; buf[0] = QPNP_BSI_MODE_TX_PULSE_INT | QPNP_BSI_MODE_RX_PULSE_DATA; buf[1] = QPNP_BSI_TX_ENABLE | QPNP_BSI_RX_DISABLE; if (prev_state == BIF_BUS_STATE_INTERRUPT) buf[0] |= QPNP_BSI_MODE_TX_PULSE_T_1_TAU; else buf[0] |= QPNP_BSI_MODE_TX_PULSE_T_WAKE; rc = qpnp_bsi_write(chip, QPNP_BSI_REG_MODE, buf, 2); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_write() failed, rc=%d\n", __func__, rc); return rc; } buf[0] = QPNP_BSI_TX_CTRL_GO; /* Initiate BCL low pulse. */ rc = qpnp_bsi_write(chip, QPNP_BSI_REG_TX_CTRL, buf, 1); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_write() failed, rc=%d\n", __func__, rc); return rc; } switch (prev_state) { case BIF_BUS_STATE_INTERRUPT: udelay(qpnp_bsi_get_tau_us(chip) * 4); break; case BIF_BUS_STATE_STANDBY: udelay(qpnp_bsi_get_tau_us(chip) + QPNP_BSI_MAX_SLAVE_ACTIVIATION_DELAY_US + QPNP_BSI_POWER_UP_LOW_DELAY_US); break; case BIF_BUS_STATE_POWER_DOWN: case BIF_BUS_STATE_MASTER_DISABLED: msleep(QPNP_BSI_MAX_SLAVE_POWER_UP_DELAY_MS); break; } return rc; } static int qpnp_bsi_get_bus_state(struct bif_ctrl_dev *bdev) { struct qpnp_bsi_chip *chip = bdev_get_drvdata(bdev); return chip->state; } static int qpnp_bsi_set_bus_state(struct bif_ctrl_dev *bdev, int state) { struct qpnp_bsi_chip *chip = bdev_get_drvdata(bdev); int rc = 0; u8 reg; if (state == chip->state) return 0; if (chip->state == BIF_BUS_STATE_MASTER_DISABLED) { /* * Enable the BSI peripheral when transitioning from a disabled * bus state to any of the active bus states so that BIF * transactions can take place. 
*/ reg = QPNP_BSI_ENABLE; rc = qpnp_bsi_write(chip, QPNP_BSI_REG_ENABLE, &reg, 1); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_write() failed, rc=%d\n", __func__, rc); return rc; } } switch (state) { case BIF_BUS_STATE_MASTER_DISABLED: /* Disable the BSI peripheral. */ reg = QPNP_BSI_DISABLE; rc = qpnp_bsi_write(chip, QPNP_BSI_REG_ENABLE, &reg, 1); if (rc) dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_write() failed, rc=%d\n", __func__, rc); break; case BIF_BUS_STATE_POWER_DOWN: rc = qpnp_bsi_bus_transaction(bdev, BIF_TRANS_BC, BIF_CMD_PDWN); if (rc) dev_err(&chip->spmi_dev->dev, "%s: failed to enable power down mode, rc=%d\n", __func__, rc); break; case BIF_BUS_STATE_STANDBY: rc = qpnp_bsi_bus_transaction(bdev, BIF_TRANS_BC, BIF_CMD_STBY); if (rc) dev_err(&chip->spmi_dev->dev, "%s: failed to enable standby mode, rc=%d\n", __func__, rc); break; case BIF_BUS_STATE_ACTIVE: rc = qpnp_bsi_bus_set_active_mode(bdev, chip->state); if (rc) dev_err(&chip->spmi_dev->dev, "%s: failed to enable active mode, rc=%d\n", __func__, rc); break; case BIF_BUS_STATE_INTERRUPT: /* * qpnp_bsi_bus_set_interrupt_mode() internally sets * chip->state = BIF_BUS_STATE_INTERRUPT immediately before * issuing the EINT command. */ rc = qpnp_bsi_bus_set_interrupt_mode(bdev); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: failed to enable interrupt mode, rc=%d\n", __func__, rc); } else if (chip->state == BIF_BUS_STATE_ACTIVE) { /* * A slave interrupt was received immediately after * issuing the EINT command. Therefore, stay in active * communication mode. */ state = BIF_BUS_STATE_ACTIVE; } break; default: rc = -EINVAL; dev_err(&chip->spmi_dev->dev, "%s: invalid state=%d\n", __func__, state); } if (!rc) chip->state = state; return rc; } /* Returns the smallest tau_bif that is greater than or equal to period_ns. */ static int qpnp_bsi_tau_bif_higher(int period_ns, int sample_mask) { const int *supported_period_ns = (sample_mask == QPNP_BSI_TAU_CONFIG_SAMPLE_4X ? qpnp_bsi_tau_period.period_4x_ns : qpnp_bsi_tau_period.period_8x_ns); int smallest_tau_bif = INT_MAX; int i; for (i = QPNP_BSI_NUM_CLOCK_PERIODS - 1; i >= 0; i--) { if (period_ns <= supported_period_ns[i]) { smallest_tau_bif = supported_period_ns[i]; break; } } return smallest_tau_bif; } /* Returns the largest tau_bif that is less than or equal to period_ns. */ static int qpnp_bsi_tau_bif_lower(int period_ns, int sample_mask) { const int *supported_period_ns = (sample_mask == QPNP_BSI_TAU_CONFIG_SAMPLE_4X ? qpnp_bsi_tau_period.period_4x_ns : qpnp_bsi_tau_period.period_8x_ns); int largest_tau_bif = 0; int i; for (i = 0; i < QPNP_BSI_NUM_CLOCK_PERIODS; i++) { if (period_ns >= supported_period_ns[i]) { largest_tau_bif = supported_period_ns[i]; break; } } return largest_tau_bif; } /* * Moves period_ns into allowed range and then sets tau bif to the period that * is greater than or equal to period_ns. */ static int qpnp_bsi_set_tau_bif(struct qpnp_bsi_chip *chip, int period_ns) { const int *supported_period_ns = (chip->tau_sampling_mask == QPNP_BSI_TAU_CONFIG_SAMPLE_4X ? qpnp_bsi_tau_period.period_4x_ns : qpnp_bsi_tau_period.period_8x_ns); int idx = 0; int i, rc; u8 reg; if (period_ns < chip->bdesc.bus_clock_min_ns) period_ns = chip->bdesc.bus_clock_min_ns; else if (period_ns > chip->bdesc.bus_clock_max_ns) period_ns = chip->bdesc.bus_clock_max_ns; for (i = QPNP_BSI_NUM_CLOCK_PERIODS - 1; i >= 0; i--) { if (period_ns <= supported_period_ns[i]) { idx = i; break; } } /* Set the tau BIF clock period and sampling rate. 
*/ reg = chip->tau_sampling_mask | idx; rc = qpnp_bsi_write(chip, QPNP_BSI_REG_TAU_CONFIG, &reg, 1); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: qpnp_bsi_write() failed, rc=%d\n", __func__, rc); return rc; } chip->tau_index = idx; return 0; } static int qpnp_bsi_get_bus_period(struct bif_ctrl_dev *bdev) { struct qpnp_bsi_chip *chip = bdev_get_drvdata(bdev); return qpnp_bsi_get_tau_ns(chip); } static int qpnp_bsi_set_bus_period(struct bif_ctrl_dev *bdev, int period_ns) { struct qpnp_bsi_chip *chip = bdev_get_drvdata(bdev); return qpnp_bsi_set_tau_bif(chip, period_ns); } static int qpnp_bsi_get_battery_rid(struct bif_ctrl_dev *bdev) { struct qpnp_bsi_chip *chip = bdev_get_drvdata(bdev); struct qpnp_vadc_result adc_result; int rid_ohm, vid_uV, rc; s64 temp; if (chip->batt_id_adc_channel >= ADC_MAX_NUM) { dev_err(&chip->spmi_dev->dev, "%s: no ADC channel specified for Rid measurement\n", __func__); return -ENXIO; } rc = qpnp_vadc_read(chip->vadc_dev, chip->batt_id_adc_channel, &adc_result); if (!rc) { vid_uV = adc_result.physical; if (chip->vid_ref_uV - vid_uV <= 0) { rid_ohm = INT_MAX; } else { temp = (s64)chip->r_pullup_ohm * (s64)vid_uV; do_div(temp, chip->vid_ref_uV - vid_uV); if (temp > INT_MAX) rid_ohm = INT_MAX; else rid_ohm = temp; } } else { dev_err(&chip->spmi_dev->dev, "%s: qpnp_vadc_read(%d) failed, rc=%d\n", __func__, chip->batt_id_adc_channel, rc); rid_ohm = rc; } return rid_ohm; } /* * Returns 1 if a battery pack is present on the BIF bus, 0 if a battery pack * is not present, or errno if detection fails. * * Battery detection is based upon the idle BCL voltage. */ static int qpnp_bsi_get_battery_presence(struct bif_ctrl_dev *bdev) { struct qpnp_bsi_chip *chip = bdev_get_drvdata(bdev); u8 reg = 0x00; int rc; rc = spmi_ext_register_readl(chip->spmi_dev->ctrl, chip->spmi_dev->sid, chip->batt_id_stat_addr, &reg, 1); if (rc) { dev_err(&chip->spmi_dev->dev, "%s: spmi_ext_register_readl() failed, rc=%d\n", __func__, rc); return rc; } return !!(reg & QPNP_SMBB_BAT_IF_BATT_PRES_MASK); } static struct bif_ctrl_ops qpnp_bsi_ops = { .bus_transaction = qpnp_bsi_bus_transaction, .bus_transaction_query = qpnp_bsi_bus_transaction_query, .bus_transaction_read = qpnp_bsi_bus_transaction_read, .get_bus_state = qpnp_bsi_get_bus_state, .set_bus_state = qpnp_bsi_set_bus_state, .get_bus_period = qpnp_bsi_get_bus_period, .set_bus_period = qpnp_bsi_set_bus_period, .read_slave_registers = qpnp_bsi_read_slave_registers, .write_slave_registers = qpnp_bsi_write_slave_registers, .get_battery_rid = qpnp_bsi_get_battery_rid, .get_battery_presence = qpnp_bsi_get_battery_presence, }; /* Load all BSI properties from device tree. 
*/ static int __devinit qpnp_bsi_parse_dt(struct qpnp_bsi_chip *chip, struct spmi_device *spmi) { struct device *dev = &spmi->dev; struct device_node *node = spmi->dev.of_node; struct resource *res; int rc, temp; chip->batt_id_adc_channel = ADC_MAX_NUM; rc = of_property_read_u32(node, "qcom,channel-num", &chip->batt_id_adc_channel); if (!rc && (chip->batt_id_adc_channel < 0 || chip->batt_id_adc_channel >= ADC_MAX_NUM)) { dev_err(dev, "%s: invalid qcom,channel-num=%d specified\n", __func__, chip->batt_id_adc_channel); return -EINVAL; } chip->r_pullup_ohm = QPNP_BSI_DEFAULT_PULLUP_OHM; rc = of_property_read_u32(node, "qcom,pullup-ohms", &chip->r_pullup_ohm); if (!rc && (chip->r_pullup_ohm < QPNP_BSI_MIN_PULLUP_OHM || chip->r_pullup_ohm > QPNP_BSI_MAX_PULLUP_OHM)) { dev_err(dev, "%s: invalid qcom,pullup-ohms=%d property value\n", __func__, chip->r_pullup_ohm); return -EINVAL; } chip->vid_ref_uV = QPNP_BSI_DEFAULT_VID_REF_UV; rc = of_property_read_u32(node, "qcom,vref-microvolts", &chip->vid_ref_uV); if (!rc && (chip->vid_ref_uV < QPNP_BSI_MIN_VID_REF_UV || chip->vid_ref_uV > QPNP_BSI_MAX_VID_REF_UV)) { dev_err(dev, "%s: invalid qcom,vref-microvolts=%d property value\n", __func__, chip->vid_ref_uV); return -EINVAL; } res = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM, "bsi-base"); if (!res) { dev_err(dev, "%s: node is missing BSI base address\n", __func__); return -EINVAL; } chip->base_addr = res->start; res = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM, "batt-id-status"); if (!res) { dev_err(dev, "%s: node is missing BATT_ID status address\n", __func__); return -EINVAL; } chip->batt_id_stat_addr = res->start; chip->bdesc.name = spmi_get_primary_dev_name(spmi); if (!chip->bdesc.name) { dev_err(dev, "%s: label binding undefined for node %s\n", __func__, spmi->dev.of_node->full_name); return -EINVAL; } /* Use maximum range by default. */ chip->bdesc.bus_clock_min_ns = QPNP_BSI_MIN_CLOCK_SPEED_NS; chip->bdesc.bus_clock_max_ns = QPNP_BSI_MAX_CLOCK_SPEED_NS; chip->tau_sampling_mask = QPNP_BSI_TAU_CONFIG_SAMPLE_4X; rc = of_property_read_u32(node, "qcom,sample-rate", &temp); if (rc == 0) { if (temp == 4) { chip->tau_sampling_mask = QPNP_BSI_TAU_CONFIG_SAMPLE_4X; } else if (temp == 8) { chip->tau_sampling_mask = QPNP_BSI_TAU_CONFIG_SAMPLE_8X; } else { dev_err(dev, "%s: invalid qcom,sample-rate=%d. 
Only values of 4 and 8 are supported.\n", __func__, temp); return -EINVAL; } } rc = of_property_read_u32(node, "qcom,min-clock-period", &temp); if (rc == 0) chip->bdesc.bus_clock_min_ns = qpnp_bsi_tau_bif_higher(temp, chip->tau_sampling_mask); rc = of_property_read_u32(node, "qcom,max-clock-period", &temp); if (rc == 0) chip->bdesc.bus_clock_max_ns = qpnp_bsi_tau_bif_lower(temp, chip->tau_sampling_mask); if (chip->bdesc.bus_clock_min_ns > chip->bdesc.bus_clock_max_ns) { dev_err(dev, "%s: invalid qcom,min/max-clock-period.\n", __func__); return -EINVAL; } chip->irq[QPNP_BSI_IRQ_ERR] = spmi_get_irq_byname(spmi, NULL, "err"); if (chip->irq[QPNP_BSI_IRQ_ERR] < 0) { dev_err(dev, "%s: node is missing err irq\n", __func__); return chip->irq[QPNP_BSI_IRQ_ERR]; } chip->irq[QPNP_BSI_IRQ_RX] = spmi_get_irq_byname(spmi, NULL, "rx"); if (chip->irq[QPNP_BSI_IRQ_RX] < 0) { dev_err(dev, "%s: node is missing rx irq\n", __func__); return chip->irq[QPNP_BSI_IRQ_RX]; } chip->irq[QPNP_BSI_IRQ_TX] = spmi_get_irq_byname(spmi, NULL, "tx"); if (chip->irq[QPNP_BSI_IRQ_TX] < 0) { dev_err(dev, "%s: node is missing tx irq\n", __func__); return chip->irq[QPNP_BSI_IRQ_TX]; } chip->batt_present_irq = spmi_get_irq_byname(spmi, NULL, "batt-present"); if (chip->batt_present_irq < 0) { dev_err(dev, "%s: node is missing batt-present irq\n", __func__); return chip->batt_present_irq; } return rc; } /* Request all BSI and battery presence IRQs and set them as wakeable. */ static int __devinit qpnp_bsi_init_irqs(struct qpnp_bsi_chip *chip, struct device *dev) { int rc; rc = devm_request_irq(dev, chip->irq[QPNP_BSI_IRQ_ERR], qpnp_bsi_isr, IRQF_TRIGGER_RISING, "bsi-err", chip); if (rc < 0) { dev_err(dev, "%s: request for bsi-err irq %d failed, rc=%d\n", __func__, chip->irq[QPNP_BSI_IRQ_ERR], rc); return rc; } rc = irq_set_irq_wake(chip->irq[QPNP_BSI_IRQ_ERR], 1); if (rc < 0) { dev_err(dev, "%s: unable to set bsi-err irq %d as wakeable, rc=%d\n", __func__, chip->irq[QPNP_BSI_IRQ_ERR], rc); return rc; } rc = devm_request_irq(dev, chip->irq[QPNP_BSI_IRQ_RX], qpnp_bsi_isr, IRQF_TRIGGER_RISING, "bsi-rx", chip); if (rc < 0) { dev_err(dev, "%s: request for bsi-rx irq %d failed, rc=%d\n", __func__, chip->irq[QPNP_BSI_IRQ_RX], rc); goto set_unwakeable_irq_err; } rc = irq_set_irq_wake(chip->irq[QPNP_BSI_IRQ_RX], 1); if (rc < 0) { dev_err(dev, "%s: unable to set bsi-rx irq %d as wakeable, rc=%d\n", __func__, chip->irq[QPNP_BSI_IRQ_RX], rc); goto set_unwakeable_irq_err; } rc = devm_request_irq(dev, chip->irq[QPNP_BSI_IRQ_TX], qpnp_bsi_isr, IRQF_TRIGGER_RISING, "bsi-tx", chip); if (rc < 0) { dev_err(dev, "%s: request for bsi-tx irq %d failed, rc=%d\n", __func__, chip->irq[QPNP_BSI_IRQ_TX], rc); goto set_unwakeable_irq_rx; } rc = irq_set_irq_wake(chip->irq[QPNP_BSI_IRQ_TX], 1); if (rc < 0) { dev_err(dev, "%s: unable to set bsi-tx irq %d as wakeable, rc=%d\n", __func__, chip->irq[QPNP_BSI_IRQ_TX], rc); goto set_unwakeable_irq_rx; } rc = devm_request_threaded_irq(dev, chip->batt_present_irq, NULL, qpnp_bsi_batt_present_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_SHARED | IRQF_ONESHOT, "bsi-batt-present", chip); if (rc < 0) { dev_err(dev, "%s: request for bsi-batt-present irq %d failed, rc=%d\n", __func__, chip->batt_present_irq, rc); goto set_unwakeable_irq_tx; } rc = irq_set_irq_wake(chip->batt_present_irq, 1); if (rc < 0) { dev_err(dev, "%s: unable to set bsi-batt-present irq %d as wakeable, rc=%d\n", __func__, chip->batt_present_irq, rc); goto set_unwakeable_irq_tx; } return rc; set_unwakeable_irq_tx: 
irq_set_irq_wake(chip->irq[QPNP_BSI_IRQ_TX], 0); set_unwakeable_irq_rx: irq_set_irq_wake(chip->irq[QPNP_BSI_IRQ_RX], 0); set_unwakeable_irq_err: irq_set_irq_wake(chip->irq[QPNP_BSI_IRQ_ERR], 0); return rc; } static void qpnp_bsi_cleanup_irqs(struct qpnp_bsi_chip *chip) { irq_set_irq_wake(chip->irq[QPNP_BSI_IRQ_ERR], 0); irq_set_irq_wake(chip->irq[QPNP_BSI_IRQ_RX], 0); irq_set_irq_wake(chip->irq[QPNP_BSI_IRQ_TX], 0); irq_set_irq_wake(chip->batt_present_irq, 0); } static int __devinit qpnp_bsi_probe(struct spmi_device *spmi) { struct device *dev = &spmi->dev; struct qpnp_bsi_chip *chip; int rc; u8 type[2]; if (!spmi->dev.of_node) { dev_err(dev, "%s: device node missing\n", __func__); return -ENODEV; } chip = devm_kzalloc(dev, sizeof(struct qpnp_bsi_chip), GFP_KERNEL); if (!chip) { dev_err(dev, "%s: Can't allocate qpnp_bsi\n", __func__); return -ENOMEM; } rc = qpnp_bsi_parse_dt(chip, spmi); if (rc) { dev_err(dev, "%s: device tree parsing failed, rc=%d\n", __func__, rc); return rc; } INIT_WORK(&chip->slave_irq_work, qpnp_bsi_slave_irq_work); rc = qpnp_bsi_init_irqs(chip, dev); if (rc) { dev_err(dev, "%s: IRQ initialization failed, rc=%d\n", __func__, rc); return rc; } chip->spmi_dev = spmi; chip->bdesc.ops = &qpnp_bsi_ops; chip->state = BIF_BUS_STATE_MASTER_DISABLED; chip->com_mode = QPNP_BSI_COM_MODE_IRQ; rc = qpnp_bsi_read(chip, QPNP_BSI_REG_TYPE, type, 2); if (rc) { dev_err(dev, "%s: could not read type register, rc=%d\n", __func__, rc); goto cleanup_irqs; } if (type[0] != QPNP_BSI_TYPE || type[1] != QPNP_BSI_SUBTYPE) { dev_err(dev, "%s: BSI peripheral is not present; type=0x%02X, subtype=0x%02X\n", __func__, type[0], type[1]); rc = -ENODEV; goto cleanup_irqs; } /* Ensure that ADC channel is available if it was specified. */ if (chip->batt_id_adc_channel < ADC_MAX_NUM) { chip->vadc_dev = qpnp_get_vadc(dev, "bsi"); if (IS_ERR(chip->vadc_dev)) { rc = PTR_ERR(chip->vadc_dev); if (rc != -EPROBE_DEFER) pr_err("missing vadc property, rc=%d\n", rc); /* Probe retry, do not print an error message */ goto cleanup_irqs; } } rc = qpnp_bsi_set_tau_bif(chip, chip->bdesc.bus_clock_min_ns); if (rc) { dev_err(dev, "%s: qpnp_bsi_set_tau_bif() failed, rc=%d\n", __func__, rc); goto cleanup_irqs; } chip->bdev = bif_ctrl_register(&chip->bdesc, dev, chip, spmi->dev.of_node); if (IS_ERR(chip->bdev)) { rc = PTR_ERR(chip->bdev); dev_err(dev, "%s: bif_ctrl_register failed, rc=%d\n", __func__, rc); goto cleanup_irqs; } dev_set_drvdata(dev, chip); return rc; cleanup_irqs: qpnp_bsi_cleanup_irqs(chip); return rc; } static int __devexit qpnp_bsi_remove(struct spmi_device *spmi) { struct qpnp_bsi_chip *chip = dev_get_drvdata(&spmi->dev); dev_set_drvdata(&spmi->dev, NULL); if (chip) { bif_ctrl_unregister(chip->bdev); qpnp_bsi_cleanup_irqs(chip); } return 0; } static struct of_device_id spmi_match_table[] = { { .compatible = QPNP_BSI_DRIVER_NAME, }, {} }; static const struct spmi_device_id qpnp_bsi_id[] = { { QPNP_BSI_DRIVER_NAME, 0 }, { } }; MODULE_DEVICE_TABLE(spmi, qpnp_bsi_id); static struct spmi_driver qpnp_bsi_driver = { .driver = { .name = QPNP_BSI_DRIVER_NAME, .of_match_table = spmi_match_table, .owner = THIS_MODULE, }, .probe = qpnp_bsi_probe, .remove = __devexit_p(qpnp_bsi_remove), .id_table = qpnp_bsi_id, }; static int __init qpnp_bsi_init(void) { return spmi_driver_register(&qpnp_bsi_driver); } static void __exit qpnp_bsi_exit(void) { spmi_driver_unregister(&qpnp_bsi_driver); } MODULE_DESCRIPTION("QPNP PMIC BSI driver"); MODULE_LICENSE("GPL v2"); arch_initcall(qpnp_bsi_init); module_exit(qpnp_bsi_exit);
gpl-2.0
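In qpnp_bsi_get_battery_rid() above, the battery-pack ID resistance is inferred from a pull-up voltage divider: Rid = Rpullup * Vid / (Vref - Vid), clamped to INT_MAX when the measured voltage reaches or exceeds the reference. A minimal stand-alone sketch of that arithmetic follows; the helper name and the sample values are invented for illustration and are not part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

/* Illustrative only: mirrors the divider math of qpnp_bsi_get_battery_rid(),
 * with plain 64-bit division standing in for the kernel's do_div(). */
static int example_rid_ohm(int r_pullup_ohm, int vid_ref_uV, int vid_uV)
{
	int64_t temp;

	if (vid_ref_uV - vid_uV <= 0)
		return INT_MAX;	/* divider fully pulled up: treat Rid as infinite */

	temp = (int64_t)r_pullup_ohm * vid_uV;
	temp /= (vid_ref_uV - vid_uV);

	return temp > INT_MAX ? INT_MAX : (int)temp;
}

int main(void)
{
	/* Hypothetical values: 100 kOhm pull-up, 1.8 V reference, 0.9 V at BATT_ID.
	 * The divider is balanced, so Rid comes out equal to the pull-up. */
	printf("Rid = %d ohm\n", example_rid_ohm(100000, 1800000, 900000));
	return 0;
}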
Krabappel2548/u8500_kernel_sources
.fr-dbVriS/kernel/kernel/time/jiffies.c
1661
2707
/*********************************************************************** * linux/kernel/time/jiffies.c * * This file contains the jiffies based clocksource. * * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ************************************************************************/ #include <linux/clocksource.h> #include <linux/jiffies.h> #include <linux/init.h> /* The Jiffies based clocksource is the lowest common * denominator clock source which should function on * all systems. It has the same coarse resolution as * the timer interrupt frequency HZ and it suffers * inaccuracies caused by missed or lost timer * interrupts and the inability for the timer * interrupt hardware to accurately tick at the * requested HZ value. It is also not recommended * for "tick-less" systems. */ #define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ)) /* Since jiffies uses a simple NSEC_PER_JIFFY multiplier * conversion, the .shift value could be zero. However * this would make NTP adjustments impossible as they are * in units of 1/2^.shift. Thus we use JIFFIES_SHIFT to * shift both the numerator and denominator the same * amount, and give ntp adjustments in units of 1/2^8 * * The value 8 is somewhat carefully chosen, as anything * larger can result in overflows. NSEC_PER_JIFFY grows as * HZ shrinks, so values greater than 8 overflow 32bits when * HZ=100. */ #define JIFFIES_SHIFT 8 static cycle_t jiffies_read(struct clocksource *cs) { return (cycle_t) jiffies; } struct clocksource clocksource_jiffies = { .name = "jiffies", .rating = 1, /* lowest valid rating*/ .read = jiffies_read, .mask = 0xffffffff, /*32bits*/ .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ .shift = JIFFIES_SHIFT, }; static int __init init_jiffies_clocksource(void) { return clocksource_register(&clocksource_jiffies); } core_initcall(init_jiffies_clocksource); struct clocksource * __init __weak clocksource_default_clock(void) { return &clocksource_jiffies; }
gpl-2.0
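The JIFFIES_SHIFT comment in the jiffies clocksource above argues that a shift larger than 8 can overflow the 32-bit .mult field once HZ drops to 100. The small stand-alone check below reproduces that arithmetic, using the plain NSEC_PER_SEC / HZ approximation of the jiffy length rather than the ACTHZ fixed-point form.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t nsec_per_sec = 1000000000ULL;
	const unsigned int hz = 100;			/* worst case cited in the comment */
	uint64_t nsec_per_jiffy = nsec_per_sec / hz;	/* ~10,000,000 ns per tick */
	unsigned int shift;

	for (shift = 8; shift <= 9; shift++) {
		uint64_t mult = nsec_per_jiffy << shift;

		printf("shift=%u mult=%llu -> %s\n", shift,
		       (unsigned long long)mult,
		       mult > 0xffffffffULL ? "overflows a u32" : "fits in a u32");
	}
	return 0;
}

With HZ=100, a shift of 8 yields a mult of about 2.56e9, which still fits in 32 bits, while a shift of 9 yields about 5.12e9 and overflows, which matches the comment's choice of 8.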
fkfk/sc02b_kernel
kernel/time/jiffies.c
1661
2707
/*********************************************************************** * linux/kernel/time/jiffies.c * * This file contains the jiffies based clocksource. * * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ************************************************************************/ #include <linux/clocksource.h> #include <linux/jiffies.h> #include <linux/init.h> /* The Jiffies based clocksource is the lowest common * denominator clock source which should function on * all systems. It has the same coarse resolution as * the timer interrupt frequency HZ and it suffers * inaccuracies caused by missed or lost timer * interrupts and the inability for the timer * interrupt hardware to accurately tick at the * requested HZ value. It is also not recommended * for "tick-less" systems. */ #define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ)) /* Since jiffies uses a simple NSEC_PER_JIFFY multiplier * conversion, the .shift value could be zero. However * this would make NTP adjustments impossible as they are * in units of 1/2^.shift. Thus we use JIFFIES_SHIFT to * shift both the numerator and denominator the same * amount, and give ntp adjustments in units of 1/2^8 * * The value 8 is somewhat carefully chosen, as anything * larger can result in overflows. NSEC_PER_JIFFY grows as * HZ shrinks, so values greater than 8 overflow 32bits when * HZ=100. */ #define JIFFIES_SHIFT 8 static cycle_t jiffies_read(struct clocksource *cs) { return (cycle_t) jiffies; } struct clocksource clocksource_jiffies = { .name = "jiffies", .rating = 1, /* lowest valid rating*/ .read = jiffies_read, .mask = 0xffffffff, /*32bits*/ .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ .shift = JIFFIES_SHIFT, }; static int __init init_jiffies_clocksource(void) { return clocksource_register(&clocksource_jiffies); } core_initcall(init_jiffies_clocksource); struct clocksource * __init __weak clocksource_default_clock(void) { return &clocksource_jiffies; }
gpl-2.0
rbauduin/mptcp
drivers/staging/iio/resolver/ad2s90.c
1661
2696
/* * ad2s90.c simple support for the ADI Resolver to Digital Converters: AD2S90 * * Copyright (c) 2010-2010 Analog Devices Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/types.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/module.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> struct ad2s90_state { struct mutex lock; struct spi_device *sdev; u8 rx[2] ____cacheline_aligned; }; static int ad2s90_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long m) { int ret; struct ad2s90_state *st = iio_priv(indio_dev); mutex_lock(&st->lock); ret = spi_read(st->sdev, st->rx, 2); if (ret) goto error_ret; *val = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); error_ret: mutex_unlock(&st->lock); /* propagate spi_read() errors instead of returning a stale sample */ return ret ? ret : IIO_VAL_INT; } static const struct iio_info ad2s90_info = { .read_raw = &ad2s90_read_raw, .driver_module = THIS_MODULE, }; static const struct iio_chan_spec ad2s90_chan = { .type = IIO_ANGL, .indexed = 1, .channel = 0, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), }; static int ad2s90_probe(struct spi_device *spi) { struct iio_dev *indio_dev; struct ad2s90_state *st; int ret = 0; indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); if (!indio_dev) return -ENOMEM; st = iio_priv(indio_dev); spi_set_drvdata(spi, indio_dev); mutex_init(&st->lock); st->sdev = spi; indio_dev->dev.parent = &spi->dev; indio_dev->info = &ad2s90_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = &ad2s90_chan; indio_dev->num_channels = 1; indio_dev->name = spi_get_device_id(spi)->name; ret = iio_device_register(indio_dev); if (ret) return ret; /* need 600ns between CS and the first falling edge of SCLK */ spi->max_speed_hz = 830000; spi->mode = SPI_MODE_3; spi_setup(spi); return 0; } static int ad2s90_remove(struct spi_device *spi) { iio_device_unregister(spi_get_drvdata(spi)); return 0; } static const struct spi_device_id ad2s90_id[] = { { "ad2s90" }, {} }; MODULE_DEVICE_TABLE(spi, ad2s90_id); static struct spi_driver ad2s90_driver = { .driver = { .name = "ad2s90", .owner = THIS_MODULE, }, .probe = ad2s90_probe, .remove = ad2s90_remove, .id_table = ad2s90_id, }; module_spi_driver(ad2s90_driver); MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>"); MODULE_DESCRIPTION("Analog Devices AD2S90 Resolver to Digital SPI driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
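ad2s90_read_raw() above assembles a 12-bit angle sample from the two SPI bytes: the first byte provides the upper eight bits and the high nibble of the second byte the lower four. The sketch below repeats that packing on the host and converts the result to degrees under the assumption that the 12-bit code spans one full revolution; the helper names are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Same packing as ad2s90_read_raw(): 8 MSBs from rx[0],
 * 4 LSBs from the high nibble of rx[1]. */
static unsigned int example_pack_angle(const uint8_t rx[2])
{
	return ((unsigned int)rx[0] << 4) | ((rx[1] & 0xF0) >> 4);
}

int main(void)
{
	uint8_t rx[2] = { 0x80, 0x00 };	/* hypothetical SPI read: mid-scale code */
	unsigned int raw = example_pack_angle(rx);

	/* Assumes a linear 0..4095 -> 0..360 degree mapping. */
	printf("raw=%u angle=%.2f deg\n", raw, raw * 360.0 / 4096.0);
	return 0;
}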
SolidRun/linux-imx6-3.14
drivers/tty/serial/lantiq.c
2173
18501
/* * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Copyright (C) 2004 Infineon IFAP DC COM CPE * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org> * Copyright (C) 2007 John Crispin <blogic@openwrt.org> * Copyright (C) 2010 Thomas Langer, <thomas.langer@lantiq.com> */ #include <linux/slab.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/of_platform.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/gpio.h> #include <lantiq_soc.h> #define PORT_LTQ_ASC 111 #define MAXPORTS 2 #define UART_DUMMY_UER_RX 1 #define DRVNAME "lantiq,asc" #ifdef __BIG_ENDIAN #define LTQ_ASC_TBUF (0x0020 + 3) #define LTQ_ASC_RBUF (0x0024 + 3) #else #define LTQ_ASC_TBUF 0x0020 #define LTQ_ASC_RBUF 0x0024 #endif #define LTQ_ASC_FSTAT 0x0048 #define LTQ_ASC_WHBSTATE 0x0018 #define LTQ_ASC_STATE 0x0014 #define LTQ_ASC_IRNCR 0x00F8 #define LTQ_ASC_CLC 0x0000 #define LTQ_ASC_ID 0x0008 #define LTQ_ASC_PISEL 0x0004 #define LTQ_ASC_TXFCON 0x0044 #define LTQ_ASC_RXFCON 0x0040 #define LTQ_ASC_CON 0x0010 #define LTQ_ASC_BG 0x0050 #define LTQ_ASC_IRNREN 0x00F4 #define ASC_IRNREN_TX 0x1 #define ASC_IRNREN_RX 0x2 #define ASC_IRNREN_ERR 0x4 #define ASC_IRNREN_TX_BUF 0x8 #define ASC_IRNCR_TIR 0x1 #define ASC_IRNCR_RIR 0x2 #define ASC_IRNCR_EIR 0x4 #define ASCOPT_CSIZE 0x3 #define TXFIFO_FL 1 #define RXFIFO_FL 1 #define ASCCLC_DISS 0x2 #define ASCCLC_RMCMASK 0x0000FF00 #define ASCCLC_RMCOFFSET 8 #define ASCCON_M_8ASYNC 0x0 #define ASCCON_M_7ASYNC 0x2 #define ASCCON_ODD 0x00000020 #define ASCCON_STP 0x00000080 #define ASCCON_BRS 0x00000100 #define ASCCON_FDE 0x00000200 #define ASCCON_R 0x00008000 #define ASCCON_FEN 0x00020000 #define ASCCON_ROEN 0x00080000 #define ASCCON_TOEN 0x00100000 #define ASCSTATE_PE 0x00010000 #define ASCSTATE_FE 0x00020000 #define ASCSTATE_ROE 0x00080000 #define ASCSTATE_ANY (ASCSTATE_ROE|ASCSTATE_PE|ASCSTATE_FE) #define ASCWHBSTATE_CLRREN 0x00000001 #define ASCWHBSTATE_SETREN 0x00000002 #define ASCWHBSTATE_CLRPE 0x00000004 #define ASCWHBSTATE_CLRFE 0x00000008 #define ASCWHBSTATE_CLRROE 0x00000020 #define ASCTXFCON_TXFEN 0x0001 #define ASCTXFCON_TXFFLU 0x0002 #define ASCTXFCON_TXFITLMASK 0x3F00 #define ASCTXFCON_TXFITLOFF 8 #define ASCRXFCON_RXFEN 0x0001 #define ASCRXFCON_RXFFLU 0x0002 #define ASCRXFCON_RXFITLMASK 0x3F00 #define ASCRXFCON_RXFITLOFF 8 #define ASCFSTAT_RXFFLMASK 0x003F #define ASCFSTAT_TXFFLMASK 0x3F00 #define ASCFSTAT_TXFREEMASK 0x3F000000 #define ASCFSTAT_TXFREEOFF 24 static void lqasc_tx_chars(struct uart_port *port); static struct ltq_uart_port *lqasc_port[MAXPORTS]; static struct uart_driver lqasc_reg; static DEFINE_SPINLOCK(ltq_asc_lock); struct 
ltq_uart_port { struct uart_port port; /* clock used to derive divider */ struct clk *fpiclk; /* clock gating of the ASC core */ struct clk *clk; unsigned int tx_irq; unsigned int rx_irq; unsigned int err_irq; }; static inline struct ltq_uart_port *to_ltq_uart_port(struct uart_port *port) { return container_of(port, struct ltq_uart_port, port); } static void lqasc_stop_tx(struct uart_port *port) { return; } static void lqasc_start_tx(struct uart_port *port) { unsigned long flags; spin_lock_irqsave(&ltq_asc_lock, flags); lqasc_tx_chars(port); spin_unlock_irqrestore(&ltq_asc_lock, flags); return; } static void lqasc_stop_rx(struct uart_port *port) { ltq_w32(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); } static void lqasc_enable_ms(struct uart_port *port) { } static int lqasc_rx_chars(struct uart_port *port) { struct tty_port *tport = &port->state->port; unsigned int ch = 0, rsr = 0, fifocnt; fifocnt = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK; while (fifocnt--) { u8 flag = TTY_NORMAL; ch = ltq_r8(port->membase + LTQ_ASC_RBUF); rsr = (ltq_r32(port->membase + LTQ_ASC_STATE) & ASCSTATE_ANY) | UART_DUMMY_UER_RX; tty_flip_buffer_push(tport); port->icount.rx++; /* * Note that the error handling code is * out of the main execution path */ if (rsr & ASCSTATE_ANY) { if (rsr & ASCSTATE_PE) { port->icount.parity++; ltq_w32_mask(0, ASCWHBSTATE_CLRPE, port->membase + LTQ_ASC_WHBSTATE); } else if (rsr & ASCSTATE_FE) { port->icount.frame++; ltq_w32_mask(0, ASCWHBSTATE_CLRFE, port->membase + LTQ_ASC_WHBSTATE); } if (rsr & ASCSTATE_ROE) { port->icount.overrun++; ltq_w32_mask(0, ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE); } rsr &= port->read_status_mask; if (rsr & ASCSTATE_PE) flag = TTY_PARITY; else if (rsr & ASCSTATE_FE) flag = TTY_FRAME; } if ((rsr & port->ignore_status_mask) == 0) tty_insert_flip_char(tport, ch, flag); if (rsr & ASCSTATE_ROE) /* * Overrun is special, since it's reported * immediately, and doesn't affect the current * character */ tty_insert_flip_char(tport, 0, TTY_OVERRUN); } if (ch != 0) tty_flip_buffer_push(tport); return 0; } static void lqasc_tx_chars(struct uart_port *port) { struct circ_buf *xmit = &port->state->xmit; if (uart_tx_stopped(port)) { lqasc_stop_tx(port); return; } while (((ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) { if (port->x_char) { ltq_w8(port->x_char, port->membase + LTQ_ASC_TBUF); port->icount.tx++; port->x_char = 0; continue; } if (uart_circ_empty(xmit)) break; ltq_w8(port->state->xmit.buf[port->state->xmit.tail], port->membase + LTQ_ASC_TBUF); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); } static irqreturn_t lqasc_tx_int(int irq, void *_port) { unsigned long flags; struct uart_port *port = (struct uart_port *)_port; spin_lock_irqsave(&ltq_asc_lock, flags); ltq_w32(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); spin_unlock_irqrestore(&ltq_asc_lock, flags); lqasc_start_tx(port); return IRQ_HANDLED; } static irqreturn_t lqasc_err_int(int irq, void *_port) { unsigned long flags; struct uart_port *port = (struct uart_port *)_port; spin_lock_irqsave(&ltq_asc_lock, flags); /* clear any pending interrupts */ ltq_w32_mask(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE | ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE); spin_unlock_irqrestore(&ltq_asc_lock, flags); return IRQ_HANDLED; } static irqreturn_t lqasc_rx_int(int irq, void *_port) { unsigned long flags; struct uart_port 
*port = (struct uart_port *)_port; spin_lock_irqsave(&ltq_asc_lock, flags); ltq_w32(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); lqasc_rx_chars(port); spin_unlock_irqrestore(&ltq_asc_lock, flags); return IRQ_HANDLED; } static unsigned int lqasc_tx_empty(struct uart_port *port) { int status; status = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK; return status ? 0 : TIOCSER_TEMT; } static unsigned int lqasc_get_mctrl(struct uart_port *port) { return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR; } static void lqasc_set_mctrl(struct uart_port *port, u_int mctrl) { } static void lqasc_break_ctl(struct uart_port *port, int break_state) { } static int lqasc_startup(struct uart_port *port) { struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); int retval; if (ltq_port->clk) clk_enable(ltq_port->clk); port->uartclk = clk_get_rate(ltq_port->fpiclk); ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), port->membase + LTQ_ASC_CLC); ltq_w32(0, port->membase + LTQ_ASC_PISEL); ltq_w32( ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, port->membase + LTQ_ASC_TXFCON); ltq_w32( ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, port->membase + LTQ_ASC_RXFCON); /* make sure other settings are written to hardware before * setting enable bits */ wmb(); ltq_w32_mask(0, ASCCON_M_8ASYNC | ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN, port->membase + LTQ_ASC_CON); retval = request_irq(ltq_port->tx_irq, lqasc_tx_int, 0, "asc_tx", port); if (retval) { pr_err("failed to request lqasc_tx_int\n"); return retval; } retval = request_irq(ltq_port->rx_irq, lqasc_rx_int, 0, "asc_rx", port); if (retval) { pr_err("failed to request lqasc_rx_int\n"); goto err1; } retval = request_irq(ltq_port->err_irq, lqasc_err_int, 0, "asc_err", port); if (retval) { pr_err("failed to request lqasc_err_int\n"); goto err2; } ltq_w32(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, port->membase + LTQ_ASC_IRNREN); return 0; err2: free_irq(ltq_port->rx_irq, port); err1: free_irq(ltq_port->tx_irq, port); return retval; } static void lqasc_shutdown(struct uart_port *port) { struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); free_irq(ltq_port->tx_irq, port); free_irq(ltq_port->rx_irq, port); free_irq(ltq_port->err_irq, port); ltq_w32(0, port->membase + LTQ_ASC_CON); ltq_w32_mask(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, port->membase + LTQ_ASC_RXFCON); ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, port->membase + LTQ_ASC_TXFCON); if (ltq_port->clk) clk_disable(ltq_port->clk); } static void lqasc_set_termios(struct uart_port *port, struct ktermios *new, struct ktermios *old) { unsigned int cflag; unsigned int iflag; unsigned int divisor; unsigned int baud; unsigned int con = 0; unsigned long flags; cflag = new->c_cflag; iflag = new->c_iflag; switch (cflag & CSIZE) { case CS7: con = ASCCON_M_7ASYNC; break; case CS5: case CS6: default: new->c_cflag &= ~ CSIZE; new->c_cflag |= CS8; con = ASCCON_M_8ASYNC; break; } cflag &= ~CMSPAR; /* Mark/Space parity is not supported */ if (cflag & CSTOPB) con |= ASCCON_STP; if (cflag & PARENB) { if (!(cflag & PARODD)) con &= ~ASCCON_ODD; else con |= ASCCON_ODD; } port->read_status_mask = ASCSTATE_ROE; if (iflag & INPCK) port->read_status_mask |= ASCSTATE_FE | ASCSTATE_PE; port->ignore_status_mask = 0; if (iflag & IGNPAR) port->ignore_status_mask |= ASCSTATE_FE | ASCSTATE_PE; if (iflag & IGNBRK) { /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). 
*/ if (iflag & IGNPAR) port->ignore_status_mask |= ASCSTATE_ROE; } if ((cflag & CREAD) == 0) port->ignore_status_mask |= UART_DUMMY_UER_RX; /* set error signals - framing, parity and overrun, enable receiver */ con |= ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN; spin_lock_irqsave(&ltq_asc_lock, flags); /* set up CON */ ltq_w32_mask(0, con, port->membase + LTQ_ASC_CON); /* Set baud rate - take a divider of 2 into account */ baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); divisor = uart_get_divisor(port, baud); divisor = divisor / 2 - 1; /* disable the baudrate generator */ ltq_w32_mask(ASCCON_R, 0, port->membase + LTQ_ASC_CON); /* make sure the fractional divider is off */ ltq_w32_mask(ASCCON_FDE, 0, port->membase + LTQ_ASC_CON); /* set up to use divisor of 2 */ ltq_w32_mask(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON); /* now we can write the new baudrate into the register */ ltq_w32(divisor, port->membase + LTQ_ASC_BG); /* turn the baudrate generator back on */ ltq_w32_mask(0, ASCCON_R, port->membase + LTQ_ASC_CON); /* enable rx */ ltq_w32(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); spin_unlock_irqrestore(&ltq_asc_lock, flags); /* Don't rewrite B0 */ if (tty_termios_baud_rate(new)) tty_termios_encode_baud_rate(new, baud, baud); uart_update_timeout(port, cflag, baud); } static const char* lqasc_type(struct uart_port *port) { if (port->type == PORT_LTQ_ASC) return DRVNAME; else return NULL; } static void lqasc_release_port(struct uart_port *port) { if (port->flags & UPF_IOREMAP) { iounmap(port->membase); port->membase = NULL; } } static int lqasc_request_port(struct uart_port *port) { struct platform_device *pdev = to_platform_device(port->dev); struct resource *res; int size; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "cannot obtain I/O memory region"); return -ENODEV; } size = resource_size(res); res = devm_request_mem_region(&pdev->dev, res->start, size, dev_name(&pdev->dev)); if (!res) { dev_err(&pdev->dev, "cannot request I/O memory region"); return -EBUSY; } if (port->flags & UPF_IOREMAP) { port->membase = devm_ioremap_nocache(&pdev->dev, port->mapbase, size); if (port->membase == NULL) return -ENOMEM; } return 0; } static void lqasc_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE) { port->type = PORT_LTQ_ASC; lqasc_request_port(port); } } static int lqasc_verify_port(struct uart_port *port, struct serial_struct *ser) { int ret = 0; if (ser->type != PORT_UNKNOWN && ser->type != PORT_LTQ_ASC) ret = -EINVAL; if (ser->irq < 0 || ser->irq >= NR_IRQS) ret = -EINVAL; if (ser->baud_base < 9600) ret = -EINVAL; return ret; } static struct uart_ops lqasc_pops = { .tx_empty = lqasc_tx_empty, .set_mctrl = lqasc_set_mctrl, .get_mctrl = lqasc_get_mctrl, .stop_tx = lqasc_stop_tx, .start_tx = lqasc_start_tx, .stop_rx = lqasc_stop_rx, .enable_ms = lqasc_enable_ms, .break_ctl = lqasc_break_ctl, .startup = lqasc_startup, .shutdown = lqasc_shutdown, .set_termios = lqasc_set_termios, .type = lqasc_type, .release_port = lqasc_release_port, .request_port = lqasc_request_port, .config_port = lqasc_config_port, .verify_port = lqasc_verify_port, }; static void lqasc_console_putchar(struct uart_port *port, int ch) { int fifofree; if (!port->membase) return; do { fifofree = (ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF; } while (fifofree == 0); ltq_w8(ch, port->membase + LTQ_ASC_TBUF); } static void lqasc_console_write(struct console *co, const char *s, u_int count) { struct 
ltq_uart_port *ltq_port; struct uart_port *port; unsigned long flags; if (co->index >= MAXPORTS) return; ltq_port = lqasc_port[co->index]; if (!ltq_port) return; port = &ltq_port->port; spin_lock_irqsave(&ltq_asc_lock, flags); uart_console_write(port, s, count, lqasc_console_putchar); spin_unlock_irqrestore(&ltq_asc_lock, flags); } static int __init lqasc_console_setup(struct console *co, char *options) { struct ltq_uart_port *ltq_port; struct uart_port *port; int baud = 115200; int bits = 8; int parity = 'n'; int flow = 'n'; if (co->index >= MAXPORTS) return -ENODEV; ltq_port = lqasc_port[co->index]; if (!ltq_port) return -ENODEV; port = &ltq_port->port; port->uartclk = clk_get_rate(ltq_port->fpiclk); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(port, co, baud, parity, bits, flow); } static struct console lqasc_console = { .name = "ttyLTQ", .write = lqasc_console_write, .device = uart_console_device, .setup = lqasc_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &lqasc_reg, }; static int __init lqasc_console_init(void) { register_console(&lqasc_console); return 0; } console_initcall(lqasc_console_init); static struct uart_driver lqasc_reg = { .owner = THIS_MODULE, .driver_name = DRVNAME, .dev_name = "ttyLTQ", .major = 0, .minor = 0, .nr = MAXPORTS, .cons = &lqasc_console, }; static int __init lqasc_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct ltq_uart_port *ltq_port; struct uart_port *port; struct resource *mmres, irqres[3]; int line = 0; int ret; mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); ret = of_irq_to_resource_table(node, irqres, 3); if (!mmres || (ret != 3)) { dev_err(&pdev->dev, "failed to get memory/irq for serial port\n"); return -ENODEV; } /* check if this is the console port */ if (mmres->start != CPHYSADDR(LTQ_EARLY_ASC)) line = 1; if (lqasc_port[line]) { dev_err(&pdev->dev, "port %d already allocated\n", line); return -EBUSY; } ltq_port = devm_kzalloc(&pdev->dev, sizeof(struct ltq_uart_port), GFP_KERNEL); if (!ltq_port) return -ENOMEM; port = &ltq_port->port; port->iotype = SERIAL_IO_MEM; port->flags = ASYNC_BOOT_AUTOCONF | UPF_IOREMAP; port->ops = &lqasc_pops; port->fifosize = 16; port->type = PORT_LTQ_ASC, port->line = line; port->dev = &pdev->dev; /* unused, just to be backward-compatible */ port->irq = irqres[0].start; port->mapbase = mmres->start; ltq_port->fpiclk = clk_get_fpi(); if (IS_ERR(ltq_port->fpiclk)) { pr_err("failed to get fpi clk\n"); return -ENOENT; } /* not all asc ports have clock gates, lets ignore the return code */ ltq_port->clk = clk_get(&pdev->dev, NULL); ltq_port->tx_irq = irqres[0].start; ltq_port->rx_irq = irqres[1].start; ltq_port->err_irq = irqres[2].start; lqasc_port[line] = ltq_port; platform_set_drvdata(pdev, ltq_port); ret = uart_add_one_port(&lqasc_reg, port); return ret; } static const struct of_device_id ltq_asc_match[] = { { .compatible = DRVNAME }, {}, }; MODULE_DEVICE_TABLE(of, ltq_asc_match); static struct platform_driver lqasc_driver = { .driver = { .name = DRVNAME, .owner = THIS_MODULE, .of_match_table = ltq_asc_match, }, }; int __init init_lqasc(void) { int ret; ret = uart_register_driver(&lqasc_reg); if (ret != 0) return ret; ret = platform_driver_probe(&lqasc_driver, lqasc_probe); if (ret != 0) uart_unregister_driver(&lqasc_reg); return ret; } module_init(init_lqasc); MODULE_DESCRIPTION("Lantiq serial port driver"); MODULE_LICENSE("GPL");
gpl-2.0
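lqasc_set_termios() in the Lantiq driver above derives the baud-rate-generator value from the serial-core divisor (roughly uartclk / (16 * baud)), halves it because the driver clears ASCCON_BRS to select its divide-by-two stage, and subtracts one for the register encoding. The sketch below approximates that calculation outside the kernel; the 100 MHz clock figure is only an assumed example, not the real FPI rate of any particular SoC.

#include <stdio.h>

/* Rough stand-in for the arithmetic in lqasc_set_termios():
 * serial-core divisor, halved for the divide-by-2 stage, minus one. */
static unsigned int example_asc_bg(unsigned long uartclk, unsigned int baud)
{
	unsigned long divisor = uartclk / (16UL * baud);

	return (unsigned int)(divisor / 2 - 1);
}

int main(void)
{
	/* Assumed example: 100 MHz UART clock, 115200 baud -> BG register of 26. */
	printf("LTQ_ASC_BG = %u\n", example_asc_bg(100000000UL, 115200));
	return 0;
}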
IxLabs/net-next
arch/arm/mach-s3c64xx/clock.c
2173
23918
/* linux/arch/arm/plat-s3c64xx/clock.c * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * S3C64XX Base clock support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <mach/hardware.h> #include <mach/map.h> #include <mach/regs-clock.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/cpu-freq.h> #include <plat/clock.h> #include <plat/clock-clksrc.h> #include <plat/pll.h> #include "regs-sys.h" /* fin_apll, fin_mpll and fin_epll are all the same clock, which we call * ext_xtal_mux for want of an actual name from the manual. */ static struct clk clk_ext_xtal_mux = { .name = "ext_xtal", }; #define clk_fin_apll clk_ext_xtal_mux #define clk_fin_mpll clk_ext_xtal_mux #define clk_fin_epll clk_ext_xtal_mux #define clk_fout_mpll clk_mpll #define clk_fout_epll clk_epll struct clk clk_h2 = { .name = "hclk2", .rate = 0, }; struct clk clk_27m = { .name = "clk_27m", .rate = 27000000, }; static int clk_48m_ctrl(struct clk *clk, int enable) { unsigned long flags; u32 val; /* can't rely on clock lock, this register has other usages */ local_irq_save(flags); val = __raw_readl(S3C64XX_OTHERS); if (enable) val |= S3C64XX_OTHERS_USBMASK; else val &= ~S3C64XX_OTHERS_USBMASK; __raw_writel(val, S3C64XX_OTHERS); local_irq_restore(flags); return 0; } struct clk clk_48m = { .name = "clk_48m", .rate = 48000000, .enable = clk_48m_ctrl, }; struct clk clk_xusbxti = { .name = "xusbxti", .rate = 48000000, }; static int inline s3c64xx_gate(void __iomem *reg, struct clk *clk, int enable) { unsigned int ctrlbit = clk->ctrlbit; u32 con; con = __raw_readl(reg); if (enable) con |= ctrlbit; else con &= ~ctrlbit; __raw_writel(con, reg); return 0; } static int s3c64xx_pclk_ctrl(struct clk *clk, int enable) { return s3c64xx_gate(S3C_PCLK_GATE, clk, enable); } static int s3c64xx_hclk_ctrl(struct clk *clk, int enable) { return s3c64xx_gate(S3C_HCLK_GATE, clk, enable); } int s3c64xx_sclk_ctrl(struct clk *clk, int enable) { return s3c64xx_gate(S3C_SCLK_GATE, clk, enable); } static struct clk init_clocks_off[] = { { .name = "nand", .parent = &clk_h, }, { .name = "rtc", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_RTC, }, { .name = "adc", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_TSADC, }, { .name = "i2c", .devname = "s3c2440-i2c.0", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_IIC, }, { .name = "i2c", .devname = "s3c2440-i2c.1", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C6410_CLKCON_PCLK_I2C1, }, { .name = "keypad", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_KEYPAD, }, { .name = "spi", .devname = "s3c6410-spi.0", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_SPI0, }, { .name = "spi", .devname = "s3c6410-spi.1", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_SPI1, }, { .name = "48m", .devname = "s3c-sdhci.0", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_MMC0_48, }, { .name = "48m", .devname = "s3c-sdhci.1", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_MMC1_48, }, { .name = 
"48m", .devname = "s3c-sdhci.2", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_MMC2_48, }, { .name = "ac97", .parent = &clk_p, .ctrlbit = S3C_CLKCON_PCLK_AC97, }, { .name = "cfcon", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_IHOST, }, { .name = "dma0", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_DMA0, }, { .name = "dma1", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_DMA1, }, { .name = "3dse", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_3DSE, }, { .name = "hclk_secur", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_SECUR, }, { .name = "sdma1", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_SDMA1, }, { .name = "sdma0", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_SDMA0, }, { .name = "hclk_jpeg", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_JPEG, }, { .name = "camif", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_CAMIF, }, { .name = "hclk_scaler", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_SCALER, }, { .name = "2d", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_2D, }, { .name = "tv", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_TV, }, { .name = "post0", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_POST0, }, { .name = "rot", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_ROT, }, { .name = "hclk_mfc", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_MFC, }, { .name = "pclk_mfc", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_MFC, }, { .name = "dac27", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_DAC27, }, { .name = "tv27", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_TV27, }, { .name = "scaler27", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SCALER27, }, { .name = "sclk_scaler", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SCALER, }, { .name = "post0_27", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_POST0_27, }, { .name = "secur", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SECUR, }, { .name = "sclk_mfc", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_MFC, }, { .name = "sclk_jpeg", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_JPEG, }, }; static struct clk clk_48m_spi0 = { .name = "spi_48m", .devname = "s3c6410-spi.0", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SPI0_48, }; static struct clk clk_48m_spi1 = { .name = "spi_48m", .devname = "s3c6410-spi.1", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SPI1_48, }; static struct clk clk_i2s0 = { .name = "iis", .devname = "samsung-i2s.0", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_IIS0, }; static struct clk clk_i2s1 = { .name = "iis", .devname = "samsung-i2s.1", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_IIS1, }; #ifdef CONFIG_CPU_S3C6410 static struct clk clk_i2s2 = { .name = "iis", .devname = "samsung-i2s.2", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C6410_CLKCON_PCLK_IIS2, }; #endif static struct clk init_clocks[] = { { .name = "lcd", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_LCD, }, { .name = "gpio", .parent = 
&clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_GPIO, }, { .name = "usb-host", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_UHOST, }, { .name = "otg", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_USB, }, { .name = "timers", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_PWM, }, { .name = "uart", .devname = "s3c6400-uart.0", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_UART0, }, { .name = "uart", .devname = "s3c6400-uart.1", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_UART1, }, { .name = "uart", .devname = "s3c6400-uart.2", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_UART2, }, { .name = "uart", .devname = "s3c6400-uart.3", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_UART3, }, { .name = "watchdog", .parent = &clk_p, .ctrlbit = S3C_CLKCON_PCLK_WDT, }, }; static struct clk clk_hsmmc0 = { .name = "hsmmc", .devname = "s3c-sdhci.0", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_HSMMC0, }; static struct clk clk_hsmmc1 = { .name = "hsmmc", .devname = "s3c-sdhci.1", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_HSMMC1, }; static struct clk clk_hsmmc2 = { .name = "hsmmc", .devname = "s3c-sdhci.2", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_HSMMC2, }; static struct clk clk_fout_apll = { .name = "fout_apll", }; static struct clk *clk_src_apll_list[] = { [0] = &clk_fin_apll, [1] = &clk_fout_apll, }; static struct clksrc_sources clk_src_apll = { .sources = clk_src_apll_list, .nr_sources = ARRAY_SIZE(clk_src_apll_list), }; static struct clksrc_clk clk_mout_apll = { .clk = { .name = "mout_apll", }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 0, .size = 1 }, .sources = &clk_src_apll, }; static struct clk *clk_src_epll_list[] = { [0] = &clk_fin_epll, [1] = &clk_fout_epll, }; static struct clksrc_sources clk_src_epll = { .sources = clk_src_epll_list, .nr_sources = ARRAY_SIZE(clk_src_epll_list), }; static struct clksrc_clk clk_mout_epll = { .clk = { .name = "mout_epll", }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 2, .size = 1 }, .sources = &clk_src_epll, }; static struct clk *clk_src_mpll_list[] = { [0] = &clk_fin_mpll, [1] = &clk_fout_mpll, }; static struct clksrc_sources clk_src_mpll = { .sources = clk_src_mpll_list, .nr_sources = ARRAY_SIZE(clk_src_mpll_list), }; static struct clksrc_clk clk_mout_mpll = { .clk = { .name = "mout_mpll", }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 1, .size = 1 }, .sources = &clk_src_mpll, }; static unsigned int armclk_mask; static unsigned long s3c64xx_clk_arm_get_rate(struct clk *clk) { unsigned long rate = clk_get_rate(clk->parent); u32 clkdiv; /* divisor mask starts at bit0, so no need to shift */ clkdiv = __raw_readl(S3C_CLK_DIV0) & armclk_mask; return rate / (clkdiv + 1); } static unsigned long s3c64xx_clk_arm_round_rate(struct clk *clk, unsigned long rate) { unsigned long parent = clk_get_rate(clk->parent); u32 div; if (parent < rate) return parent; div = (parent / rate) - 1; if (div > armclk_mask) div = armclk_mask; return parent / (div + 1); } static int s3c64xx_clk_arm_set_rate(struct clk *clk, unsigned long rate) { unsigned long parent = clk_get_rate(clk->parent); u32 div; u32 val; if (rate < parent / (armclk_mask + 1)) return -EINVAL; rate = clk_round_rate(clk, rate); div = clk_get_rate(clk->parent) / rate; val = __raw_readl(S3C_CLK_DIV0); val &= ~armclk_mask; val 
|= (div - 1); __raw_writel(val, S3C_CLK_DIV0); return 0; } static struct clk clk_arm = { .name = "armclk", .parent = &clk_mout_apll.clk, .ops = &(struct clk_ops) { .get_rate = s3c64xx_clk_arm_get_rate, .set_rate = s3c64xx_clk_arm_set_rate, .round_rate = s3c64xx_clk_arm_round_rate, }, }; static unsigned long s3c64xx_clk_doutmpll_get_rate(struct clk *clk) { unsigned long rate = clk_get_rate(clk->parent); printk(KERN_DEBUG "%s: parent is %ld\n", __func__, rate); if (__raw_readl(S3C_CLK_DIV0) & S3C6400_CLKDIV0_MPLL_MASK) rate /= 2; return rate; } static struct clk_ops clk_dout_ops = { .get_rate = s3c64xx_clk_doutmpll_get_rate, }; static struct clk clk_dout_mpll = { .name = "dout_mpll", .parent = &clk_mout_mpll.clk, .ops = &clk_dout_ops, }; static struct clk *clkset_spi_mmc_list[] = { &clk_mout_epll.clk, &clk_dout_mpll, &clk_fin_epll, &clk_27m, }; static struct clksrc_sources clkset_spi_mmc = { .sources = clkset_spi_mmc_list, .nr_sources = ARRAY_SIZE(clkset_spi_mmc_list), }; static struct clk *clkset_irda_list[] = { &clk_mout_epll.clk, &clk_dout_mpll, NULL, &clk_27m, }; static struct clksrc_sources clkset_irda = { .sources = clkset_irda_list, .nr_sources = ARRAY_SIZE(clkset_irda_list), }; static struct clk *clkset_uart_list[] = { &clk_mout_epll.clk, &clk_dout_mpll, NULL, NULL }; static struct clksrc_sources clkset_uart = { .sources = clkset_uart_list, .nr_sources = ARRAY_SIZE(clkset_uart_list), }; static struct clk *clkset_uhost_list[] = { &clk_48m, &clk_mout_epll.clk, &clk_dout_mpll, &clk_fin_epll, }; static struct clksrc_sources clkset_uhost = { .sources = clkset_uhost_list, .nr_sources = ARRAY_SIZE(clkset_uhost_list), }; /* The peripheral clocks are all controlled via clocksource followed * by an optional divider and gate stage. We currently roll this into * one clock which hides the intermediate clock from the mux. * * Note, the JPEG clock can only be an even divider... * * The scaler and LCD clocks depend on the S3C64XX version, and also * have a common parent divisor so are not included here. 
*/ /* clocks that feed other parts of the clock source tree */ static struct clk clk_iis_cd0 = { .name = "iis_cdclk0", }; static struct clk clk_iis_cd1 = { .name = "iis_cdclk1", }; static struct clk clk_iisv4_cd = { .name = "iis_cdclk_v4", }; static struct clk clk_pcm_cd = { .name = "pcm_cdclk", }; static struct clk *clkset_audio0_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_dout_mpll, [2] = &clk_fin_epll, [3] = &clk_iis_cd0, [4] = &clk_pcm_cd, }; static struct clksrc_sources clkset_audio0 = { .sources = clkset_audio0_list, .nr_sources = ARRAY_SIZE(clkset_audio0_list), }; static struct clk *clkset_audio1_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_dout_mpll, [2] = &clk_fin_epll, [3] = &clk_iis_cd1, [4] = &clk_pcm_cd, }; static struct clksrc_sources clkset_audio1 = { .sources = clkset_audio1_list, .nr_sources = ARRAY_SIZE(clkset_audio1_list), }; #ifdef CONFIG_CPU_S3C6410 static struct clk *clkset_audio2_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_dout_mpll, [2] = &clk_fin_epll, [3] = &clk_iisv4_cd, [4] = &clk_pcm_cd, }; static struct clksrc_sources clkset_audio2 = { .sources = clkset_audio2_list, .nr_sources = ARRAY_SIZE(clkset_audio2_list), }; #endif static struct clksrc_clk clksrcs[] = { { .clk = { .name = "usb-bus-host", .ctrlbit = S3C_CLKCON_SCLK_UHOST, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 5, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV1, .shift = 20, .size = 4 }, .sources = &clkset_uhost, }, { .clk = { .name = "irda-bus", .ctrlbit = S3C_CLKCON_SCLK_IRDA, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 24, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 20, .size = 4 }, .sources = &clkset_irda, }, { .clk = { .name = "camera", .ctrlbit = S3C_CLKCON_SCLK_CAM, .enable = s3c64xx_sclk_ctrl, .parent = &clk_h2, }, .reg_div = { .reg = S3C_CLK_DIV0, .shift = 20, .size = 4 }, }, }; /* Where does UCLK0 come from? 
*/ static struct clksrc_clk clk_sclk_uclk = { .clk = { .name = "uclk1", .ctrlbit = S3C_CLKCON_SCLK_UART, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 13, .size = 1 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 16, .size = 4 }, .sources = &clkset_uart, }; static struct clksrc_clk clk_sclk_mmc0 = { .clk = { .name = "mmc_bus", .devname = "s3c-sdhci.0", .ctrlbit = S3C_CLKCON_SCLK_MMC0, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 18, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV1, .shift = 0, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_sclk_mmc1 = { .clk = { .name = "mmc_bus", .devname = "s3c-sdhci.1", .ctrlbit = S3C_CLKCON_SCLK_MMC1, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 20, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV1, .shift = 4, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_sclk_mmc2 = { .clk = { .name = "mmc_bus", .devname = "s3c-sdhci.2", .ctrlbit = S3C_CLKCON_SCLK_MMC2, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 22, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV1, .shift = 8, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_sclk_spi0 = { .clk = { .name = "spi-bus", .devname = "s3c6410-spi.0", .ctrlbit = S3C_CLKCON_SCLK_SPI0, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 14, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 0, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_sclk_spi1 = { .clk = { .name = "spi-bus", .devname = "s3c6410-spi.1", .ctrlbit = S3C_CLKCON_SCLK_SPI1, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 16, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 4, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_audio_bus0 = { .clk = { .name = "audio-bus", .devname = "samsung-i2s.0", .ctrlbit = S3C_CLKCON_SCLK_AUDIO0, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 7, .size = 3 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 8, .size = 4 }, .sources = &clkset_audio0, }; static struct clksrc_clk clk_audio_bus1 = { .clk = { .name = "audio-bus", .devname = "samsung-i2s.1", .ctrlbit = S3C_CLKCON_SCLK_AUDIO1, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 10, .size = 3 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 12, .size = 4 }, .sources = &clkset_audio1, }; #ifdef CONFIG_CPU_S3C6410 static struct clksrc_clk clk_audio_bus2 = { .clk = { .name = "audio-bus", .devname = "samsung-i2s.2", .ctrlbit = S3C6410_CLKCON_SCLK_AUDIO2, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C6410_CLK_SRC2, .shift = 0, .size = 3 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 24, .size = 4 }, .sources = &clkset_audio2, }; #endif /* Clock initialisation code */ static struct clksrc_clk *init_parents[] = { &clk_mout_apll, &clk_mout_epll, &clk_mout_mpll, }; static struct clksrc_clk *clksrc_cdev[] = { &clk_sclk_uclk, &clk_sclk_mmc0, &clk_sclk_mmc1, &clk_sclk_mmc2, &clk_sclk_spi0, &clk_sclk_spi1, &clk_audio_bus0, &clk_audio_bus1, }; static struct clk *clk_cdev[] = { &clk_hsmmc0, &clk_hsmmc1, &clk_hsmmc2, &clk_48m_spi0, &clk_48m_spi1, &clk_i2s0, &clk_i2s1, }; static struct clk_lookup s3c64xx_clk_lookup[] = { CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p), CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_sclk_uclk.clk), CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &clk_hsmmc0), CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &clk_hsmmc1), 
CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.0", &clk_hsmmc2), CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk), CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk), CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk), CLKDEV_INIT(NULL, "spi_busclk0", &clk_p), CLKDEV_INIT("s3c6410-spi.0", "spi_busclk1", &clk_sclk_spi0.clk), CLKDEV_INIT("s3c6410-spi.0", "spi_busclk2", &clk_48m_spi0), CLKDEV_INIT("s3c6410-spi.1", "spi_busclk1", &clk_sclk_spi1.clk), CLKDEV_INIT("s3c6410-spi.1", "spi_busclk2", &clk_48m_spi1), CLKDEV_INIT("samsung-i2s.0", "i2s_opclk0", &clk_i2s0), CLKDEV_INIT("samsung-i2s.0", "i2s_opclk1", &clk_audio_bus0.clk), CLKDEV_INIT("samsung-i2s.1", "i2s_opclk0", &clk_i2s1), CLKDEV_INIT("samsung-i2s.1", "i2s_opclk1", &clk_audio_bus1.clk), #ifdef CONFIG_CPU_S3C6410 CLKDEV_INIT("samsung-i2s.2", "i2s_opclk0", &clk_i2s2), CLKDEV_INIT("samsung-i2s.2", "i2s_opclk1", &clk_audio_bus2.clk), #endif }; #define GET_DIV(clk, field) ((((clk) & field##_MASK) >> field##_SHIFT) + 1) void __init_or_cpufreq s3c64xx_setup_clocks(void) { struct clk *xtal_clk; unsigned long xtal; unsigned long fclk; unsigned long hclk; unsigned long hclk2; unsigned long pclk; unsigned long epll; unsigned long apll; unsigned long mpll; unsigned int ptr; u32 clkdiv0; printk(KERN_DEBUG "%s: registering clocks\n", __func__); clkdiv0 = __raw_readl(S3C_CLK_DIV0); printk(KERN_DEBUG "%s: clkdiv0 = %08x\n", __func__, clkdiv0); xtal_clk = clk_get(NULL, "xtal"); BUG_ON(IS_ERR(xtal_clk)); xtal = clk_get_rate(xtal_clk); clk_put(xtal_clk); printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal); /* For now assume the mux always selects the crystal */ clk_ext_xtal_mux.parent = xtal_clk; epll = s3c_get_pll6553x(xtal, __raw_readl(S3C_EPLL_CON0), __raw_readl(S3C_EPLL_CON1)); mpll = s3c6400_get_pll(xtal, __raw_readl(S3C_MPLL_CON)); apll = s3c6400_get_pll(xtal, __raw_readl(S3C_APLL_CON)); fclk = mpll; printk(KERN_INFO "S3C64XX: PLL settings, A=%ld, M=%ld, E=%ld\n", apll, mpll, epll); if(__raw_readl(S3C64XX_OTHERS) & S3C64XX_OTHERS_SYNCMUXSEL) /* Synchronous mode */ hclk2 = apll / GET_DIV(clkdiv0, S3C6400_CLKDIV0_HCLK2); else /* Asynchronous mode */ hclk2 = mpll / GET_DIV(clkdiv0, S3C6400_CLKDIV0_HCLK2); hclk = hclk2 / GET_DIV(clkdiv0, S3C6400_CLKDIV0_HCLK); pclk = hclk2 / GET_DIV(clkdiv0, S3C6400_CLKDIV0_PCLK); printk(KERN_INFO "S3C64XX: HCLK2=%ld, HCLK=%ld, PCLK=%ld\n", hclk2, hclk, pclk); clk_fout_mpll.rate = mpll; clk_fout_epll.rate = epll; clk_fout_apll.rate = apll; clk_h2.rate = hclk2; clk_h.rate = hclk; clk_p.rate = pclk; clk_f.rate = fclk; for (ptr = 0; ptr < ARRAY_SIZE(init_parents); ptr++) s3c_set_clksrc(init_parents[ptr], true); for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++) s3c_set_clksrc(&clksrcs[ptr], true); } static struct clk *clks1[] __initdata = { &clk_ext_xtal_mux, &clk_iis_cd0, &clk_iis_cd1, &clk_iisv4_cd, &clk_pcm_cd, &clk_mout_epll.clk, &clk_mout_mpll.clk, &clk_dout_mpll, &clk_arm, }; static struct clk *clks[] __initdata = { &clk_ext, &clk_epll, &clk_27m, &clk_48m, &clk_h2, &clk_xusbxti, }; /** * s3c64xx_register_clocks - register clocks for s3c6400 and s3c6410 * @xtal: The rate for the clock crystal feeding the PLLs. * @armclk_divlimit: Divisor mask for ARMCLK. * * Register the clocks for the S3C6400 and S3C6410 SoC range, such * as ARMCLK as well as the necessary parent clocks. * * This call does not setup the clocks, which is left to the * s3c64xx_setup_clocks() call which may be needed by the cpufreq * or resume code to re-set the clocks if the bootloader has changed * them. 
*/ void __init s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_divlimit) { unsigned int cnt; armclk_mask = armclk_divlimit; s3c24xx_register_baseclocks(xtal); s3c24xx_register_clocks(clks, ARRAY_SIZE(clks)); s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks)); s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev)); for (cnt = 0; cnt < ARRAY_SIZE(clk_cdev); cnt++) s3c_disable_clocks(clk_cdev[cnt], 1); s3c24xx_register_clocks(clks1, ARRAY_SIZE(clks1)); s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs)); for (cnt = 0; cnt < ARRAY_SIZE(clksrc_cdev); cnt++) s3c_register_clksrc(clksrc_cdev[cnt], 1); clkdev_add_table(s3c64xx_clk_lookup, ARRAY_SIZE(s3c64xx_clk_lookup)); s3c_pwmclk_init(); }
gpl-2.0
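The clkdev table in the s3c64xx clock code above exposes the divided sclk outputs under connection ids such as "mmc_busclk.2" and "spi_busclk1", and the kernel-doc for s3c64xx_register_clocks() notes that rate setup is deferred to s3c64xx_setup_clocks(). As an editorial illustration only (not part of the original clock.c), the sketch below shows how a peripheral driver of this kernel generation might claim one of those clocks through the legacy clk API; the function name and the 25 MHz target rate are assumptions made for the example, and dev is expected to be the "s3c-sdhci.0" device so the clkdev lookup matches.

/*
 * Editorial sketch, not part of the original file: claim the
 * "mmc_busclk.2" clock that the table above registers for "s3c-sdhci.0".
 * The 25 MHz rate is an arbitrary example value.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_claim_mmc_busclk(struct device *dev)
{
	struct clk *busclk;
	int ret;

	busclk = clk_get(dev, "mmc_busclk.2");	/* matches CLKDEV_INIT() above */
	if (IS_ERR(busclk))
		return PTR_ERR(busclk);

	ret = clk_set_rate(busclk, 25000000);	/* example rate only */
	if (ret)
		goto err_put;

	ret = clk_enable(busclk);
	if (ret)
		goto err_put;

	return 0;

err_put:
	clk_put(busclk);
	return ret;
}

The matching clk_disable()/clk_put() calls would go in the consumer's teardown path, mirroring how the sclk gates above are enabled through s3c64xx_sclk_ctrl().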
ShevT/android_kernel_d1_p1
drivers/power/pda_power.c
2173
11833
/* * Common power driver for PDAs and phones with one or two external * power supplies (AC/USB) connected to main and backup batteries, * and optional builtin charger. * * Copyright © 2007 Anton Vorontsov <cbou@mail.ru> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/power_supply.h> #include <linux/pda_power.h> #include <linux/regulator/consumer.h> #include <linux/timer.h> #include <linux/jiffies.h> #include <linux/usb/otg.h> static inline unsigned int get_irq_flags(struct resource *res) { unsigned int flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED; flags |= res->flags & IRQF_TRIGGER_MASK; return flags; } static struct device *dev; static struct pda_power_pdata *pdata; static struct resource *ac_irq, *usb_irq; static struct timer_list charger_timer; static struct timer_list supply_timer; static struct timer_list polling_timer; static int polling; static struct otg_transceiver *transceiver; static struct notifier_block otg_nb; static struct regulator *ac_draw; enum { PDA_PSY_OFFLINE = 0, PDA_PSY_ONLINE = 1, PDA_PSY_TO_CHANGE, }; static int new_ac_status = -1; static int new_usb_status = -1; static int ac_status = -1; static int usb_status = -1; static int pda_power_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { switch (psp) { case POWER_SUPPLY_PROP_ONLINE: if (psy->type == POWER_SUPPLY_TYPE_MAINS) val->intval = pdata->is_ac_online ? pdata->is_ac_online() : 0; else val->intval = pdata->is_usb_online ? pdata->is_usb_online() : 0; break; default: return -EINVAL; } return 0; } static enum power_supply_property pda_power_props[] = { POWER_SUPPLY_PROP_ONLINE, }; static char *pda_power_supplied_to[] = { "main-battery", "backup-battery", }; static struct power_supply pda_psy_ac = { .name = "ac", .type = POWER_SUPPLY_TYPE_MAINS, .supplied_to = pda_power_supplied_to, .num_supplicants = ARRAY_SIZE(pda_power_supplied_to), .properties = pda_power_props, .num_properties = ARRAY_SIZE(pda_power_props), .get_property = pda_power_get_property, }; static struct power_supply pda_psy_usb = { .name = "usb", .type = POWER_SUPPLY_TYPE_USB, .supplied_to = pda_power_supplied_to, .num_supplicants = ARRAY_SIZE(pda_power_supplied_to), .properties = pda_power_props, .num_properties = ARRAY_SIZE(pda_power_props), .get_property = pda_power_get_property, }; static void update_status(void) { if (pdata->is_ac_online) new_ac_status = !!pdata->is_ac_online(); if (pdata->is_usb_online) new_usb_status = !!pdata->is_usb_online(); } static void update_charger(void) { static int regulator_enabled; int max_uA = pdata->ac_max_uA; if (pdata->set_charge) { if (new_ac_status > 0) { dev_dbg(dev, "charger on (AC)\n"); pdata->set_charge(PDA_POWER_CHARGE_AC); } else if (new_usb_status > 0) { dev_dbg(dev, "charger on (USB)\n"); pdata->set_charge(PDA_POWER_CHARGE_USB); } else { dev_dbg(dev, "charger off\n"); pdata->set_charge(0); } } else if (ac_draw) { if (new_ac_status > 0) { regulator_set_current_limit(ac_draw, max_uA, max_uA); if (!regulator_enabled) { dev_dbg(dev, "charger on (AC)\n"); regulator_enable(ac_draw); regulator_enabled = 1; } } else { if (regulator_enabled) { dev_dbg(dev, "charger off\n"); regulator_disable(ac_draw); regulator_enabled = 0; } } } } static void 
supply_timer_func(unsigned long unused) { if (ac_status == PDA_PSY_TO_CHANGE) { ac_status = new_ac_status; power_supply_changed(&pda_psy_ac); } if (usb_status == PDA_PSY_TO_CHANGE) { usb_status = new_usb_status; power_supply_changed(&pda_psy_usb); } } static void psy_changed(void) { update_charger(); /* * Okay, charger set. Now wait a bit before notifying supplicants, * charge power should stabilize. */ mod_timer(&supply_timer, jiffies + msecs_to_jiffies(pdata->wait_for_charger)); } static void charger_timer_func(unsigned long unused) { update_status(); psy_changed(); } static irqreturn_t power_changed_isr(int irq, void *power_supply) { if (power_supply == &pda_psy_ac) ac_status = PDA_PSY_TO_CHANGE; else if (power_supply == &pda_psy_usb) usb_status = PDA_PSY_TO_CHANGE; else return IRQ_NONE; /* * Wait a bit before reading ac/usb line status and setting charger, * because ac/usb status readings may lag from irq. */ mod_timer(&charger_timer, jiffies + msecs_to_jiffies(pdata->wait_for_status)); return IRQ_HANDLED; } static void polling_timer_func(unsigned long unused) { int changed = 0; dev_dbg(dev, "polling...\n"); update_status(); if (!ac_irq && new_ac_status != ac_status) { ac_status = PDA_PSY_TO_CHANGE; changed = 1; } if (!usb_irq && new_usb_status != usb_status) { usb_status = PDA_PSY_TO_CHANGE; changed = 1; } if (changed) psy_changed(); mod_timer(&polling_timer, jiffies + msecs_to_jiffies(pdata->polling_interval)); } #ifdef CONFIG_USB_OTG_UTILS static int otg_is_usb_online(void) { return (transceiver->last_event == USB_EVENT_VBUS || transceiver->last_event == USB_EVENT_ENUMERATED); } static int otg_is_ac_online(void) { return (transceiver->last_event == USB_EVENT_CHARGER); } static int otg_handle_notification(struct notifier_block *nb, unsigned long event, void *unused) { switch (event) { case USB_EVENT_CHARGER: ac_status = PDA_PSY_TO_CHANGE; break; case USB_EVENT_VBUS: case USB_EVENT_ENUMERATED: usb_status = PDA_PSY_TO_CHANGE; break; case USB_EVENT_NONE: ac_status = PDA_PSY_TO_CHANGE; usb_status = PDA_PSY_TO_CHANGE; break; default: return NOTIFY_OK; } /* * Wait a bit before reading ac/usb line status and setting charger, * because ac/usb status readings may lag from irq. 
*/ mod_timer(&charger_timer, jiffies + msecs_to_jiffies(pdata->wait_for_status)); return NOTIFY_OK; } #endif static int pda_power_probe(struct platform_device *pdev) { int ret = 0; dev = &pdev->dev; if (pdev->id != -1) { dev_err(dev, "it's meaningless to register several " "pda_powers; use id = -1\n"); ret = -EINVAL; goto wrongid; } pdata = pdev->dev.platform_data; if (pdata->init) { ret = pdata->init(dev); if (ret < 0) goto init_failed; } update_status(); update_charger(); if (!pdata->wait_for_status) pdata->wait_for_status = 500; if (!pdata->wait_for_charger) pdata->wait_for_charger = 500; if (!pdata->polling_interval) pdata->polling_interval = 2000; if (!pdata->ac_max_uA) pdata->ac_max_uA = 500000; setup_timer(&charger_timer, charger_timer_func, 0); setup_timer(&supply_timer, supply_timer_func, 0); ac_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ac"); usb_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "usb"); if (pdata->supplied_to) { pda_psy_ac.supplied_to = pdata->supplied_to; pda_psy_ac.num_supplicants = pdata->num_supplicants; pda_psy_usb.supplied_to = pdata->supplied_to; pda_psy_usb.num_supplicants = pdata->num_supplicants; } ac_draw = regulator_get(dev, "ac_draw"); if (IS_ERR(ac_draw)) { dev_dbg(dev, "couldn't get ac_draw regulator\n"); ac_draw = NULL; ret = PTR_ERR(ac_draw); } transceiver = otg_get_transceiver(); if (transceiver && !pdata->is_usb_online) { pdata->is_usb_online = otg_is_usb_online; } if (transceiver && !pdata->is_ac_online) { pdata->is_ac_online = otg_is_ac_online; } if (pdata->is_ac_online) { ret = power_supply_register(&pdev->dev, &pda_psy_ac); if (ret) { dev_err(dev, "failed to register %s power supply\n", pda_psy_ac.name); goto ac_supply_failed; } if (ac_irq) { ret = request_irq(ac_irq->start, power_changed_isr, get_irq_flags(ac_irq), ac_irq->name, &pda_psy_ac); if (ret) { dev_err(dev, "request ac irq failed\n"); goto ac_irq_failed; } } else { polling = 1; } } if (pdata->is_usb_online) { ret = power_supply_register(&pdev->dev, &pda_psy_usb); if (ret) { dev_err(dev, "failed to register %s power supply\n", pda_psy_usb.name); goto usb_supply_failed; } if (usb_irq) { ret = request_irq(usb_irq->start, power_changed_isr, get_irq_flags(usb_irq), usb_irq->name, &pda_psy_usb); if (ret) { dev_err(dev, "request usb irq failed\n"); goto usb_irq_failed; } } else { polling = 1; } } if (transceiver && pdata->use_otg_notifier) { otg_nb.notifier_call = otg_handle_notification; ret = otg_register_notifier(transceiver, &otg_nb); if (ret) { dev_err(dev, "failure to register otg notifier\n"); goto otg_reg_notifier_failed; } polling = 0; } if (polling) { dev_dbg(dev, "will poll for status\n"); setup_timer(&polling_timer, polling_timer_func, 0); mod_timer(&polling_timer, jiffies + msecs_to_jiffies(pdata->polling_interval)); } if (ac_irq || usb_irq) device_init_wakeup(&pdev->dev, 1); return 0; otg_reg_notifier_failed: if (pdata->is_usb_online && usb_irq) free_irq(usb_irq->start, &pda_psy_usb); usb_irq_failed: if (pdata->is_usb_online) power_supply_unregister(&pda_psy_usb); usb_supply_failed: if (pdata->is_ac_online && ac_irq) free_irq(ac_irq->start, &pda_psy_ac); if (transceiver) otg_put_transceiver(transceiver); ac_irq_failed: if (pdata->is_ac_online) power_supply_unregister(&pda_psy_ac); ac_supply_failed: if (ac_draw) { regulator_put(ac_draw); ac_draw = NULL; } if (pdata->exit) pdata->exit(dev); init_failed: wrongid: return ret; } static int pda_power_remove(struct platform_device *pdev) { if (pdata->is_usb_online && usb_irq) free_irq(usb_irq->start, 
&pda_psy_usb); if (pdata->is_ac_online && ac_irq) free_irq(ac_irq->start, &pda_psy_ac); if (polling) del_timer_sync(&polling_timer); del_timer_sync(&charger_timer); del_timer_sync(&supply_timer); if (pdata->is_usb_online) power_supply_unregister(&pda_psy_usb); if (pdata->is_ac_online) power_supply_unregister(&pda_psy_ac); #ifdef CONFIG_USB_OTG_UTILS if (transceiver) otg_put_transceiver(transceiver); #endif if (ac_draw) { regulator_put(ac_draw); ac_draw = NULL; } if (pdata->exit) pdata->exit(dev); return 0; } #ifdef CONFIG_PM static int ac_wakeup_enabled; static int usb_wakeup_enabled; static int pda_power_suspend(struct platform_device *pdev, pm_message_t state) { if (pdata->suspend) { int ret = pdata->suspend(state); if (ret) return ret; } if (device_may_wakeup(&pdev->dev)) { if (ac_irq) ac_wakeup_enabled = !enable_irq_wake(ac_irq->start); if (usb_irq) usb_wakeup_enabled = !enable_irq_wake(usb_irq->start); } return 0; } static int pda_power_resume(struct platform_device *pdev) { if (device_may_wakeup(&pdev->dev)) { if (usb_irq && usb_wakeup_enabled) disable_irq_wake(usb_irq->start); if (ac_irq && ac_wakeup_enabled) disable_irq_wake(ac_irq->start); } if (pdata->resume) return pdata->resume(); return 0; } #else #define pda_power_suspend NULL #define pda_power_resume NULL #endif /* CONFIG_PM */ MODULE_ALIAS("platform:pda-power"); static struct platform_driver pda_power_pdrv = { .driver = { .name = "pda-power", }, .probe = pda_power_probe, .remove = pda_power_remove, .suspend = pda_power_suspend, .resume = pda_power_resume, }; static int __init pda_power_init(void) { return platform_driver_register(&pda_power_pdrv); } static void __exit pda_power_exit(void) { platform_driver_unregister(&pda_power_pdrv); } module_init(pda_power_init); module_exit(pda_power_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Anton Vorontsov <cbou@mail.ru>");
gpl-2.0
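pda_power.c above is configured entirely through platform data plus two optional named IRQ resources ("ac" and "usb"), and its probe() refuses any platform device id other than -1. As an editorial sketch (not part of the original driver), the board-file fragment below shows one plausible way to wire it up; the GPIO number, IRQ number, and all example_* names are hypothetical placeholders, and a real board file would also request the GPIO before reading it.

/*
 * Editorial board-file sketch, not part of the original driver.
 * GPIO 42 and IRQ 17 are hypothetical placeholders.
 */
#include <linux/gpio.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/pda_power.h>
#include <linux/platform_device.h>

#define EXAMPLE_AC_GPIO		42	/* hypothetical AC-detect line */

static int example_is_ac_online(void)
{
	return gpio_get_value(EXAMPLE_AC_GPIO);
}

static struct pda_power_pdata example_power_pdata = {
	.is_ac_online	= example_is_ac_online,
	/*
	 * Leaving the timing fields at 0 picks up the driver defaults
	 * (500 ms status/charger delays, 2 s polling, 500 mA ac_max_uA).
	 */
};

static struct resource example_power_resources[] = {
	{
		.name	= "ac",		/* looked up by name in probe() */
		.start	= 17,		/* hypothetical IRQ number */
		.end	= 17,
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE |
			  IORESOURCE_IRQ_LOWEDGE,
	},
};

static struct platform_device example_power_device = {
	.name		= "pda-power",
	.id		= -1,		/* probe() rejects any other id */
	.dev		= {
		.platform_data	= &example_power_pdata,
	},
	.resource	= example_power_resources,
	.num_resources	= ARRAY_SIZE(example_power_resources),
};

A machine init function would then simply call platform_device_register(&example_power_device); the driver uses the "ac" interrupt when such a resource is present and falls back to timer polling only when it is not, exactly as in the probe path above.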
geduino-foundation/kernel-unico
drivers/hwmon/lm93.c
3197
85184
/* lm93.c - Part of lm_sensors, Linux kernel modules for hardware monitoring Author/Maintainer: Mark M. Hoffman <mhoffman@lightlink.com> Copyright (c) 2004 Utilitek Systems, Inc. derived in part from lm78.c: Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl> derived in part from lm85.c: Copyright (c) 2002, 2003 Philip Pokorny <ppokorny@penguincomputing.com> Copyright (c) 2003 Margit Schubert-While <margitsw@t-online.de> derived in part from w83l785ts.c: Copyright (c) 2003-2004 Jean Delvare <khali@linux-fr.org> Ported to Linux 2.6 by Eric J. Bowersox <ericb@aspsys.com> Copyright (c) 2005 Aspen Systems, Inc. Adapted to 2.6.20 by Carsten Emde <cbe@osadl.org> Copyright (c) 2006 Carsten Emde, Open Source Automation Development Lab Modified for mainline integration by Hans J. Koch <hjk@hansjkoch.de> Copyright (c) 2007 Hans J. Koch, Linutronix GmbH This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/delay.h> /* LM93 REGISTER ADDRESSES */ /* miscellaneous */ #define LM93_REG_MFR_ID 0x3e #define LM93_REG_VER 0x3f #define LM93_REG_STATUS_CONTROL 0xe2 #define LM93_REG_CONFIG 0xe3 #define LM93_REG_SLEEP_CONTROL 0xe4 /* alarm values start here */ #define LM93_REG_HOST_ERROR_1 0x48 /* voltage inputs: in1-in16 (nr => 0-15) */ #define LM93_REG_IN(nr) (0x56 + (nr)) #define LM93_REG_IN_MIN(nr) (0x90 + (nr) * 2) #define LM93_REG_IN_MAX(nr) (0x91 + (nr) * 2) /* temperature inputs: temp1-temp4 (nr => 0-3) */ #define LM93_REG_TEMP(nr) (0x50 + (nr)) #define LM93_REG_TEMP_MIN(nr) (0x78 + (nr) * 2) #define LM93_REG_TEMP_MAX(nr) (0x79 + (nr) * 2) /* temp[1-4]_auto_boost (nr => 0-3) */ #define LM93_REG_BOOST(nr) (0x80 + (nr)) /* #PROCHOT inputs: prochot1-prochot2 (nr => 0-1) */ #define LM93_REG_PROCHOT_CUR(nr) (0x67 + (nr) * 2) #define LM93_REG_PROCHOT_AVG(nr) (0x68 + (nr) * 2) #define LM93_REG_PROCHOT_MAX(nr) (0xb0 + (nr)) /* fan tach inputs: fan1-fan4 (nr => 0-3) */ #define LM93_REG_FAN(nr) (0x6e + (nr) * 2) #define LM93_REG_FAN_MIN(nr) (0xb4 + (nr) * 2) /* pwm outputs: pwm1-pwm2 (nr => 0-1, reg => 0-3) */ #define LM93_REG_PWM_CTL(nr,reg) (0xc8 + (reg) + (nr) * 4) #define LM93_PWM_CTL1 0x0 #define LM93_PWM_CTL2 0x1 #define LM93_PWM_CTL3 0x2 #define LM93_PWM_CTL4 0x3 /* GPIO input state */ #define LM93_REG_GPI 0x6b /* vid inputs: vid1-vid2 (nr => 0-1) */ #define LM93_REG_VID(nr) (0x6c + (nr)) /* vccp1 & vccp2: VID relative inputs (nr => 0-1) */ #define LM93_REG_VCCP_LIMIT_OFF(nr) (0xb2 + (nr)) /* temp[1-4]_auto_boost_hyst */ #define LM93_REG_BOOST_HYST_12 0xc0 #define LM93_REG_BOOST_HYST_34 0xc1 #define LM93_REG_BOOST_HYST(nr) (0xc0 + (nr)/2) /* temp[1-4]_auto_pwm_[min|hyst] */ #define LM93_REG_PWM_MIN_HYST_12 0xc3 #define LM93_REG_PWM_MIN_HYST_34 0xc4 #define LM93_REG_PWM_MIN_HYST(nr) (0xc3 + 
(nr)/2) /* prochot_override & prochot_interval */ #define LM93_REG_PROCHOT_OVERRIDE 0xc6 #define LM93_REG_PROCHOT_INTERVAL 0xc7 /* temp[1-4]_auto_base (nr => 0-3) */ #define LM93_REG_TEMP_BASE(nr) (0xd0 + (nr)) /* temp[1-4]_auto_offsets (step => 0-11) */ #define LM93_REG_TEMP_OFFSET(step) (0xd4 + (step)) /* #PROCHOT & #VRDHOT PWM ramp control */ #define LM93_REG_PWM_RAMP_CTL 0xbf /* miscellaneous */ #define LM93_REG_SFC1 0xbc #define LM93_REG_SFC2 0xbd #define LM93_REG_GPI_VID_CTL 0xbe #define LM93_REG_SF_TACH_TO_PWM 0xe0 /* error masks */ #define LM93_REG_GPI_ERR_MASK 0xec #define LM93_REG_MISC_ERR_MASK 0xed /* LM93 REGISTER VALUES */ #define LM93_MFR_ID 0x73 #define LM93_MFR_ID_PROTOTYPE 0x72 /* LM94 REGISTER VALUES */ #define LM94_MFR_ID_2 0x7a #define LM94_MFR_ID 0x79 #define LM94_MFR_ID_PROTOTYPE 0x78 /* SMBus capabilities */ #define LM93_SMBUS_FUNC_FULL (I2C_FUNC_SMBUS_BYTE_DATA | \ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA) #define LM93_SMBUS_FUNC_MIN (I2C_FUNC_SMBUS_BYTE_DATA | \ I2C_FUNC_SMBUS_WORD_DATA) /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; /* Insmod parameters */ static int disable_block; module_param(disable_block, bool, 0); MODULE_PARM_DESC(disable_block, "Set to non-zero to disable SMBus block data transactions."); static int init; module_param(init, bool, 0); MODULE_PARM_DESC(init, "Set to non-zero to force chip initialization."); static int vccp_limit_type[2] = {0,0}; module_param_array(vccp_limit_type, int, NULL, 0); MODULE_PARM_DESC(vccp_limit_type, "Configures in7 and in8 limit modes."); static int vid_agtl; module_param(vid_agtl, int, 0); MODULE_PARM_DESC(vid_agtl, "Configures VID pin input thresholds."); /* Driver data */ static struct i2c_driver lm93_driver; /* LM93 BLOCK READ COMMANDS */ static const struct { u8 cmd; u8 len; } lm93_block_read_cmds[12] = { { 0xf2, 8 }, { 0xf3, 8 }, { 0xf4, 6 }, { 0xf5, 16 }, { 0xf6, 4 }, { 0xf7, 8 }, { 0xf8, 12 }, { 0xf9, 32 }, { 0xfa, 8 }, { 0xfb, 8 }, { 0xfc, 16 }, { 0xfd, 9 }, }; /* ALARMS: SYSCTL format described further below REG: 64 bits in 8 registers, as immediately below */ struct block1_t { u8 host_status_1; u8 host_status_2; u8 host_status_3; u8 host_status_4; u8 p1_prochot_status; u8 p2_prochot_status; u8 gpi_status; u8 fan_status; }; /* * Client-specific data */ struct lm93_data { struct device *hwmon_dev; struct mutex update_lock; unsigned long last_updated; /* In jiffies */ /* client update function */ void (*update)(struct lm93_data *, struct i2c_client *); char valid; /* !=0 if following fields are valid */ /* register values, arranged by block read groups */ struct block1_t block1; /* temp1 - temp4: unfiltered readings temp1 - temp2: filtered readings */ u8 block2[6]; /* vin1 - vin16: readings */ u8 block3[16]; /* prochot1 - prochot2: readings */ struct { u8 cur; u8 avg; } block4[2]; /* fan counts 1-4 => 14-bits, LE, *left* justified */ u16 block5[4]; /* block6 has a lot of data we don't need */ struct { u8 min; u8 max; } temp_lim[4]; /* vin1 - vin16: low and high limits */ struct { u8 min; u8 max; } block7[16]; /* fan count limits 1-4 => same format as block5 */ u16 block8[4]; /* pwm control registers (2 pwms, 4 regs) */ u8 block9[2][4]; /* auto/pwm base temp and offset temp registers */ struct { u8 base[4]; u8 offset[12]; } block10; /* master config register */ u8 config; /* VID1 & VID2 => register format, 6-bits, right justified */ u8 vid[2]; /* prochot1 - prochot2: limits */ u8 prochot_max[2]; /* vccp1 & vccp2 (in7 & in8): VID relative 
limits (register format) */ u8 vccp_limits[2]; /* GPIO input state (register format, i.e. inverted) */ u8 gpi; /* #PROCHOT override (register format) */ u8 prochot_override; /* #PROCHOT intervals (register format) */ u8 prochot_interval; /* Fan Boost Temperatures (register format) */ u8 boost[4]; /* Fan Boost Hysteresis (register format) */ u8 boost_hyst[2]; /* Temperature Zone Min. PWM & Hysteresis (register format) */ u8 auto_pwm_min_hyst[2]; /* #PROCHOT & #VRDHOT PWM Ramp Control */ u8 pwm_ramp_ctl; /* miscellaneous setup regs */ u8 sfc1; u8 sfc2; u8 sf_tach_to_pwm; /* The two PWM CTL2 registers can read something other than what was last written for the OVR_DC field (duty cycle override). So, we save the user-commanded value here. */ u8 pwm_override[2]; }; /* VID: mV REG: 6-bits, right justified, *always* using Intel VRM/VRD 10 */ static int LM93_VID_FROM_REG(u8 reg) { return vid_from_reg((reg & 0x3f), 100); } /* min, max, and nominal register values, per channel (u8) */ static const u8 lm93_vin_reg_min[16] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xae, }; static const u8 lm93_vin_reg_max[16] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, }; /* Values from the datasheet. They're here for documentation only. static const u8 lm93_vin_reg_nom[16] = { 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0x40, 0xc0, }; */ /* min, max, and nominal voltage readings, per channel (mV)*/ static const unsigned long lm93_vin_val_min[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3000, }; static const unsigned long lm93_vin_val_max[16] = { 1236, 1236, 1236, 1600, 2000, 2000, 1600, 1600, 4400, 6500, 3333, 2625, 1312, 1312, 1236, 3600, }; /* Values from the datasheet. They're here for documentation only. static const unsigned long lm93_vin_val_nom[16] = { 927, 927, 927, 1200, 1500, 1500, 1200, 1200, 3300, 5000, 2500, 1969, 984, 984, 309, 3300, }; */ static unsigned LM93_IN_FROM_REG(int nr, u8 reg) { const long uV_max = lm93_vin_val_max[nr] * 1000; const long uV_min = lm93_vin_val_min[nr] * 1000; const long slope = (uV_max - uV_min) / (lm93_vin_reg_max[nr] - lm93_vin_reg_min[nr]); const long intercept = uV_min - slope * lm93_vin_reg_min[nr]; return (slope * reg + intercept + 500) / 1000; } /* IN: mV, limits determined by channel nr REG: scaling determined by channel nr */ static u8 LM93_IN_TO_REG(int nr, unsigned val) { /* range limit */ const long mV = SENSORS_LIMIT(val, lm93_vin_val_min[nr], lm93_vin_val_max[nr]); /* try not to lose too much precision here */ const long uV = mV * 1000; const long uV_max = lm93_vin_val_max[nr] * 1000; const long uV_min = lm93_vin_val_min[nr] * 1000; /* convert */ const long slope = (uV_max - uV_min) / (lm93_vin_reg_max[nr] - lm93_vin_reg_min[nr]); const long intercept = uV_min - slope * lm93_vin_reg_min[nr]; u8 result = ((uV - intercept + (slope/2)) / slope); result = SENSORS_LIMIT(result, lm93_vin_reg_min[nr], lm93_vin_reg_max[nr]); return result; } /* vid in mV, upper == 0 indicates low limit, otherwise upper limit */ static unsigned LM93_IN_REL_FROM_REG(u8 reg, int upper, int vid) { const long uV_offset = upper ? 
(((reg >> 4 & 0x0f) + 1) * 12500) : (((reg >> 0 & 0x0f) + 1) * -25000); const long uV_vid = vid * 1000; return (uV_vid + uV_offset + 5000) / 10000; } #define LM93_IN_MIN_FROM_REG(reg,vid) LM93_IN_REL_FROM_REG(reg,0,vid) #define LM93_IN_MAX_FROM_REG(reg,vid) LM93_IN_REL_FROM_REG(reg,1,vid) /* vid in mV , upper == 0 indicates low limit, otherwise upper limit upper also determines which nibble of the register is returned (the other nibble will be 0x0) */ static u8 LM93_IN_REL_TO_REG(unsigned val, int upper, int vid) { long uV_offset = vid * 1000 - val * 10000; if (upper) { uV_offset = SENSORS_LIMIT(uV_offset, 12500, 200000); return (u8)((uV_offset / 12500 - 1) << 4); } else { uV_offset = SENSORS_LIMIT(uV_offset, -400000, -25000); return (u8)((uV_offset / -25000 - 1) << 0); } } /* TEMP: 1/1000 degrees C (-128C to +127C) REG: 1C/bit, two's complement */ static int LM93_TEMP_FROM_REG(u8 reg) { return (s8)reg * 1000; } #define LM93_TEMP_MIN (-128000) #define LM93_TEMP_MAX ( 127000) /* TEMP: 1/1000 degrees C (-128C to +127C) REG: 1C/bit, two's complement */ static u8 LM93_TEMP_TO_REG(long temp) { int ntemp = SENSORS_LIMIT(temp, LM93_TEMP_MIN, LM93_TEMP_MAX); ntemp += (ntemp<0 ? -500 : 500); return (u8)(ntemp / 1000); } /* Determine 4-bit temperature offset resolution */ static int LM93_TEMP_OFFSET_MODE_FROM_REG(u8 sfc2, int nr) { /* mode: 0 => 1C/bit, nonzero => 0.5C/bit */ return sfc2 & (nr < 2 ? 0x10 : 0x20); } /* This function is common to all 4-bit temperature offsets reg is 4 bits right justified mode 0 => 1C/bit, mode !0 => 0.5C/bit */ static int LM93_TEMP_OFFSET_FROM_REG(u8 reg, int mode) { return (reg & 0x0f) * (mode ? 5 : 10); } #define LM93_TEMP_OFFSET_MIN ( 0) #define LM93_TEMP_OFFSET_MAX0 (150) #define LM93_TEMP_OFFSET_MAX1 ( 75) /* This function is common to all 4-bit temperature offsets returns 4 bits right justified mode 0 => 1C/bit, mode !0 => 0.5C/bit */ static u8 LM93_TEMP_OFFSET_TO_REG(int off, int mode) { int factor = mode ? 5 : 10; off = SENSORS_LIMIT(off, LM93_TEMP_OFFSET_MIN, mode ? 
LM93_TEMP_OFFSET_MAX1 : LM93_TEMP_OFFSET_MAX0); return (u8)((off + factor/2) / factor); } /* 0 <= nr <= 3 */ static int LM93_TEMP_AUTO_OFFSET_FROM_REG(u8 reg, int nr, int mode) { /* temp1-temp2 (nr=0,1) use lower nibble */ if (nr < 2) return LM93_TEMP_OFFSET_FROM_REG(reg & 0x0f, mode); /* temp3-temp4 (nr=2,3) use upper nibble */ else return LM93_TEMP_OFFSET_FROM_REG(reg >> 4 & 0x0f, mode); } /* TEMP: 1/10 degrees C (0C to +15C (mode 0) or +7.5C (mode non-zero)) REG: 1.0C/bit (mode 0) or 0.5C/bit (mode non-zero) 0 <= nr <= 3 */ static u8 LM93_TEMP_AUTO_OFFSET_TO_REG(u8 old, int off, int nr, int mode) { u8 new = LM93_TEMP_OFFSET_TO_REG(off, mode); /* temp1-temp2 (nr=0,1) use lower nibble */ if (nr < 2) return (old & 0xf0) | (new & 0x0f); /* temp3-temp4 (nr=2,3) use upper nibble */ else return (new << 4 & 0xf0) | (old & 0x0f); } static int LM93_AUTO_BOOST_HYST_FROM_REGS(struct lm93_data *data, int nr, int mode) { u8 reg; switch (nr) { case 0: reg = data->boost_hyst[0] & 0x0f; break; case 1: reg = data->boost_hyst[0] >> 4 & 0x0f; break; case 2: reg = data->boost_hyst[1] & 0x0f; break; case 3: default: reg = data->boost_hyst[1] >> 4 & 0x0f; break; } return LM93_TEMP_FROM_REG(data->boost[nr]) - LM93_TEMP_OFFSET_FROM_REG(reg, mode); } static u8 LM93_AUTO_BOOST_HYST_TO_REG(struct lm93_data *data, long hyst, int nr, int mode) { u8 reg = LM93_TEMP_OFFSET_TO_REG( (LM93_TEMP_FROM_REG(data->boost[nr]) - hyst), mode); switch (nr) { case 0: reg = (data->boost_hyst[0] & 0xf0) | (reg & 0x0f); break; case 1: reg = (reg << 4 & 0xf0) | (data->boost_hyst[0] & 0x0f); break; case 2: reg = (data->boost_hyst[1] & 0xf0) | (reg & 0x0f); break; case 3: default: reg = (reg << 4 & 0xf0) | (data->boost_hyst[1] & 0x0f); break; } return reg; } /* PWM: 0-255 per sensors documentation REG: 0-13 as mapped below... right justified */ typedef enum { LM93_PWM_MAP_HI_FREQ, LM93_PWM_MAP_LO_FREQ } pwm_freq_t; static int lm93_pwm_map[2][16] = { { 0x00, /* 0.00% */ 0x40, /* 25.00% */ 0x50, /* 31.25% */ 0x60, /* 37.50% */ 0x70, /* 43.75% */ 0x80, /* 50.00% */ 0x90, /* 56.25% */ 0xa0, /* 62.50% */ 0xb0, /* 68.75% */ 0xc0, /* 75.00% */ 0xd0, /* 81.25% */ 0xe0, /* 87.50% */ 0xf0, /* 93.75% */ 0xff, /* 100.00% */ 0xff, 0xff, /* 14, 15 are reserved and should never occur */ }, { 0x00, /* 0.00% */ 0x40, /* 25.00% */ 0x49, /* 28.57% */ 0x52, /* 32.14% */ 0x5b, /* 35.71% */ 0x64, /* 39.29% */ 0x6d, /* 42.86% */ 0x76, /* 46.43% */ 0x80, /* 50.00% */ 0x89, /* 53.57% */ 0x92, /* 57.14% */ 0xb6, /* 71.43% */ 0xdb, /* 85.71% */ 0xff, /* 100.00% */ 0xff, 0xff, /* 14, 15 are reserved and should never occur */ }, }; static int LM93_PWM_FROM_REG(u8 reg, pwm_freq_t freq) { return lm93_pwm_map[freq][reg & 0x0f]; } /* round up to nearest match */ static u8 LM93_PWM_TO_REG(int pwm, pwm_freq_t freq) { int i; for (i = 0; i < 13; i++) if (pwm <= lm93_pwm_map[freq][i]) break; /* can fall through with i==13 */ return (u8)i; } static int LM93_FAN_FROM_REG(u16 regs) { const u16 count = le16_to_cpu(regs) >> 2; return count==0 ? -1 : count==0x3fff ? 
0: 1350000 / count; } /* * RPM: (82.5 to 1350000) * REG: 14-bits, LE, *left* justified */ static u16 LM93_FAN_TO_REG(long rpm) { u16 count, regs; if (rpm == 0) { count = 0x3fff; } else { rpm = SENSORS_LIMIT(rpm, 1, 1000000); count = SENSORS_LIMIT((1350000 + rpm) / rpm, 1, 0x3ffe); } regs = count << 2; return cpu_to_le16(regs); } /* PWM FREQ: HZ REG: 0-7 as mapped below */ static int lm93_pwm_freq_map[8] = { 22500, 96, 84, 72, 60, 48, 36, 12 }; static int LM93_PWM_FREQ_FROM_REG(u8 reg) { return lm93_pwm_freq_map[reg & 0x07]; } /* round up to nearest match */ static u8 LM93_PWM_FREQ_TO_REG(int freq) { int i; for (i = 7; i > 0; i--) if (freq <= lm93_pwm_freq_map[i]) break; /* can fall through with i==0 */ return (u8)i; } /* TIME: 1/100 seconds * REG: 0-7 as mapped below */ static int lm93_spinup_time_map[8] = { 0, 10, 25, 40, 70, 100, 200, 400, }; static int LM93_SPINUP_TIME_FROM_REG(u8 reg) { return lm93_spinup_time_map[reg >> 5 & 0x07]; } /* round up to nearest match */ static u8 LM93_SPINUP_TIME_TO_REG(int time) { int i; for (i = 0; i < 7; i++) if (time <= lm93_spinup_time_map[i]) break; /* can fall through with i==8 */ return (u8)i; } #define LM93_RAMP_MIN 0 #define LM93_RAMP_MAX 75 static int LM93_RAMP_FROM_REG(u8 reg) { return (reg & 0x0f) * 5; } /* RAMP: 1/100 seconds REG: 50mS/bit 4-bits right justified */ static u8 LM93_RAMP_TO_REG(int ramp) { ramp = SENSORS_LIMIT(ramp, LM93_RAMP_MIN, LM93_RAMP_MAX); return (u8)((ramp + 2) / 5); } /* PROCHOT: 0-255, 0 => 0%, 255 => > 96.6% * REG: (same) */ static u8 LM93_PROCHOT_TO_REG(long prochot) { prochot = SENSORS_LIMIT(prochot, 0, 255); return (u8)prochot; } /* PROCHOT-INTERVAL: 73 - 37200 (1/100 seconds) * REG: 0-9 as mapped below */ static int lm93_interval_map[10] = { 73, 146, 290, 580, 1170, 2330, 4660, 9320, 18600, 37200, }; static int LM93_INTERVAL_FROM_REG(u8 reg) { return lm93_interval_map[reg & 0x0f]; } /* round up to nearest match */ static u8 LM93_INTERVAL_TO_REG(long interval) { int i; for (i = 0; i < 9; i++) if (interval <= lm93_interval_map[i]) break; /* can fall through with i==9 */ return (u8)i; } /* GPIO: 0-255, GPIO0 is LSB * REG: inverted */ static unsigned LM93_GPI_FROM_REG(u8 reg) { return ~reg & 0xff; } /* alarm bitmask definitions The LM93 has nearly 64 bits of error status... I've pared that down to what I think is a useful subset in order to fit it into 32 bits. Especially note that the #VRD_HOT alarms are missing because we provide that information as values in another sysfs file. If libsensors is extended to support 64 bit values, this could be revisited. 
*/ #define LM93_ALARM_IN1 0x00000001 #define LM93_ALARM_IN2 0x00000002 #define LM93_ALARM_IN3 0x00000004 #define LM93_ALARM_IN4 0x00000008 #define LM93_ALARM_IN5 0x00000010 #define LM93_ALARM_IN6 0x00000020 #define LM93_ALARM_IN7 0x00000040 #define LM93_ALARM_IN8 0x00000080 #define LM93_ALARM_IN9 0x00000100 #define LM93_ALARM_IN10 0x00000200 #define LM93_ALARM_IN11 0x00000400 #define LM93_ALARM_IN12 0x00000800 #define LM93_ALARM_IN13 0x00001000 #define LM93_ALARM_IN14 0x00002000 #define LM93_ALARM_IN15 0x00004000 #define LM93_ALARM_IN16 0x00008000 #define LM93_ALARM_FAN1 0x00010000 #define LM93_ALARM_FAN2 0x00020000 #define LM93_ALARM_FAN3 0x00040000 #define LM93_ALARM_FAN4 0x00080000 #define LM93_ALARM_PH1_ERR 0x00100000 #define LM93_ALARM_PH2_ERR 0x00200000 #define LM93_ALARM_SCSI1_ERR 0x00400000 #define LM93_ALARM_SCSI2_ERR 0x00800000 #define LM93_ALARM_DVDDP1_ERR 0x01000000 #define LM93_ALARM_DVDDP2_ERR 0x02000000 #define LM93_ALARM_D1_ERR 0x04000000 #define LM93_ALARM_D2_ERR 0x08000000 #define LM93_ALARM_TEMP1 0x10000000 #define LM93_ALARM_TEMP2 0x20000000 #define LM93_ALARM_TEMP3 0x40000000 static unsigned LM93_ALARMS_FROM_REG(struct block1_t b1) { unsigned result; result = b1.host_status_2 & 0x3f; if (vccp_limit_type[0]) result |= (b1.host_status_4 & 0x10) << 2; else result |= b1.host_status_2 & 0x40; if (vccp_limit_type[1]) result |= (b1.host_status_4 & 0x20) << 2; else result |= b1.host_status_2 & 0x80; result |= b1.host_status_3 << 8; result |= (b1.fan_status & 0x0f) << 16; result |= (b1.p1_prochot_status & 0x80) << 13; result |= (b1.p2_prochot_status & 0x80) << 14; result |= (b1.host_status_4 & 0xfc) << 20; result |= (b1.host_status_1 & 0x07) << 28; return result; } #define MAX_RETRIES 5 static u8 lm93_read_byte(struct i2c_client *client, u8 reg) { int value, i; /* retry in case of read errors */ for (i=1; i<=MAX_RETRIES; i++) { if ((value = i2c_smbus_read_byte_data(client, reg)) >= 0) { return value; } else { dev_warn(&client->dev,"lm93: read byte data failed, " "address 0x%02x.\n", reg); mdelay(i + 3); } } /* <TODO> what to return in case of error? */ dev_err(&client->dev,"lm93: All read byte retries failed!!\n"); return 0; } static int lm93_write_byte(struct i2c_client *client, u8 reg, u8 value) { int result; /* <TODO> how to handle write errors? */ result = i2c_smbus_write_byte_data(client, reg, value); if (result < 0) dev_warn(&client->dev,"lm93: write byte data failed, " "0x%02x at address 0x%02x.\n", value, reg); return result; } static u16 lm93_read_word(struct i2c_client *client, u8 reg) { int value, i; /* retry in case of read errors */ for (i=1; i<=MAX_RETRIES; i++) { if ((value = i2c_smbus_read_word_data(client, reg)) >= 0) { return value; } else { dev_warn(&client->dev,"lm93: read word data failed, " "address 0x%02x.\n", reg); mdelay(i + 3); } } /* <TODO> what to return in case of error? */ dev_err(&client->dev,"lm93: All read word retries failed!!\n"); return 0; } static int lm93_write_word(struct i2c_client *client, u8 reg, u16 value) { int result; /* <TODO> how to handle write errors? 
*/ result = i2c_smbus_write_word_data(client, reg, value); if (result < 0) dev_warn(&client->dev,"lm93: write word data failed, " "0x%04x at address 0x%02x.\n", value, reg); return result; } static u8 lm93_block_buffer[I2C_SMBUS_BLOCK_MAX]; /* read block data into values, retry if not expected length fbn => index to lm93_block_read_cmds table (Fixed Block Number - section 14.5.2 of LM93 datasheet) */ static void lm93_read_block(struct i2c_client *client, u8 fbn, u8 *values) { int i, result=0; for (i = 1; i <= MAX_RETRIES; i++) { result = i2c_smbus_read_block_data(client, lm93_block_read_cmds[fbn].cmd, lm93_block_buffer); if (result == lm93_block_read_cmds[fbn].len) { break; } else { dev_warn(&client->dev,"lm93: block read data failed, " "command 0x%02x.\n", lm93_block_read_cmds[fbn].cmd); mdelay(i + 3); } } if (result == lm93_block_read_cmds[fbn].len) { memcpy(values,lm93_block_buffer,lm93_block_read_cmds[fbn].len); } else { /* <TODO> what to do in case of error? */ } } static struct lm93_data *lm93_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); const unsigned long interval = HZ + (HZ / 2); mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + interval) || !data->valid) { data->update(data, client); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* update routine for data that has no corresponding SMBus block command */ static void lm93_update_client_common(struct lm93_data *data, struct i2c_client *client) { int i; u8 *ptr; /* temp1 - temp4: limits */ for (i = 0; i < 4; i++) { data->temp_lim[i].min = lm93_read_byte(client, LM93_REG_TEMP_MIN(i)); data->temp_lim[i].max = lm93_read_byte(client, LM93_REG_TEMP_MAX(i)); } /* config register */ data->config = lm93_read_byte(client, LM93_REG_CONFIG); /* vid1 - vid2: values */ for (i = 0; i < 2; i++) data->vid[i] = lm93_read_byte(client, LM93_REG_VID(i)); /* prochot1 - prochot2: limits */ for (i = 0; i < 2; i++) data->prochot_max[i] = lm93_read_byte(client, LM93_REG_PROCHOT_MAX(i)); /* vccp1 - vccp2: VID relative limits */ for (i = 0; i < 2; i++) data->vccp_limits[i] = lm93_read_byte(client, LM93_REG_VCCP_LIMIT_OFF(i)); /* GPIO input state */ data->gpi = lm93_read_byte(client, LM93_REG_GPI); /* #PROCHOT override state */ data->prochot_override = lm93_read_byte(client, LM93_REG_PROCHOT_OVERRIDE); /* #PROCHOT intervals */ data->prochot_interval = lm93_read_byte(client, LM93_REG_PROCHOT_INTERVAL); /* Fan Boost Temperature registers */ for (i = 0; i < 4; i++) data->boost[i] = lm93_read_byte(client, LM93_REG_BOOST(i)); /* Fan Boost Temperature Hyst. registers */ data->boost_hyst[0] = lm93_read_byte(client, LM93_REG_BOOST_HYST_12); data->boost_hyst[1] = lm93_read_byte(client, LM93_REG_BOOST_HYST_34); /* Temperature Zone Min. 
PWM & Hysteresis registers */ data->auto_pwm_min_hyst[0] = lm93_read_byte(client, LM93_REG_PWM_MIN_HYST_12); data->auto_pwm_min_hyst[1] = lm93_read_byte(client, LM93_REG_PWM_MIN_HYST_34); /* #PROCHOT & #VRDHOT PWM Ramp Control register */ data->pwm_ramp_ctl = lm93_read_byte(client, LM93_REG_PWM_RAMP_CTL); /* misc setup registers */ data->sfc1 = lm93_read_byte(client, LM93_REG_SFC1); data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2); data->sf_tach_to_pwm = lm93_read_byte(client, LM93_REG_SF_TACH_TO_PWM); /* write back alarm values to clear */ for (i = 0, ptr = (u8 *)(&data->block1); i < 8; i++) lm93_write_byte(client, LM93_REG_HOST_ERROR_1 + i, *(ptr + i)); } /* update routine which uses SMBus block data commands */ static void lm93_update_client_full(struct lm93_data *data, struct i2c_client *client) { dev_dbg(&client->dev,"starting device update (block data enabled)\n"); /* in1 - in16: values & limits */ lm93_read_block(client, 3, (u8 *)(data->block3)); lm93_read_block(client, 7, (u8 *)(data->block7)); /* temp1 - temp4: values */ lm93_read_block(client, 2, (u8 *)(data->block2)); /* prochot1 - prochot2: values */ lm93_read_block(client, 4, (u8 *)(data->block4)); /* fan1 - fan4: values & limits */ lm93_read_block(client, 5, (u8 *)(data->block5)); lm93_read_block(client, 8, (u8 *)(data->block8)); /* pmw control registers */ lm93_read_block(client, 9, (u8 *)(data->block9)); /* alarm values */ lm93_read_block(client, 1, (u8 *)(&data->block1)); /* auto/pwm registers */ lm93_read_block(client, 10, (u8 *)(&data->block10)); lm93_update_client_common(data, client); } /* update routine which uses SMBus byte/word data commands only */ static void lm93_update_client_min(struct lm93_data *data, struct i2c_client *client) { int i,j; u8 *ptr; dev_dbg(&client->dev,"starting device update (block data disabled)\n"); /* in1 - in16: values & limits */ for (i = 0; i < 16; i++) { data->block3[i] = lm93_read_byte(client, LM93_REG_IN(i)); data->block7[i].min = lm93_read_byte(client, LM93_REG_IN_MIN(i)); data->block7[i].max = lm93_read_byte(client, LM93_REG_IN_MAX(i)); } /* temp1 - temp4: values */ for (i = 0; i < 4; i++) { data->block2[i] = lm93_read_byte(client, LM93_REG_TEMP(i)); } /* prochot1 - prochot2: values */ for (i = 0; i < 2; i++) { data->block4[i].cur = lm93_read_byte(client, LM93_REG_PROCHOT_CUR(i)); data->block4[i].avg = lm93_read_byte(client, LM93_REG_PROCHOT_AVG(i)); } /* fan1 - fan4: values & limits */ for (i = 0; i < 4; i++) { data->block5[i] = lm93_read_word(client, LM93_REG_FAN(i)); data->block8[i] = lm93_read_word(client, LM93_REG_FAN_MIN(i)); } /* pwm control registers */ for (i = 0; i < 2; i++) { for (j = 0; j < 4; j++) { data->block9[i][j] = lm93_read_byte(client, LM93_REG_PWM_CTL(i,j)); } } /* alarm values */ for (i = 0, ptr = (u8 *)(&data->block1); i < 8; i++) { *(ptr + i) = lm93_read_byte(client, LM93_REG_HOST_ERROR_1 + i); } /* auto/pwm (base temp) registers */ for (i = 0; i < 4; i++) { data->block10.base[i] = lm93_read_byte(client, LM93_REG_TEMP_BASE(i)); } /* auto/pwm (offset temp) registers */ for (i = 0; i < 12; i++) { data->block10.offset[i] = lm93_read_byte(client, LM93_REG_TEMP_OFFSET(i)); } lm93_update_client_common(data, client); } /* following are the sysfs callback functions */ static ssize_t show_in(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf, "%d\n", LM93_IN_FROM_REG(nr, data->block3[nr])); } static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, 
show_in, NULL, 0); static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in, NULL, 1); static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in, NULL, 2); static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in, NULL, 3); static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_in, NULL, 4); static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_in, NULL, 5); static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_in, NULL, 6); static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_in, NULL, 7); static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_in, NULL, 8); static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_in, NULL, 9); static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, show_in, NULL, 10); static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, show_in, NULL, 11); static SENSOR_DEVICE_ATTR(in13_input, S_IRUGO, show_in, NULL, 12); static SENSOR_DEVICE_ATTR(in14_input, S_IRUGO, show_in, NULL, 13); static SENSOR_DEVICE_ATTR(in15_input, S_IRUGO, show_in, NULL, 14); static SENSOR_DEVICE_ATTR(in16_input, S_IRUGO, show_in, NULL, 15); static ssize_t show_in_min(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); int vccp = nr - 6; long rc, vid; if ((nr==6 || nr==7) && (vccp_limit_type[vccp])) { vid = LM93_VID_FROM_REG(data->vid[vccp]); rc = LM93_IN_MIN_FROM_REG(data->vccp_limits[vccp], vid); } else { rc = LM93_IN_FROM_REG(nr, data->block7[nr].min); \ } return sprintf(buf, "%ld\n", rc); \ } static ssize_t store_in_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); int vccp = nr - 6; long vid; mutex_lock(&data->update_lock); if ((nr==6 || nr==7) && (vccp_limit_type[vccp])) { vid = LM93_VID_FROM_REG(data->vid[vccp]); data->vccp_limits[vccp] = (data->vccp_limits[vccp] & 0xf0) | LM93_IN_REL_TO_REG(val, 0, vid); lm93_write_byte(client, LM93_REG_VCCP_LIMIT_OFF(vccp), data->vccp_limits[vccp]); } else { data->block7[nr].min = LM93_IN_TO_REG(nr,val); lm93_write_byte(client, LM93_REG_IN_MIN(nr), data->block7[nr].min); } mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0); static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1); static SENSOR_DEVICE_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2); static SENSOR_DEVICE_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3); static SENSOR_DEVICE_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4); static SENSOR_DEVICE_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5); static SENSOR_DEVICE_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6); static SENSOR_DEVICE_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7); static SENSOR_DEVICE_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8); static SENSOR_DEVICE_ATTR(in10_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9); static SENSOR_DEVICE_ATTR(in11_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 10); static SENSOR_DEVICE_ATTR(in12_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 11); static SENSOR_DEVICE_ATTR(in13_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 12); static SENSOR_DEVICE_ATTR(in14_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 13); static SENSOR_DEVICE_ATTR(in15_min, S_IWUSR | S_IRUGO, show_in_min, 
store_in_min, 14); static SENSOR_DEVICE_ATTR(in16_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 15); static ssize_t show_in_max(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); int vccp = nr - 6; long rc, vid; if ((nr==6 || nr==7) && (vccp_limit_type[vccp])) { vid = LM93_VID_FROM_REG(data->vid[vccp]); rc = LM93_IN_MAX_FROM_REG(data->vccp_limits[vccp],vid); } else { rc = LM93_IN_FROM_REG(nr,data->block7[nr].max); \ } return sprintf(buf,"%ld\n",rc); \ } static ssize_t store_in_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); int vccp = nr - 6; long vid; mutex_lock(&data->update_lock); if ((nr==6 || nr==7) && (vccp_limit_type[vccp])) { vid = LM93_VID_FROM_REG(data->vid[vccp]); data->vccp_limits[vccp] = (data->vccp_limits[vccp] & 0x0f) | LM93_IN_REL_TO_REG(val, 1, vid); lm93_write_byte(client, LM93_REG_VCCP_LIMIT_OFF(vccp), data->vccp_limits[vccp]); } else { data->block7[nr].max = LM93_IN_TO_REG(nr,val); lm93_write_byte(client, LM93_REG_IN_MAX(nr), data->block7[nr].max); } mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0); static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1); static SENSOR_DEVICE_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2); static SENSOR_DEVICE_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3); static SENSOR_DEVICE_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4); static SENSOR_DEVICE_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5); static SENSOR_DEVICE_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6); static SENSOR_DEVICE_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7); static SENSOR_DEVICE_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8); static SENSOR_DEVICE_ATTR(in10_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9); static SENSOR_DEVICE_ATTR(in11_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 10); static SENSOR_DEVICE_ATTR(in12_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 11); static SENSOR_DEVICE_ATTR(in13_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 12); static SENSOR_DEVICE_ATTR(in14_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 13); static SENSOR_DEVICE_ATTR(in15_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 14); static SENSOR_DEVICE_ATTR(in16_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 15); static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->block2[nr])); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1); static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2); static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->temp_lim[nr].min)); } static ssize_t store_temp_min(struct device *dev, struct device_attribute *attr, const char 
*buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->temp_lim[nr].min = LM93_TEMP_TO_REG(val); lm93_write_byte(client, LM93_REG_TEMP_MIN(nr), data->temp_lim[nr].min); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min, store_temp_min, 0); static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min, store_temp_min, 1); static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_temp_min, store_temp_min, 2); static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->temp_lim[nr].max)); } static ssize_t store_temp_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->temp_lim[nr].max = LM93_TEMP_TO_REG(val); lm93_write_byte(client, LM93_REG_TEMP_MAX(nr), data->temp_lim[nr].max); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max, store_temp_max, 0); static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max, store_temp_max, 1); static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_temp_max, store_temp_max, 2); static ssize_t show_temp_auto_base(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->block10.base[nr])); } static ssize_t store_temp_auto_base(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->block10.base[nr] = LM93_TEMP_TO_REG(val); lm93_write_byte(client, LM93_REG_TEMP_BASE(nr), data->block10.base[nr]); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(temp1_auto_base, S_IWUSR | S_IRUGO, show_temp_auto_base, store_temp_auto_base, 0); static SENSOR_DEVICE_ATTR(temp2_auto_base, S_IWUSR | S_IRUGO, show_temp_auto_base, store_temp_auto_base, 1); static SENSOR_DEVICE_ATTR(temp3_auto_base, S_IWUSR | S_IRUGO, show_temp_auto_base, store_temp_auto_base, 2); static ssize_t show_temp_auto_boost(struct device *dev, struct device_attribute *attr,char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->boost[nr])); } static ssize_t store_temp_auto_boost(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->boost[nr] = LM93_TEMP_TO_REG(val); lm93_write_byte(client, LM93_REG_BOOST(nr), data->boost[nr]); mutex_unlock(&data->update_lock); 
return count; } static SENSOR_DEVICE_ATTR(temp1_auto_boost, S_IWUSR | S_IRUGO, show_temp_auto_boost, store_temp_auto_boost, 0); static SENSOR_DEVICE_ATTR(temp2_auto_boost, S_IWUSR | S_IRUGO, show_temp_auto_boost, store_temp_auto_boost, 1); static SENSOR_DEVICE_ATTR(temp3_auto_boost, S_IWUSR | S_IRUGO, show_temp_auto_boost, store_temp_auto_boost, 2); static ssize_t show_temp_auto_boost_hyst(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); int mode = LM93_TEMP_OFFSET_MODE_FROM_REG(data->sfc2, nr); return sprintf(buf,"%d\n", LM93_AUTO_BOOST_HYST_FROM_REGS(data, nr, mode)); } static ssize_t store_temp_auto_boost_hyst(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); /* force 0.5C/bit mode */ data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2); data->sfc2 |= ((nr < 2) ? 0x10 : 0x20); lm93_write_byte(client, LM93_REG_SFC2, data->sfc2); data->boost_hyst[nr/2] = LM93_AUTO_BOOST_HYST_TO_REG(data, val, nr, 1); lm93_write_byte(client, LM93_REG_BOOST_HYST(nr), data->boost_hyst[nr/2]); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(temp1_auto_boost_hyst, S_IWUSR | S_IRUGO, show_temp_auto_boost_hyst, store_temp_auto_boost_hyst, 0); static SENSOR_DEVICE_ATTR(temp2_auto_boost_hyst, S_IWUSR | S_IRUGO, show_temp_auto_boost_hyst, store_temp_auto_boost_hyst, 1); static SENSOR_DEVICE_ATTR(temp3_auto_boost_hyst, S_IWUSR | S_IRUGO, show_temp_auto_boost_hyst, store_temp_auto_boost_hyst, 2); static ssize_t show_temp_auto_offset(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr); int nr = s_attr->index; int ofs = s_attr->nr; struct lm93_data *data = lm93_update_device(dev); int mode = LM93_TEMP_OFFSET_MODE_FROM_REG(data->sfc2, nr); return sprintf(buf,"%d\n", LM93_TEMP_AUTO_OFFSET_FROM_REG(data->block10.offset[ofs], nr,mode)); } static ssize_t store_temp_auto_offset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr); int nr = s_attr->index; int ofs = s_attr->nr; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); /* force 0.5C/bit mode */ data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2); data->sfc2 |= ((nr < 2) ? 
0x10 : 0x20); lm93_write_byte(client, LM93_REG_SFC2, data->sfc2); data->block10.offset[ofs] = LM93_TEMP_AUTO_OFFSET_TO_REG( data->block10.offset[ofs], val, nr, 1); lm93_write_byte(client, LM93_REG_TEMP_OFFSET(ofs), data->block10.offset[ofs]); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR_2(temp1_auto_offset1, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 0, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_offset2, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 1, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_offset3, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 2, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_offset4, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 3, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_offset5, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 4, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_offset6, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 5, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_offset7, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 6, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_offset8, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 7, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_offset9, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 8, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_offset10, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 9, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_offset11, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 10, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_offset12, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 11, 0); static SENSOR_DEVICE_ATTR_2(temp2_auto_offset1, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 0, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_offset2, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 1, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_offset3, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 2, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_offset4, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 3, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_offset5, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 4, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_offset6, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 5, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_offset7, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 6, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_offset8, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 7, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_offset9, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 8, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_offset10, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 9, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_offset11, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 10, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_offset12, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 11, 1); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset1, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 0, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset2, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 1, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset3, S_IWUSR | S_IRUGO, show_temp_auto_offset, 
store_temp_auto_offset, 2, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset4, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 3, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset5, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 4, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset6, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 5, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset7, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 6, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset8, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 7, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset9, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 8, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset10, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 9, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset11, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 10, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset12, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 11, 2); static ssize_t show_temp_auto_pwm_min(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; u8 reg, ctl4; struct lm93_data *data = lm93_update_device(dev); reg = data->auto_pwm_min_hyst[nr/2] >> 4 & 0x0f; ctl4 = data->block9[nr][LM93_PWM_CTL4]; return sprintf(buf,"%d\n",LM93_PWM_FROM_REG(reg, (ctl4 & 0x07) ? LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ)); } static ssize_t store_temp_auto_pwm_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); u8 reg, ctl4; mutex_lock(&data->update_lock); reg = lm93_read_byte(client, LM93_REG_PWM_MIN_HYST(nr)); ctl4 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr,LM93_PWM_CTL4)); reg = (reg & 0x0f) | LM93_PWM_TO_REG(val, (ctl4 & 0x07) ? LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ) << 4; data->auto_pwm_min_hyst[nr/2] = reg; lm93_write_byte(client, LM93_REG_PWM_MIN_HYST(nr), reg); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(temp1_auto_pwm_min, S_IWUSR | S_IRUGO, show_temp_auto_pwm_min, store_temp_auto_pwm_min, 0); static SENSOR_DEVICE_ATTR(temp2_auto_pwm_min, S_IWUSR | S_IRUGO, show_temp_auto_pwm_min, store_temp_auto_pwm_min, 1); static SENSOR_DEVICE_ATTR(temp3_auto_pwm_min, S_IWUSR | S_IRUGO, show_temp_auto_pwm_min, store_temp_auto_pwm_min, 2); static ssize_t show_temp_auto_offset_hyst(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); int mode = LM93_TEMP_OFFSET_MODE_FROM_REG(data->sfc2, nr); return sprintf(buf,"%d\n",LM93_TEMP_OFFSET_FROM_REG( data->auto_pwm_min_hyst[nr/2], mode)); } static ssize_t store_temp_auto_offset_hyst(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); u8 reg; mutex_lock(&data->update_lock); /* force 0.5C/bit mode */ data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2); data->sfc2 |= ((nr < 2) ? 
0x10 : 0x20); lm93_write_byte(client, LM93_REG_SFC2, data->sfc2); reg = data->auto_pwm_min_hyst[nr/2]; reg = (reg & 0xf0) | (LM93_TEMP_OFFSET_TO_REG(val, 1) & 0x0f); data->auto_pwm_min_hyst[nr/2] = reg; lm93_write_byte(client, LM93_REG_PWM_MIN_HYST(nr), reg); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(temp1_auto_offset_hyst, S_IWUSR | S_IRUGO, show_temp_auto_offset_hyst, store_temp_auto_offset_hyst, 0); static SENSOR_DEVICE_ATTR(temp2_auto_offset_hyst, S_IWUSR | S_IRUGO, show_temp_auto_offset_hyst, store_temp_auto_offset_hyst, 1); static SENSOR_DEVICE_ATTR(temp3_auto_offset_hyst, S_IWUSR | S_IRUGO, show_temp_auto_offset_hyst, store_temp_auto_offset_hyst, 2); static ssize_t show_fan_input(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *s_attr = to_sensor_dev_attr(attr); int nr = s_attr->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",LM93_FAN_FROM_REG(data->block5[nr])); } static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0); static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1); static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan_input, NULL, 2); static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan_input, NULL, 3); static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",LM93_FAN_FROM_REG(data->block8[nr])); } static ssize_t store_fan_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); data->block8[nr] = LM93_FAN_TO_REG(val); lm93_write_word(client,LM93_REG_FAN_MIN(nr),data->block8[nr]); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 0); static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 1); static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 2); static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 3); /* some tedious bit-twiddling here to deal with the register format: data->sf_tach_to_pwm: (tach to pwm mapping bits) bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 T4:P2 T4:P1 T3:P2 T3:P1 T2:P2 T2:P1 T1:P2 T1:P1 data->sfc2: (enable bits) bit | 3 | 2 | 1 | 0 T4 T3 T2 T1 */ static ssize_t show_fan_smart_tach(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); long rc = 0; int mapping; /* extract the relevant mapping */ mapping = (data->sf_tach_to_pwm >> (nr * 2)) & 0x03; /* if there's a mapping and it's enabled */ if (mapping && ((data->sfc2 >> nr) & 0x01)) rc = mapping; return sprintf(buf,"%ld\n",rc); } /* helper function - must grab data->update_lock before calling fan is 0-3, indicating fan1-fan4 */ static void lm93_write_fan_smart_tach(struct i2c_client *client, struct lm93_data *data, int fan, long value) { /* insert the new mapping and write it out */ data->sf_tach_to_pwm = lm93_read_byte(client, LM93_REG_SF_TACH_TO_PWM); data->sf_tach_to_pwm &= ~(0x3 << fan * 2); data->sf_tach_to_pwm |= value << fan * 2; lm93_write_byte(client, LM93_REG_SF_TACH_TO_PWM, 
data->sf_tach_to_pwm); /* insert the enable bit and write it out */ data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2); if (value) data->sfc2 |= 1 << fan; else data->sfc2 &= ~(1 << fan); lm93_write_byte(client, LM93_REG_SFC2, data->sfc2); } static ssize_t store_fan_smart_tach(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); /* sanity test, ignore the write otherwise */ if (0 <= val && val <= 2) { /* can't enable if pwm freq is 22.5KHz */ if (val) { u8 ctl4 = lm93_read_byte(client, LM93_REG_PWM_CTL(val-1,LM93_PWM_CTL4)); if ((ctl4 & 0x07) == 0) val = 0; } lm93_write_fan_smart_tach(client, data, nr, val); } mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(fan1_smart_tach, S_IWUSR | S_IRUGO, show_fan_smart_tach, store_fan_smart_tach, 0); static SENSOR_DEVICE_ATTR(fan2_smart_tach, S_IWUSR | S_IRUGO, show_fan_smart_tach, store_fan_smart_tach, 1); static SENSOR_DEVICE_ATTR(fan3_smart_tach, S_IWUSR | S_IRUGO, show_fan_smart_tach, store_fan_smart_tach, 2); static SENSOR_DEVICE_ATTR(fan4_smart_tach, S_IWUSR | S_IRUGO, show_fan_smart_tach, store_fan_smart_tach, 3); static ssize_t show_pwm(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); u8 ctl2, ctl4; long rc; ctl2 = data->block9[nr][LM93_PWM_CTL2]; ctl4 = data->block9[nr][LM93_PWM_CTL4]; if (ctl2 & 0x01) /* show user commanded value if enabled */ rc = data->pwm_override[nr]; else /* show present h/w value if manual pwm disabled */ rc = LM93_PWM_FROM_REG(ctl2 >> 4, (ctl4 & 0x07) ? LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ); return sprintf(buf,"%ld\n",rc); } static ssize_t store_pwm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); u8 ctl2, ctl4; mutex_lock(&data->update_lock); ctl2 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL2)); ctl4 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr,LM93_PWM_CTL4)); ctl2 = (ctl2 & 0x0f) | LM93_PWM_TO_REG(val,(ctl4 & 0x07) ? LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ) << 4; /* save user commanded value */ data->pwm_override[nr] = LM93_PWM_FROM_REG(ctl2 >> 4, (ctl4 & 0x07) ? LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ); lm93_write_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL2),ctl2); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0); static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1); static ssize_t show_pwm_enable(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); u8 ctl2; long rc; ctl2 = data->block9[nr][LM93_PWM_CTL2]; if (ctl2 & 0x01) /* manual override enabled ? */ rc = ((ctl2 & 0xF0) == 0xF0) ? 
0 : 1; else rc = 2; return sprintf(buf,"%ld\n",rc); } static ssize_t store_pwm_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); u8 ctl2; mutex_lock(&data->update_lock); ctl2 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL2)); switch (val) { case 0: ctl2 |= 0xF1; /* enable manual override, set PWM to max */ break; case 1: ctl2 |= 0x01; /* enable manual override */ break; case 2: ctl2 &= ~0x01; /* disable manual override */ break; default: mutex_unlock(&data->update_lock); return -EINVAL; } lm93_write_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL2),ctl2); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable, store_pwm_enable, 0); static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable, store_pwm_enable, 1); static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); u8 ctl4; ctl4 = data->block9[nr][LM93_PWM_CTL4]; return sprintf(buf,"%d\n",LM93_PWM_FREQ_FROM_REG(ctl4)); } /* helper function - must grab data->update_lock before calling pwm is 0-1, indicating pwm1-pwm2 this disables smart tach for all tach channels bound to the given pwm */ static void lm93_disable_fan_smart_tach(struct i2c_client *client, struct lm93_data *data, int pwm) { int mapping = lm93_read_byte(client, LM93_REG_SF_TACH_TO_PWM); int mask; /* collapse the mapping into a mask of enable bits */ mapping = (mapping >> pwm) & 0x55; mask = mapping & 0x01; mask |= (mapping & 0x04) >> 1; mask |= (mapping & 0x10) >> 2; mask |= (mapping & 0x40) >> 3; /* disable smart tach according to the mask */ data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2); data->sfc2 &= ~mask; lm93_write_byte(client, LM93_REG_SFC2, data->sfc2); } static ssize_t store_pwm_freq(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); u8 ctl4; mutex_lock(&data->update_lock); ctl4 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL4)); ctl4 = (ctl4 & 0xf8) | LM93_PWM_FREQ_TO_REG(val); data->block9[nr][LM93_PWM_CTL4] = ctl4; /* ctl4 == 0 -> 22.5KHz -> disable smart tach */ if (!ctl4) lm93_disable_fan_smart_tach(client, data, nr); lm93_write_byte(client, LM93_REG_PWM_CTL(nr,LM93_PWM_CTL4), ctl4); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1_freq, S_IWUSR | S_IRUGO, show_pwm_freq, store_pwm_freq, 0); static SENSOR_DEVICE_ATTR(pwm2_freq, S_IWUSR | S_IRUGO, show_pwm_freq, store_pwm_freq, 1); static ssize_t show_pwm_auto_channels(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",data->block9[nr][LM93_PWM_CTL1]); } static ssize_t store_pwm_auto_channels(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); 
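/* Editor's note (assumption, not from the original source): LM93_PWM_CTL1
 * appears to hold the mask of temperature zones that drive this PWM output
 * in automatic mode; the store below clamps the raw value to the 8-bit
 * register range and writes it back under update_lock. */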
mutex_lock(&data->update_lock); data->block9[nr][LM93_PWM_CTL1] = SENSORS_LIMIT(val, 0, 255); lm93_write_byte(client, LM93_REG_PWM_CTL(nr,LM93_PWM_CTL1), data->block9[nr][LM93_PWM_CTL1]); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1_auto_channels, S_IWUSR | S_IRUGO, show_pwm_auto_channels, store_pwm_auto_channels, 0); static SENSOR_DEVICE_ATTR(pwm2_auto_channels, S_IWUSR | S_IRUGO, show_pwm_auto_channels, store_pwm_auto_channels, 1); static ssize_t show_pwm_auto_spinup_min(struct device *dev, struct device_attribute *attr,char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); u8 ctl3, ctl4; ctl3 = data->block9[nr][LM93_PWM_CTL3]; ctl4 = data->block9[nr][LM93_PWM_CTL4]; return sprintf(buf,"%d\n", LM93_PWM_FROM_REG(ctl3 & 0x0f, (ctl4 & 0x07) ? LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ)); } static ssize_t store_pwm_auto_spinup_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); u8 ctl3, ctl4; mutex_lock(&data->update_lock); ctl3 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3)); ctl4 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL4)); ctl3 = (ctl3 & 0xf0) | LM93_PWM_TO_REG(val, (ctl4 & 0x07) ? LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ); data->block9[nr][LM93_PWM_CTL3] = ctl3; lm93_write_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3), ctl3); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1_auto_spinup_min, S_IWUSR | S_IRUGO, show_pwm_auto_spinup_min, store_pwm_auto_spinup_min, 0); static SENSOR_DEVICE_ATTR(pwm2_auto_spinup_min, S_IWUSR | S_IRUGO, show_pwm_auto_spinup_min, store_pwm_auto_spinup_min, 1); static ssize_t show_pwm_auto_spinup_time(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",LM93_SPINUP_TIME_FROM_REG( data->block9[nr][LM93_PWM_CTL3])); } static ssize_t store_pwm_auto_spinup_time(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); u8 ctl3; mutex_lock(&data->update_lock); ctl3 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3)); ctl3 = (ctl3 & 0x1f) | (LM93_SPINUP_TIME_TO_REG(val) << 5 & 0xe0); data->block9[nr][LM93_PWM_CTL3] = ctl3; lm93_write_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3), ctl3); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1_auto_spinup_time, S_IWUSR | S_IRUGO, show_pwm_auto_spinup_time, store_pwm_auto_spinup_time, 0); static SENSOR_DEVICE_ATTR(pwm2_auto_spinup_time, S_IWUSR | S_IRUGO, show_pwm_auto_spinup_time, store_pwm_auto_spinup_time, 1); static ssize_t show_pwm_auto_prochot_ramp(struct device *dev, struct device_attribute *attr, char *buf) { struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n", LM93_RAMP_FROM_REG(data->pwm_ramp_ctl >> 4 & 0x0f)); } static ssize_t store_pwm_auto_prochot_ramp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = 
simple_strtoul(buf, NULL, 10); u8 ramp; mutex_lock(&data->update_lock); ramp = lm93_read_byte(client, LM93_REG_PWM_RAMP_CTL); ramp = (ramp & 0x0f) | (LM93_RAMP_TO_REG(val) << 4 & 0xf0); lm93_write_byte(client, LM93_REG_PWM_RAMP_CTL, ramp); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(pwm_auto_prochot_ramp, S_IRUGO | S_IWUSR, show_pwm_auto_prochot_ramp, store_pwm_auto_prochot_ramp); static ssize_t show_pwm_auto_vrdhot_ramp(struct device *dev, struct device_attribute *attr, char *buf) { struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n", LM93_RAMP_FROM_REG(data->pwm_ramp_ctl & 0x0f)); } static ssize_t store_pwm_auto_vrdhot_ramp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); u8 ramp; mutex_lock(&data->update_lock); ramp = lm93_read_byte(client, LM93_REG_PWM_RAMP_CTL); ramp = (ramp & 0xf0) | (LM93_RAMP_TO_REG(val) & 0x0f); lm93_write_byte(client, LM93_REG_PWM_RAMP_CTL, ramp); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(pwm_auto_vrdhot_ramp, S_IRUGO | S_IWUSR, show_pwm_auto_vrdhot_ramp, store_pwm_auto_vrdhot_ramp); static ssize_t show_vid(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",LM93_VID_FROM_REG(data->vid[nr])); } static SENSOR_DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL, 0); static SENSOR_DEVICE_ATTR(cpu1_vid, S_IRUGO, show_vid, NULL, 1); static ssize_t show_prochot(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",data->block4[nr].cur); } static SENSOR_DEVICE_ATTR(prochot1, S_IRUGO, show_prochot, NULL, 0); static SENSOR_DEVICE_ATTR(prochot2, S_IRUGO, show_prochot, NULL, 1); static ssize_t show_prochot_avg(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",data->block4[nr].avg); } static SENSOR_DEVICE_ATTR(prochot1_avg, S_IRUGO, show_prochot_avg, NULL, 0); static SENSOR_DEVICE_ATTR(prochot2_avg, S_IRUGO, show_prochot_avg, NULL, 1); static ssize_t show_prochot_max(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",data->prochot_max[nr]); } static ssize_t store_prochot_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); data->prochot_max[nr] = LM93_PROCHOT_TO_REG(val); lm93_write_byte(client, LM93_REG_PROCHOT_MAX(nr), data->prochot_max[nr]); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(prochot1_max, S_IWUSR | S_IRUGO, show_prochot_max, store_prochot_max, 0); static SENSOR_DEVICE_ATTR(prochot2_max, S_IWUSR | S_IRUGO, show_prochot_max, store_prochot_max, 1); static const u8 prochot_override_mask[] = { 0x80, 0x40 }; static ssize_t show_prochot_override(struct device *dev, struct device_attribute *attr, char *buf) { int nr = 
(to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n", (data->prochot_override & prochot_override_mask[nr]) ? 1 : 0); } static ssize_t store_prochot_override(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); if (val) data->prochot_override |= prochot_override_mask[nr]; else data->prochot_override &= (~prochot_override_mask[nr]); lm93_write_byte(client, LM93_REG_PROCHOT_OVERRIDE, data->prochot_override); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(prochot1_override, S_IWUSR | S_IRUGO, show_prochot_override, store_prochot_override, 0); static SENSOR_DEVICE_ATTR(prochot2_override, S_IWUSR | S_IRUGO, show_prochot_override, store_prochot_override, 1); static ssize_t show_prochot_interval(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); u8 tmp; if (nr==1) tmp = (data->prochot_interval & 0xf0) >> 4; else tmp = data->prochot_interval & 0x0f; return sprintf(buf,"%d\n",LM93_INTERVAL_FROM_REG(tmp)); } static ssize_t store_prochot_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); u8 tmp; mutex_lock(&data->update_lock); tmp = lm93_read_byte(client, LM93_REG_PROCHOT_INTERVAL); if (nr==1) tmp = (tmp & 0x0f) | (LM93_INTERVAL_TO_REG(val) << 4); else tmp = (tmp & 0xf0) | LM93_INTERVAL_TO_REG(val); data->prochot_interval = tmp; lm93_write_byte(client, LM93_REG_PROCHOT_INTERVAL, tmp); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(prochot1_interval, S_IWUSR | S_IRUGO, show_prochot_interval, store_prochot_interval, 0); static SENSOR_DEVICE_ATTR(prochot2_interval, S_IWUSR | S_IRUGO, show_prochot_interval, store_prochot_interval, 1); static ssize_t show_prochot_override_duty_cycle(struct device *dev, struct device_attribute *attr, char *buf) { struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",data->prochot_override & 0x0f); } static ssize_t store_prochot_override_duty_cycle(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); data->prochot_override = (data->prochot_override & 0xf0) | SENSORS_LIMIT(val, 0, 15); lm93_write_byte(client, LM93_REG_PROCHOT_OVERRIDE, data->prochot_override); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(prochot_override_duty_cycle, S_IRUGO | S_IWUSR, show_prochot_override_duty_cycle, store_prochot_override_duty_cycle); static ssize_t show_prochot_short(struct device *dev, struct device_attribute *attr, char *buf) { struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",(data->config & 0x10) ? 
1 : 0); } static ssize_t store_prochot_short(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); if (val) data->config |= 0x10; else data->config &= ~0x10; lm93_write_byte(client, LM93_REG_CONFIG, data->config); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(prochot_short, S_IRUGO | S_IWUSR, show_prochot_short, store_prochot_short); static ssize_t show_vrdhot(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n", data->block1.host_status_1 & (1 << (nr+4)) ? 1 : 0); } static SENSOR_DEVICE_ATTR(vrdhot1, S_IRUGO, show_vrdhot, NULL, 0); static SENSOR_DEVICE_ATTR(vrdhot2, S_IRUGO, show_vrdhot, NULL, 1); static ssize_t show_gpio(struct device *dev, struct device_attribute *attr, char *buf) { struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",LM93_GPI_FROM_REG(data->gpi)); } static DEVICE_ATTR(gpio, S_IRUGO, show_gpio, NULL); static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf) { struct lm93_data *data = lm93_update_device(dev); return sprintf(buf,"%d\n",LM93_ALARMS_FROM_REG(data->block1)); } static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); static struct attribute *lm93_attrs[] = { &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, &sensor_dev_attr_in8_input.dev_attr.attr, &sensor_dev_attr_in9_input.dev_attr.attr, &sensor_dev_attr_in10_input.dev_attr.attr, &sensor_dev_attr_in11_input.dev_attr.attr, &sensor_dev_attr_in12_input.dev_attr.attr, &sensor_dev_attr_in13_input.dev_attr.attr, &sensor_dev_attr_in14_input.dev_attr.attr, &sensor_dev_attr_in15_input.dev_attr.attr, &sensor_dev_attr_in16_input.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in5_min.dev_attr.attr, &sensor_dev_attr_in6_min.dev_attr.attr, &sensor_dev_attr_in7_min.dev_attr.attr, &sensor_dev_attr_in8_min.dev_attr.attr, &sensor_dev_attr_in9_min.dev_attr.attr, &sensor_dev_attr_in10_min.dev_attr.attr, &sensor_dev_attr_in11_min.dev_attr.attr, &sensor_dev_attr_in12_min.dev_attr.attr, &sensor_dev_attr_in13_min.dev_attr.attr, &sensor_dev_attr_in14_min.dev_attr.attr, &sensor_dev_attr_in15_min.dev_attr.attr, &sensor_dev_attr_in16_min.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in5_max.dev_attr.attr, &sensor_dev_attr_in6_max.dev_attr.attr, &sensor_dev_attr_in7_max.dev_attr.attr, &sensor_dev_attr_in8_max.dev_attr.attr, &sensor_dev_attr_in9_max.dev_attr.attr, &sensor_dev_attr_in10_max.dev_attr.attr, &sensor_dev_attr_in11_max.dev_attr.attr, &sensor_dev_attr_in12_max.dev_attr.attr, &sensor_dev_attr_in13_max.dev_attr.attr, &sensor_dev_attr_in14_max.dev_attr.attr, &sensor_dev_attr_in15_max.dev_attr.attr, &sensor_dev_attr_in16_max.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, 
&sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp1_auto_base.dev_attr.attr, &sensor_dev_attr_temp2_auto_base.dev_attr.attr, &sensor_dev_attr_temp3_auto_base.dev_attr.attr, &sensor_dev_attr_temp1_auto_boost.dev_attr.attr, &sensor_dev_attr_temp2_auto_boost.dev_attr.attr, &sensor_dev_attr_temp3_auto_boost.dev_attr.attr, &sensor_dev_attr_temp1_auto_boost_hyst.dev_attr.attr, &sensor_dev_attr_temp2_auto_boost_hyst.dev_attr.attr, &sensor_dev_attr_temp3_auto_boost_hyst.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset1.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset2.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset3.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset4.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset5.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset6.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset7.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset8.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset9.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset10.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset11.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset12.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset1.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset2.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset3.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset4.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset5.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset6.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset7.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset8.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset9.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset10.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset11.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset12.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset1.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset2.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset3.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset4.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset5.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset6.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset7.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset8.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset9.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset10.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset11.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset12.dev_attr.attr, &sensor_dev_attr_temp1_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_temp2_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_temp3_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset_hyst.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset_hyst.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset_hyst.dev_attr.attr, &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan3_input.dev_attr.attr, &sensor_dev_attr_fan4_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan3_min.dev_attr.attr, &sensor_dev_attr_fan4_min.dev_attr.attr, &sensor_dev_attr_fan1_smart_tach.dev_attr.attr, &sensor_dev_attr_fan2_smart_tach.dev_attr.attr, &sensor_dev_attr_fan3_smart_tach.dev_attr.attr, &sensor_dev_attr_fan4_smart_tach.dev_attr.attr, &sensor_dev_attr_pwm1.dev_attr.attr, 
&sensor_dev_attr_pwm2.dev_attr.attr, &sensor_dev_attr_pwm1_enable.dev_attr.attr, &sensor_dev_attr_pwm2_enable.dev_attr.attr, &sensor_dev_attr_pwm1_freq.dev_attr.attr, &sensor_dev_attr_pwm2_freq.dev_attr.attr, &sensor_dev_attr_pwm1_auto_channels.dev_attr.attr, &sensor_dev_attr_pwm2_auto_channels.dev_attr.attr, &sensor_dev_attr_pwm1_auto_spinup_min.dev_attr.attr, &sensor_dev_attr_pwm2_auto_spinup_min.dev_attr.attr, &sensor_dev_attr_pwm1_auto_spinup_time.dev_attr.attr, &sensor_dev_attr_pwm2_auto_spinup_time.dev_attr.attr, &dev_attr_pwm_auto_prochot_ramp.attr, &dev_attr_pwm_auto_vrdhot_ramp.attr, &sensor_dev_attr_cpu0_vid.dev_attr.attr, &sensor_dev_attr_cpu1_vid.dev_attr.attr, &sensor_dev_attr_prochot1.dev_attr.attr, &sensor_dev_attr_prochot2.dev_attr.attr, &sensor_dev_attr_prochot1_avg.dev_attr.attr, &sensor_dev_attr_prochot2_avg.dev_attr.attr, &sensor_dev_attr_prochot1_max.dev_attr.attr, &sensor_dev_attr_prochot2_max.dev_attr.attr, &sensor_dev_attr_prochot1_override.dev_attr.attr, &sensor_dev_attr_prochot2_override.dev_attr.attr, &sensor_dev_attr_prochot1_interval.dev_attr.attr, &sensor_dev_attr_prochot2_interval.dev_attr.attr, &dev_attr_prochot_override_duty_cycle.attr, &dev_attr_prochot_short.attr, &sensor_dev_attr_vrdhot1.dev_attr.attr, &sensor_dev_attr_vrdhot2.dev_attr.attr, &dev_attr_gpio.attr, &dev_attr_alarms.attr, NULL }; static struct attribute_group lm93_attr_grp = { .attrs = lm93_attrs, }; static void lm93_init_client(struct i2c_client *client) { int i; u8 reg; /* configure VID pin input thresholds */ reg = lm93_read_byte(client, LM93_REG_GPI_VID_CTL); lm93_write_byte(client, LM93_REG_GPI_VID_CTL, reg | (vid_agtl ? 0x03 : 0x00)); if (init) { /* enable #ALERT pin */ reg = lm93_read_byte(client, LM93_REG_CONFIG); lm93_write_byte(client, LM93_REG_CONFIG, reg | 0x08); /* enable ASF mode for BMC status registers */ reg = lm93_read_byte(client, LM93_REG_STATUS_CONTROL); lm93_write_byte(client, LM93_REG_STATUS_CONTROL, reg | 0x02); /* set sleep state to S0 */ lm93_write_byte(client, LM93_REG_SLEEP_CONTROL, 0); /* unmask #VRDHOT and dynamic VCCP (if nec) error events */ reg = lm93_read_byte(client, LM93_REG_MISC_ERR_MASK); reg &= ~0x03; reg &= ~(vccp_limit_type[0] ? 0x10 : 0); reg &= ~(vccp_limit_type[1] ? 
0x20 : 0); lm93_write_byte(client, LM93_REG_MISC_ERR_MASK, reg); } /* start monitoring */ reg = lm93_read_byte(client, LM93_REG_CONFIG); lm93_write_byte(client, LM93_REG_CONFIG, reg | 0x01); /* spin until ready */ for (i=0; i<20; i++) { msleep(10); if ((lm93_read_byte(client, LM93_REG_CONFIG) & 0x80) == 0x80) return; } dev_warn(&client->dev,"timed out waiting for sensor " "chip to signal ready!\n"); } /* Return 0 if detection is successful, -ENODEV otherwise */ static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int mfr, ver; const char *name; if (!i2c_check_functionality(adapter, LM93_SMBUS_FUNC_MIN)) return -ENODEV; /* detection */ mfr = lm93_read_byte(client, LM93_REG_MFR_ID); if (mfr != 0x01) { dev_dbg(&adapter->dev, "detect failed, bad manufacturer id 0x%02x!\n", mfr); return -ENODEV; } ver = lm93_read_byte(client, LM93_REG_VER); switch (ver) { case LM93_MFR_ID: case LM93_MFR_ID_PROTOTYPE: name = "lm93"; break; case LM94_MFR_ID_2: case LM94_MFR_ID: case LM94_MFR_ID_PROTOTYPE: name = "lm94"; break; default: dev_dbg(&adapter->dev, "detect failed, bad version id 0x%02x!\n", ver); return -ENODEV; } strlcpy(info->type, name, I2C_NAME_SIZE); dev_dbg(&adapter->dev,"loading %s at %d,0x%02x\n", client->name, i2c_adapter_id(client->adapter), client->addr); return 0; } static int lm93_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct lm93_data *data; int err, func; void (*update)(struct lm93_data *, struct i2c_client *); /* choose update routine based on bus capabilities */ func = i2c_get_functionality(client->adapter); if (((LM93_SMBUS_FUNC_FULL & func) == LM93_SMBUS_FUNC_FULL) && (!disable_block)) { dev_dbg(&client->dev, "using SMBus block data transactions\n"); update = lm93_update_client_full; } else if ((LM93_SMBUS_FUNC_MIN & func) == LM93_SMBUS_FUNC_MIN) { dev_dbg(&client->dev, "disabled SMBus block data " "transactions\n"); update = lm93_update_client_min; } else { dev_dbg(&client->dev, "detect failed, " "smbus byte and/or word data not supported!\n"); err = -ENODEV; goto err_out; } data = kzalloc(sizeof(struct lm93_data), GFP_KERNEL); if (!data) { dev_dbg(&client->dev, "out of memory!\n"); err = -ENOMEM; goto err_out; } i2c_set_clientdata(client, data); /* housekeeping */ data->valid = 0; data->update = update; mutex_init(&data->update_lock); /* initialize the chip */ lm93_init_client(client); err = sysfs_create_group(&client->dev.kobj, &lm93_attr_grp); if (err) goto err_free; /* Register hwmon driver class */ data->hwmon_dev = hwmon_device_register(&client->dev); if ( !IS_ERR(data->hwmon_dev)) return 0; err = PTR_ERR(data->hwmon_dev); dev_err(&client->dev, "error registering hwmon device.\n"); sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp); err_free: kfree(data); err_out: return err; } static int lm93_remove(struct i2c_client *client) { struct lm93_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp); kfree(data); return 0; } static const struct i2c_device_id lm93_id[] = { { "lm93", 0 }, { "lm94", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lm93_id); static struct i2c_driver lm93_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "lm93", }, .probe = lm93_probe, .remove = lm93_remove, .id_table = lm93_id, .detect = lm93_detect, .address_list = normal_i2c, }; static int __init lm93_init(void) { return i2c_add_driver(&lm93_driver); } static void __exit lm93_exit(void) { i2c_del_driver(&lm93_driver); } 
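/*
 * Usage sketch (editor's addition, not part of the original driver): after a
 * successful probe, the attributes in lm93_attr_grp are created on the bound
 * i2c client device, e.g. /sys/bus/i2c/devices/<bus>-<addr>/pwm1_enable, and
 * are reachable through the hwmon class device registered above.  Writing
 * "1" to pwm1_enable selects manual PWM control and "2" returns the channel
 * to automatic control, mirroring the switch() in store_pwm_enable().  Exact
 * paths depend on the running kernel and are given for illustration only.
 */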
MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>, " "Hans J. Koch <hjk@hansjkoch.de>"); MODULE_DESCRIPTION("LM93 driver"); MODULE_LICENSE("GPL"); module_init(lm93_init); module_exit(lm93_exit);
gpl-2.0
carepack/android_kernel_google_msm
drivers/infiniband/hw/nes/nes_hw.c
3453
132345
/* * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/if_vlan.h> #include <linux/inet_lro.h> #include <linux/slab.h> #include "nes.h" static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR; module_param(nes_lro_max_aggr, uint, 0444); MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation"); static int wide_ppm_offset; module_param(wide_ppm_offset, int, 0644); MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm"); static u32 crit_err_count; u32 int_mod_timer_init; u32 int_mod_cq_depth_256; u32 int_mod_cq_depth_128; u32 int_mod_cq_depth_32; u32 int_mod_cq_depth_24; u32 int_mod_cq_depth_16; u32 int_mod_cq_depth_4; u32 int_mod_cq_depth_1; static const u8 nes_max_critical_error_count = 100; #include "nes_cm.h" static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq); static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count); static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, struct nes_adapter *nesadapter, u8 OneG_Mode); static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq); static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq); static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq); static void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe); static void process_critical_error(struct nes_device *nesdev); static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number); static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode); static void nes_terminate_timeout(unsigned long context); static void nes_terminate_start_timer(struct nes_qp *nesqp); #ifdef CONFIG_INFINIBAND_NES_DEBUG static unsigned char *nes_iwarp_state_str[] = { "Non-Existent", "Idle", "RTS", "Closing", "RSVD1", "Terminate", "Error", "RSVD2", }; static unsigned char *nes_tcp_state_str[] = { "Non-Existent", "Closed", "Listen", "SYN Sent", "SYN Rcvd", "Established", "Close Wait", "FIN Wait 1", 
"Closing", "Last Ack", "FIN Wait 2", "Time Wait", "RSVD1", "RSVD2", "RSVD3", "RSVD4", }; #endif static inline void print_ip(struct nes_cm_node *cm_node) { unsigned char *rem_addr; if (cm_node) { rem_addr = (unsigned char *)&cm_node->rem_addr; printk(KERN_ERR PFX "Remote IP addr: %pI4\n", rem_addr); } } /** * nes_nic_init_timer_defaults */ void nes_nic_init_timer_defaults(struct nes_device *nesdev, u8 jumbomode) { unsigned long flags; struct nes_adapter *nesadapter = nesdev->nesadapter; struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); shared_timer->timer_in_use_min = NES_NIC_FAST_TIMER_LOW; shared_timer->timer_in_use_max = NES_NIC_FAST_TIMER_HIGH; if (jumbomode) { shared_timer->threshold_low = DEFAULT_JUMBO_NES_QL_LOW; shared_timer->threshold_target = DEFAULT_JUMBO_NES_QL_TARGET; shared_timer->threshold_high = DEFAULT_JUMBO_NES_QL_HIGH; } else { shared_timer->threshold_low = DEFAULT_NES_QL_LOW; shared_timer->threshold_target = DEFAULT_NES_QL_TARGET; shared_timer->threshold_high = DEFAULT_NES_QL_HIGH; } /* todo use netdev->mtu to set thresholds */ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags); } /** * nes_nic_init_timer */ static void nes_nic_init_timer(struct nes_device *nesdev) { unsigned long flags; struct nes_adapter *nesadapter = nesdev->nesadapter; struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); if (shared_timer->timer_in_use_old == 0) { nesdev->deepcq_count = 0; shared_timer->timer_direction_upward = 0; shared_timer->timer_direction_downward = 0; shared_timer->timer_in_use = NES_NIC_FAST_TIMER; shared_timer->timer_in_use_old = 0; } if (shared_timer->timer_in_use != shared_timer->timer_in_use_old) { shared_timer->timer_in_use_old = shared_timer->timer_in_use; nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x80000000 | ((u32)(shared_timer->timer_in_use*8))); } /* todo use netdev->mtu to set thresholds */ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags); } /** * nes_nic_tune_timer */ static void nes_nic_tune_timer(struct nes_device *nesdev) { unsigned long flags; struct nes_adapter *nesadapter = nesdev->nesadapter; struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; u16 cq_count = nesdev->currcq_count; spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); if (shared_timer->cq_count_old <= cq_count) shared_timer->cq_direction_downward = 0; else shared_timer->cq_direction_downward++; shared_timer->cq_count_old = cq_count; if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) { if (cq_count <= shared_timer->threshold_low && shared_timer->threshold_low > 4) { shared_timer->threshold_low = shared_timer->threshold_low/2; shared_timer->cq_direction_downward=0; nesdev->currcq_count = 0; spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags); return; } } if (cq_count > 1) { nesdev->deepcq_count += cq_count; if (cq_count <= shared_timer->threshold_low) { /* increase timer gently */ shared_timer->timer_direction_upward++; shared_timer->timer_direction_downward = 0; } else if (cq_count <= shared_timer->threshold_target) { /* balanced */ shared_timer->timer_direction_upward = 0; shared_timer->timer_direction_downward = 0; } else if (cq_count <= shared_timer->threshold_high) { /* decrease timer gently */ shared_timer->timer_direction_downward++; shared_timer->timer_direction_upward = 0; } else if (cq_count <= (shared_timer->threshold_high) * 2) { shared_timer->timer_in_use 
-= 2; shared_timer->timer_direction_upward = 0; shared_timer->timer_direction_downward++; } else { shared_timer->timer_in_use -= 4; shared_timer->timer_direction_upward = 0; shared_timer->timer_direction_downward++; } if (shared_timer->timer_direction_upward > 3 ) { /* using history */ shared_timer->timer_in_use += 3; shared_timer->timer_direction_upward = 0; shared_timer->timer_direction_downward = 0; } if (shared_timer->timer_direction_downward > 5) { /* using history */ shared_timer->timer_in_use -= 4 ; shared_timer->timer_direction_downward = 0; shared_timer->timer_direction_upward = 0; } } /* boundary checking */ if (shared_timer->timer_in_use > shared_timer->threshold_high) shared_timer->timer_in_use = shared_timer->threshold_high; else if (shared_timer->timer_in_use < shared_timer->threshold_low) shared_timer->timer_in_use = shared_timer->threshold_low; nesdev->currcq_count = 0; spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags); } /** * nes_init_adapter - initialize adapter */ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) { struct nes_adapter *nesadapter = NULL; unsigned long num_pds; u32 u32temp; u32 port_count; u16 max_rq_wrs; u16 max_sq_wrs; u32 max_mr; u32 max_256pbl; u32 max_4kpbl; u32 max_qp; u32 max_irrq; u32 max_cq; u32 hte_index_mask; u32 adapter_size; u32 arp_table_size; u16 vendor_id; u16 device_id; u8 OneG_Mode; u8 func_index; /* search the list of existing adapters */ list_for_each_entry(nesadapter, &nes_adapter_list, list) { nes_debug(NES_DBG_INIT, "Searching Adapter list for PCI devfn = 0x%X," " adapter PCI slot/bus = %u/%u, pci devices PCI slot/bus = %u/%u, .\n", nesdev->pcidev->devfn, PCI_SLOT(nesadapter->devfn), nesadapter->bus_number, PCI_SLOT(nesdev->pcidev->devfn), nesdev->pcidev->bus->number ); if ((PCI_SLOT(nesadapter->devfn) == PCI_SLOT(nesdev->pcidev->devfn)) && (nesadapter->bus_number == nesdev->pcidev->bus->number)) { nesadapter->ref_count++; return nesadapter; } } /* no adapter found */ num_pds = pci_resource_len(nesdev->pcidev, BAR_1) >> PAGE_SHIFT; if ((hw_rev != NE020_REV) && (hw_rev != NE020_REV1)) { nes_debug(NES_DBG_INIT, "NE020 driver detected unknown hardware revision 0x%x\n", hw_rev); return NULL; } nes_debug(NES_DBG_INIT, "Determine Soft Reset, QP_control=0x%x, CPU0=0x%x, CPU1=0x%x, CPU2=0x%x\n", nes_read_indexed(nesdev, NES_IDX_QP_CONTROL + PCI_FUNC(nesdev->pcidev->devfn) * 8), nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS), nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS + 4), nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS + 8)); nes_debug(NES_DBG_INIT, "Reset and init NE020\n"); if ((port_count = nes_reset_adapter_ne020(nesdev, &OneG_Mode)) == 0) return NULL; max_qp = nes_read_indexed(nesdev, NES_IDX_QP_CTX_SIZE); nes_debug(NES_DBG_INIT, "QP_CTX_SIZE=%u\n", max_qp); u32temp = nes_read_indexed(nesdev, NES_IDX_QUAD_HASH_TABLE_SIZE); if (max_qp > ((u32)1 << (u32temp & 0x001f))) { nes_debug(NES_DBG_INIT, "Reducing Max QPs to %u due to hash table size = 0x%08X\n", max_qp, u32temp); max_qp = (u32)1 << (u32temp & 0x001f); } hte_index_mask = ((u32)1 << ((u32temp & 0x001f)+1))-1; nes_debug(NES_DBG_INIT, "Max QP = %u, hte_index_mask = 0x%08X.\n", max_qp, hte_index_mask); u32temp = nes_read_indexed(nesdev, NES_IDX_IRRQ_COUNT); max_irrq = 1 << (u32temp & 0x001f); if (max_qp > max_irrq) { max_qp = max_irrq; nes_debug(NES_DBG_INIT, "Reducing Max QPs to %u due to Available Q1s.\n", max_qp); } /* there should be no reason to allocate more pds than qps */ if (num_pds > max_qp) num_pds = max_qp; u32temp = 
nes_read_indexed(nesdev, NES_IDX_MRT_SIZE); max_mr = (u32)8192 << (u32temp & 0x7); u32temp = nes_read_indexed(nesdev, NES_IDX_PBL_REGION_SIZE); max_256pbl = (u32)1 << (u32temp & 0x0000001f); max_4kpbl = (u32)1 << ((u32temp >> 16) & 0x0000001f); max_cq = nes_read_indexed(nesdev, NES_IDX_CQ_CTX_SIZE); u32temp = nes_read_indexed(nesdev, NES_IDX_ARP_CACHE_SIZE); arp_table_size = 1 << u32temp; adapter_size = (sizeof(struct nes_adapter) + (sizeof(unsigned long)-1)) & (~(sizeof(unsigned long)-1)); adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp); adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr); adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq); adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds); adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size); adapter_size += sizeof(struct nes_qp **) * max_qp; /* allocate a new adapter struct */ nesadapter = kzalloc(adapter_size, GFP_KERNEL); if (nesadapter == NULL) { return NULL; } nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n", nesadapter, (u32)sizeof(struct nes_adapter), adapter_size); if (nes_read_eeprom_values(nesdev, nesadapter)) { printk(KERN_ERR PFX "Unable to read EEPROM data.\n"); kfree(nesadapter); return NULL; } nesadapter->vendor_id = (((u32) nesadapter->mac_addr_high) << 8) | (nesadapter->mac_addr_low >> 24); pci_bus_read_config_word(nesdev->pcidev->bus, nesdev->pcidev->devfn, PCI_DEVICE_ID, &device_id); nesadapter->vendor_part_id = device_id; if (nes_init_serdes(nesdev, hw_rev, port_count, nesadapter, OneG_Mode)) { kfree(nesadapter); return NULL; } nes_init_csr_ne020(nesdev, hw_rev, port_count); memset(nesadapter->pft_mcast_map, 255, sizeof nesadapter->pft_mcast_map); /* populate the new nesadapter */ nesadapter->devfn = nesdev->pcidev->devfn; nesadapter->bus_number = nesdev->pcidev->bus->number; nesadapter->ref_count = 1; nesadapter->timer_int_req = 0xffff0000; nesadapter->OneG_Mode = OneG_Mode; nesadapter->doorbell_start = nesdev->doorbell_region; /* nesadapter->tick_delta = clk_divisor; */ nesadapter->hw_rev = hw_rev; nesadapter->port_count = port_count; nesadapter->max_qp = max_qp; nesadapter->hte_index_mask = hte_index_mask; nesadapter->max_irrq = max_irrq; nesadapter->max_mr = max_mr; nesadapter->max_256pbl = max_256pbl - 1; nesadapter->max_4kpbl = max_4kpbl - 1; nesadapter->max_cq = max_cq; nesadapter->free_256pbl = max_256pbl - 1; nesadapter->free_4kpbl = max_4kpbl - 1; nesadapter->max_pd = num_pds; nesadapter->arp_table_size = arp_table_size; nesadapter->et_pkt_rate_low = NES_TIMER_ENABLE_LIMIT; if (nes_drv_opt & NES_DRV_OPT_DISABLE_INT_MOD) { nesadapter->et_use_adaptive_rx_coalesce = 0; nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT; nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; } else { nesadapter->et_use_adaptive_rx_coalesce = 1; nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC; nesadapter->et_rx_coalesce_usecs_irq = 0; printk(PFX "%s: Using Adaptive Interrupt Moderation\n", __func__); } /* Setup and enable the periodic timer */ if (nesadapter->et_rx_coalesce_usecs_irq) nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq * 8))); else nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x00000000); nesadapter->base_pd = 1; nesadapter->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS; nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter) [(sizeof(struct 
nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]); nesadapter->allocated_cqs = &nesadapter->allocated_qps[BITS_TO_LONGS(max_qp)]; nesadapter->allocated_mrs = &nesadapter->allocated_cqs[BITS_TO_LONGS(max_cq)]; nesadapter->allocated_pds = &nesadapter->allocated_mrs[BITS_TO_LONGS(max_mr)]; nesadapter->allocated_arps = &nesadapter->allocated_pds[BITS_TO_LONGS(num_pds)]; nesadapter->qp_table = (struct nes_qp **)(&nesadapter->allocated_arps[BITS_TO_LONGS(arp_table_size)]); /* mark the usual suspect QPs, MR and CQs as in use */ for (u32temp = 0; u32temp < NES_FIRST_QPN; u32temp++) { set_bit(u32temp, nesadapter->allocated_qps); set_bit(u32temp, nesadapter->allocated_cqs); } set_bit(0, nesadapter->allocated_mrs); for (u32temp = 0; u32temp < 20; u32temp++) set_bit(u32temp, nesadapter->allocated_pds); u32temp = nes_read_indexed(nesdev, NES_IDX_QP_MAX_CFG_SIZES); max_rq_wrs = ((u32temp >> 8) & 3); switch (max_rq_wrs) { case 0: max_rq_wrs = 4; break; case 1: max_rq_wrs = 16; break; case 2: max_rq_wrs = 32; break; case 3: max_rq_wrs = 512; break; } max_sq_wrs = (u32temp & 3); switch (max_sq_wrs) { case 0: max_sq_wrs = 4; break; case 1: max_sq_wrs = 16; break; case 2: max_sq_wrs = 32; break; case 3: max_sq_wrs = 512; break; } nesadapter->max_qp_wr = min(max_rq_wrs, max_sq_wrs); nesadapter->max_irrq_wr = (u32temp >> 16) & 3; nesadapter->max_sge = 4; nesadapter->max_cqe = 32766; if (nes_read_eeprom_values(nesdev, nesadapter)) { printk(KERN_ERR PFX "Unable to read EEPROM data.\n"); kfree(nesadapter); return NULL; } u32temp = nes_read_indexed(nesdev, NES_IDX_TCP_TIMER_CONFIG); nes_write_indexed(nesdev, NES_IDX_TCP_TIMER_CONFIG, (u32temp & 0xff000000) | (nesadapter->tcp_timer_core_clk_divisor & 0x00ffffff)); /* setup port configuration */ if (nesadapter->port_count == 1) { nesadapter->log_port = 0x00000000; if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000002); else nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003); } else { if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) { nesadapter->log_port = 0x000000D8; } else { if (nesadapter->port_count == 2) nesadapter->log_port = 0x00000044; else nesadapter->log_port = 0x000000e4; } nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003); } nes_write_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT, nesadapter->log_port); nes_debug(NES_DBG_INIT, "Probe time, LOG2PHY=%u\n", nes_read_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT)); spin_lock_init(&nesadapter->resource_lock); spin_lock_init(&nesadapter->phy_lock); spin_lock_init(&nesadapter->pbl_lock); spin_lock_init(&nesadapter->periodic_timer_lock); INIT_LIST_HEAD(&nesadapter->nesvnic_list[0]); INIT_LIST_HEAD(&nesadapter->nesvnic_list[1]); INIT_LIST_HEAD(&nesadapter->nesvnic_list[2]); INIT_LIST_HEAD(&nesadapter->nesvnic_list[3]); if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) { u32 pcs_control_status0, pcs_control_status1; u32 reset_value; u32 i = 0; u32 int_cnt = 0; u32 ext_cnt = 0; unsigned long flags; u32 j = 0; pcs_control_status0 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0); pcs_control_status1 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200); for (i = 0; i < NES_MAX_LINK_CHECK; i++) { pcs_control_status0 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0); pcs_control_status1 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200); if ((0x0F000100 == (pcs_control_status0 & 0x0F000100)) || (0x0F000100 == (pcs_control_status1 & 0x0F000100))) int_cnt++; 
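/* Editor's note (assumption about intent): int_cnt counts polls in this
 * settle loop where either port's PCS control/status word shows the
 * 0x0F000100 pattern; the block following the loop re-programs the serdes
 * and issues another soft reset when that is observed more than once. */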
msleep(1); } if (int_cnt > 1) { spin_lock_irqsave(&nesadapter->phy_lock, flags); nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8); mh_detected++; reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); reset_value |= 0x0000003d; nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value); while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) && (j++ < 5000)); spin_unlock_irqrestore(&nesadapter->phy_lock, flags); pcs_control_status0 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0); pcs_control_status1 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200); for (i = 0; i < NES_MAX_LINK_CHECK; i++) { pcs_control_status0 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0); pcs_control_status1 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200); if ((0x0F000100 == (pcs_control_status0 & 0x0F000100)) || (0x0F000100 == (pcs_control_status1 & 0x0F000100))) { if (++ext_cnt > int_cnt) { spin_lock_irqsave(&nesadapter->phy_lock, flags); nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088); mh_detected++; reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); reset_value |= 0x0000003d; nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value); while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) && (j++ < 5000)); spin_unlock_irqrestore(&nesadapter->phy_lock, flags); break; } } msleep(1); } } } if (nesadapter->hw_rev == NE020_REV) { init_timer(&nesadapter->mh_timer); nesadapter->mh_timer.function = nes_mh_fix; nesadapter->mh_timer.expires = jiffies + (HZ/5); /* 1 second */ nesadapter->mh_timer.data = (unsigned long)nesdev; add_timer(&nesadapter->mh_timer); } else { nes_write32(nesdev->regs+NES_INTF_INT_STAT, 0x0f000000); } init_timer(&nesadapter->lc_timer); nesadapter->lc_timer.function = nes_clc; nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */ nesadapter->lc_timer.data = (unsigned long)nesdev; add_timer(&nesadapter->lc_timer); list_add_tail(&nesadapter->list, &nes_adapter_list); for (func_index = 0; func_index < 8; func_index++) { pci_bus_read_config_word(nesdev->pcidev->bus, PCI_DEVFN(PCI_SLOT(nesdev->pcidev->devfn), func_index), 0, &vendor_id); if (vendor_id == 0xffff) break; } nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __func__, func_index, pci_name(nesdev->pcidev)); nesadapter->adapter_fcn_count = func_index; return nesadapter; } /** * nes_reset_adapter_ne020 */ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode) { u32 port_count; u32 u32temp; u32 i; u32temp = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); port_count = ((u32temp & 0x00000300) >> 8) + 1; /* TODO: assuming that both SERDES are set the same for now */ *OneG_Mode = (u32temp & 0x00003c00) ? 
0 : 1; nes_debug(NES_DBG_INIT, "Initial Software Reset = 0x%08X, port_count=%u\n", u32temp, port_count); if (*OneG_Mode) nes_debug(NES_DBG_INIT, "Running in 1G mode.\n"); u32temp &= 0xff00ffc0; switch (port_count) { case 1: u32temp |= 0x00ee0000; break; case 2: u32temp |= 0x00cc0000; break; case 4: u32temp |= 0x00000000; break; default: return 0; break; } /* check and do full reset if needed */ if (nes_read_indexed(nesdev, NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8))) { nes_debug(NES_DBG_INIT, "Issuing Full Soft reset = 0x%08X\n", u32temp | 0xd); nes_write32(nesdev->regs+NES_SOFTWARE_RESET, u32temp | 0xd); i = 0; while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000) mdelay(1); if (i > 10000) { nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n"); return 0; } i = 0; while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000) mdelay(1); if (i > 10000) { printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n", nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS)); return 0; } } /* port reset */ switch (port_count) { case 1: u32temp |= 0x00ee0010; break; case 2: u32temp |= 0x00cc0030; break; case 4: u32temp |= 0x00000030; break; } nes_debug(NES_DBG_INIT, "Issuing Port Soft reset = 0x%08X\n", u32temp | 0xd); nes_write32(nesdev->regs+NES_SOFTWARE_RESET, u32temp | 0xd); i = 0; while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000) mdelay(1); if (i > 10000) { nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n"); return 0; } /* serdes 0 */ i = 0; while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0) & 0x0000000f)) != 0x0000000f) && i++ < 5000) mdelay(1); if (i > 5000) { nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp); return 0; } /* serdes 1 */ if (port_count > 1) { i = 0; while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1) & 0x0000000f)) != 0x0000000f) && i++ < 5000) mdelay(1); if (i > 5000) { nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp); return 0; } } return port_count; } /** * nes_init_serdes */ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, struct nes_adapter *nesadapter, u8 OneG_Mode) { int i; u32 u32temp; u32 sds; if (hw_rev != NE020_REV) { /* init serdes 0 */ switch (nesadapter->phy_type[0]) { case NES_PHY_TYPE_CX4: if (wide_ppm_offset) nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA); else nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); break; case NES_PHY_TYPE_KR: nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000); break; case NES_PHY_TYPE_PUMA_1G: nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0); sds |= 0x00000100; nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds); break; default: nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); break; } if (!OneG_Mode) nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000); if (port_count < 2) return 0; /* init serdes 1 */ if (!(OneG_Mode && (nesadapter->phy_type[1] != NES_PHY_TYPE_PUMA_1G))) nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF); switch (nesadapter->phy_type[1]) { case NES_PHY_TYPE_ARGUS: case NES_PHY_TYPE_SFP_D: nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000); 
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
			break;
		case NES_PHY_TYPE_CX4:
			if (wide_ppm_offset)
				nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA);
			break;
		case NES_PHY_TYPE_KR:
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
			break;
		case NES_PHY_TYPE_PUMA_1G:
			sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
			sds |= 0x000000100;
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds);
		}
		if (!OneG_Mode) {
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000);
			sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
			sds &= 0xFFFFFFBF;
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds);
		}
	} else {
		/* init serdes 0 */
		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
		i = 0;
		while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0) & 0x0000000f)) != 0x0000000f) && i++ < 5000)
			mdelay(1);
		if (i > 5000) {
			nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp);
			return 1;
		}
		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7);
		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000);
		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE0, 0x0ff00000);
		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET0, 0x00000000);
		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS0, 0x00000000);
		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0, 0x00000000);
		if (OneG_Mode)
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0182222);
		else
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222);
		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff);
		if (port_count > 1) {
			/* init serdes 1 */
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x00000048);
			i = 0;
			while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1) & 0x0000000f)) != 0x0000000f) && (i++ < 5000))
				mdelay(1);
			if (i > 5000) {
				printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp);
				/* return 1; */
			}
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x000bdef7);
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE1, 0x9ce73000);
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE1, 0x0ff00000);
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET1, 0x00000000);
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS1, 0x00000000);
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL1, 0x00000000);
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL1, 0xf0002222);
			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000ff);
		}
	}
	return 0;
}

/**
 * nes_init_csr_ne020
 * Initialize registers for ne020 hardware
 */
static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count)
{
	u32 u32temp;

	nes_debug(NES_DBG_INIT, "port_count=%d\n", port_count);

	nes_write_indexed(nesdev, 0x000001E4, 0x00000007);
	/* nes_write_indexed(nesdev, 0x000001E8, 0x000208C4); */
	nes_write_indexed(nesdev, 0x000001E8, 0x00020874);
	nes_write_indexed(nesdev, 0x000001D8, 0x00048002);
	/* nes_write_indexed(nesdev, 0x000001D8, 0x0004B002); */
	nes_write_indexed(nesdev, 0x000001FC, 0x00050005);
	nes_write_indexed(nesdev, 0x00000600, 0x55555555);
	nes_write_indexed(nesdev, 0x00000604, 0x55555555);

	/* TODO: move these MAC register settings to NIC bringup */
	nes_write_indexed(nesdev, 0x00002000, 0x00000001);
	nes_write_indexed(nesdev, 0x00002004, 0x00000001);
	nes_write_indexed(nesdev, 0x00002008, 0x0000FFFF);
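	/*
	 * The raw offsets 0x2000-0x201C appear to be the per-port MAC setup
	 * block for port 0; the same register pattern is repeated below at
	 * 0x2200, 0x2400 and 0x2600 for the remaining ports (see the
	 * existing TODO above about moving these to NIC bringup).
	 */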
nes_write_indexed(nesdev, 0x0000200C, 0x00000001); nes_write_indexed(nesdev, 0x00002010, 0x000003c1); nes_write_indexed(nesdev, 0x0000201C, 0x75345678); if (port_count > 1) { nes_write_indexed(nesdev, 0x00002200, 0x00000001); nes_write_indexed(nesdev, 0x00002204, 0x00000001); nes_write_indexed(nesdev, 0x00002208, 0x0000FFFF); nes_write_indexed(nesdev, 0x0000220C, 0x00000001); nes_write_indexed(nesdev, 0x00002210, 0x000003c1); nes_write_indexed(nesdev, 0x0000221C, 0x75345678); nes_write_indexed(nesdev, 0x00000908, 0x20000001); } if (port_count > 2) { nes_write_indexed(nesdev, 0x00002400, 0x00000001); nes_write_indexed(nesdev, 0x00002404, 0x00000001); nes_write_indexed(nesdev, 0x00002408, 0x0000FFFF); nes_write_indexed(nesdev, 0x0000240C, 0x00000001); nes_write_indexed(nesdev, 0x00002410, 0x000003c1); nes_write_indexed(nesdev, 0x0000241C, 0x75345678); nes_write_indexed(nesdev, 0x00000910, 0x20000001); nes_write_indexed(nesdev, 0x00002600, 0x00000001); nes_write_indexed(nesdev, 0x00002604, 0x00000001); nes_write_indexed(nesdev, 0x00002608, 0x0000FFFF); nes_write_indexed(nesdev, 0x0000260C, 0x00000001); nes_write_indexed(nesdev, 0x00002610, 0x000003c1); nes_write_indexed(nesdev, 0x0000261C, 0x75345678); nes_write_indexed(nesdev, 0x00000918, 0x20000001); } nes_write_indexed(nesdev, 0x00005000, 0x00018000); /* nes_write_indexed(nesdev, 0x00005000, 0x00010000); */ nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG1, (wqm_quanta << 1) | 0x00000001); nes_write_indexed(nesdev, 0x00005008, 0x1F1F1F1F); nes_write_indexed(nesdev, 0x00005010, 0x1F1F1F1F); nes_write_indexed(nesdev, 0x00005018, 0x1F1F1F1F); nes_write_indexed(nesdev, 0x00005020, 0x1F1F1F1F); nes_write_indexed(nesdev, 0x00006090, 0xFFFFFFFF); /* TODO: move this to code, get from EEPROM */ nes_write_indexed(nesdev, 0x00000900, 0x20000001); nes_write_indexed(nesdev, 0x000060C0, 0x0000028e); nes_write_indexed(nesdev, 0x000060C8, 0x00000020); nes_write_indexed(nesdev, 0x000001EC, 0x7b2625a0); /* nes_write_indexed(nesdev, 0x000001EC, 0x5f2625a0); */ if (hw_rev != NE020_REV) { u32temp = nes_read_indexed(nesdev, 0x000008e8); u32temp |= 0x80000000; nes_write_indexed(nesdev, 0x000008e8, u32temp); u32temp = nes_read_indexed(nesdev, 0x000021f8); u32temp &= 0x7fffffff; u32temp |= 0x7fff0010; nes_write_indexed(nesdev, 0x000021f8, u32temp); if (port_count > 1) { u32temp = nes_read_indexed(nesdev, 0x000023f8); u32temp &= 0x7fffffff; u32temp |= 0x7fff0010; nes_write_indexed(nesdev, 0x000023f8, u32temp); } } } /** * nes_destroy_adapter - destroy the adapter structure */ void nes_destroy_adapter(struct nes_adapter *nesadapter) { struct nes_adapter *tmp_adapter; list_for_each_entry(tmp_adapter, &nes_adapter_list, list) { nes_debug(NES_DBG_SHUTDOWN, "Nes Adapter list entry = 0x%p.\n", tmp_adapter); } nesadapter->ref_count--; if (!nesadapter->ref_count) { if (nesadapter->hw_rev == NE020_REV) { del_timer(&nesadapter->mh_timer); } del_timer(&nesadapter->lc_timer); list_del(&nesadapter->list); kfree(nesadapter); } } /** * nes_init_cqp */ int nes_init_cqp(struct nes_device *nesdev) { struct nes_adapter *nesadapter = nesdev->nesadapter; struct nes_hw_cqp_qp_context *cqp_qp_context; struct nes_hw_cqp_wqe *cqp_wqe; struct nes_hw_ceq *ceq; struct nes_hw_ceq *nic_ceq; struct nes_hw_aeq *aeq; void *vmem; dma_addr_t pmem; u32 count=0; u32 cqp_head; u64 u64temp; u32 u32temp; /* allocate CQP memory */ /* Need to add max_cq to the aeq size once cq overflow checking is added back */ /* SQ is 512 byte aligned, others are 256 byte aligned */ nesdev->cqp_mem_size = 512 + 
(sizeof(struct nes_hw_cqp_wqe) * NES_CQP_SQ_SIZE) + (sizeof(struct nes_hw_cqe) * NES_CCQ_SIZE) + max(((u32)sizeof(struct nes_hw_ceqe) * NES_CCEQ_SIZE), (u32)256) + max(((u32)sizeof(struct nes_hw_ceqe) * NES_NIC_CEQ_SIZE), (u32)256) + (sizeof(struct nes_hw_aeqe) * nesadapter->max_qp) + sizeof(struct nes_hw_cqp_qp_context); nesdev->cqp_vbase = pci_alloc_consistent(nesdev->pcidev, nesdev->cqp_mem_size, &nesdev->cqp_pbase); if (!nesdev->cqp_vbase) { nes_debug(NES_DBG_INIT, "Unable to allocate memory for host descriptor rings\n"); return -ENOMEM; } memset(nesdev->cqp_vbase, 0, nesdev->cqp_mem_size); /* Allocate a twice the number of CQP requests as the SQ size */ nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) * 2 * NES_CQP_SQ_SIZE, GFP_KERNEL); if (nesdev->nes_cqp_requests == NULL) { nes_debug(NES_DBG_INIT, "Unable to allocate memory CQP request entries.\n"); pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase, nesdev->cqp.sq_pbase); return -ENOMEM; } nes_debug(NES_DBG_INIT, "Allocated CQP structures at %p (phys = %016lX), size = %u.\n", nesdev->cqp_vbase, (unsigned long)nesdev->cqp_pbase, nesdev->cqp_mem_size); spin_lock_init(&nesdev->cqp.lock); init_waitqueue_head(&nesdev->cqp.waitq); /* Setup Various Structures */ vmem = (void *)(((unsigned long)nesdev->cqp_vbase + (512 - 1)) & ~(unsigned long)(512 - 1)); pmem = (dma_addr_t)(((unsigned long long)nesdev->cqp_pbase + (512 - 1)) & ~(unsigned long long)(512 - 1)); nesdev->cqp.sq_vbase = vmem; nesdev->cqp.sq_pbase = pmem; nesdev->cqp.sq_size = NES_CQP_SQ_SIZE; nesdev->cqp.sq_head = 0; nesdev->cqp.sq_tail = 0; nesdev->cqp.qp_id = PCI_FUNC(nesdev->pcidev->devfn); vmem += (sizeof(struct nes_hw_cqp_wqe) * nesdev->cqp.sq_size); pmem += (sizeof(struct nes_hw_cqp_wqe) * nesdev->cqp.sq_size); nesdev->ccq.cq_vbase = vmem; nesdev->ccq.cq_pbase = pmem; nesdev->ccq.cq_size = NES_CCQ_SIZE; nesdev->ccq.cq_head = 0; nesdev->ccq.ce_handler = nes_cqp_ce_handler; nesdev->ccq.cq_number = PCI_FUNC(nesdev->pcidev->devfn); vmem += (sizeof(struct nes_hw_cqe) * nesdev->ccq.cq_size); pmem += (sizeof(struct nes_hw_cqe) * nesdev->ccq.cq_size); nesdev->ceq_index = PCI_FUNC(nesdev->pcidev->devfn); ceq = &nesadapter->ceq[nesdev->ceq_index]; ceq->ceq_vbase = vmem; ceq->ceq_pbase = pmem; ceq->ceq_size = NES_CCEQ_SIZE; ceq->ceq_head = 0; vmem += max(((u32)sizeof(struct nes_hw_ceqe) * ceq->ceq_size), (u32)256); pmem += max(((u32)sizeof(struct nes_hw_ceqe) * ceq->ceq_size), (u32)256); nesdev->nic_ceq_index = PCI_FUNC(nesdev->pcidev->devfn) + 8; nic_ceq = &nesadapter->ceq[nesdev->nic_ceq_index]; nic_ceq->ceq_vbase = vmem; nic_ceq->ceq_pbase = pmem; nic_ceq->ceq_size = NES_NIC_CEQ_SIZE; nic_ceq->ceq_head = 0; vmem += max(((u32)sizeof(struct nes_hw_ceqe) * nic_ceq->ceq_size), (u32)256); pmem += max(((u32)sizeof(struct nes_hw_ceqe) * nic_ceq->ceq_size), (u32)256); aeq = &nesadapter->aeq[PCI_FUNC(nesdev->pcidev->devfn)]; aeq->aeq_vbase = vmem; aeq->aeq_pbase = pmem; aeq->aeq_size = nesadapter->max_qp; aeq->aeq_head = 0; /* Setup QP Context */ vmem += (sizeof(struct nes_hw_aeqe) * aeq->aeq_size); pmem += (sizeof(struct nes_hw_aeqe) * aeq->aeq_size); cqp_qp_context = vmem; cqp_qp_context->context_words[0] = cpu_to_le32((PCI_FUNC(nesdev->pcidev->devfn) << 12) + (2 << 10)); cqp_qp_context->context_words[1] = 0; cqp_qp_context->context_words[2] = cpu_to_le32((u32)nesdev->cqp.sq_pbase); cqp_qp_context->context_words[3] = cpu_to_le32(((u64)nesdev->cqp.sq_pbase) >> 32); /* Write the address to Create CQP */ if ((sizeof(dma_addr_t) > 4)) { 
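		/* 64-bit DMA addresses: program the upper 32 bits of the CQP
		 * QP context address; the else branch below zeroes it instead. */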
nes_write_indexed(nesdev, NES_IDX_CREATE_CQP_HIGH + (PCI_FUNC(nesdev->pcidev->devfn) * 8), ((u64)pmem) >> 32); } else { nes_write_indexed(nesdev, NES_IDX_CREATE_CQP_HIGH + (PCI_FUNC(nesdev->pcidev->devfn) * 8), 0); } nes_write_indexed(nesdev, NES_IDX_CREATE_CQP_LOW + (PCI_FUNC(nesdev->pcidev->devfn) * 8), (u32)pmem); INIT_LIST_HEAD(&nesdev->cqp_avail_reqs); INIT_LIST_HEAD(&nesdev->cqp_pending_reqs); for (count = 0; count < 2*NES_CQP_SQ_SIZE; count++) { init_waitqueue_head(&nesdev->nes_cqp_requests[count].waitq); list_add_tail(&nesdev->nes_cqp_requests[count].list, &nesdev->cqp_avail_reqs); } /* Write Create CCQ WQE */ cqp_head = nesdev->cqp.sq_head++; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID | NES_CQP_CQ_CHK_OVERFLOW | ((u32)nesdev->ccq.cq_size << 16))); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, (nesdev->ccq.cq_number | ((u32)nesdev->ceq_index << 16))); u64temp = (u64)nesdev->ccq.cq_pbase; set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp); cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0; u64temp = (unsigned long)&nesdev->ccq; cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1)); cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF); cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0; /* Write Create CEQ WQE */ cqp_head = nesdev->cqp.sq_head++; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_CREATE_CEQ + ((u32)nesdev->ceq_index << 8))); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX, ceq->ceq_size); u64temp = (u64)ceq->ceq_pbase; set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp); /* Write Create AEQ WQE */ cqp_head = nesdev->cqp.sq_head++; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_CREATE_AEQ + ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 8))); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_AEQ_WQE_ELEMENT_COUNT_IDX, aeq->aeq_size); u64temp = (u64)aeq->aeq_pbase; set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp); /* Write Create NIC CEQ WQE */ cqp_head = nesdev->cqp.sq_head++; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_CREATE_CEQ + ((u32)nesdev->nic_ceq_index << 8))); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX, nic_ceq->ceq_size); u64temp = (u64)nic_ceq->ceq_pbase; set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp); /* Poll until CCQP done */ count = 0; do { if (count++ > 1000) { printk(KERN_ERR PFX "Error creating CQP\n"); pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp_vbase, nesdev->cqp_pbase); return -1; } udelay(10); } while (!(nes_read_indexed(nesdev, NES_IDX_QP_CONTROL + (PCI_FUNC(nesdev->pcidev->devfn) * 8)) & (1 << 8))); nes_debug(NES_DBG_INIT, "CQP Status = 0x%08X\n", nes_read_indexed(nesdev, NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8))); u32temp = 0x04800000; nes_write32(nesdev->regs+NES_WQE_ALLOC, u32temp | nesdev->cqp.qp_id); /* wait for the CCQ, CEQ, and AEQ to get created */ count = 
0; do { if (count++ > 1000) { printk(KERN_ERR PFX "Error creating CCQ, CEQ, and AEQ\n"); pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp_vbase, nesdev->cqp_pbase); return -1; } udelay(10); } while (((nes_read_indexed(nesdev, NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)) & (15<<8)) != (15<<8))); /* dump the QP status value */ nes_debug(NES_DBG_INIT, "QP Status = 0x%08X\n", nes_read_indexed(nesdev, NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8))); nesdev->cqp.sq_tail++; return 0; } /** * nes_destroy_cqp */ int nes_destroy_cqp(struct nes_device *nesdev) { struct nes_hw_cqp_wqe *cqp_wqe; u32 count = 0; u32 cqp_head; unsigned long flags; do { if (count++ > 1000) break; udelay(10); } while (!(nesdev->cqp.sq_head == nesdev->cqp.sq_tail)); /* Reset CCQ */ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_RESET | nesdev->ccq.cq_number); /* Disable device interrupts */ nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff); spin_lock_irqsave(&nesdev->cqp.lock, flags); /* Destroy the AEQ */ cqp_head = nesdev->cqp.sq_head++; nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_AEQ | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 8)); cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] = 0; /* Destroy the NIC CEQ */ cqp_head = nesdev->cqp.sq_head++; nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CEQ | ((u32)nesdev->nic_ceq_index << 8)); /* Destroy the CEQ */ cqp_head = nesdev->cqp.sq_head++; nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CEQ | (nesdev->ceq_index << 8)); /* Destroy the CCQ */ cqp_head = nesdev->cqp.sq_head++; nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CQ); cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesdev->ccq.cq_number | ((u32)nesdev->ceq_index << 16)); /* Destroy CQP */ cqp_head = nesdev->cqp.sq_head++; nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_CQP); cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesdev->cqp.qp_id); barrier(); /* Ring doorbell (5 WQEs) */ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x05800000 | nesdev->cqp.qp_id); spin_unlock_irqrestore(&nesdev->cqp.lock, flags); /* wait for the CCQ, CEQ, and AEQ to get destroyed */ count = 0; do { if (count++ > 1000) { printk(KERN_ERR PFX "Function%d: Error destroying CCQ, CEQ, and AEQ\n", PCI_FUNC(nesdev->pcidev->devfn)); break; } udelay(10); } while (((nes_read_indexed(nesdev, NES_IDX_QP_CONTROL + (PCI_FUNC(nesdev->pcidev->devfn)*8)) & (15 << 8)) != 0)); /* dump the QP status value */ nes_debug(NES_DBG_SHUTDOWN, "Function%d: QP Status = 0x%08X\n", PCI_FUNC(nesdev->pcidev->devfn), nes_read_indexed(nesdev, NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8))); kfree(nesdev->nes_cqp_requests); /* Free the control structures */ pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase, nesdev->cqp.sq_pbase); return 0; } /** * nes_init_1g_phy */ static int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) { u32 counter = 0; u16 phy_data; int ret = 0; 
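	/*
	 * 1G PHY bring-up sequence (as implemented below): read the PHY ID,
	 * issue a software reset through register 0 and poll until the reset
	 * bit self-clears, then disable loopback, set the interrupt mask,
	 * enable pause/flow control, clear half duplex and restart
	 * auto-negotiation.
	 */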
nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data); nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000); /* Reset the PHY */ nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000); udelay(100); counter = 0; do { nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); if (counter++ > 100) { ret = -1; break; } } while (phy_data & 0x8000); /* Setting no phy loopback */ phy_data &= 0xbfff; phy_data |= 0x1140; nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data); nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data); nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data); /* Setting the interrupt mask */ nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee); nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); /* turning on flow control */ nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data); nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00); nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data); /* Clear Half duplex */ nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data); nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100)); nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data); nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300); return ret; } /** * nes_init_2025_phy */ static int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) { u32 temp_phy_data = 0; u32 temp_phy_data2 = 0; u32 counter = 0; u32 sds; u32 mac_index = nesdev->mac_index; int ret = 0; unsigned int first_attempt = 1; /* Check firmware heartbeat */ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); udelay(1500); nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); if (temp_phy_data != temp_phy_data2) { nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd); temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); if ((temp_phy_data & 0xff) > 0x20) return 0; printk(PFX "Reinitialize external PHY\n"); } /* no heartbeat, configure the PHY */ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052); switch (phy_type) { case NES_PHY_TYPE_ARGUS: nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008); nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098); nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00); /* setup LEDs */ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009); break; case NES_PHY_TYPE_SFP_D: nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038); nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013); nes_write_10G_phy_reg(nesdev, 
phy_index, 0x1, 0xc31a, 0x0098); nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00); /* setup LEDs */ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009); break; case NES_PHY_TYPE_KR: nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0010); nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0080); nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00); /* setup LEDs */ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x000B); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x0003); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0004); nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0022, 0x406D); nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0023, 0x0020); break; } nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528); /* Bring PHY out of reset */ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002); /* Check for heartbeat */ counter = 0; mdelay(690); nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); do { if (counter++ > 150) { printk(PFX "No PHY heartbeat\n"); break; } mdelay(1); nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); } while ((temp_phy_data2 == temp_phy_data)); /* wait for tracking */ counter = 0; do { nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd); temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); if (counter++ > 300) { if (((temp_phy_data & 0xff) == 0x0) && first_attempt) { first_attempt = 0; counter = 0; /* reset AMCC PHY and try again */ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0); nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040); continue; } else { ret = 1; break; } } mdelay(10); } while ((temp_phy_data & 0xff) < 0x30); /* setup signal integrity */ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032); if (phy_type == NES_PHY_TYPE_KR) { nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x000C); } else { nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063); } /* reset serdes */ sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200); sds |= 0x1; nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds); sds &= 0xfffffffe; nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds); counter = 0; while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) && (counter++ < 5000)) ; return ret; } /** * nes_init_phy */ int nes_init_phy(struct nes_device *nesdev) { struct nes_adapter *nesadapter = nesdev->nesadapter; u32 mac_index = nesdev->mac_index; u32 tx_config = 0; unsigned long flags; u8 phy_type = nesadapter->phy_type[mac_index]; u8 phy_index = nesadapter->phy_index[mac_index]; int ret = 0; tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); if (phy_type == NES_PHY_TYPE_1G) { /* setup 1G 
MDIO operation */ tx_config &= 0xFFFFFFE3; tx_config |= 0x04; } else { /* setup 10G MDIO operation */ tx_config &= 0xFFFFFFE3; tx_config |= 0x1D; } nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags); switch (phy_type) { case NES_PHY_TYPE_1G: ret = nes_init_1g_phy(nesdev, phy_type, phy_index); break; case NES_PHY_TYPE_ARGUS: case NES_PHY_TYPE_SFP_D: case NES_PHY_TYPE_KR: ret = nes_init_2025_phy(nesdev, phy_type, phy_index); break; } spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags); return ret; } /** * nes_replenish_nic_rq */ static void nes_replenish_nic_rq(struct nes_vnic *nesvnic) { unsigned long flags; dma_addr_t bus_address; struct sk_buff *skb; struct nes_hw_nic_rq_wqe *nic_rqe; struct nes_hw_nic *nesnic; struct nes_device *nesdev; struct nes_rskb_cb *cb; u32 rx_wqes_posted = 0; nesnic = &nesvnic->nic; nesdev = nesvnic->nesdev; spin_lock_irqsave(&nesnic->rq_lock, flags); if (nesnic->replenishing_rq !=0) { if (((nesnic->rq_size-1) == atomic_read(&nesvnic->rx_skbs_needed)) && (atomic_read(&nesvnic->rx_skb_timer_running) == 0)) { atomic_set(&nesvnic->rx_skb_timer_running, 1); spin_unlock_irqrestore(&nesnic->rq_lock, flags); nesvnic->rq_wqes_timer.expires = jiffies + (HZ/2); /* 1/2 second */ add_timer(&nesvnic->rq_wqes_timer); } else spin_unlock_irqrestore(&nesnic->rq_lock, flags); return; } nesnic->replenishing_rq = 1; spin_unlock_irqrestore(&nesnic->rq_lock, flags); do { skb = dev_alloc_skb(nesvnic->max_frame_size); if (skb) { skb->dev = nesvnic->netdev; bus_address = pci_map_single(nesdev->pcidev, skb->data, nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); cb = (struct nes_rskb_cb *)&skb->cb[0]; cb->busaddr = bus_address; cb->maplen = nesvnic->max_frame_size; nic_rqe = &nesnic->rq_vbase[nesvnic->nic.rq_head]; nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(nesvnic->max_frame_size); nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0; nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)bus_address); nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)bus_address >> 32)); nesnic->rx_skb[nesnic->rq_head] = skb; nesnic->rq_head++; nesnic->rq_head &= nesnic->rq_size - 1; atomic_dec(&nesvnic->rx_skbs_needed); barrier(); if (++rx_wqes_posted == 255) { nes_write32(nesdev->regs+NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesnic->qp_id); rx_wqes_posted = 0; } } else { spin_lock_irqsave(&nesnic->rq_lock, flags); if (((nesnic->rq_size-1) == atomic_read(&nesvnic->rx_skbs_needed)) && (atomic_read(&nesvnic->rx_skb_timer_running) == 0)) { atomic_set(&nesvnic->rx_skb_timer_running, 1); spin_unlock_irqrestore(&nesnic->rq_lock, flags); nesvnic->rq_wqes_timer.expires = jiffies + (HZ/2); /* 1/2 second */ add_timer(&nesvnic->rq_wqes_timer); } else spin_unlock_irqrestore(&nesnic->rq_lock, flags); break; } } while (atomic_read(&nesvnic->rx_skbs_needed)); barrier(); if (rx_wqes_posted) nes_write32(nesdev->regs+NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesnic->qp_id); nesnic->replenishing_rq = 0; } /** * nes_rq_wqes_timeout */ static void nes_rq_wqes_timeout(unsigned long parm) { struct nes_vnic *nesvnic = (struct nes_vnic *)parm; printk("%s: Timer fired.\n", __func__); atomic_set(&nesvnic->rx_skb_timer_running, 0); if (atomic_read(&nesvnic->rx_skbs_needed)) nes_replenish_nic_rq(nesvnic); } static int nes_lro_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, u64 *hdr_flags, void *priv) { unsigned int ip_len; struct iphdr *iph; skb_reset_network_header(skb); iph = ip_hdr(skb); if 
(iph->protocol != IPPROTO_TCP) return -1; ip_len = ip_hdrlen(skb); skb_set_transport_header(skb, ip_len); *tcph = tcp_hdr(skb); *hdr_flags = LRO_IPV4 | LRO_TCP; *iphdr = iph; return 0; } /** * nes_init_nic_qp */ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev) { struct nes_hw_cqp_wqe *cqp_wqe; struct nes_hw_nic_sq_wqe *nic_sqe; struct nes_hw_nic_qp_context *nic_context; struct sk_buff *skb; struct nes_hw_nic_rq_wqe *nic_rqe; struct nes_vnic *nesvnic = netdev_priv(netdev); unsigned long flags; void *vmem; dma_addr_t pmem; u64 u64temp; int ret; u32 cqp_head; u32 counter; u32 wqe_count; struct nes_rskb_cb *cb; u8 jumbomode=0; /* Allocate fragment, SQ, RQ, and CQ; Reuse CEQ based on the PCI function */ nesvnic->nic_mem_size = 256 + (NES_NIC_WQ_SIZE * sizeof(struct nes_first_frag)) + (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe)) + (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe)) + (NES_NIC_WQ_SIZE * 2 * sizeof(struct nes_hw_nic_cqe)) + sizeof(struct nes_hw_nic_qp_context); nesvnic->nic_vbase = pci_alloc_consistent(nesdev->pcidev, nesvnic->nic_mem_size, &nesvnic->nic_pbase); if (!nesvnic->nic_vbase) { nes_debug(NES_DBG_INIT, "Unable to allocate memory for NIC host descriptor rings\n"); return -ENOMEM; } memset(nesvnic->nic_vbase, 0, nesvnic->nic_mem_size); nes_debug(NES_DBG_INIT, "Allocated NIC QP structures at %p (phys = %016lX), size = %u.\n", nesvnic->nic_vbase, (unsigned long)nesvnic->nic_pbase, nesvnic->nic_mem_size); vmem = (void *)(((unsigned long)nesvnic->nic_vbase + (256 - 1)) & ~(unsigned long)(256 - 1)); pmem = (dma_addr_t)(((unsigned long long)nesvnic->nic_pbase + (256 - 1)) & ~(unsigned long long)(256 - 1)); /* Setup the first Fragment buffers */ nesvnic->nic.first_frag_vbase = vmem; for (counter = 0; counter < NES_NIC_WQ_SIZE; counter++) { nesvnic->nic.frag_paddr[counter] = pmem; pmem += sizeof(struct nes_first_frag); } /* setup the SQ */ vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_first_frag)); nesvnic->nic.sq_vbase = (void *)vmem; nesvnic->nic.sq_pbase = pmem; nesvnic->nic.sq_head = 0; nesvnic->nic.sq_tail = 0; nesvnic->nic.sq_size = NES_NIC_WQ_SIZE; for (counter = 0; counter < NES_NIC_WQ_SIZE; counter++) { nic_sqe = &nesvnic->nic.sq_vbase[counter]; nic_sqe->wqe_words[NES_NIC_SQ_WQE_MISC_IDX] = cpu_to_le32(NES_NIC_SQ_WQE_DISABLE_CHKSUM | NES_NIC_SQ_WQE_COMPLETION); nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX] = cpu_to_le32((u32)NES_FIRST_FRAG_SIZE << 16); nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)nesvnic->nic.frag_paddr[counter]); nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)nesvnic->nic.frag_paddr[counter] >> 32)); } nesvnic->get_cqp_request = nes_get_cqp_request; nesvnic->post_cqp_request = nes_post_cqp_request; nesvnic->mcrq_mcast_filter = NULL; spin_lock_init(&nesvnic->nic.rq_lock); /* setup the RQ */ vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe)); pmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe)); nesvnic->nic.rq_vbase = vmem; nesvnic->nic.rq_pbase = pmem; nesvnic->nic.rq_head = 0; nesvnic->nic.rq_tail = 0; nesvnic->nic.rq_size = NES_NIC_WQ_SIZE; /* setup the CQ */ vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe)); pmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe)); if (nesdev->nesadapter->netdev_count > 2) nesvnic->mcrq_qp_id = nesvnic->nic_index + 32; else nesvnic->mcrq_qp_id = nesvnic->nic.qp_id + 4; nesvnic->nic_cq.cq_vbase = vmem; nesvnic->nic_cq.cq_pbase = pmem; nesvnic->nic_cq.cq_head = 0; nesvnic->nic_cq.cq_size = 
NES_NIC_WQ_SIZE * 2; nesvnic->nic_cq.ce_handler = nes_nic_napi_ce_handler; /* Send CreateCQ request to CQP */ spin_lock_irqsave(&nesdev->cqp.lock, flags); cqp_head = nesdev->cqp.sq_head; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32( NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID | ((u32)nesvnic->nic_cq.cq_size << 16)); cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32( nesvnic->nic_cq.cq_number | ((u32)nesdev->nic_ceq_index << 16)); u64temp = (u64)nesvnic->nic_cq.cq_pbase; set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp); cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0; u64temp = (unsigned long)&nesvnic->nic_cq; cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1)); cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF); cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0; if (++cqp_head >= nesdev->cqp.sq_size) cqp_head = 0; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); /* Send CreateQP request to CQP */ nic_context = (void *)(&nesvnic->nic_cq.cq_vbase[nesvnic->nic_cq.cq_size]); nic_context->context_words[NES_NIC_CTX_MISC_IDX] = cpu_to_le32((u32)NES_NIC_CTX_SIZE | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12)); nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n", nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE), nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE)); if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0) { nic_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE); } u64temp = (u64)nesvnic->nic.sq_pbase; nic_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp); nic_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32)); u64temp = (u64)nesvnic->nic.rq_pbase; nic_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp); nic_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32)); cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_NIC); cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesvnic->nic.qp_id); u64temp = (u64)nesvnic->nic_cq.cq_pbase + (nesvnic->nic_cq.cq_size * sizeof(struct nes_hw_nic_cqe)); set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp); if (++cqp_head >= nesdev->cqp.sq_size) cqp_head = 0; nesdev->cqp.sq_head = cqp_head; barrier(); /* Ring doorbell (2 WQEs) */ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id); spin_unlock_irqrestore(&nesdev->cqp.lock, flags); nes_debug(NES_DBG_INIT, "Waiting for create NIC QP%u to complete.\n", nesvnic->nic.qp_id); ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head), NES_EVENT_TIMEOUT); nes_debug(NES_DBG_INIT, "Create NIC QP%u completed, wait_event_timeout ret = %u.\n", nesvnic->nic.qp_id, ret); if (!ret) { nes_debug(NES_DBG_INIT, "NIC QP%u create timeout expired\n", nesvnic->nic.qp_id); pci_free_consistent(nesdev->pcidev, nesvnic->nic_mem_size, nesvnic->nic_vbase, nesvnic->nic_pbase); return -EIO; } /* Populate the RQ */ for (counter = 0; counter < (NES_NIC_WQ_SIZE - 1); counter++) { skb = dev_alloc_skb(nesvnic->max_frame_size); if (!skb) { nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name); 
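			/* ran out of receive buffers part way through the fill: tear the NIC QP back down */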
nes_destroy_nic_qp(nesvnic); return -ENOMEM; } skb->dev = netdev; pmem = pci_map_single(nesdev->pcidev, skb->data, nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); cb = (struct nes_rskb_cb *)&skb->cb[0]; cb->busaddr = pmem; cb->maplen = nesvnic->max_frame_size; nic_rqe = &nesvnic->nic.rq_vbase[counter]; nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(nesvnic->max_frame_size); nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0; nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem); nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32)); nesvnic->nic.rx_skb[counter] = skb; } wqe_count = NES_NIC_WQ_SIZE - 1; nesvnic->nic.rq_head = wqe_count; barrier(); do { counter = min(wqe_count, ((u32)255)); wqe_count -= counter; nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter << 24) | nesvnic->nic.qp_id); } while (wqe_count); init_timer(&nesvnic->rq_wqes_timer); nesvnic->rq_wqes_timer.function = nes_rq_wqes_timeout; nesvnic->rq_wqes_timer.data = (unsigned long)nesvnic; nes_debug(NES_DBG_INIT, "NAPI support Enabled\n"); if (nesdev->nesadapter->et_use_adaptive_rx_coalesce) { nes_nic_init_timer(nesdev); if (netdev->mtu > 1500) jumbomode = 1; nes_nic_init_timer_defaults(nesdev, jumbomode); } if ((nesdev->nesadapter->allow_unaligned_fpdus) && (nes_init_mgt_qp(nesdev, netdev, nesvnic))) { nes_debug(NES_DBG_INIT, "%s: Out of memory for pau nic\n", netdev->name); nes_destroy_nic_qp(nesvnic); return -ENOMEM; } nesvnic->lro_mgr.max_aggr = nes_lro_max_aggr; nesvnic->lro_mgr.max_desc = NES_MAX_LRO_DESCRIPTORS; nesvnic->lro_mgr.lro_arr = nesvnic->lro_desc; nesvnic->lro_mgr.get_skb_header = nes_lro_get_skb_hdr; nesvnic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; nesvnic->lro_mgr.dev = netdev; nesvnic->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; nesvnic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; return 0; } /** * nes_destroy_nic_qp */ void nes_destroy_nic_qp(struct nes_vnic *nesvnic) { u64 u64temp; dma_addr_t bus_address; struct nes_device *nesdev = nesvnic->nesdev; struct nes_hw_cqp_wqe *cqp_wqe; struct nes_hw_nic_sq_wqe *nic_sqe; __le16 *wqe_fragment_length; u16 wqe_fragment_index; u32 cqp_head; u32 wqm_cfg0; unsigned long flags; struct sk_buff *rx_skb; struct nes_rskb_cb *cb; int ret; if (nesdev->nesadapter->allow_unaligned_fpdus) nes_destroy_mgt(nesvnic); /* clear wqe stall before destroying NIC QP */ wqm_cfg0 = nes_read_indexed(nesdev, NES_IDX_WQM_CONFIG0); nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG0, wqm_cfg0 & 0xFFFF7FFF); /* Free remaining NIC receive buffers */ while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) { rx_skb = nesvnic->nic.rx_skb[nesvnic->nic.rq_tail]; cb = (struct nes_rskb_cb *)&rx_skb->cb[0]; pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, PCI_DMA_FROMDEVICE); dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]); nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1); } /* Free remaining NIC transmit buffers */ while (nesvnic->nic.sq_head != nesvnic->nic.sq_tail) { nic_sqe = &nesvnic->nic.sq_vbase[nesvnic->nic.sq_tail]; wqe_fragment_index = 1; wqe_fragment_length = (__le16 *) &nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX]; /* bump past the vlan tag */ wqe_fragment_length++; if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) { u64temp = (u64)le32_to_cpu( nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+ wqe_fragment_index*2]); u64temp += ((u64)le32_to_cpu( nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX + wqe_fragment_index*2]))<<32; bus_address = (dma_addr_t)u64temp; if 
(test_and_clear_bit(nesvnic->nic.sq_tail, nesvnic->nic.first_frag_overflow)) { pci_unmap_single(nesdev->pcidev, bus_address, le16_to_cpu(wqe_fragment_length[ wqe_fragment_index++]), PCI_DMA_TODEVICE); } for (; wqe_fragment_index < 5; wqe_fragment_index++) { if (wqe_fragment_length[wqe_fragment_index]) { u64temp = le32_to_cpu( nic_sqe->wqe_words[ NES_NIC_SQ_WQE_FRAG0_LOW_IDX+ wqe_fragment_index*2]); u64temp += ((u64)le32_to_cpu( nic_sqe->wqe_words[ NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+ wqe_fragment_index*2]))<<32; bus_address = (dma_addr_t)u64temp; pci_unmap_page(nesdev->pcidev, bus_address, le16_to_cpu( wqe_fragment_length[ wqe_fragment_index]), PCI_DMA_TODEVICE); } else break; } } if (nesvnic->nic.tx_skb[nesvnic->nic.sq_tail]) dev_kfree_skb( nesvnic->nic.tx_skb[nesvnic->nic.sq_tail]); nesvnic->nic.sq_tail = (nesvnic->nic.sq_tail + 1) & (nesvnic->nic.sq_size - 1); } spin_lock_irqsave(&nesdev->cqp.lock, flags); /* Destroy NIC QP */ cqp_head = nesdev->cqp.sq_head; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC)); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesvnic->nic.qp_id); if (++cqp_head >= nesdev->cqp.sq_size) cqp_head = 0; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; /* Destroy NIC CQ */ nes_fill_init_cqp_wqe(cqp_wqe, nesdev); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_DESTROY_CQ | ((u32)nesvnic->nic_cq.cq_size << 16))); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, (nesvnic->nic_cq.cq_number | ((u32)nesdev->nic_ceq_index << 16))); if (++cqp_head >= nesdev->cqp.sq_size) cqp_head = 0; nesdev->cqp.sq_head = cqp_head; barrier(); /* Ring doorbell (2 WQEs) */ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id); spin_unlock_irqrestore(&nesdev->cqp.lock, flags); nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u," " cqp.sq_tail=%u, cqp.sq_size=%u\n", cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size); ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head), NES_EVENT_TIMEOUT); nes_debug(NES_DBG_SHUTDOWN, "Destroy NIC QP returned, wait_event_timeout ret = %u, cqp_head=%u," " cqp.sq_head=%u, cqp.sq_tail=%u\n", ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail); if (!ret) { nes_debug(NES_DBG_SHUTDOWN, "NIC QP%u destroy timeout expired\n", nesvnic->nic.qp_id); } pci_free_consistent(nesdev->pcidev, nesvnic->nic_mem_size, nesvnic->nic_vbase, nesvnic->nic_pbase); /* restore old wqm_cfg0 value */ nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG0, wqm_cfg0); } /** * nes_napi_isr */ int nes_napi_isr(struct nes_device *nesdev) { struct nes_adapter *nesadapter = nesdev->nesadapter; u32 int_stat; if (nesdev->napi_isr_ran) { /* interrupt status has already been read in ISR */ int_stat = nesdev->int_stat; } else { int_stat = nes_read32(nesdev->regs + NES_INT_STAT); nesdev->int_stat = int_stat; nesdev->napi_isr_ran = 1; } int_stat &= nesdev->int_req; /* iff NIC, process here, else wait for DPC */ if ((int_stat) && ((int_stat & 0x0000ff00) == int_stat)) { nesdev->napi_isr_ran = 0; nes_write32(nesdev->regs + NES_INT_STAT, (int_stat & ~(NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0 | NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3))); /* Process the CEQs */ nes_process_ceq(nesdev, &nesdev->nesadapter->ceq[nesdev->nic_ceq_index]); if (unlikely((((nesadapter->et_rx_coalesce_usecs_irq) && (!nesadapter->et_use_adaptive_rx_coalesce)) || 
((nesadapter->et_use_adaptive_rx_coalesce) && (nesdev->deepcq_count > nesadapter->et_pkt_rate_low))))) { if ((nesdev->int_req & NES_INT_TIMER) == 0) { /* Enable Periodic timer interrupts */ nesdev->int_req |= NES_INT_TIMER; /* ack any pending periodic timer interrupts so we don't get an immediate interrupt */ /* TODO: need to also ack other unused periodic timer values, get from nesadapter */ nes_write32(nesdev->regs+NES_TIMER_STAT, nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req)); nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req | NES_INTF_PERIODIC_TIMER)); } if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) { nes_nic_init_timer(nesdev); } /* Enable interrupts, except CEQs */ nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req)); } else { /* Enable interrupts, make sure timer is off */ nesdev->int_req &= ~NES_INT_TIMER; nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); } nesdev->deepcq_count = 0; return 1; } else { return 0; } } static void process_critical_error(struct nes_device *nesdev) { u32 debug_error; u32 nes_idx_debug_error_masks0 = 0; u16 error_module = 0; debug_error = nes_read_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS); printk(KERN_ERR PFX "Critical Error reported by device!!! 0x%02X\n", (u16)debug_error); nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS, 0x01010000 | (debug_error & 0x0000ffff)); if (crit_err_count++ > 10) nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 1 << 0x17); error_module = (u16) (debug_error & 0x1F00) >> 8; if (++nesdev->nesadapter->crit_error_count[error_module-1] >= nes_max_critical_error_count) { printk(KERN_ERR PFX "Masking off critical error for module " "0x%02X\n", (u16)error_module); nes_idx_debug_error_masks0 = nes_read_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0); nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0, nes_idx_debug_error_masks0 | (1 << error_module)); } } /** * nes_dpc */ void nes_dpc(unsigned long param) { struct nes_device *nesdev = (struct nes_device *)param; struct nes_adapter *nesadapter = nesdev->nesadapter; u32 counter; u32 loop_counter = 0; u32 int_status_bit; u32 int_stat; u32 timer_stat; u32 temp_int_stat; u32 intf_int_stat; u32 processed_intf_int = 0; u16 processed_timer_int = 0; u16 completion_ints = 0; u16 timer_ints = 0; /* nes_debug(NES_DBG_ISR, "\n"); */ do { timer_stat = 0; if (nesdev->napi_isr_ran) { nesdev->napi_isr_ran = 0; int_stat = nesdev->int_stat; } else int_stat = nes_read32(nesdev->regs+NES_INT_STAT); if (processed_intf_int != 0) int_stat &= nesdev->int_req & ~NES_INT_INTF; else int_stat &= nesdev->int_req; if (processed_timer_int == 0) { processed_timer_int = 1; if (int_stat & NES_INT_TIMER) { timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT); if ((timer_stat & nesdev->timer_int_req) == 0) { int_stat &= ~NES_INT_TIMER; } } } else { int_stat &= ~NES_INT_TIMER; } if (int_stat) { if (int_stat & ~(NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0| NES_INT_MAC1|NES_INT_MAC2 | NES_INT_MAC3)) { /* Ack the interrupts */ nes_write32(nesdev->regs+NES_INT_STAT, (int_stat & ~(NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0| NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3))); } temp_int_stat = int_stat; for (counter = 0, int_status_bit = 1; counter < 16; counter++) { if (int_stat & int_status_bit) { nes_process_ceq(nesdev, &nesadapter->ceq[counter]); temp_int_stat &= ~int_status_bit; completion_ints = 1; } if (!(temp_int_stat & 0x0000ffff)) break; int_status_bit <<= 1; 
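				/* advance to the next CEQ: bits 0-15 of the interrupt status map to CEQs 0-15 */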
} /* Process the AEQ for this pci function */ int_status_bit = 1 << (16 + PCI_FUNC(nesdev->pcidev->devfn)); if (int_stat & int_status_bit) { nes_process_aeq(nesdev, &nesadapter->aeq[PCI_FUNC(nesdev->pcidev->devfn)]); } /* Process the MAC interrupt for this pci function */ int_status_bit = 1 << (24 + nesdev->mac_index); if (int_stat & int_status_bit) { nes_process_mac_intr(nesdev, nesdev->mac_index); } if (int_stat & NES_INT_TIMER) { if (timer_stat & nesdev->timer_int_req) { nes_write32(nesdev->regs + NES_TIMER_STAT, (timer_stat & nesdev->timer_int_req) | ~(nesdev->nesadapter->timer_int_req)); timer_ints = 1; } } if (int_stat & NES_INT_INTF) { processed_intf_int = 1; intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT); intf_int_stat &= nesdev->intf_int_req; if (NES_INTF_INT_CRITERR & intf_int_stat) { process_critical_error(nesdev); } if (NES_INTF_INT_PCIERR & intf_int_stat) { printk(KERN_ERR PFX "PCI Error reported by device!!!\n"); BUG(); } if (NES_INTF_INT_AEQ_OFLOW & intf_int_stat) { printk(KERN_ERR PFX "AEQ Overflow reported by device!!!\n"); BUG(); } nes_write32(nesdev->regs+NES_INTF_INT_STAT, intf_int_stat); } if (int_stat & NES_INT_TSW) { } } /* Don't use the interface interrupt bit stay in loop */ int_stat &= ~NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0 | NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3; } while ((int_stat != 0) && (loop_counter++ < MAX_DPC_ITERATIONS)); if (timer_ints == 1) { if ((nesadapter->et_rx_coalesce_usecs_irq) || (nesadapter->et_use_adaptive_rx_coalesce)) { if (completion_ints == 0) { nesdev->timer_only_int_count++; if (nesdev->timer_only_int_count>=nesadapter->timer_int_limit) { nesdev->timer_only_int_count = 0; nesdev->int_req &= ~NES_INT_TIMER; nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); nes_write32(nesdev->regs + NES_INT_MASK, ~nesdev->int_req); } else { nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req)); } } else { if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) { nes_nic_init_timer(nesdev); } nesdev->timer_only_int_count = 0; nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req)); } } else { nesdev->timer_only_int_count = 0; nesdev->int_req &= ~NES_INT_TIMER; nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); nes_write32(nesdev->regs+NES_TIMER_STAT, nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req)); nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); } } else { if ( (completion_ints == 1) && (((nesadapter->et_rx_coalesce_usecs_irq) && (!nesadapter->et_use_adaptive_rx_coalesce)) || ((nesdev->deepcq_count > nesadapter->et_pkt_rate_low) && (nesadapter->et_use_adaptive_rx_coalesce) )) ) { /* nes_debug(NES_DBG_ISR, "Enabling periodic timer interrupt.\n" ); */ nesdev->timer_only_int_count = 0; nesdev->int_req |= NES_INT_TIMER; nes_write32(nesdev->regs+NES_TIMER_STAT, nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req)); nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req | NES_INTF_PERIODIC_TIMER)); nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req)); } else { nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); } } nesdev->deepcq_count = 0; } /** * nes_process_ceq */ static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq) { u64 u64temp; struct nes_hw_cq *cq; u32 head; u32 ceq_size; /* nes_debug(NES_DBG_CQ, "\n"); */ head = ceq->ceq_head; ceq_size = ceq->ceq_size; do { if (le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX]) & NES_CEQE_VALID) { u64temp = 
(((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX]))) << 32) | ((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_LOW_IDX]))); u64temp <<= 1; cq = *((struct nes_hw_cq **)&u64temp); /* nes_debug(NES_DBG_CQ, "pCQ = %p\n", cq); */ barrier(); ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX] = 0; /* call the event handler */ cq->ce_handler(nesdev, cq); if (++head >= ceq_size) head = 0; } else { break; } } while (1); ceq->ceq_head = head; } /** * nes_process_aeq */ static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq) { /* u64 u64temp; */ u32 head; u32 aeq_size; u32 aeqe_misc; u32 aeqe_cq_id; struct nes_hw_aeqe volatile *aeqe; head = aeq->aeq_head; aeq_size = aeq->aeq_size; do { aeqe = &aeq->aeq_vbase[head]; if ((le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]) & NES_AEQE_VALID) == 0) break; aeqe_misc = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]); if (aeqe_misc & (NES_AEQE_QP|NES_AEQE_CQ)) { if (aeqe_cq_id >= NES_FIRST_QPN) { /* dealing with an accelerated QP related AE */ /* * u64temp = (((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX]))) << 32) | * ((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]))); */ nes_process_iwarp_aeqe(nesdev, (struct nes_hw_aeqe *)aeqe); } else { /* TODO: dealing with a CQP related AE */ nes_debug(NES_DBG_AEQ, "Processing CQP related AE, misc = 0x%04X\n", (u16)(aeqe_misc >> 16)); } } aeqe->aeqe_words[NES_AEQE_MISC_IDX] = 0; if (++head >= aeq_size) head = 0; nes_write32(nesdev->regs + NES_AEQ_ALLOC, 1 << 16); } while (1); aeq->aeq_head = head; } static void nes_reset_link(struct nes_device *nesdev, u32 mac_index) { struct nes_adapter *nesadapter = nesdev->nesadapter; u32 reset_value; u32 i=0; u32 u32temp; if (nesadapter->hw_rev == NE020_REV) { return; } mh_detected++; reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); if ((mac_index == 0) || ((mac_index == 1) && (nesadapter->OneG_Mode))) reset_value |= 0x0000001d; else reset_value |= 0x0000002d; if (4 <= (nesadapter->link_interrupt_count[mac_index] / ((u16)NES_MAX_LINK_INTERRUPTS))) { if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) { nesadapter->link_interrupt_count[0] = 0; nesadapter->link_interrupt_count[1] = 0; u32temp = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1); if (0x00000040 & u32temp) nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088); else nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8); reset_value |= 0x0000003d; } nesadapter->link_interrupt_count[mac_index] = 0; } nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value); while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) && (i++ < 5000)); if (0x0000003d == (reset_value & 0x0000003d)) { u32 pcs_control_status0, pcs_control_status1; for (i = 0; i < 10; i++) { pcs_control_status0 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0); pcs_control_status1 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200); if (((0x0F000000 == (pcs_control_status0 & 0x0F000000)) && (pcs_control_status0 & 0x00100000)) || ((0x0F000000 == (pcs_control_status1 & 0x0F000000)) && (pcs_control_status1 & 0x00100000))) continue; else break; } if (10 == i) { u32temp = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1); if (0x00000040 & u32temp) nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088); else nes_write_indexed(nesdev, 
NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8); nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value); while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) && (i++ < 5000)); } } } /** * nes_process_mac_intr */ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) { unsigned long flags; u32 pcs_control_status; struct nes_adapter *nesadapter = nesdev->nesadapter; struct nes_vnic *nesvnic; u32 mac_status; u32 mac_index = nesdev->mac_index; u32 u32temp; u16 phy_data; u16 temp_phy_data; u32 pcs_val = 0x0f0f0000; u32 pcs_mask = 0x0f1f0000; u32 cdr_ctrl; spin_lock_irqsave(&nesadapter->phy_lock, flags); if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) { spin_unlock_irqrestore(&nesadapter->phy_lock, flags); return; } nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT; /* ack the MAC interrupt */ mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200)); /* Clear the interrupt */ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200), mac_status); nes_debug(NES_DBG_PHY, "MAC%u interrupt status = 0x%X.\n", mac_number, mac_status); if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) { nesdev->link_status_interrupts++; if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) nes_reset_link(nesdev, mac_index); /* read the PHY interrupt status register */ if ((nesadapter->OneG_Mode) && (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) { do { nes_read_1G_phy_reg(nesdev, 0x1a, nesadapter->phy_index[mac_index], &phy_data); nes_debug(NES_DBG_PHY, "Phy%d data from register 0x1a = 0x%X.\n", nesadapter->phy_index[mac_index], phy_data); } while (phy_data&0x8000); temp_phy_data = 0; do { nes_read_1G_phy_reg(nesdev, 0x11, nesadapter->phy_index[mac_index], &phy_data); nes_debug(NES_DBG_PHY, "Phy%d data from register 0x11 = 0x%X.\n", nesadapter->phy_index[mac_index], phy_data); if (temp_phy_data == phy_data) break; temp_phy_data = phy_data; } while (1); nes_read_1G_phy_reg(nesdev, 0x1e, nesadapter->phy_index[mac_index], &phy_data); nes_debug(NES_DBG_PHY, "Phy%d data from register 0x1e = 0x%X.\n", nesadapter->phy_index[mac_index], phy_data); nes_read_1G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index], &phy_data); nes_debug(NES_DBG_PHY, "1G phy%u data from register 1 = 0x%X\n", nesadapter->phy_index[mac_index], phy_data); if (temp_phy_data & 0x1000) { nes_debug(NES_DBG_PHY, "The Link is up according to the PHY\n"); phy_data = 4; } else { nes_debug(NES_DBG_PHY, "The Link is down according to the PHY\n"); } } nes_debug(NES_DBG_PHY, "Eth SERDES Common Status: 0=0x%08X, 1=0x%08X\n", nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0), nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0+0x200)); if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_PUMA_1G) { switch (mac_index) { case 1: case 3: pcs_control_status = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200); break; default: pcs_control_status = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0); break; } } else { pcs_control_status = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index & 1) * 0x200)); pcs_control_status = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index & 1) * 0x200)); } nes_debug(NES_DBG_PHY, "PCS PHY Control/Status%u: 0x%08X\n", mac_index, pcs_control_status); if ((nesadapter->OneG_Mode) && (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) { u32temp = 0x01010000; if 
(nesadapter->port_count > 2) { u32temp |= 0x02020000; } if ((pcs_control_status & u32temp)!= u32temp) { phy_data = 0; nes_debug(NES_DBG_PHY, "PCS says the link is down\n"); } } else { switch (nesadapter->phy_type[mac_index]) { case NES_PHY_TYPE_ARGUS: case NES_PHY_TYPE_SFP_D: case NES_PHY_TYPE_KR: /* clear the alarms */ nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc002); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc005); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc006); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9004); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9005); /* check link status */ nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003); temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021); nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021); phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0; nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n", __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP"); break; case NES_PHY_TYPE_PUMA_1G: if (mac_index < 2) pcs_val = pcs_mask = 0x01010000; else pcs_val = pcs_mask = 0x02020000; /* fall through */ default: phy_data = (pcs_val == (pcs_control_status & pcs_mask)) ? 0x4 : 0x0; break; } } if (phy_data & 0x0004) { if (wide_ppm_offset && (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_CX4) && (nesadapter->hw_rev != NE020_REV)) { cdr_ctrl = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0 + mac_index * 0x200); nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0 + mac_index * 0x200, cdr_ctrl | 0x000F0000); } nesadapter->mac_link_down[mac_index] = 0; list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { nes_debug(NES_DBG_PHY, "The Link is UP!!. linkup was %d\n", nesvnic->linkup); if (nesvnic->linkup == 0) { printk(PFX "The Link is now up for port %s, netdev %p.\n", nesvnic->netdev->name, nesvnic->netdev); if (netif_queue_stopped(nesvnic->netdev)) netif_start_queue(nesvnic->netdev); nesvnic->linkup = 1; netif_carrier_on(nesvnic->netdev); spin_lock(&nesvnic->port_ibevent_lock); if (nesvnic->of_device_registered) { if (nesdev->iw_status == 0) { nesdev->iw_status = 1; nes_port_ibevent(nesvnic); } } spin_unlock(&nesvnic->port_ibevent_lock); } } } else { if (wide_ppm_offset && (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_CX4) && (nesadapter->hw_rev != NE020_REV)) { cdr_ctrl = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0 + mac_index * 0x200); nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0 + mac_index * 0x200, cdr_ctrl & 0xFFF0FFFF); } nesadapter->mac_link_down[mac_index] = 1; list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { nes_debug(NES_DBG_PHY, "The Link is Down!!. 
linkup was %d\n", nesvnic->linkup); if (nesvnic->linkup == 1) { printk(PFX "The Link is now down for port %s, netdev %p.\n", nesvnic->netdev->name, nesvnic->netdev); if (!(netif_queue_stopped(nesvnic->netdev))) netif_stop_queue(nesvnic->netdev); nesvnic->linkup = 0; netif_carrier_off(nesvnic->netdev); spin_lock(&nesvnic->port_ibevent_lock); if (nesvnic->of_device_registered) { if (nesdev->iw_status == 1) { nesdev->iw_status = 0; nes_port_ibevent(nesvnic); } } spin_unlock(&nesvnic->port_ibevent_lock); } } } if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) { if (nesdev->link_recheck) cancel_delayed_work(&nesdev->work); nesdev->link_recheck = 1; schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY); } } spin_unlock_irqrestore(&nesadapter->phy_lock, flags); nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE; } void nes_recheck_link_status(struct work_struct *work) { unsigned long flags; struct nes_device *nesdev = container_of(work, struct nes_device, work.work); struct nes_adapter *nesadapter = nesdev->nesadapter; struct nes_vnic *nesvnic; u32 mac_index = nesdev->mac_index; u16 phy_data; u16 temp_phy_data; spin_lock_irqsave(&nesadapter->phy_lock, flags); /* check link status */ nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003); temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021); nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021); phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0; nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n", __func__, phy_data, nesadapter->mac_link_down[mac_index] ? 
"DOWN" : "UP"); if (phy_data & 0x0004) { nesadapter->mac_link_down[mac_index] = 0; list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { if (nesvnic->linkup == 0) { printk(PFX "The Link is now up for port %s, netdev %p.\n", nesvnic->netdev->name, nesvnic->netdev); if (netif_queue_stopped(nesvnic->netdev)) netif_start_queue(nesvnic->netdev); nesvnic->linkup = 1; netif_carrier_on(nesvnic->netdev); spin_lock(&nesvnic->port_ibevent_lock); if (nesvnic->of_device_registered) { if (nesdev->iw_status == 0) { nesdev->iw_status = 1; nes_port_ibevent(nesvnic); } } spin_unlock(&nesvnic->port_ibevent_lock); } } } else { nesadapter->mac_link_down[mac_index] = 1; list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { if (nesvnic->linkup == 1) { printk(PFX "The Link is now down for port %s, netdev %p.\n", nesvnic->netdev->name, nesvnic->netdev); if (!(netif_queue_stopped(nesvnic->netdev))) netif_stop_queue(nesvnic->netdev); nesvnic->linkup = 0; netif_carrier_off(nesvnic->netdev); spin_lock(&nesvnic->port_ibevent_lock); if (nesvnic->of_device_registered) { if (nesdev->iw_status == 1) { nesdev->iw_status = 0; nes_port_ibevent(nesvnic); } } spin_unlock(&nesvnic->port_ibevent_lock); } } } if (nesdev->link_recheck++ < NES_LINK_RECHECK_MAX) schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY); else nesdev->link_recheck = 0; spin_unlock_irqrestore(&nesadapter->phy_lock, flags); } static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) { struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq); napi_schedule(&nesvnic->napi); } /* The MAX_RQES_TO_PROCESS defines how many max read requests to complete before * getting out of nic_ce_handler */ #define MAX_RQES_TO_PROCESS 384 /** * nes_nic_ce_handler */ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) { u64 u64temp; dma_addr_t bus_address; struct nes_hw_nic *nesnic; struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq); struct nes_adapter *nesadapter = nesdev->nesadapter; struct nes_hw_nic_rq_wqe *nic_rqe; struct nes_hw_nic_sq_wqe *nic_sqe; struct sk_buff *skb; struct sk_buff *rx_skb; struct nes_rskb_cb *cb; __le16 *wqe_fragment_length; u32 head; u32 cq_size; u32 rx_pkt_size; u32 cqe_count=0; u32 cqe_errv; u32 cqe_misc; u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */ u16 vlan_tag; u16 pkt_type; u16 rqes_processed = 0; u8 sq_cqes = 0; u8 nes_use_lro = 0; head = cq->cq_head; cq_size = cq->cq_size; cq->cqes_pending = 1; if (nesvnic->netdev->features & NETIF_F_LRO) nes_use_lro = 1; do { if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) & NES_NIC_CQE_VALID) { nesnic = &nesvnic->nic; cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]); if (cqe_misc & NES_NIC_CQE_SQ) { sq_cqes++; wqe_fragment_index = 1; nic_sqe = &nesnic->sq_vbase[nesnic->sq_tail]; skb = nesnic->tx_skb[nesnic->sq_tail]; wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX]; /* bump past the vlan tag */ wqe_fragment_length++; if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) { u64temp = (u64) le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX + wqe_fragment_index * 2]); u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX + wqe_fragment_index * 2])) << 32; bus_address = (dma_addr_t)u64temp; if (test_and_clear_bit(nesnic->sq_tail, nesnic->first_frag_overflow)) { pci_unmap_single(nesdev->pcidev, bus_address, 
le16_to_cpu(wqe_fragment_length[wqe_fragment_index++]), PCI_DMA_TODEVICE); } for (; wqe_fragment_index < 5; wqe_fragment_index++) { if (wqe_fragment_length[wqe_fragment_index]) { u64temp = le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX + wqe_fragment_index * 2]); u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX + wqe_fragment_index * 2])) <<32; bus_address = (dma_addr_t)u64temp; pci_unmap_page(nesdev->pcidev, bus_address, le16_to_cpu(wqe_fragment_length[wqe_fragment_index]), PCI_DMA_TODEVICE); } else break; } } if (skb) dev_kfree_skb_any(skb); nesnic->sq_tail++; nesnic->sq_tail &= nesnic->sq_size-1; if (sq_cqes > 128) { barrier(); /* restart the queue if it had been stopped */ if (netif_queue_stopped(nesvnic->netdev)) netif_wake_queue(nesvnic->netdev); sq_cqes = 0; } } else { rqes_processed ++; cq->rx_cqes_completed++; cq->rx_pkts_indicated++; rx_pkt_size = cqe_misc & 0x0000ffff; nic_rqe = &nesnic->rq_vbase[nesnic->rq_tail]; /* Get the skb */ rx_skb = nesnic->rx_skb[nesnic->rq_tail]; nic_rqe = &nesnic->rq_vbase[nesvnic->nic.rq_tail]; bus_address = (dma_addr_t)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]); bus_address += ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32; pci_unmap_single(nesdev->pcidev, bus_address, nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); cb = (struct nes_rskb_cb *)&rx_skb->cb[0]; cb->busaddr = 0; /* rx_skb->tail = rx_skb->data + rx_pkt_size; */ /* rx_skb->len = rx_pkt_size; */ rx_skb->len = 0; /* TODO: see if this is necessary */ skb_put(rx_skb, rx_pkt_size); rx_skb->protocol = eth_type_trans(rx_skb, nesvnic->netdev); nesnic->rq_tail++; nesnic->rq_tail &= nesnic->rq_size - 1; atomic_inc(&nesvnic->rx_skbs_needed); if (atomic_read(&nesvnic->rx_skbs_needed) > (nesvnic->nic.rq_size>>1)) { nes_write32(nesdev->regs+NES_CQE_ALLOC, cq->cq_number | (cqe_count << 16)); /* nesadapter->tune_timer.cq_count += cqe_count; */ nesdev->currcq_count += cqe_count; cqe_count = 0; nes_replenish_nic_rq(nesvnic); } pkt_type = (u16)(le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX])); cqe_errv = (cqe_misc & NES_NIC_CQE_ERRV_MASK) >> NES_NIC_CQE_ERRV_SHIFT; rx_skb->ip_summed = CHECKSUM_NONE; if ((NES_PKT_TYPE_TCPV4_BITS == (pkt_type & NES_PKT_TYPE_TCPV4_MASK)) || (NES_PKT_TYPE_UDPV4_BITS == (pkt_type & NES_PKT_TYPE_UDPV4_MASK))) { if ((cqe_errv & (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR | NES_NIC_ERRV_BITS_IPH_ERR | NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) { if (nesvnic->netdev->features & NETIF_F_RXCSUM) rx_skb->ip_summed = CHECKSUM_UNNECESSARY; } else nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed TCP or UDP packet." " errv = 0x%X, pkt_type = 0x%X.\n", nesvnic->netdev->name, cqe_errv, pkt_type); } else if ((pkt_type & NES_PKT_TYPE_IPV4_MASK) == NES_PKT_TYPE_IPV4_BITS) { if ((cqe_errv & (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_IPH_ERR | NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) { if (nesvnic->netdev->features & NETIF_F_RXCSUM) { rx_skb->ip_summed = CHECKSUM_UNNECESSARY; /* nes_debug(NES_DBG_CQ, "%s: Reporting successfully checksummed IPv4 packet.\n", nesvnic->netdev->name); */ } } else nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed TCP or UDP packet." 
" errv = 0x%X, pkt_type = 0x%X.\n", nesvnic->netdev->name, cqe_errv, pkt_type); } /* nes_debug(NES_DBG_CQ, "pkt_type=%x, APBVT_MASK=%x\n", pkt_type, (pkt_type & NES_PKT_TYPE_APBVT_MASK)); */ if ((pkt_type & NES_PKT_TYPE_APBVT_MASK) == NES_PKT_TYPE_APBVT_BITS) { if (nes_cm_recv(rx_skb, nesvnic->netdev)) rx_skb = NULL; } if (rx_skb == NULL) goto skip_rx_indicate0; if (cqe_misc & NES_NIC_CQE_TAG_VALID) { vlan_tag = (u16)(le32_to_cpu( cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX]) >> 16); nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n", nesvnic->netdev->name, vlan_tag); __vlan_hwaccel_put_tag(rx_skb, vlan_tag); } if (nes_use_lro) lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL); else netif_receive_skb(rx_skb); skip_rx_indicate0: ; /* nesvnic->netstats.rx_packets++; */ /* nesvnic->netstats.rx_bytes += rx_pkt_size; */ } cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0; /* Accounting... */ cqe_count++; if (++head >= cq_size) head = 0; if (cqe_count == 255) { /* Replenish Nic CQ */ nes_write32(nesdev->regs+NES_CQE_ALLOC, cq->cq_number | (cqe_count << 16)); /* nesdev->nesadapter->tune_timer.cq_count += cqe_count; */ nesdev->currcq_count += cqe_count; cqe_count = 0; } if (cq->rx_cqes_completed >= nesvnic->budget) break; } else { cq->cqes_pending = 0; break; } } while (1); if (nes_use_lro) lro_flush_all(&nesvnic->lro_mgr); if (sq_cqes) { barrier(); /* restart the queue if it had been stopped */ if (netif_queue_stopped(nesvnic->netdev)) netif_wake_queue(nesvnic->netdev); } cq->cq_head = head; /* nes_debug(NES_DBG_CQ, "CQ%u Processed = %u cqes, new head = %u.\n", cq->cq_number, cqe_count, cq->cq_head); */ cq->cqe_allocs_pending = cqe_count; if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) { /* nesdev->nesadapter->tune_timer.cq_count += cqe_count; */ nesdev->currcq_count += cqe_count; nes_nic_tune_timer(nesdev); } if (atomic_read(&nesvnic->rx_skbs_needed)) nes_replenish_nic_rq(nesvnic); } /** * nes_cqp_ce_handler */ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq) { u64 u64temp; unsigned long flags; struct nes_hw_cqp *cqp = NULL; struct nes_cqp_request *cqp_request; struct nes_hw_cqp_wqe *cqp_wqe; u32 head; u32 cq_size; u32 cqe_count=0; u32 error_code; u32 opcode; u32 ctx_index; /* u32 counter; */ head = cq->cq_head; cq_size = cq->cq_size; do { /* process the CQE */ /* nes_debug(NES_DBG_CQP, "head=%u cqe_words=%08X\n", head, le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])); */ opcode = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]); if (opcode & NES_CQE_VALID) { cqp = &nesdev->cqp; error_code = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX]); if (error_code) { nes_debug(NES_DBG_CQP, "Bad Completion code for opcode 0x%02X from CQP," " Major/Minor codes = 0x%04X:%04X.\n", le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])&0x3f, (u16)(error_code >> 16), (u16)error_code); } u64temp = (((u64)(le32_to_cpu(cq->cq_vbase[head]. cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]))) << 32) | ((u64)(le32_to_cpu(cq->cq_vbase[head]. 
cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]))); cqp_request = (struct nes_cqp_request *)(unsigned long)u64temp; if (cqp_request) { if (cqp_request->waiting) { /* nes_debug(NES_DBG_CQP, "%s: Waking up requestor\n"); */ cqp_request->major_code = (u16)(error_code >> 16); cqp_request->minor_code = (u16)error_code; barrier(); cqp_request->request_done = 1; wake_up(&cqp_request->waitq); nes_put_cqp_request(nesdev, cqp_request); } else { if (cqp_request->callback) cqp_request->cqp_callback(nesdev, cqp_request); nes_free_cqp_request(nesdev, cqp_request); } } else { wake_up(&nesdev->cqp.waitq); } cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; nes_write32(nesdev->regs + NES_CQE_ALLOC, cq->cq_number | (1 << 16)); if (++cqp->sq_tail >= cqp->sq_size) cqp->sq_tail = 0; /* Accounting... */ cqe_count++; if (++head >= cq_size) head = 0; } else { break; } } while (1); cq->cq_head = head; spin_lock_irqsave(&nesdev->cqp.lock, flags); while ((!list_empty(&nesdev->cqp_pending_reqs)) && ((((nesdev->cqp.sq_tail+nesdev->cqp.sq_size)-nesdev->cqp.sq_head) & (nesdev->cqp.sq_size - 1)) != 1)) { cqp_request = list_entry(nesdev->cqp_pending_reqs.next, struct nes_cqp_request, list); list_del_init(&cqp_request->list); head = nesdev->cqp.sq_head++; nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; cqp_wqe = &nesdev->cqp.sq_vbase[head]; memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe)); barrier(); opcode = cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX]; if ((opcode & NES_CQP_OPCODE_MASK) == NES_CQP_DOWNLOAD_SEGMENT) ctx_index = NES_CQP_WQE_DL_COMP_CTX_LOW_IDX; else ctx_index = NES_CQP_WQE_COMP_CTX_LOW_IDX; cqp_wqe->wqe_words[ctx_index] = cpu_to_le32((u32)((unsigned long)cqp_request)); cqp_wqe->wqe_words[ctx_index + 1] = cpu_to_le32((u32)(upper_32_bits((unsigned long)cqp_request))); nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) put on CQPs SQ wqe%u.\n", cqp_request, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, head); /* Ring doorbell (1 WQEs) */ barrier(); nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id); } spin_unlock_irqrestore(&nesdev->cqp.lock, flags); /* Arm the CCQ */ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | cq->cq_number); nes_read32(nesdev->regs+NES_CQE_ALLOC); } static u8 *locate_mpa(u8 *pkt, u32 aeq_info) { if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) { /* skip over ethernet header */ pkt += ETH_HLEN; /* Skip over IP and TCP headers */ pkt += 4 * (pkt[0] & 0x0f); pkt += 4 * ((pkt[12] >> 4) & 0x0f); } return pkt; } /* Determine if incoming error pkt is rdma layer */ static u32 iwarp_opcode(struct nes_qp *nesqp, u32 aeq_info) { u8 *pkt; u16 *mpa; u32 opcode = 0xffffffff; if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) { pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET; mpa = (u16 *)locate_mpa(pkt, aeq_info); opcode = be16_to_cpu(mpa[1]) & 0xf; } return opcode; } /* Build iWARP terminate header */ static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 aeq_info) { u8 *pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET; u16 ddp_seg_len; int copy_len = 0; u8 is_tagged = 0; u8 flush_code = 0; struct nes_terminate_hdr *termhdr; termhdr = (struct nes_terminate_hdr *)nesqp->hwqp.q2_vbase; memset(termhdr, 0, 64); if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) { /* Use data from offending packet to fill in ddp & rdma hdrs */ pkt = locate_mpa(pkt, aeq_info); ddp_seg_len = be16_to_cpu(*(u16 *)pkt); if (ddp_seg_len) { copy_len = 2; termhdr->hdrct = DDP_LEN_FLAG; if (pkt[2] & 0x80) { is_tagged = 1; if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) { copy_len += 
TERM_DDP_LEN_TAGGED; termhdr->hdrct |= DDP_HDR_FLAG; } } else { if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) { copy_len += TERM_DDP_LEN_UNTAGGED; termhdr->hdrct |= DDP_HDR_FLAG; } if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) { if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) { copy_len += TERM_RDMA_LEN; termhdr->hdrct |= RDMA_HDR_FLAG; } } } } } switch (async_event_id) { case NES_AEQE_AEID_AMP_UNALLOCATED_STAG: switch (iwarp_opcode(nesqp, aeq_info)) { case IWARP_OPCODE_WRITE: flush_code = IB_WC_LOC_PROT_ERR; termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER; termhdr->error_code = DDP_TAGGED_INV_STAG; break; default: flush_code = IB_WC_REM_ACCESS_ERR; termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; termhdr->error_code = RDMAP_INV_STAG; } break; case NES_AEQE_AEID_AMP_INVALID_STAG: flush_code = IB_WC_REM_ACCESS_ERR; termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; termhdr->error_code = RDMAP_INV_STAG; break; case NES_AEQE_AEID_AMP_BAD_QP: flush_code = IB_WC_LOC_QP_OP_ERR; termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; termhdr->error_code = DDP_UNTAGGED_INV_QN; break; case NES_AEQE_AEID_AMP_BAD_STAG_KEY: case NES_AEQE_AEID_AMP_BAD_STAG_INDEX: switch (iwarp_opcode(nesqp, aeq_info)) { case IWARP_OPCODE_SEND_INV: case IWARP_OPCODE_SEND_SE_INV: flush_code = IB_WC_REM_OP_ERR; termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP; termhdr->error_code = RDMAP_CANT_INV_STAG; break; default: flush_code = IB_WC_REM_ACCESS_ERR; termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; termhdr->error_code = RDMAP_INV_STAG; } break; case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION: if (aeq_info & (NES_AEQE_Q2_DATA_ETHERNET | NES_AEQE_Q2_DATA_MPA)) { flush_code = IB_WC_LOC_PROT_ERR; termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER; termhdr->error_code = DDP_TAGGED_BOUNDS; } else { flush_code = IB_WC_REM_ACCESS_ERR; termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; termhdr->error_code = RDMAP_INV_BOUNDS; } break; case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION: case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: case NES_AEQE_AEID_PRIV_OPERATION_DENIED: flush_code = IB_WC_REM_ACCESS_ERR; termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; termhdr->error_code = RDMAP_ACCESS; break; case NES_AEQE_AEID_AMP_TO_WRAP: flush_code = IB_WC_REM_ACCESS_ERR; termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; termhdr->error_code = RDMAP_TO_WRAP; break; case NES_AEQE_AEID_AMP_BAD_PD: switch (iwarp_opcode(nesqp, aeq_info)) { case IWARP_OPCODE_WRITE: flush_code = IB_WC_LOC_PROT_ERR; termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER; termhdr->error_code = DDP_TAGGED_UNASSOC_STAG; break; case IWARP_OPCODE_SEND_INV: case IWARP_OPCODE_SEND_SE_INV: flush_code = IB_WC_REM_ACCESS_ERR; termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; termhdr->error_code = RDMAP_CANT_INV_STAG; break; default: flush_code = IB_WC_REM_ACCESS_ERR; termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; termhdr->error_code = RDMAP_UNASSOC_STAG; } break; case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH: flush_code = IB_WC_LOC_LEN_ERR; termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP; termhdr->error_code = MPA_MARKER; break; case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR: flush_code = IB_WC_GENERAL_ERR; termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP; termhdr->error_code = MPA_CRC; break; case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE: case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL: flush_code = 
IB_WC_LOC_LEN_ERR; termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC; termhdr->error_code = DDP_CATASTROPHIC_LOCAL; break; case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC: case NES_AEQE_AEID_DDP_NO_L_BIT: flush_code = IB_WC_FATAL_ERR; termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC; termhdr->error_code = DDP_CATASTROPHIC_LOCAL; break; case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN: case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID: flush_code = IB_WC_GENERAL_ERR; termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE; break; case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: flush_code = IB_WC_LOC_LEN_ERR; termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; termhdr->error_code = DDP_UNTAGGED_INV_TOO_LONG; break; case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION: flush_code = IB_WC_GENERAL_ERR; if (is_tagged) { termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER; termhdr->error_code = DDP_TAGGED_INV_DDP_VER; } else { termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; termhdr->error_code = DDP_UNTAGGED_INV_DDP_VER; } break; case NES_AEQE_AEID_DDP_UBE_INVALID_MO: flush_code = IB_WC_GENERAL_ERR; termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; termhdr->error_code = DDP_UNTAGGED_INV_MO; break; case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: flush_code = IB_WC_REM_OP_ERR; termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; termhdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF; break; case NES_AEQE_AEID_DDP_UBE_INVALID_QN: flush_code = IB_WC_GENERAL_ERR; termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; termhdr->error_code = DDP_UNTAGGED_INV_QN; break; case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION: flush_code = IB_WC_GENERAL_ERR; termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP; termhdr->error_code = RDMAP_INV_RDMAP_VER; break; case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE: flush_code = IB_WC_LOC_QP_OP_ERR; termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP; termhdr->error_code = RDMAP_UNEXPECTED_OP; break; default: flush_code = IB_WC_FATAL_ERR; termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP; termhdr->error_code = RDMAP_UNSPECIFIED; break; } if (copy_len) memcpy(termhdr + 1, pkt, copy_len); if ((flush_code) && ((NES_AEQE_INBOUND_RDMA & aeq_info) == 0)) { if (aeq_info & NES_AEQE_SQ) nesqp->term_sq_flush_code = flush_code; else nesqp->term_rq_flush_code = flush_code; } return sizeof(struct nes_terminate_hdr) + copy_len; } static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe, enum ib_event_type eventtype) { u64 context; unsigned long flags; u32 aeq_info; u16 async_event_id; u8 tcp_state; u8 iwarp_state; u32 termlen = 0; u32 mod_qp_flags = NES_CQP_QP_IWARP_STATE_TERMINATE | NES_CQP_QP_TERM_DONT_SEND_FIN; struct nes_adapter *nesadapter = nesdev->nesadapter; if (nesqp->term_flags & NES_TERM_SENT) return; /* Sanity check */ aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; async_event_id = (u16)aeq_info; context = (unsigned long)nesadapter->qp_table[le32_to_cpu( aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN]; if (!context) { WARN_ON(!context); return; } nesqp = (struct nes_qp *)(unsigned long)context; spin_lock_irqsave(&nesqp->lock, flags); 
nesqp->hw_iwarp_state = iwarp_state; nesqp->hw_tcp_state = tcp_state; nesqp->last_aeq = async_event_id; nesqp->terminate_eventtype = eventtype; spin_unlock_irqrestore(&nesqp->lock, flags); if (nesadapter->send_term_ok) termlen = nes_bld_terminate_hdr(nesqp, async_event_id, aeq_info); else mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG; if (!nesdev->iw_status) { nesqp->term_flags = NES_TERM_DONE; nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_ERROR, 0, 0); nes_cm_disconn(nesqp); } else { nes_terminate_start_timer(nesqp); nesqp->term_flags |= NES_TERM_SENT; nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0); } } static void nes_terminate_send_fin(struct nes_device *nesdev, struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe) { u32 aeq_info; u16 async_event_id; u8 tcp_state; u8 iwarp_state; unsigned long flags; aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; async_event_id = (u16)aeq_info; spin_lock_irqsave(&nesqp->lock, flags); nesqp->hw_iwarp_state = iwarp_state; nesqp->hw_tcp_state = tcp_state; nesqp->last_aeq = async_event_id; spin_unlock_irqrestore(&nesqp->lock, flags); /* Send the fin only */ nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_TERMINATE | NES_CQP_QP_TERM_DONT_SEND_TERM_MSG, 0, 0); } /* Cleanup after a terminate sent or received */ static void nes_terminate_done(struct nes_qp *nesqp, int timeout_occurred) { u32 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR; unsigned long flags; struct nes_vnic *nesvnic = to_nesvnic(nesqp->ibqp.device); struct nes_device *nesdev = nesvnic->nesdev; u8 first_time = 0; spin_lock_irqsave(&nesqp->lock, flags); if (nesqp->hte_added) { nesqp->hte_added = 0; next_iwarp_state |= NES_CQP_QP_DEL_HTE; } first_time = (nesqp->term_flags & NES_TERM_DONE) == 0; nesqp->term_flags |= NES_TERM_DONE; spin_unlock_irqrestore(&nesqp->lock, flags); /* Make sure we go through this only once */ if (first_time) { if (timeout_occurred == 0) del_timer(&nesqp->terminate_timer); else next_iwarp_state |= NES_CQP_QP_RESET; nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); nes_cm_disconn(nesqp); } } static void nes_terminate_received(struct nes_device *nesdev, struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe) { u32 aeq_info; u8 *pkt; u32 *mpa; u8 ddp_ctl; u8 rdma_ctl; u16 aeq_id = 0; aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) { /* Terminate is not a performance path so the silicon */ /* did not validate the frame - do it now */ pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET; mpa = (u32 *)locate_mpa(pkt, aeq_info); ddp_ctl = (be32_to_cpu(mpa[0]) >> 8) & 0xff; rdma_ctl = be32_to_cpu(mpa[0]) & 0xff; if ((ddp_ctl & 0xc0) != 0x40) aeq_id = NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC; else if ((ddp_ctl & 0x03) != 1) aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION; else if (be32_to_cpu(mpa[2]) != 2) aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_QN; else if (be32_to_cpu(mpa[3]) != 1) aeq_id = NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN; else if (be32_to_cpu(mpa[4]) != 0) aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_MO; else if ((rdma_ctl & 0xc0) != 0x40) aeq_id = NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION; if (aeq_id) { /* Bad terminate recvd - send back a terminate */ aeq_info = (aeq_info & 0xffff0000) | aeq_id; aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info); nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); 
return; } } nesqp->term_flags |= NES_TERM_RCVD; nesqp->terminate_eventtype = IB_EVENT_QP_FATAL; nes_terminate_start_timer(nesqp); nes_terminate_send_fin(nesdev, nesqp, aeqe); } /* Timeout routine in case terminate fails to complete */ static void nes_terminate_timeout(unsigned long context) { struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context; nes_terminate_done(nesqp, 1); } /* Set a timer in case hw cannot complete the terminate sequence */ static void nes_terminate_start_timer(struct nes_qp *nesqp) { init_timer(&nesqp->terminate_timer); nesqp->terminate_timer.function = nes_terminate_timeout; nesqp->terminate_timer.expires = jiffies + HZ; nesqp->terminate_timer.data = (unsigned long)nesqp; add_timer(&nesqp->terminate_timer); } /** * nes_process_iwarp_aeqe */ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe) { u64 context; unsigned long flags; struct nes_qp *nesqp; struct nes_hw_cq *hw_cq; struct nes_cq *nescq; int resource_allocated; struct nes_adapter *nesadapter = nesdev->nesadapter; u32 aeq_info; u32 next_iwarp_state = 0; u32 aeqe_cq_id; u16 async_event_id; u8 tcp_state; u8 iwarp_state; struct ib_event ibevent; nes_debug(NES_DBG_AEQ, "\n"); aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); if ((NES_AEQE_INBOUND_RDMA & aeq_info) || (!(NES_AEQE_QP & aeq_info))) { context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]); context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32; } else { context = (unsigned long)nesadapter->qp_table[le32_to_cpu( aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN]; BUG_ON(!context); } /* context is nesqp unless async_event_id == CQ ERROR */ nesqp = (struct nes_qp *)(unsigned long)context; async_event_id = (u16)aeq_info; tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p," " Tcp state = %s, iWARP state = %s\n", async_event_id, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe, nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]); aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]); if (aeq_info & NES_AEQE_QP) { if (!nes_is_resource_allocated(nesadapter, nesadapter->allocated_qps, aeqe_cq_id)) return; } switch (async_event_id) { case NES_AEQE_AEID_LLP_FIN_RECEIVED: if (nesqp->term_flags) return; /* Ignore it, wait for close complete */ if (atomic_inc_return(&nesqp->close_timer_started) == 1) { if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) && (nesqp->ibqp_state == IB_QPS_RTS)) { spin_lock_irqsave(&nesqp->lock, flags); nesqp->hw_iwarp_state = iwarp_state; nesqp->hw_tcp_state = tcp_state; nesqp->last_aeq = async_event_id; next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING; nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; spin_unlock_irqrestore(&nesqp->lock, flags); nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); nes_cm_disconn(nesqp); } nesqp->cm_id->add_ref(nesqp->cm_id); schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, NES_TIMER_TYPE_CLOSE, 1, 0); nes_debug(NES_DBG_AEQ, "QP%u Not decrementing QP refcount (%d)," " need ae to finish up, original_last_aeq = 0x%04X." " last_aeq = 0x%04X, scheduling timer. 
TCP state = %d\n", nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), async_event_id, nesqp->last_aeq, tcp_state); } break; case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: spin_lock_irqsave(&nesqp->lock, flags); nesqp->hw_iwarp_state = iwarp_state; nesqp->hw_tcp_state = tcp_state; nesqp->last_aeq = async_event_id; spin_unlock_irqrestore(&nesqp->lock, flags); nes_cm_disconn(nesqp); break; case NES_AEQE_AEID_RESET_SENT: tcp_state = NES_AEQE_TCP_STATE_CLOSED; spin_lock_irqsave(&nesqp->lock, flags); nesqp->hw_iwarp_state = iwarp_state; nesqp->hw_tcp_state = tcp_state; nesqp->last_aeq = async_event_id; nesqp->hte_added = 0; spin_unlock_irqrestore(&nesqp->lock, flags); next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE; nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); nes_cm_disconn(nesqp); break; case NES_AEQE_AEID_LLP_CONNECTION_RESET: if (atomic_read(&nesqp->close_timer_started)) return; spin_lock_irqsave(&nesqp->lock, flags); nesqp->hw_iwarp_state = iwarp_state; nesqp->hw_tcp_state = tcp_state; nesqp->last_aeq = async_event_id; spin_unlock_irqrestore(&nesqp->lock, flags); nes_cm_disconn(nesqp); break; case NES_AEQE_AEID_TERMINATE_SENT: nes_terminate_send_fin(nesdev, nesqp, aeqe); break; case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED: nes_terminate_received(nesdev, nesqp, aeqe); break; case NES_AEQE_AEID_AMP_BAD_STAG_KEY: case NES_AEQE_AEID_AMP_BAD_STAG_INDEX: case NES_AEQE_AEID_AMP_UNALLOCATED_STAG: case NES_AEQE_AEID_AMP_INVALID_STAG: case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION: case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: case NES_AEQE_AEID_PRIV_OPERATION_DENIED: case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION: case NES_AEQE_AEID_AMP_TO_WRAP: printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_ACCESS_ERR\n", nesqp->hwqp.qp_id, async_event_id); nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR); break; case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE: case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL: case NES_AEQE_AEID_DDP_UBE_INVALID_MO: case NES_AEQE_AEID_DDP_UBE_INVALID_QN: if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) { aeq_info &= 0xffff0000; aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE; aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info); } case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE: case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES: case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR: case NES_AEQE_AEID_AMP_BAD_QP: case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH: case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC: case NES_AEQE_AEID_DDP_NO_L_BIT: case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN: case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID: case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION: case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION: case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE: case NES_AEQE_AEID_AMP_BAD_PD: case NES_AEQE_AEID_AMP_FASTREG_SHARED: case NES_AEQE_AEID_AMP_FASTREG_VALID_STAG: case NES_AEQE_AEID_AMP_FASTREG_MW_STAG: case NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS: case NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW: case NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH: case NES_AEQE_AEID_AMP_INVALIDATE_SHARED: case NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS: case NES_AEQE_AEID_AMP_MWBIND_VALID_STAG: case NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG: case NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG: case NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG: case NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS: case 
NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS: case NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT: case NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED: case NES_AEQE_AEID_BAD_CLOSE: case NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO: case NES_AEQE_AEID_STAG_ZERO_INVALID: case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST: case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_FATAL\n", nesqp->hwqp.qp_id, async_event_id); print_ip(nesqp->cm_node); if (!atomic_read(&nesqp->close_timer_started)) nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); break; case NES_AEQE_AEID_CQ_OPERATION_ERROR: context <<= 1; nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n", le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), (void *)(unsigned long)context); resource_allocated = nes_is_resource_allocated(nesadapter, nesadapter->allocated_cqs, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])); if (resource_allocated) { printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n", __func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])); hw_cq = (struct nes_hw_cq *)(unsigned long)context; if (hw_cq) { nescq = container_of(hw_cq, struct nes_cq, hw_cq); if (nescq->ibcq.event_handler) { ibevent.device = nescq->ibcq.device; ibevent.event = IB_EVENT_CQ_ERR; ibevent.element.cq = &nescq->ibcq; nescq->ibcq.event_handler(&ibevent, nescq->ibcq.cq_context); } } } break; default: nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n", async_event_id); break; } } /** * nes_iwarp_ce_handler */ void nes_iwarp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *hw_cq) { struct nes_cq *nescq = container_of(hw_cq, struct nes_cq, hw_cq); /* nes_debug(NES_DBG_CQ, "Processing completion event for iWARP CQ%u.\n", nescq->hw_cq.cq_number); */ nes_write32(nesdev->regs+NES_CQ_ACK, nescq->hw_cq.cq_number); if (nescq->ibcq.comp_handler) nescq->ibcq.comp_handler(&nescq->ibcq, nescq->ibcq.cq_context); return; } /** * nes_manage_apbvt() */ int nes_manage_apbvt(struct nes_vnic *nesvnic, u32 accel_local_port, u32 nic_index, u32 add_port) { struct nes_device *nesdev = nesvnic->nesdev; struct nes_hw_cqp_wqe *cqp_wqe; struct nes_cqp_request *cqp_request; int ret = 0; u16 major_code; /* Send manage APBVT request to CQP */ cqp_request = nes_get_cqp_request(nesdev); if (cqp_request == NULL) { nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n"); return -ENOMEM; } cqp_request->waiting = 1; cqp_wqe = &cqp_request->cqp_wqe; nes_debug(NES_DBG_QP, "%s APBV for local port=%u(0x%04x), nic_index=%u\n", (add_port == NES_MANAGE_APBVT_ADD) ? "ADD" : "DEL", accel_local_port, accel_local_port, nic_index); nes_fill_init_cqp_wqe(cqp_wqe, nesdev); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_MANAGE_APBVT | ((add_port == NES_MANAGE_APBVT_ADD) ? 
NES_CQP_APBVT_ADD : 0))); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, ((nic_index << NES_CQP_APBVT_NIC_SHIFT) | accel_local_port)); nes_debug(NES_DBG_QP, "Waiting for CQP completion for APBVT.\n"); atomic_set(&cqp_request->refcount, 2); nes_post_cqp_request(nesdev, cqp_request); if (add_port == NES_MANAGE_APBVT_ADD) ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0), NES_EVENT_TIMEOUT); nes_debug(NES_DBG_QP, "Completed, ret=%u, CQP Major:Minor codes = 0x%04X:0x%04X\n", ret, cqp_request->major_code, cqp_request->minor_code); major_code = cqp_request->major_code; nes_put_cqp_request(nesdev, cqp_request); if (!ret) return -ETIME; else if (major_code) return -EIO; else return 0; } /** * nes_manage_arp_cache */ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr, u32 ip_addr, u32 action) { struct nes_hw_cqp_wqe *cqp_wqe; struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev; struct nes_cqp_request *cqp_request; int arp_index; nesdev = nesvnic->nesdev; arp_index = nes_arp_table(nesdev, ip_addr, mac_addr, action); if (arp_index == -1) { return; } /* update the ARP entry */ cqp_request = nes_get_cqp_request(nesdev); if (cqp_request == NULL) { nes_debug(NES_DBG_NETDEV, "Failed to get a cqp_request.\n"); return; } cqp_request->waiting = 0; cqp_wqe = &cqp_request->cqp_wqe; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32( NES_CQP_MANAGE_ARP_CACHE | NES_CQP_ARP_PERM); cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32( (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_CQP_ARP_AEQ_INDEX_SHIFT); cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(arp_index); if (action == NES_ARP_ADD) { cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_ARP_VALID); cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = cpu_to_le32( (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) | (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]); cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32( (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]); } else { cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0; cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0; } nes_debug(NES_DBG_NETDEV, "Not waiting for CQP, cqp.sq_head=%u, cqp.sq_tail=%u\n", nesdev->cqp.sq_head, nesdev->cqp.sq_tail); atomic_set(&cqp_request->refcount, 1); nes_post_cqp_request(nesdev, cqp_request); } /** * flush_wqes */ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp, u32 which_wq, u32 wait_completion) { struct nes_cqp_request *cqp_request; struct nes_hw_cqp_wqe *cqp_wqe; u32 sq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH; u32 rq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH; int ret; cqp_request = nes_get_cqp_request(nesdev); if (cqp_request == NULL) { nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n"); return; } if (wait_completion) { cqp_request->waiting = 1; atomic_set(&cqp_request->refcount, 2); } else { cqp_request->waiting = 0; } cqp_wqe = &cqp_request->cqp_wqe; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); /* If wqe in error was identified, set code to be put into cqe */ if ((nesqp->term_sq_flush_code) && (which_wq & NES_CQP_FLUSH_SQ)) { which_wq |= NES_CQP_FLUSH_MAJ_MIN; sq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_sq_flush_code; nesqp->term_sq_flush_code = 0; } if ((nesqp->term_rq_flush_code) && (which_wq & NES_CQP_FLUSH_RQ)) { which_wq |= NES_CQP_FLUSH_MAJ_MIN; rq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_rq_flush_code; 
nesqp->term_rq_flush_code = 0; } if (which_wq & NES_CQP_FLUSH_MAJ_MIN) { cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_SQ_CODE] = cpu_to_le32(sq_code); cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_RQ_CODE] = cpu_to_le32(rq_code); } cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq); cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id); nes_post_cqp_request(nesdev, cqp_request); if (wait_completion) { /* Wait for CQP */ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0), NES_EVENT_TIMEOUT); nes_debug(NES_DBG_QP, "Flush SQ QP WQEs completed, ret=%u," " CQP Major:Minor codes = 0x%04X:0x%04X\n", ret, cqp_request->major_code, cqp_request->minor_code); nes_put_cqp_request(nesdev, cqp_request); } }
gpl-2.0
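nes_manage_apbvt() and flush_wqes() above share one pattern: post a CQP work request, sleep on cqp_request->waitq with wait_event_timeout() until nes_cqp_ce_handler() marks request_done, then map a timeout or a non-zero major code to an error. Below is a minimal userspace sketch of that post-then-wait-with-timeout pattern using pthreads; every name in it (fake_request, wait_for_request, and so on) is invented for illustration and is not part of the driver.

/* Userspace sketch of the post-then-wait pattern from nes_manage_apbvt()
 * and flush_wqes(): one thread plays the completion handler, the caller
 * blocks on a condition variable until request_done is set or a timeout
 * expires.  Illustrative only. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct fake_request {
        pthread_mutex_t lock;
        pthread_cond_t waitq;
        int request_done;
        unsigned short major_code;      /* set by the "completion handler" */
};

static void *fake_completion_handler(void *arg)
{
        struct fake_request *req = arg;
        struct timespec delay = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

        /* pretend the hardware finished after a short delay */
        nanosleep(&delay, NULL);

        pthread_mutex_lock(&req->lock);
        req->major_code = 0;            /* success */
        req->request_done = 1;
        pthread_cond_signal(&req->waitq);
        pthread_mutex_unlock(&req->lock);
        return NULL;
}

static int wait_for_request(struct fake_request *req, int timeout_sec)
{
        struct timespec deadline;
        int ret = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += timeout_sec;

        pthread_mutex_lock(&req->lock);
        while (!req->request_done && ret != ETIMEDOUT)
                ret = pthread_cond_timedwait(&req->waitq, &req->lock, &deadline);
        pthread_mutex_unlock(&req->lock);

        if (!req->request_done)
                return -1;                      /* analogous to the -ETIME path */
        return req->major_code ? -1 : 0;        /* analogous to the -EIO path */
}

int main(void)
{
        struct fake_request req = { .request_done = 0, .major_code = 0 };
        pthread_t worker;

        pthread_mutex_init(&req.lock, NULL);
        pthread_cond_init(&req.waitq, NULL);

        pthread_create(&worker, NULL, fake_completion_handler, &req);
        printf("wait result: %d\n", wait_for_request(&req, 2));
        pthread_join(&worker, NULL);
        return 0;
}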
menghang/android_kernel_xiaomi_msm8996
net/tipc/log.c
3453
2242
/*
 * net/tipc/log.c: TIPC print buffer routines for debugging
 *
 * Copyright (c) 1996-2006, Ericsson AB
 * Copyright (c) 2005-2007, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "config.h"

/**
 * tipc_snprintf - append formatted output to print buffer
 * @buf: pointer to print buffer
 * @len: buffer length
 * @fmt: formatted info to be printed
 */
int tipc_snprintf(char *buf, int len, const char *fmt, ...)
{
        int i;
        va_list args;

        va_start(args, fmt);
        i = vscnprintf(buf, len, fmt, args);
        va_end(args);
        return i;
}
gpl-2.0
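tipc_snprintf() above is a thin varargs wrapper around the kernel's vscnprintf(). The following is a minimal userspace sketch of the same append-style helper, assuming ordinary libc: vsnprintf() stands in for the kernel-only vscnprintf(), and buf_printf() is a made-up name used only for this example.

/* Userspace analogue of tipc_snprintf(): wrap a va_list around vsnprintf()
 * and clamp the return value to "characters actually stored", which is what
 * vscnprintf() does in the kernel.  Illustrative only. */
#include <stdarg.h>
#include <stdio.h>

static int buf_printf(char *buf, int len, const char *fmt, ...)
{
        va_list args;
        int i;

        va_start(args, fmt);
        i = vsnprintf(buf, len, fmt, args);
        va_end(args);

        /* vsnprintf() reports what *would* have been written; clamp it */
        if (i >= len)
                i = len > 0 ? len - 1 : 0;
        return i;
}

int main(void)
{
        char report[64];
        int used = buf_printf(report, sizeof(report), "links up: %d\n", 3);

        printf("%s(%d chars)\n", report, used);
        return 0;
}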
kendling/android_kernel_google_dragon
arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
4733
2682
/*
 * OMAP2/3 clockdomain common data
 *
 * Copyright (C) 2008-2011 Texas Instruments, Inc.
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Paul Walmsley, Jouni Högander
 *
 * This file contains clockdomains and clockdomain wakeup/sleep
 * dependencies for the OMAP2/3 chips.  Some notes:
 *
 * A useful validation rule for struct clockdomain: Any clockdomain
 * referenced by a wkdep_srcs or sleepdep_srcs array must have a
 * dep_bit assigned.  So wkdep_srcs/sleepdep_srcs are really just
 * software-controllable dependencies.  Non-software-controllable
 * dependencies do exist, but they are not encoded below (yet).
 *
 * 24xx does not support programmable sleep dependencies (SLEEPDEP)
 *
 * The overly-specific dep_bit names are due to a bit name collision
 * with CM_FCLKEN_{DSP,IVA2}.  The DSP/IVA2 PM_WKDEP and CM_SLEEPDEP shift
 * value are the same for all powerdomains: 2
 *
 * XXX should dep_bit be a mask, so we can test to see if it is 0 as a
 * sanity check?
 * XXX encode hardware fixed wakeup dependencies -- esp. for 3430 CORE
 */

/*
 * To-Do List
 * -> Port the Sleep/Wakeup dependencies for the domains
 *    from the Power domain framework
 */

#include <linux/kernel.h>
#include <linux/io.h>

#include "clockdomain.h"
#include "prm2xxx_3xxx.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"
#include "cm-regbits-44xx.h"
#include "prm-regbits-24xx.h"
#include "prm-regbits-34xx.h"

/*
 * Clockdomain dependencies for wkdeps/sleepdeps
 *
 * XXX Hardware dependencies (e.g., dependencies that cannot be
 * changed in software) are not included here yet, but should be.
 */

/* Wakeup dependency source arrays */

/* 2xxx-specific possible dependencies */

/* 2xxx PM_WKDEP_GFX: CORE, MPU, WKUP */
struct clkdm_dep gfx_24xx_wkdeps[] = {
        { .clkdm_name = "core_l3_clkdm" },
        { .clkdm_name = "core_l4_clkdm" },
        { .clkdm_name = "mpu_clkdm" },
        { .clkdm_name = "wkup_clkdm" },
        { NULL },
};

/* 2xxx PM_WKDEP_DSP: CORE, MPU, WKUP */
struct clkdm_dep dsp_24xx_wkdeps[] = {
        { .clkdm_name = "core_l3_clkdm" },
        { .clkdm_name = "core_l4_clkdm" },
        { .clkdm_name = "mpu_clkdm" },
        { .clkdm_name = "wkup_clkdm" },
        { NULL },
};

/*
 * OMAP2/3-common clockdomains
 *
 * Even though the 2420 has a single PRCM module from the
 * interconnect's perspective, internally it does appear to have
 * separate PRM and CM clockdomains.  The usual test case is
 * sys_clkout/sys_clkout2.
 */

/* This is an implicit clockdomain - it is never defined as such in TRM */
struct clockdomain wkup_common_clkdm = {
        .name           = "wkup_clkdm",
        .pwrdm          = { .name = "wkup_pwrdm" },
        .dep_bit        = OMAP_EN_WKUP_SHIFT,
        .flags          = CLKDM_ACTIVE_WITH_MPU,
};
gpl-2.0
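Each wkdeps array above ends with a { NULL } entry, which acts as the sentinel that terminates a walk over the table. A small illustrative sketch of that iteration pattern follows; clkdm_for_each_dep() and the trimmed table are invented for the example and are not part of the OMAP code.

/* Sketch of walking a { NULL }-terminated dependency array such as
 * gfx_24xx_wkdeps[].  Illustrative only. */
#include <stdio.h>

struct clkdm_dep {
        const char *clkdm_name;
};

static struct clkdm_dep gfx_wkdeps[] = {
        { .clkdm_name = "core_l3_clkdm" },
        { .clkdm_name = "mpu_clkdm" },
        { .clkdm_name = "wkup_clkdm" },
        { NULL },
};

static void clkdm_for_each_dep(struct clkdm_dep *deps)
{
        struct clkdm_dep *d;

        /* the { NULL } entry terminates the walk */
        for (d = deps; d->clkdm_name; d++)
                printf("wakeup dependency on %s\n", d->clkdm_name);
}

int main(void)
{
        clkdm_for_each_dep(gfx_wkdeps);
        return 0;
}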
CyanogenMod/android_kernel_samsung_epicmtd
drivers/misc/ibmasm/heartbeat.c
4989
3205
/*
 * IBM ASM Service Processor Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2004
 *
 * Author: Max Asböck <amax@us.ibm.com>
 *
 */

#include <linux/notifier.h>
#include "ibmasm.h"
#include "dot_command.h"
#include "lowlevel.h"

static int suspend_heartbeats = 0;

/*
 * Once the driver indicates to the service processor that it is running
 * - see send_os_state() - the service processor sends periodic heartbeats
 * to the driver. The driver must respond to the heartbeats or else the OS
 * will be rebooted.
 * In the case of a panic the interrupt handler continues to work and thus
 * continues to respond to heartbeats, making the service processor believe
 * the OS is still running and thus preventing a reboot.
 * To prevent this from happening a callback is added to the panic_notifier_list.
 * Before responding to a heartbeat the driver checks if a panic has happened;
 * if so it suspends heartbeats, causing the service processor to reboot as
 * expected.
 */
static int panic_happened(struct notifier_block *n, unsigned long val, void *v)
{
        suspend_heartbeats = 1;
        return 0;
}

static struct notifier_block panic_notifier = { panic_happened, NULL, 1 };

void ibmasm_register_panic_notifier(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
}

void ibmasm_unregister_panic_notifier(void)
{
        atomic_notifier_chain_unregister(&panic_notifier_list, &panic_notifier);
}

int ibmasm_heartbeat_init(struct service_processor *sp)
{
        sp->heartbeat = ibmasm_new_command(sp, HEARTBEAT_BUFFER_SIZE);
        if (sp->heartbeat == NULL)
                return -ENOMEM;

        return 0;
}

void ibmasm_heartbeat_exit(struct service_processor *sp)
{
        char tsbuf[32];

        dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
        ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL);
        dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
        suspend_heartbeats = 1;
        command_put(sp->heartbeat);
}

void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size)
{
        struct command *cmd = sp->heartbeat;
        struct dot_command_header *header = (struct dot_command_header *)cmd->buffer;
        char tsbuf[32];

        dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
        if (suspend_heartbeats)
                return;

        /* return the received dot command to sender */
        cmd->status = IBMASM_CMD_PENDING;
        size = min(size, cmd->buffer_size);
        memcpy_fromio(cmd->buffer, message, size);
        header->type = sp_write;
        ibmasm_exec_command(sp, cmd);
}
gpl-2.0
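The heartbeat driver above stops answering the service processor once the panic notifier has set suspend_heartbeats, so the watchdog on the other side finally fires. Below is a minimal userspace sketch of that "set a flag asynchronously, stop acking heartbeats" idea, with SIGINT standing in for the panic notifier; none of these names exist in the driver.

/* Userspace sketch of heartbeat suppression: an async event sets a flag
 * and the heartbeat loop stops responding, letting the peer time out.
 * Illustrative only. */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t suspend_heartbeats;

static void panic_happened(int sig)
{
        (void)sig;
        suspend_heartbeats = 1;         /* same role as the panic notifier callback */
}

int main(void)
{
        signal(SIGINT, panic_happened);

        for (;;) {
                sleep(1);
                if (suspend_heartbeats)
                        break;          /* stop responding; let the watchdog act */
                puts("heartbeat ack");  /* "return the dot command to sender" */
        }
        return 0;
}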
cm-mirror/android_kernel_nubia_nx505j
drivers/video/via/hw.c
4989
60099
/* * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/via-core.h> #include <asm/olpc.h> #include "global.h" #include "via_clock.h" static struct pll_limit cle266_pll_limits[] = { {19, 19, 4, 0}, {26, 102, 5, 0}, {53, 112, 6, 0}, {41, 100, 7, 0}, {83, 108, 8, 0}, {87, 118, 9, 0}, {95, 115, 12, 0}, {108, 108, 13, 0}, {83, 83, 17, 0}, {67, 98, 20, 0}, {121, 121, 24, 0}, {99, 99, 29, 0}, {33, 33, 3, 1}, {15, 23, 4, 1}, {37, 121, 5, 1}, {82, 82, 6, 1}, {31, 84, 7, 1}, {83, 83, 8, 1}, {76, 127, 9, 1}, {33, 121, 4, 2}, {91, 118, 5, 2}, {83, 109, 6, 2}, {90, 90, 7, 2}, {93, 93, 2, 3}, {53, 53, 3, 3}, {73, 117, 4, 3}, {101, 127, 5, 3}, {99, 99, 7, 3} }; static struct pll_limit k800_pll_limits[] = { {22, 22, 2, 0}, {28, 28, 3, 0}, {81, 112, 3, 1}, {86, 166, 4, 1}, {109, 153, 5, 1}, {66, 116, 3, 2}, {93, 137, 4, 2}, {117, 208, 5, 2}, {30, 30, 2, 3}, {69, 125, 3, 3}, {89, 161, 4, 3}, {121, 208, 5, 3}, {66, 66, 2, 4}, {85, 85, 3, 4}, {141, 161, 4, 4}, {177, 177, 5, 4} }; static struct pll_limit cx700_pll_limits[] = { {98, 98, 3, 1}, {86, 86, 4, 1}, {109, 208, 5, 1}, {68, 68, 2, 2}, {95, 116, 3, 2}, {93, 166, 4, 2}, {110, 206, 5, 2}, {174, 174, 7, 2}, {82, 109, 3, 3}, {117, 161, 4, 3}, {112, 208, 5, 3}, {141, 202, 5, 4} }; static struct pll_limit vx855_pll_limits[] = { {86, 86, 4, 1}, {108, 208, 5, 1}, {110, 208, 5, 2}, {83, 112, 3, 3}, {103, 161, 4, 3}, {112, 209, 5, 3}, {142, 161, 4, 4}, {141, 176, 5, 4} }; /* according to VIA Technologies these values are based on experiment */ static struct io_reg scaling_parameters[] = { {VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */ {VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */ {VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */ {VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */ {VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */ {VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */ {VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */ {VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */ {VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */ {VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */ {VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */ {VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */ {VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */ {VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */ }; static struct io_reg common_vga[] = { {VIACR, CR07, 0x10, 0x10}, /* [0] vertical total (bit 8) [1] vertical display end (bit 8) [2] vertical retrace start (bit 8) [3] start vertical blanking (bit 8) [4] line compare (bit 8) [5] vertical total (bit 9) [6] vertical display end (bit 9) [7] vertical retrace start (bit 9) */ {VIACR, CR08, 0xFF, 0x00}, /* [0-4] preset row scan [5-6] byte panning */ {VIACR, CR09, 
0xDF, 0x40}, /* [0-4] max scan line [5] start vertical blanking (bit 9) [6] line compare (bit 9) [7] scan doubling */ {VIACR, CR0A, 0xFF, 0x1E}, /* [0-4] cursor start [5] cursor disable */ {VIACR, CR0B, 0xFF, 0x00}, /* [0-4] cursor end [5-6] cursor skew */ {VIACR, CR0E, 0xFF, 0x00}, /* [0-7] cursor location (high) */ {VIACR, CR0F, 0xFF, 0x00}, /* [0-7] cursor location (low) */ {VIACR, CR11, 0xF0, 0x80}, /* [0-3] vertical retrace end [6] memory refresh bandwidth [7] CRTC register protect enable */ {VIACR, CR14, 0xFF, 0x00}, /* [0-4] underline location [5] divide memory address clock by 4 [6] double word addressing */ {VIACR, CR17, 0xFF, 0x63}, /* [0-1] mapping of display address 13-14 [2] divide scan line clock by 2 [3] divide memory address clock by 2 [5] address wrap [6] byte mode select [7] sync enable */ {VIACR, CR18, 0xFF, 0xFF}, /* [0-7] line compare */ }; static struct fifo_depth_select display_fifo_depth_reg = { /* IGA1 FIFO Depth_Select */ {IGA1_FIFO_DEPTH_SELECT_REG_NUM, {{SR17, 0, 7} } }, /* IGA2 FIFO Depth_Select */ {IGA2_FIFO_DEPTH_SELECT_REG_NUM, {{CR68, 4, 7}, {CR94, 7, 7}, {CR95, 7, 7} } } }; static struct fifo_threshold_select fifo_threshold_select_reg = { /* IGA1 FIFO Threshold Select */ {IGA1_FIFO_THRESHOLD_REG_NUM, {{SR16, 0, 5}, {SR16, 7, 7} } }, /* IGA2 FIFO Threshold Select */ {IGA2_FIFO_THRESHOLD_REG_NUM, {{CR68, 0, 3}, {CR95, 4, 6} } } }; static struct fifo_high_threshold_select fifo_high_threshold_select_reg = { /* IGA1 FIFO High Threshold Select */ {IGA1_FIFO_HIGH_THRESHOLD_REG_NUM, {{SR18, 0, 5}, {SR18, 7, 7} } }, /* IGA2 FIFO High Threshold Select */ {IGA2_FIFO_HIGH_THRESHOLD_REG_NUM, {{CR92, 0, 3}, {CR95, 0, 2} } } }; static struct display_queue_expire_num display_queue_expire_num_reg = { /* IGA1 Display Queue Expire Num */ {IGA1_DISPLAY_QUEUE_EXPIRE_NUM_REG_NUM, {{SR22, 0, 4} } }, /* IGA2 Display Queue Expire Num */ {IGA2_DISPLAY_QUEUE_EXPIRE_NUM_REG_NUM, {{CR94, 0, 6} } } }; /* Definition Fetch Count Registers*/ static struct fetch_count fetch_count_reg = { /* IGA1 Fetch Count Register */ {IGA1_FETCH_COUNT_REG_NUM, {{SR1C, 0, 7}, {SR1D, 0, 1} } }, /* IGA2 Fetch Count Register */ {IGA2_FETCH_COUNT_REG_NUM, {{CR65, 0, 7}, {CR67, 2, 3} } } }; static struct rgbLUT palLUT_table[] = { /* {R,G,B} */ /* Index 0x00~0x03 */ {0x00, 0x00, 0x00}, {0x00, 0x00, 0x2A}, {0x00, 0x2A, 0x00}, {0x00, 0x2A, 0x2A}, /* Index 0x04~0x07 */ {0x2A, 0x00, 0x00}, {0x2A, 0x00, 0x2A}, {0x2A, 0x15, 0x00}, {0x2A, 0x2A, 0x2A}, /* Index 0x08~0x0B */ {0x15, 0x15, 0x15}, {0x15, 0x15, 0x3F}, {0x15, 0x3F, 0x15}, {0x15, 0x3F, 0x3F}, /* Index 0x0C~0x0F */ {0x3F, 0x15, 0x15}, {0x3F, 0x15, 0x3F}, {0x3F, 0x3F, 0x15}, {0x3F, 0x3F, 0x3F}, /* Index 0x10~0x13 */ {0x00, 0x00, 0x00}, {0x05, 0x05, 0x05}, {0x08, 0x08, 0x08}, {0x0B, 0x0B, 0x0B}, /* Index 0x14~0x17 */ {0x0E, 0x0E, 0x0E}, {0x11, 0x11, 0x11}, {0x14, 0x14, 0x14}, {0x18, 0x18, 0x18}, /* Index 0x18~0x1B */ {0x1C, 0x1C, 0x1C}, {0x20, 0x20, 0x20}, {0x24, 0x24, 0x24}, {0x28, 0x28, 0x28}, /* Index 0x1C~0x1F */ {0x2D, 0x2D, 0x2D}, {0x32, 0x32, 0x32}, {0x38, 0x38, 0x38}, {0x3F, 0x3F, 0x3F}, /* Index 0x20~0x23 */ {0x00, 0x00, 0x3F}, {0x10, 0x00, 0x3F}, {0x1F, 0x00, 0x3F}, {0x2F, 0x00, 0x3F}, /* Index 0x24~0x27 */ {0x3F, 0x00, 0x3F}, {0x3F, 0x00, 0x2F}, {0x3F, 0x00, 0x1F}, {0x3F, 0x00, 0x10}, /* Index 0x28~0x2B */ {0x3F, 0x00, 0x00}, {0x3F, 0x10, 0x00}, {0x3F, 0x1F, 0x00}, {0x3F, 0x2F, 0x00}, /* Index 0x2C~0x2F */ {0x3F, 0x3F, 0x00}, {0x2F, 0x3F, 0x00}, {0x1F, 0x3F, 0x00}, {0x10, 0x3F, 0x00}, /* Index 0x30~0x33 */ {0x00, 0x3F, 0x00}, {0x00, 0x3F, 0x10}, 
{0x00, 0x3F, 0x1F}, {0x00, 0x3F, 0x2F}, /* Index 0x34~0x37 */ {0x00, 0x3F, 0x3F}, {0x00, 0x2F, 0x3F}, {0x00, 0x1F, 0x3F}, {0x00, 0x10, 0x3F}, /* Index 0x38~0x3B */ {0x1F, 0x1F, 0x3F}, {0x27, 0x1F, 0x3F}, {0x2F, 0x1F, 0x3F}, {0x37, 0x1F, 0x3F}, /* Index 0x3C~0x3F */ {0x3F, 0x1F, 0x3F}, {0x3F, 0x1F, 0x37}, {0x3F, 0x1F, 0x2F}, {0x3F, 0x1F, 0x27}, /* Index 0x40~0x43 */ {0x3F, 0x1F, 0x1F}, {0x3F, 0x27, 0x1F}, {0x3F, 0x2F, 0x1F}, {0x3F, 0x3F, 0x1F}, /* Index 0x44~0x47 */ {0x3F, 0x3F, 0x1F}, {0x37, 0x3F, 0x1F}, {0x2F, 0x3F, 0x1F}, {0x27, 0x3F, 0x1F}, /* Index 0x48~0x4B */ {0x1F, 0x3F, 0x1F}, {0x1F, 0x3F, 0x27}, {0x1F, 0x3F, 0x2F}, {0x1F, 0x3F, 0x37}, /* Index 0x4C~0x4F */ {0x1F, 0x3F, 0x3F}, {0x1F, 0x37, 0x3F}, {0x1F, 0x2F, 0x3F}, {0x1F, 0x27, 0x3F}, /* Index 0x50~0x53 */ {0x2D, 0x2D, 0x3F}, {0x31, 0x2D, 0x3F}, {0x36, 0x2D, 0x3F}, {0x3A, 0x2D, 0x3F}, /* Index 0x54~0x57 */ {0x3F, 0x2D, 0x3F}, {0x3F, 0x2D, 0x3A}, {0x3F, 0x2D, 0x36}, {0x3F, 0x2D, 0x31}, /* Index 0x58~0x5B */ {0x3F, 0x2D, 0x2D}, {0x3F, 0x31, 0x2D}, {0x3F, 0x36, 0x2D}, {0x3F, 0x3A, 0x2D}, /* Index 0x5C~0x5F */ {0x3F, 0x3F, 0x2D}, {0x3A, 0x3F, 0x2D}, {0x36, 0x3F, 0x2D}, {0x31, 0x3F, 0x2D}, /* Index 0x60~0x63 */ {0x2D, 0x3F, 0x2D}, {0x2D, 0x3F, 0x31}, {0x2D, 0x3F, 0x36}, {0x2D, 0x3F, 0x3A}, /* Index 0x64~0x67 */ {0x2D, 0x3F, 0x3F}, {0x2D, 0x3A, 0x3F}, {0x2D, 0x36, 0x3F}, {0x2D, 0x31, 0x3F}, /* Index 0x68~0x6B */ {0x00, 0x00, 0x1C}, {0x07, 0x00, 0x1C}, {0x0E, 0x00, 0x1C}, {0x15, 0x00, 0x1C}, /* Index 0x6C~0x6F */ {0x1C, 0x00, 0x1C}, {0x1C, 0x00, 0x15}, {0x1C, 0x00, 0x0E}, {0x1C, 0x00, 0x07}, /* Index 0x70~0x73 */ {0x1C, 0x00, 0x00}, {0x1C, 0x07, 0x00}, {0x1C, 0x0E, 0x00}, {0x1C, 0x15, 0x00}, /* Index 0x74~0x77 */ {0x1C, 0x1C, 0x00}, {0x15, 0x1C, 0x00}, {0x0E, 0x1C, 0x00}, {0x07, 0x1C, 0x00}, /* Index 0x78~0x7B */ {0x00, 0x1C, 0x00}, {0x00, 0x1C, 0x07}, {0x00, 0x1C, 0x0E}, {0x00, 0x1C, 0x15}, /* Index 0x7C~0x7F */ {0x00, 0x1C, 0x1C}, {0x00, 0x15, 0x1C}, {0x00, 0x0E, 0x1C}, {0x00, 0x07, 0x1C}, /* Index 0x80~0x83 */ {0x0E, 0x0E, 0x1C}, {0x11, 0x0E, 0x1C}, {0x15, 0x0E, 0x1C}, {0x18, 0x0E, 0x1C}, /* Index 0x84~0x87 */ {0x1C, 0x0E, 0x1C}, {0x1C, 0x0E, 0x18}, {0x1C, 0x0E, 0x15}, {0x1C, 0x0E, 0x11}, /* Index 0x88~0x8B */ {0x1C, 0x0E, 0x0E}, {0x1C, 0x11, 0x0E}, {0x1C, 0x15, 0x0E}, {0x1C, 0x18, 0x0E}, /* Index 0x8C~0x8F */ {0x1C, 0x1C, 0x0E}, {0x18, 0x1C, 0x0E}, {0x15, 0x1C, 0x0E}, {0x11, 0x1C, 0x0E}, /* Index 0x90~0x93 */ {0x0E, 0x1C, 0x0E}, {0x0E, 0x1C, 0x11}, {0x0E, 0x1C, 0x15}, {0x0E, 0x1C, 0x18}, /* Index 0x94~0x97 */ {0x0E, 0x1C, 0x1C}, {0x0E, 0x18, 0x1C}, {0x0E, 0x15, 0x1C}, {0x0E, 0x11, 0x1C}, /* Index 0x98~0x9B */ {0x14, 0x14, 0x1C}, {0x16, 0x14, 0x1C}, {0x18, 0x14, 0x1C}, {0x1A, 0x14, 0x1C}, /* Index 0x9C~0x9F */ {0x1C, 0x14, 0x1C}, {0x1C, 0x14, 0x1A}, {0x1C, 0x14, 0x18}, {0x1C, 0x14, 0x16}, /* Index 0xA0~0xA3 */ {0x1C, 0x14, 0x14}, {0x1C, 0x16, 0x14}, {0x1C, 0x18, 0x14}, {0x1C, 0x1A, 0x14}, /* Index 0xA4~0xA7 */ {0x1C, 0x1C, 0x14}, {0x1A, 0x1C, 0x14}, {0x18, 0x1C, 0x14}, {0x16, 0x1C, 0x14}, /* Index 0xA8~0xAB */ {0x14, 0x1C, 0x14}, {0x14, 0x1C, 0x16}, {0x14, 0x1C, 0x18}, {0x14, 0x1C, 0x1A}, /* Index 0xAC~0xAF */ {0x14, 0x1C, 0x1C}, {0x14, 0x1A, 0x1C}, {0x14, 0x18, 0x1C}, {0x14, 0x16, 0x1C}, /* Index 0xB0~0xB3 */ {0x00, 0x00, 0x10}, {0x04, 0x00, 0x10}, {0x08, 0x00, 0x10}, {0x0C, 0x00, 0x10}, /* Index 0xB4~0xB7 */ {0x10, 0x00, 0x10}, {0x10, 0x00, 0x0C}, {0x10, 0x00, 0x08}, {0x10, 0x00, 0x04}, /* Index 0xB8~0xBB */ {0x10, 0x00, 0x00}, {0x10, 0x04, 0x00}, {0x10, 0x08, 0x00}, {0x10, 0x0C, 0x00}, /* Index 0xBC~0xBF */ {0x10, 0x10, 0x00}, 
{0x0C, 0x10, 0x00}, {0x08, 0x10, 0x00}, {0x04, 0x10, 0x00}, /* Index 0xC0~0xC3 */ {0x00, 0x10, 0x00}, {0x00, 0x10, 0x04}, {0x00, 0x10, 0x08}, {0x00, 0x10, 0x0C}, /* Index 0xC4~0xC7 */ {0x00, 0x10, 0x10}, {0x00, 0x0C, 0x10}, {0x00, 0x08, 0x10}, {0x00, 0x04, 0x10}, /* Index 0xC8~0xCB */ {0x08, 0x08, 0x10}, {0x0A, 0x08, 0x10}, {0x0C, 0x08, 0x10}, {0x0E, 0x08, 0x10}, /* Index 0xCC~0xCF */ {0x10, 0x08, 0x10}, {0x10, 0x08, 0x0E}, {0x10, 0x08, 0x0C}, {0x10, 0x08, 0x0A}, /* Index 0xD0~0xD3 */ {0x10, 0x08, 0x08}, {0x10, 0x0A, 0x08}, {0x10, 0x0C, 0x08}, {0x10, 0x0E, 0x08}, /* Index 0xD4~0xD7 */ {0x10, 0x10, 0x08}, {0x0E, 0x10, 0x08}, {0x0C, 0x10, 0x08}, {0x0A, 0x10, 0x08}, /* Index 0xD8~0xDB */ {0x08, 0x10, 0x08}, {0x08, 0x10, 0x0A}, {0x08, 0x10, 0x0C}, {0x08, 0x10, 0x0E}, /* Index 0xDC~0xDF */ {0x08, 0x10, 0x10}, {0x08, 0x0E, 0x10}, {0x08, 0x0C, 0x10}, {0x08, 0x0A, 0x10}, /* Index 0xE0~0xE3 */ {0x0B, 0x0B, 0x10}, {0x0C, 0x0B, 0x10}, {0x0D, 0x0B, 0x10}, {0x0F, 0x0B, 0x10}, /* Index 0xE4~0xE7 */ {0x10, 0x0B, 0x10}, {0x10, 0x0B, 0x0F}, {0x10, 0x0B, 0x0D}, {0x10, 0x0B, 0x0C}, /* Index 0xE8~0xEB */ {0x10, 0x0B, 0x0B}, {0x10, 0x0C, 0x0B}, {0x10, 0x0D, 0x0B}, {0x10, 0x0F, 0x0B}, /* Index 0xEC~0xEF */ {0x10, 0x10, 0x0B}, {0x0F, 0x10, 0x0B}, {0x0D, 0x10, 0x0B}, {0x0C, 0x10, 0x0B}, /* Index 0xF0~0xF3 */ {0x0B, 0x10, 0x0B}, {0x0B, 0x10, 0x0C}, {0x0B, 0x10, 0x0D}, {0x0B, 0x10, 0x0F}, /* Index 0xF4~0xF7 */ {0x0B, 0x10, 0x10}, {0x0B, 0x0F, 0x10}, {0x0B, 0x0D, 0x10}, {0x0B, 0x0C, 0x10}, /* Index 0xF8~0xFB */ {0x00, 0x00, 0x00}, {0x00, 0x00, 0x00}, {0x00, 0x00, 0x00}, {0x00, 0x00, 0x00}, /* Index 0xFC~0xFF */ {0x00, 0x00, 0x00}, {0x00, 0x00, 0x00}, {0x00, 0x00, 0x00}, {0x00, 0x00, 0x00} }; static struct via_device_mapping device_mapping[] = { {VIA_LDVP0, "LDVP0"}, {VIA_LDVP1, "LDVP1"}, {VIA_DVP0, "DVP0"}, {VIA_CRT, "CRT"}, {VIA_DVP1, "DVP1"}, {VIA_LVDS1, "LVDS1"}, {VIA_LVDS2, "LVDS2"} }; /* structure with function pointers to support clock control */ static struct via_clock clock; static void load_fix_bit_crtc_reg(void); static void __devinit init_gfx_chip_info(int chip_type); static void __devinit init_tmds_chip_info(void); static void __devinit init_lvds_chip_info(void); static void device_screen_off(void); static void device_screen_on(void); static void set_display_channel(void); static void device_off(void); static void device_on(void); static void enable_second_display_channel(void); static void disable_second_display_channel(void); void viafb_lock_crt(void) { viafb_write_reg_mask(CR11, VIACR, BIT7, BIT7); } void viafb_unlock_crt(void) { viafb_write_reg_mask(CR11, VIACR, 0, BIT7); viafb_write_reg_mask(CR47, VIACR, 0, BIT0); } static void write_dac_reg(u8 index, u8 r, u8 g, u8 b) { outb(index, LUT_INDEX_WRITE); outb(r, LUT_DATA); outb(g, LUT_DATA); outb(b, LUT_DATA); } static u32 get_dvi_devices(int output_interface) { switch (output_interface) { case INTERFACE_DVP0: return VIA_DVP0 | VIA_LDVP0; case INTERFACE_DVP1: if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) return VIA_LDVP1; else return VIA_DVP1; case INTERFACE_DFP_HIGH: if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) return 0; else return VIA_LVDS2 | VIA_DVP0; case INTERFACE_DFP_LOW: if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) return 0; else return VIA_DVP1 | VIA_LVDS1; case INTERFACE_TMDS: return VIA_LVDS1; } return 0; } static u32 get_lcd_devices(int output_interface) { switch (output_interface) { case INTERFACE_DVP0: return VIA_DVP0; case INTERFACE_DVP1: return VIA_DVP1; case INTERFACE_DFP_HIGH: return 
VIA_LVDS2 | VIA_DVP0; case INTERFACE_DFP_LOW: return VIA_LVDS1 | VIA_DVP1; case INTERFACE_DFP: return VIA_LVDS1 | VIA_LVDS2; case INTERFACE_LVDS0: case INTERFACE_LVDS0LVDS1: return VIA_LVDS1; case INTERFACE_LVDS1: return VIA_LVDS2; } return 0; } /*Set IGA path for each device*/ void viafb_set_iga_path(void) { int crt_iga_path = 0; if (viafb_SAMM_ON == 1) { if (viafb_CRT_ON) { if (viafb_primary_dev == CRT_Device) crt_iga_path = IGA1; else crt_iga_path = IGA2; } if (viafb_DVI_ON) { if (viafb_primary_dev == DVI_Device) viaparinfo->tmds_setting_info->iga_path = IGA1; else viaparinfo->tmds_setting_info->iga_path = IGA2; } if (viafb_LCD_ON) { if (viafb_primary_dev == LCD_Device) { if (viafb_dual_fb && (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)) { viaparinfo-> lvds_setting_info->iga_path = IGA2; crt_iga_path = IGA1; viaparinfo-> tmds_setting_info->iga_path = IGA1; } else viaparinfo-> lvds_setting_info->iga_path = IGA1; } else { viaparinfo->lvds_setting_info->iga_path = IGA2; } } if (viafb_LCD2_ON) { if (LCD2_Device == viafb_primary_dev) viaparinfo->lvds_setting_info2->iga_path = IGA1; else viaparinfo->lvds_setting_info2->iga_path = IGA2; } } else { viafb_SAMM_ON = 0; if (viafb_CRT_ON && viafb_LCD_ON) { crt_iga_path = IGA1; viaparinfo->lvds_setting_info->iga_path = IGA2; } else if (viafb_CRT_ON && viafb_DVI_ON) { crt_iga_path = IGA1; viaparinfo->tmds_setting_info->iga_path = IGA2; } else if (viafb_LCD_ON && viafb_DVI_ON) { viaparinfo->tmds_setting_info->iga_path = IGA1; viaparinfo->lvds_setting_info->iga_path = IGA2; } else if (viafb_LCD_ON && viafb_LCD2_ON) { viaparinfo->lvds_setting_info->iga_path = IGA2; viaparinfo->lvds_setting_info2->iga_path = IGA2; } else if (viafb_CRT_ON) { crt_iga_path = IGA1; } else if (viafb_LCD_ON) { viaparinfo->lvds_setting_info->iga_path = IGA2; } else if (viafb_DVI_ON) { viaparinfo->tmds_setting_info->iga_path = IGA1; } } viaparinfo->shared->iga1_devices = 0; viaparinfo->shared->iga2_devices = 0; if (viafb_CRT_ON) { if (crt_iga_path == IGA1) viaparinfo->shared->iga1_devices |= VIA_CRT; else viaparinfo->shared->iga2_devices |= VIA_CRT; } if (viafb_DVI_ON) { if (viaparinfo->tmds_setting_info->iga_path == IGA1) viaparinfo->shared->iga1_devices |= get_dvi_devices( viaparinfo->chip_info-> tmds_chip_info.output_interface); else viaparinfo->shared->iga2_devices |= get_dvi_devices( viaparinfo->chip_info-> tmds_chip_info.output_interface); } if (viafb_LCD_ON) { if (viaparinfo->lvds_setting_info->iga_path == IGA1) viaparinfo->shared->iga1_devices |= get_lcd_devices( viaparinfo->chip_info-> lvds_chip_info.output_interface); else viaparinfo->shared->iga2_devices |= get_lcd_devices( viaparinfo->chip_info-> lvds_chip_info.output_interface); } if (viafb_LCD2_ON) { if (viaparinfo->lvds_setting_info2->iga_path == IGA1) viaparinfo->shared->iga1_devices |= get_lcd_devices( viaparinfo->chip_info-> lvds_chip_info2.output_interface); else viaparinfo->shared->iga2_devices |= get_lcd_devices( viaparinfo->chip_info-> lvds_chip_info2.output_interface); } /* looks like the OLPC has its display wired to DVP1 and LVDS2 */ if (machine_is_olpc()) viaparinfo->shared->iga2_devices = VIA_DVP1 | VIA_LVDS2; } static void set_color_register(u8 index, u8 red, u8 green, u8 blue) { outb(0xFF, 0x3C6); /* bit mask of palette */ outb(index, 0x3C8); outb(red, 0x3C9); outb(green, 0x3C9); outb(blue, 0x3C9); } void viafb_set_primary_color_register(u8 index, u8 red, u8 green, u8 blue) { viafb_write_reg_mask(0x1A, VIASR, 0x00, 0x01); set_color_register(index, red, green, blue); } void 
viafb_set_secondary_color_register(u8 index, u8 red, u8 green, u8 blue) { viafb_write_reg_mask(0x1A, VIASR, 0x01, 0x01); set_color_register(index, red, green, blue); } static void set_source_common(u8 index, u8 offset, u8 iga) { u8 value, mask = 1 << offset; switch (iga) { case IGA1: value = 0x00; break; case IGA2: value = mask; break; default: printk(KERN_WARNING "viafb: Unsupported source: %d\n", iga); return; } via_write_reg_mask(VIACR, index, value, mask); } static void set_crt_source(u8 iga) { u8 value; switch (iga) { case IGA1: value = 0x00; break; case IGA2: value = 0x40; break; default: printk(KERN_WARNING "viafb: Unsupported source: %d\n", iga); return; } via_write_reg_mask(VIASR, 0x16, value, 0x40); } static inline void set_ldvp0_source(u8 iga) { set_source_common(0x6C, 7, iga); } static inline void set_ldvp1_source(u8 iga) { set_source_common(0x93, 7, iga); } static inline void set_dvp0_source(u8 iga) { set_source_common(0x96, 4, iga); } static inline void set_dvp1_source(u8 iga) { set_source_common(0x9B, 4, iga); } static inline void set_lvds1_source(u8 iga) { set_source_common(0x99, 4, iga); } static inline void set_lvds2_source(u8 iga) { set_source_common(0x97, 4, iga); } void via_set_source(u32 devices, u8 iga) { if (devices & VIA_LDVP0) set_ldvp0_source(iga); if (devices & VIA_LDVP1) set_ldvp1_source(iga); if (devices & VIA_DVP0) set_dvp0_source(iga); if (devices & VIA_CRT) set_crt_source(iga); if (devices & VIA_DVP1) set_dvp1_source(iga); if (devices & VIA_LVDS1) set_lvds1_source(iga); if (devices & VIA_LVDS2) set_lvds2_source(iga); } static void set_crt_state(u8 state) { u8 value; switch (state) { case VIA_STATE_ON: value = 0x00; break; case VIA_STATE_STANDBY: value = 0x10; break; case VIA_STATE_SUSPEND: value = 0x20; break; case VIA_STATE_OFF: value = 0x30; break; default: return; } via_write_reg_mask(VIACR, 0x36, value, 0x30); } static void set_dvp0_state(u8 state) { u8 value; switch (state) { case VIA_STATE_ON: value = 0xC0; break; case VIA_STATE_OFF: value = 0x00; break; default: return; } via_write_reg_mask(VIASR, 0x1E, value, 0xC0); } static void set_dvp1_state(u8 state) { u8 value; switch (state) { case VIA_STATE_ON: value = 0x30; break; case VIA_STATE_OFF: value = 0x00; break; default: return; } via_write_reg_mask(VIASR, 0x1E, value, 0x30); } static void set_lvds1_state(u8 state) { u8 value; switch (state) { case VIA_STATE_ON: value = 0x03; break; case VIA_STATE_OFF: value = 0x00; break; default: return; } via_write_reg_mask(VIASR, 0x2A, value, 0x03); } static void set_lvds2_state(u8 state) { u8 value; switch (state) { case VIA_STATE_ON: value = 0x0C; break; case VIA_STATE_OFF: value = 0x00; break; default: return; } via_write_reg_mask(VIASR, 0x2A, value, 0x0C); } void via_set_state(u32 devices, u8 state) { /* TODO: Can we enable/disable these devices? How? 
if (devices & VIA_LDVP0) if (devices & VIA_LDVP1) */ if (devices & VIA_DVP0) set_dvp0_state(state); if (devices & VIA_CRT) set_crt_state(state); if (devices & VIA_DVP1) set_dvp1_state(state); if (devices & VIA_LVDS1) set_lvds1_state(state); if (devices & VIA_LVDS2) set_lvds2_state(state); } void via_set_sync_polarity(u32 devices, u8 polarity) { if (polarity & ~(VIA_HSYNC_NEGATIVE | VIA_VSYNC_NEGATIVE)) { printk(KERN_WARNING "viafb: Unsupported polarity: %d\n", polarity); return; } if (devices & VIA_CRT) via_write_misc_reg_mask(polarity << 6, 0xC0); if (devices & VIA_DVP1) via_write_reg_mask(VIACR, 0x9B, polarity << 5, 0x60); if (devices & VIA_LVDS1) via_write_reg_mask(VIACR, 0x99, polarity << 5, 0x60); if (devices & VIA_LVDS2) via_write_reg_mask(VIACR, 0x97, polarity << 5, 0x60); } u32 via_parse_odev(char *input, char **end) { char *ptr = input; u32 odev = 0; bool next = true; int i, len; while (next) { next = false; for (i = 0; i < ARRAY_SIZE(device_mapping); i++) { len = strlen(device_mapping[i].name); if (!strncmp(ptr, device_mapping[i].name, len)) { odev |= device_mapping[i].device; ptr += len; if (*ptr == ',') { ptr++; next = true; } } } } *end = ptr; return odev; } void via_odev_to_seq(struct seq_file *m, u32 odev) { int i, count = 0; for (i = 0; i < ARRAY_SIZE(device_mapping); i++) { if (odev & device_mapping[i].device) { if (count > 0) seq_putc(m, ','); seq_puts(m, device_mapping[i].name); count++; } } seq_putc(m, '\n'); } static void load_fix_bit_crtc_reg(void) { viafb_unlock_crt(); /* always set to 1 */ viafb_write_reg_mask(CR03, VIACR, 0x80, BIT7); /* line compare should set all bits = 1 (extend modes) */ viafb_write_reg_mask(CR35, VIACR, 0x10, BIT4); /* line compare should set all bits = 1 (extend modes) */ viafb_write_reg_mask(CR33, VIACR, 0x06, BIT0 + BIT1 + BIT2); /*viafb_write_reg_mask(CR32, VIACR, 0x01, BIT0); */ viafb_lock_crt(); /* If K8M800, enable Prefetch Mode. 
*/ if ((viaparinfo->chip_info->gfx_chip_name == UNICHROME_K800) || (viaparinfo->chip_info->gfx_chip_name == UNICHROME_K8M890)) viafb_write_reg_mask(CR33, VIACR, 0x08, BIT3); if ((viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) && (viaparinfo->chip_info->gfx_chip_revision == CLE266_REVISION_AX)) viafb_write_reg_mask(SR1A, VIASR, 0x02, BIT1); } void viafb_load_reg(int timing_value, int viafb_load_reg_num, struct io_register *reg, int io_type) { int reg_mask; int bit_num = 0; int data; int i, j; int shift_next_reg; int start_index, end_index, cr_index; u16 get_bit; for (i = 0; i < viafb_load_reg_num; i++) { reg_mask = 0; data = 0; start_index = reg[i].start_bit; end_index = reg[i].end_bit; cr_index = reg[i].io_addr; shift_next_reg = bit_num; for (j = start_index; j <= end_index; j++) { /*if (bit_num==8) timing_value = timing_value >>8; */ reg_mask = reg_mask | (BIT0 << j); get_bit = (timing_value & (BIT0 << bit_num)); data = data | ((get_bit >> shift_next_reg) << start_index); bit_num++; } if (io_type == VIACR) viafb_write_reg_mask(cr_index, VIACR, data, reg_mask); else viafb_write_reg_mask(cr_index, VIASR, data, reg_mask); } } /* Write Registers */ void viafb_write_regx(struct io_reg RegTable[], int ItemNum) { int i; /*DEBUG_MSG(KERN_INFO "Table Size : %x!!\n",ItemNum ); */ for (i = 0; i < ItemNum; i++) via_write_reg_mask(RegTable[i].port, RegTable[i].index, RegTable[i].value, RegTable[i].mask); } void viafb_load_fetch_count_reg(int h_addr, int bpp_byte, int set_iga) { int reg_value; int viafb_load_reg_num; struct io_register *reg = NULL; switch (set_iga) { case IGA1: reg_value = IGA1_FETCH_COUNT_FORMULA(h_addr, bpp_byte); viafb_load_reg_num = fetch_count_reg. iga1_fetch_count_reg.reg_num; reg = fetch_count_reg.iga1_fetch_count_reg.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIASR); break; case IGA2: reg_value = IGA2_FETCH_COUNT_FORMULA(h_addr, bpp_byte); viafb_load_reg_num = fetch_count_reg. 
iga2_fetch_count_reg.reg_num; reg = fetch_count_reg.iga2_fetch_count_reg.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); break; } } void viafb_load_FIFO_reg(int set_iga, int hor_active, int ver_active) { int reg_value; int viafb_load_reg_num; struct io_register *reg = NULL; int iga1_fifo_max_depth = 0, iga1_fifo_threshold = 0, iga1_fifo_high_threshold = 0, iga1_display_queue_expire_num = 0; int iga2_fifo_max_depth = 0, iga2_fifo_threshold = 0, iga2_fifo_high_threshold = 0, iga2_display_queue_expire_num = 0; if (set_iga == IGA1) { if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_K800) { iga1_fifo_max_depth = K800_IGA1_FIFO_MAX_DEPTH; iga1_fifo_threshold = K800_IGA1_FIFO_THRESHOLD; iga1_fifo_high_threshold = K800_IGA1_FIFO_HIGH_THRESHOLD; /* If resolution > 1280x1024, expire length = 64, else expire length = 128 */ if ((hor_active > 1280) && (ver_active > 1024)) iga1_display_queue_expire_num = 16; else iga1_display_queue_expire_num = K800_IGA1_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_PM800) { iga1_fifo_max_depth = P880_IGA1_FIFO_MAX_DEPTH; iga1_fifo_threshold = P880_IGA1_FIFO_THRESHOLD; iga1_fifo_high_threshold = P880_IGA1_FIFO_HIGH_THRESHOLD; iga1_display_queue_expire_num = P880_IGA1_DISPLAY_QUEUE_EXPIRE_NUM; /* If resolution > 1280x1024, expire length = 64, else expire length = 128 */ if ((hor_active > 1280) && (ver_active > 1024)) iga1_display_queue_expire_num = 16; else iga1_display_queue_expire_num = P880_IGA1_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CN700) { iga1_fifo_max_depth = CN700_IGA1_FIFO_MAX_DEPTH; iga1_fifo_threshold = CN700_IGA1_FIFO_THRESHOLD; iga1_fifo_high_threshold = CN700_IGA1_FIFO_HIGH_THRESHOLD; /* If resolution > 1280x1024, expire length = 64, else expire length = 128 */ if ((hor_active > 1280) && (ver_active > 1024)) iga1_display_queue_expire_num = 16; else iga1_display_queue_expire_num = CN700_IGA1_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CX700) { iga1_fifo_max_depth = CX700_IGA1_FIFO_MAX_DEPTH; iga1_fifo_threshold = CX700_IGA1_FIFO_THRESHOLD; iga1_fifo_high_threshold = CX700_IGA1_FIFO_HIGH_THRESHOLD; iga1_display_queue_expire_num = CX700_IGA1_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_K8M890) { iga1_fifo_max_depth = K8M890_IGA1_FIFO_MAX_DEPTH; iga1_fifo_threshold = K8M890_IGA1_FIFO_THRESHOLD; iga1_fifo_high_threshold = K8M890_IGA1_FIFO_HIGH_THRESHOLD; iga1_display_queue_expire_num = K8M890_IGA1_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_P4M890) { iga1_fifo_max_depth = P4M890_IGA1_FIFO_MAX_DEPTH; iga1_fifo_threshold = P4M890_IGA1_FIFO_THRESHOLD; iga1_fifo_high_threshold = P4M890_IGA1_FIFO_HIGH_THRESHOLD; iga1_display_queue_expire_num = P4M890_IGA1_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_P4M900) { iga1_fifo_max_depth = P4M900_IGA1_FIFO_MAX_DEPTH; iga1_fifo_threshold = P4M900_IGA1_FIFO_THRESHOLD; iga1_fifo_high_threshold = P4M900_IGA1_FIFO_HIGH_THRESHOLD; iga1_display_queue_expire_num = P4M900_IGA1_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_VX800) { iga1_fifo_max_depth = VX800_IGA1_FIFO_MAX_DEPTH; iga1_fifo_threshold = VX800_IGA1_FIFO_THRESHOLD; iga1_fifo_high_threshold = VX800_IGA1_FIFO_HIGH_THRESHOLD; iga1_display_queue_expire_num = VX800_IGA1_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_VX855) { iga1_fifo_max_depth = 
VX855_IGA1_FIFO_MAX_DEPTH; iga1_fifo_threshold = VX855_IGA1_FIFO_THRESHOLD; iga1_fifo_high_threshold = VX855_IGA1_FIFO_HIGH_THRESHOLD; iga1_display_queue_expire_num = VX855_IGA1_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_VX900) { iga1_fifo_max_depth = VX900_IGA1_FIFO_MAX_DEPTH; iga1_fifo_threshold = VX900_IGA1_FIFO_THRESHOLD; iga1_fifo_high_threshold = VX900_IGA1_FIFO_HIGH_THRESHOLD; iga1_display_queue_expire_num = VX900_IGA1_DISPLAY_QUEUE_EXPIRE_NUM; } /* Set Display FIFO Depath Select */ reg_value = IGA1_FIFO_DEPTH_SELECT_FORMULA(iga1_fifo_max_depth); viafb_load_reg_num = display_fifo_depth_reg.iga1_fifo_depth_select_reg.reg_num; reg = display_fifo_depth_reg.iga1_fifo_depth_select_reg.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIASR); /* Set Display FIFO Threshold Select */ reg_value = IGA1_FIFO_THRESHOLD_FORMULA(iga1_fifo_threshold); viafb_load_reg_num = fifo_threshold_select_reg. iga1_fifo_threshold_select_reg.reg_num; reg = fifo_threshold_select_reg. iga1_fifo_threshold_select_reg.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIASR); /* Set FIFO High Threshold Select */ reg_value = IGA1_FIFO_HIGH_THRESHOLD_FORMULA(iga1_fifo_high_threshold); viafb_load_reg_num = fifo_high_threshold_select_reg. iga1_fifo_high_threshold_select_reg.reg_num; reg = fifo_high_threshold_select_reg. iga1_fifo_high_threshold_select_reg.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIASR); /* Set Display Queue Expire Num */ reg_value = IGA1_DISPLAY_QUEUE_EXPIRE_NUM_FORMULA (iga1_display_queue_expire_num); viafb_load_reg_num = display_queue_expire_num_reg. iga1_display_queue_expire_num_reg.reg_num; reg = display_queue_expire_num_reg. iga1_display_queue_expire_num_reg.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIASR); } else { if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_K800) { iga2_fifo_max_depth = K800_IGA2_FIFO_MAX_DEPTH; iga2_fifo_threshold = K800_IGA2_FIFO_THRESHOLD; iga2_fifo_high_threshold = K800_IGA2_FIFO_HIGH_THRESHOLD; /* If resolution > 1280x1024, expire length = 64, else expire length = 128 */ if ((hor_active > 1280) && (ver_active > 1024)) iga2_display_queue_expire_num = 16; else iga2_display_queue_expire_num = K800_IGA2_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_PM800) { iga2_fifo_max_depth = P880_IGA2_FIFO_MAX_DEPTH; iga2_fifo_threshold = P880_IGA2_FIFO_THRESHOLD; iga2_fifo_high_threshold = P880_IGA2_FIFO_HIGH_THRESHOLD; /* If resolution > 1280x1024, expire length = 64, else expire length = 128 */ if ((hor_active > 1280) && (ver_active > 1024)) iga2_display_queue_expire_num = 16; else iga2_display_queue_expire_num = P880_IGA2_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CN700) { iga2_fifo_max_depth = CN700_IGA2_FIFO_MAX_DEPTH; iga2_fifo_threshold = CN700_IGA2_FIFO_THRESHOLD; iga2_fifo_high_threshold = CN700_IGA2_FIFO_HIGH_THRESHOLD; /* If resolution > 1280x1024, expire length = 64, else expire length = 128 */ if ((hor_active > 1280) && (ver_active > 1024)) iga2_display_queue_expire_num = 16; else iga2_display_queue_expire_num = CN700_IGA2_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CX700) { iga2_fifo_max_depth = CX700_IGA2_FIFO_MAX_DEPTH; iga2_fifo_threshold = CX700_IGA2_FIFO_THRESHOLD; iga2_fifo_high_threshold = CX700_IGA2_FIFO_HIGH_THRESHOLD; iga2_display_queue_expire_num = CX700_IGA2_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_K8M890) { 
iga2_fifo_max_depth = K8M890_IGA2_FIFO_MAX_DEPTH; iga2_fifo_threshold = K8M890_IGA2_FIFO_THRESHOLD; iga2_fifo_high_threshold = K8M890_IGA2_FIFO_HIGH_THRESHOLD; iga2_display_queue_expire_num = K8M890_IGA2_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_P4M890) { iga2_fifo_max_depth = P4M890_IGA2_FIFO_MAX_DEPTH; iga2_fifo_threshold = P4M890_IGA2_FIFO_THRESHOLD; iga2_fifo_high_threshold = P4M890_IGA2_FIFO_HIGH_THRESHOLD; iga2_display_queue_expire_num = P4M890_IGA2_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_P4M900) { iga2_fifo_max_depth = P4M900_IGA2_FIFO_MAX_DEPTH; iga2_fifo_threshold = P4M900_IGA2_FIFO_THRESHOLD; iga2_fifo_high_threshold = P4M900_IGA2_FIFO_HIGH_THRESHOLD; iga2_display_queue_expire_num = P4M900_IGA2_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_VX800) { iga2_fifo_max_depth = VX800_IGA2_FIFO_MAX_DEPTH; iga2_fifo_threshold = VX800_IGA2_FIFO_THRESHOLD; iga2_fifo_high_threshold = VX800_IGA2_FIFO_HIGH_THRESHOLD; iga2_display_queue_expire_num = VX800_IGA2_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_VX855) { iga2_fifo_max_depth = VX855_IGA2_FIFO_MAX_DEPTH; iga2_fifo_threshold = VX855_IGA2_FIFO_THRESHOLD; iga2_fifo_high_threshold = VX855_IGA2_FIFO_HIGH_THRESHOLD; iga2_display_queue_expire_num = VX855_IGA2_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_VX900) { iga2_fifo_max_depth = VX900_IGA2_FIFO_MAX_DEPTH; iga2_fifo_threshold = VX900_IGA2_FIFO_THRESHOLD; iga2_fifo_high_threshold = VX900_IGA2_FIFO_HIGH_THRESHOLD; iga2_display_queue_expire_num = VX900_IGA2_DISPLAY_QUEUE_EXPIRE_NUM; } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_K800) { /* Set Display FIFO Depath Select */ reg_value = IGA2_FIFO_DEPTH_SELECT_FORMULA(iga2_fifo_max_depth) - 1; /* Patch LCD in IGA2 case */ viafb_load_reg_num = display_fifo_depth_reg. iga2_fifo_depth_select_reg.reg_num; reg = display_fifo_depth_reg. iga2_fifo_depth_select_reg.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); } else { /* Set Display FIFO Depath Select */ reg_value = IGA2_FIFO_DEPTH_SELECT_FORMULA(iga2_fifo_max_depth); viafb_load_reg_num = display_fifo_depth_reg. iga2_fifo_depth_select_reg.reg_num; reg = display_fifo_depth_reg. iga2_fifo_depth_select_reg.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); } /* Set Display FIFO Threshold Select */ reg_value = IGA2_FIFO_THRESHOLD_FORMULA(iga2_fifo_threshold); viafb_load_reg_num = fifo_threshold_select_reg. iga2_fifo_threshold_select_reg.reg_num; reg = fifo_threshold_select_reg. iga2_fifo_threshold_select_reg.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); /* Set FIFO High Threshold Select */ reg_value = IGA2_FIFO_HIGH_THRESHOLD_FORMULA(iga2_fifo_high_threshold); viafb_load_reg_num = fifo_high_threshold_select_reg. iga2_fifo_high_threshold_select_reg.reg_num; reg = fifo_high_threshold_select_reg. iga2_fifo_high_threshold_select_reg.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); /* Set Display Queue Expire Num */ reg_value = IGA2_DISPLAY_QUEUE_EXPIRE_NUM_FORMULA (iga2_display_queue_expire_num); viafb_load_reg_num = display_queue_expire_num_reg. iga2_display_queue_expire_num_reg.reg_num; reg = display_queue_expire_num_reg. 
iga2_display_queue_expire_num_reg.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); } } static struct via_pll_config get_pll_config(struct pll_limit *limits, int size, int clk) { struct via_pll_config cur, up, down, best = {0, 1, 0}; const u32 f0 = 14318180; /* X1 frequency */ int i, f; for (i = 0; i < size; i++) { cur.rshift = limits[i].rshift; cur.divisor = limits[i].divisor; cur.multiplier = clk / ((f0 / cur.divisor)>>cur.rshift); f = abs(get_pll_output_frequency(f0, cur) - clk); up = down = cur; up.multiplier++; down.multiplier--; if (abs(get_pll_output_frequency(f0, up) - clk) < f) cur = up; else if (abs(get_pll_output_frequency(f0, down) - clk) < f) cur = down; if (cur.multiplier < limits[i].multiplier_min) cur.multiplier = limits[i].multiplier_min; else if (cur.multiplier > limits[i].multiplier_max) cur.multiplier = limits[i].multiplier_max; f = abs(get_pll_output_frequency(f0, cur) - clk); if (f < abs(get_pll_output_frequency(f0, best) - clk)) best = cur; } return best; } static struct via_pll_config get_best_pll_config(int clk) { struct via_pll_config config; switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_CLE266: case UNICHROME_K400: config = get_pll_config(cle266_pll_limits, ARRAY_SIZE(cle266_pll_limits), clk); break; case UNICHROME_K800: case UNICHROME_PM800: case UNICHROME_CN700: config = get_pll_config(k800_pll_limits, ARRAY_SIZE(k800_pll_limits), clk); break; case UNICHROME_CX700: case UNICHROME_CN750: case UNICHROME_K8M890: case UNICHROME_P4M890: case UNICHROME_P4M900: case UNICHROME_VX800: config = get_pll_config(cx700_pll_limits, ARRAY_SIZE(cx700_pll_limits), clk); break; case UNICHROME_VX855: case UNICHROME_VX900: config = get_pll_config(vx855_pll_limits, ARRAY_SIZE(vx855_pll_limits), clk); break; } return config; } /* Set VCLK*/ void viafb_set_vclock(u32 clk, int set_iga) { struct via_pll_config config = get_best_pll_config(clk); if (set_iga == IGA1) clock.set_primary_pll(config); if (set_iga == IGA2) clock.set_secondary_pll(config); /* Fire! */ via_write_misc_reg_mask(0x0C, 0x0C); /* select external clock */ } struct display_timing var_to_timing(const struct fb_var_screeninfo *var, u16 cxres, u16 cyres) { struct display_timing timing; u16 dx = (var->xres - cxres) / 2, dy = (var->yres - cyres) / 2; timing.hor_addr = cxres; timing.hor_sync_start = timing.hor_addr + var->right_margin + dx; timing.hor_sync_end = timing.hor_sync_start + var->hsync_len; timing.hor_total = timing.hor_sync_end + var->left_margin + dx; timing.hor_blank_start = timing.hor_addr + dx; timing.hor_blank_end = timing.hor_total - dx; timing.ver_addr = cyres; timing.ver_sync_start = timing.ver_addr + var->lower_margin + dy; timing.ver_sync_end = timing.ver_sync_start + var->vsync_len; timing.ver_total = timing.ver_sync_end + var->upper_margin + dy; timing.ver_blank_start = timing.ver_addr + dy; timing.ver_blank_end = timing.ver_total - dy; return timing; } void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var, u16 cxres, u16 cyres, int iga) { struct display_timing crt_reg = var_to_timing(var, cxres ? cxres : var->xres, cyres ? 
cyres : var->yres); if (iga == IGA1) via_set_primary_timing(&crt_reg); else if (iga == IGA2) via_set_secondary_timing(&crt_reg); viafb_load_fetch_count_reg(var->xres, var->bits_per_pixel / 8, iga); if (viaparinfo->chip_info->gfx_chip_name != UNICHROME_CLE266 && viaparinfo->chip_info->gfx_chip_name != UNICHROME_K400) viafb_load_FIFO_reg(iga, var->xres, var->yres); viafb_set_vclock(PICOS2KHZ(var->pixclock) * 1000, iga); } void __devinit viafb_init_chip_info(int chip_type) { via_clock_init(&clock, chip_type); init_gfx_chip_info(chip_type); init_tmds_chip_info(); init_lvds_chip_info(); /*Set IGA path for each device */ viafb_set_iga_path(); viaparinfo->lvds_setting_info->display_method = viafb_lcd_dsp_method; viaparinfo->lvds_setting_info->lcd_mode = viafb_lcd_mode; viaparinfo->lvds_setting_info2->display_method = viaparinfo->lvds_setting_info->display_method; viaparinfo->lvds_setting_info2->lcd_mode = viaparinfo->lvds_setting_info->lcd_mode; } void viafb_update_device_setting(int hres, int vres, int bpp, int flag) { if (flag == 0) { viaparinfo->tmds_setting_info->h_active = hres; viaparinfo->tmds_setting_info->v_active = vres; } else { if (viaparinfo->tmds_setting_info->iga_path == IGA2) { viaparinfo->tmds_setting_info->h_active = hres; viaparinfo->tmds_setting_info->v_active = vres; } } } static void __devinit init_gfx_chip_info(int chip_type) { u8 tmp; viaparinfo->chip_info->gfx_chip_name = chip_type; /* Check revision of CLE266 Chip */ if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) { /* CR4F only define in CLE266.CX chip */ tmp = viafb_read_reg(VIACR, CR4F); viafb_write_reg(CR4F, VIACR, 0x55); if (viafb_read_reg(VIACR, CR4F) != 0x55) viaparinfo->chip_info->gfx_chip_revision = CLE266_REVISION_AX; else viaparinfo->chip_info->gfx_chip_revision = CLE266_REVISION_CX; /* restore orignal CR4F value */ viafb_write_reg(CR4F, VIACR, tmp); } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CX700) { tmp = viafb_read_reg(VIASR, SR43); DEBUG_MSG(KERN_INFO "SR43:%X\n", tmp); if (tmp & 0x02) { viaparinfo->chip_info->gfx_chip_revision = CX700_REVISION_700M2; } else if (tmp & 0x40) { viaparinfo->chip_info->gfx_chip_revision = CX700_REVISION_700M; } else { viaparinfo->chip_info->gfx_chip_revision = CX700_REVISION_700; } } /* Determine which 2D engine we have */ switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_VX800: case UNICHROME_VX855: case UNICHROME_VX900: viaparinfo->chip_info->twod_engine = VIA_2D_ENG_M1; break; case UNICHROME_K8M890: case UNICHROME_P4M900: viaparinfo->chip_info->twod_engine = VIA_2D_ENG_H5; break; default: viaparinfo->chip_info->twod_engine = VIA_2D_ENG_H2; break; } } static void __devinit init_tmds_chip_info(void) { viafb_tmds_trasmitter_identify(); if (INTERFACE_NONE == viaparinfo->chip_info->tmds_chip_info. output_interface) { switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_CX700: { /* we should check support by hardware layout.*/ if ((viafb_display_hardware_layout == HW_LAYOUT_DVI_ONLY) || (viafb_display_hardware_layout == HW_LAYOUT_LCD_DVI)) { viaparinfo->chip_info->tmds_chip_info. output_interface = INTERFACE_TMDS; } else { viaparinfo->chip_info->tmds_chip_info. output_interface = INTERFACE_NONE; } break; } case UNICHROME_K8M890: case UNICHROME_P4M900: case UNICHROME_P4M890: /* TMDS on PCIE, we set DFPLOW as default. 
*/ viaparinfo->chip_info->tmds_chip_info.output_interface = INTERFACE_DFP_LOW; break; default: { /* set DVP1 default for DVI */ viaparinfo->chip_info->tmds_chip_info .output_interface = INTERFACE_DVP1; } } } DEBUG_MSG(KERN_INFO "TMDS Chip = %d\n", viaparinfo->chip_info->tmds_chip_info.tmds_chip_name); viafb_init_dvi_size(&viaparinfo->shared->chip_info.tmds_chip_info, &viaparinfo->shared->tmds_setting_info); } static void __devinit init_lvds_chip_info(void) { viafb_lvds_trasmitter_identify(); viafb_init_lcd_size(); viafb_init_lvds_output_interface(&viaparinfo->chip_info->lvds_chip_info, viaparinfo->lvds_setting_info); if (viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) { viafb_init_lvds_output_interface(&viaparinfo->chip_info-> lvds_chip_info2, viaparinfo->lvds_setting_info2); } /*If CX700,two singel LCD, we need to reassign LCD interface to different LVDS port */ if ((UNICHROME_CX700 == viaparinfo->chip_info->gfx_chip_name) && (HW_LAYOUT_LCD1_LCD2 == viafb_display_hardware_layout)) { if ((INTEGRATED_LVDS == viaparinfo->chip_info->lvds_chip_info. lvds_chip_name) && (INTEGRATED_LVDS == viaparinfo->chip_info-> lvds_chip_info2.lvds_chip_name)) { viaparinfo->chip_info->lvds_chip_info.output_interface = INTERFACE_LVDS0; viaparinfo->chip_info->lvds_chip_info2. output_interface = INTERFACE_LVDS1; } } DEBUG_MSG(KERN_INFO "LVDS Chip = %d\n", viaparinfo->chip_info->lvds_chip_info.lvds_chip_name); DEBUG_MSG(KERN_INFO "LVDS1 output_interface = %d\n", viaparinfo->chip_info->lvds_chip_info.output_interface); DEBUG_MSG(KERN_INFO "LVDS2 output_interface = %d\n", viaparinfo->chip_info->lvds_chip_info.output_interface); } void __devinit viafb_init_dac(int set_iga) { int i; u8 tmp; if (set_iga == IGA1) { /* access Primary Display's LUT */ viafb_write_reg_mask(SR1A, VIASR, 0x00, BIT0); /* turn off LCK */ viafb_write_reg_mask(SR1B, VIASR, 0x00, BIT7 + BIT6); for (i = 0; i < 256; i++) { write_dac_reg(i, palLUT_table[i].red, palLUT_table[i].green, palLUT_table[i].blue); } /* turn on LCK */ viafb_write_reg_mask(SR1B, VIASR, 0xC0, BIT7 + BIT6); } else { tmp = viafb_read_reg(VIACR, CR6A); /* access Secondary Display's LUT */ viafb_write_reg_mask(CR6A, VIACR, 0x40, BIT6); viafb_write_reg_mask(SR1A, VIASR, 0x01, BIT0); for (i = 0; i < 256; i++) { write_dac_reg(i, palLUT_table[i].red, palLUT_table[i].green, palLUT_table[i].blue); } /* set IGA1 DAC for default */ viafb_write_reg_mask(SR1A, VIASR, 0x00, BIT0); viafb_write_reg(CR6A, VIACR, tmp); } } static void device_screen_off(void) { /* turn off CRT screen (IGA1) */ viafb_write_reg_mask(SR01, VIASR, 0x20, BIT5); } static void device_screen_on(void) { /* turn on CRT screen (IGA1) */ viafb_write_reg_mask(SR01, VIASR, 0x00, BIT5); } static void set_display_channel(void) { /*If viafb_LCD2_ON, on cx700, internal lvds's information is keeped on lvds_setting_info2 */ if (viafb_LCD2_ON && viaparinfo->lvds_setting_info2->device_lcd_dualedge) { /* For dual channel LCD: */ /* Set to Dual LVDS channel. */ viafb_write_reg_mask(CRD2, VIACR, 0x20, BIT4 + BIT5); } else if (viafb_LCD_ON && viafb_DVI_ON) { /* For LCD+DFP: */ /* Set to LVDS1 + TMDS channel. */ viafb_write_reg_mask(CRD2, VIACR, 0x10, BIT4 + BIT5); } else if (viafb_DVI_ON) { /* Set to single TMDS channel. */ viafb_write_reg_mask(CRD2, VIACR, 0x30, BIT4 + BIT5); } else if (viafb_LCD_ON) { if (viaparinfo->lvds_setting_info->device_lcd_dualedge) { /* For dual channel LCD: */ /* Set to Dual LVDS channel. */ viafb_write_reg_mask(CRD2, VIACR, 0x20, BIT4 + BIT5); } else { /* Set to LVDS0 + LVDS1 channel. 
*/ viafb_write_reg_mask(CRD2, VIACR, 0x00, BIT4 + BIT5); } } } static u8 get_sync(struct fb_var_screeninfo *var) { u8 polarity = 0; if (!(var->sync & FB_SYNC_HOR_HIGH_ACT)) polarity |= VIA_HSYNC_NEGATIVE; if (!(var->sync & FB_SYNC_VERT_HIGH_ACT)) polarity |= VIA_VSYNC_NEGATIVE; return polarity; } static void hw_init(void) { int i; inb(VIAStatus); outb(0x00, VIAAR); /* Write Common Setting for Video Mode */ viafb_write_regx(common_vga, ARRAY_SIZE(common_vga)); switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_CLE266: viafb_write_regx(CLE266_ModeXregs, NUM_TOTAL_CLE266_ModeXregs); break; case UNICHROME_K400: viafb_write_regx(KM400_ModeXregs, NUM_TOTAL_KM400_ModeXregs); break; case UNICHROME_K800: case UNICHROME_PM800: viafb_write_regx(CN400_ModeXregs, NUM_TOTAL_CN400_ModeXregs); break; case UNICHROME_CN700: case UNICHROME_K8M890: case UNICHROME_P4M890: case UNICHROME_P4M900: viafb_write_regx(CN700_ModeXregs, NUM_TOTAL_CN700_ModeXregs); break; case UNICHROME_CX700: case UNICHROME_VX800: viafb_write_regx(CX700_ModeXregs, NUM_TOTAL_CX700_ModeXregs); break; case UNICHROME_VX855: case UNICHROME_VX900: viafb_write_regx(VX855_ModeXregs, NUM_TOTAL_VX855_ModeXregs); break; } /* magic required on VX900 for correct modesetting on IGA1 */ via_write_reg_mask(VIACR, 0x45, 0x00, 0x01); /* probably this should go to the scaling code one day */ via_write_reg_mask(VIACR, 0xFD, 0, 0x80); /* VX900 hw scale on IGA2 */ viafb_write_regx(scaling_parameters, ARRAY_SIZE(scaling_parameters)); /* Fill VPIT Parameters */ /* Write Misc Register */ outb(VPIT.Misc, VIA_MISC_REG_WRITE); /* Write Sequencer */ for (i = 1; i <= StdSR; i++) via_write_reg(VIASR, i, VPIT.SR[i - 1]); viafb_write_reg_mask(0x15, VIASR, 0xA2, 0xA2); /* Write Graphic Controller */ for (i = 0; i < StdGR; i++) via_write_reg(VIAGR, i, VPIT.GR[i]); /* Write Attribute Controller */ for (i = 0; i < StdAR; i++) { inb(VIAStatus); outb(i, VIAAR); outb(VPIT.AR[i], VIAAR); } inb(VIAStatus); outb(0x20, VIAAR); load_fix_bit_crtc_reg(); } int viafb_setmode(void) { int j, cxres = 0, cyres = 0; int port; u32 devices = viaparinfo->shared->iga1_devices | viaparinfo->shared->iga2_devices; u8 value, index, mask; struct fb_var_screeninfo var2; device_screen_off(); device_off(); via_set_state(devices, VIA_STATE_OFF); hw_init(); /* Update Patch Register */ if ((viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266 || viaparinfo->chip_info->gfx_chip_name == UNICHROME_K400) && viafbinfo->var.xres == 1024 && viafbinfo->var.yres == 768) { for (j = 0; j < res_patch_table[0].table_length; j++) { index = res_patch_table[0].io_reg_table[j].index; port = res_patch_table[0].io_reg_table[j].port; value = res_patch_table[0].io_reg_table[j].value; mask = res_patch_table[0].io_reg_table[j].mask; viafb_write_reg_mask(index, port, value, mask); } } via_set_primary_pitch(viafbinfo->fix.line_length); via_set_secondary_pitch(viafb_dual_fb ? viafbinfo1->fix.line_length : viafbinfo->fix.line_length); via_set_primary_color_depth(viaparinfo->depth); via_set_secondary_color_depth(viafb_dual_fb ? 
viaparinfo1->depth : viaparinfo->depth); via_set_source(viaparinfo->shared->iga1_devices, IGA1); via_set_source(viaparinfo->shared->iga2_devices, IGA2); if (viaparinfo->shared->iga2_devices) enable_second_display_channel(); else disable_second_display_channel(); /* Update Refresh Rate Setting */ /* Clear On Screen */ if (viafb_dual_fb) { var2 = viafbinfo1->var; } else if (viafb_SAMM_ON) { viafb_fill_var_timing_info(&var2, viafb_get_best_mode( viafb_second_xres, viafb_second_yres, viafb_refresh1)); cxres = viafbinfo->var.xres; cyres = viafbinfo->var.yres; var2.bits_per_pixel = viafbinfo->var.bits_per_pixel; } /* CRT set mode */ if (viafb_CRT_ON) { if (viaparinfo->shared->iga2_devices & VIA_CRT && viafb_SAMM_ON) viafb_fill_crtc_timing(&var2, cxres, cyres, IGA2); else viafb_fill_crtc_timing(&viafbinfo->var, 0, 0, (viaparinfo->shared->iga1_devices & VIA_CRT) ? IGA1 : IGA2); /* Patch if set_hres is not 8 alignment (1366) to viafb_setmode to 8 alignment (1368),there is several pixels (2 pixels) on right side of screen. */ if (viafbinfo->var.xres % 8) { viafb_unlock_crt(); viafb_write_reg(CR02, VIACR, viafb_read_reg(VIACR, CR02) - 1); viafb_lock_crt(); } } if (viafb_DVI_ON) { if (viaparinfo->shared->tmds_setting_info.iga_path == IGA2 && viafb_SAMM_ON) viafb_dvi_set_mode(&var2, cxres, cyres, IGA2); else viafb_dvi_set_mode(&viafbinfo->var, 0, 0, viaparinfo->tmds_setting_info->iga_path); } if (viafb_LCD_ON) { if (viafb_SAMM_ON && (viaparinfo->lvds_setting_info->iga_path == IGA2)) { viafb_lcd_set_mode(&var2, cxres, cyres, viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info); } else { /* IGA1 doesn't have LCD scaling, so set it center. */ if (viaparinfo->lvds_setting_info->iga_path == IGA1) { viaparinfo->lvds_setting_info->display_method = LCD_CENTERING; } viafb_lcd_set_mode(&viafbinfo->var, 0, 0, viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info); } } if (viafb_LCD2_ON) { if (viafb_SAMM_ON && (viaparinfo->lvds_setting_info2->iga_path == IGA2)) { viafb_lcd_set_mode(&var2, cxres, cyres, viaparinfo->lvds_setting_info2, &viaparinfo->chip_info->lvds_chip_info2); } else { /* IGA1 doesn't have LCD scaling, so set it center. */ if (viaparinfo->lvds_setting_info2->iga_path == IGA1) { viaparinfo->lvds_setting_info2->display_method = LCD_CENTERING; } viafb_lcd_set_mode(&viafbinfo->var, 0, 0, viaparinfo->lvds_setting_info2, &viaparinfo->chip_info->lvds_chip_info2); } } if ((viaparinfo->chip_info->gfx_chip_name == UNICHROME_CX700) && (viafb_LCD_ON || viafb_DVI_ON)) set_display_channel(); /* If set mode normally, save resolution information for hot-plug . 
*/ if (!viafb_hotplug) { viafb_hotplug_Xres = viafbinfo->var.xres; viafb_hotplug_Yres = viafbinfo->var.yres; viafb_hotplug_bpp = viafbinfo->var.bits_per_pixel; viafb_hotplug_refresh = viafb_refresh; if (viafb_DVI_ON) viafb_DeviceStatus = DVI_Device; else viafb_DeviceStatus = CRT_Device; } device_on(); if (!viafb_SAMM_ON) via_set_sync_polarity(devices, get_sync(&viafbinfo->var)); else { via_set_sync_polarity(viaparinfo->shared->iga1_devices, get_sync(&viafbinfo->var)); via_set_sync_polarity(viaparinfo->shared->iga2_devices, get_sync(&var2)); } clock.set_engine_pll_state(VIA_STATE_ON); clock.set_primary_clock_source(VIA_CLKSRC_X1, true); clock.set_secondary_clock_source(VIA_CLKSRC_X1, true); #ifdef CONFIG_FB_VIA_X_COMPATIBILITY clock.set_primary_pll_state(VIA_STATE_ON); clock.set_primary_clock_state(VIA_STATE_ON); clock.set_secondary_pll_state(VIA_STATE_ON); clock.set_secondary_clock_state(VIA_STATE_ON); #else if (viaparinfo->shared->iga1_devices) { clock.set_primary_pll_state(VIA_STATE_ON); clock.set_primary_clock_state(VIA_STATE_ON); } else { clock.set_primary_pll_state(VIA_STATE_OFF); clock.set_primary_clock_state(VIA_STATE_OFF); } if (viaparinfo->shared->iga2_devices) { clock.set_secondary_pll_state(VIA_STATE_ON); clock.set_secondary_clock_state(VIA_STATE_ON); } else { clock.set_secondary_pll_state(VIA_STATE_OFF); clock.set_secondary_clock_state(VIA_STATE_OFF); } #endif /*CONFIG_FB_VIA_X_COMPATIBILITY*/ via_set_state(devices, VIA_STATE_ON); device_screen_on(); return 1; } int viafb_get_refresh(int hres, int vres, u32 long_refresh) { const struct fb_videomode *best; best = viafb_get_best_mode(hres, vres, long_refresh); if (!best) return 60; if (abs(best->refresh - long_refresh) > 3) { if (hres == 1200 && vres == 900) return 49; /* OLPC DCON only supports 50 Hz */ else return 60; } return best->refresh; } static void device_off(void) { viafb_dvi_disable(); viafb_lcd_disable(); } static void device_on(void) { if (viafb_DVI_ON == 1) viafb_dvi_enable(); if (viafb_LCD_ON == 1) viafb_lcd_enable(); } static void enable_second_display_channel(void) { /* to enable second display channel. */ viafb_write_reg_mask(CR6A, VIACR, 0x00, BIT6); viafb_write_reg_mask(CR6A, VIACR, BIT7, BIT7); viafb_write_reg_mask(CR6A, VIACR, BIT6, BIT6); } static void disable_second_display_channel(void) { /* to disable second display channel. 
*/ viafb_write_reg_mask(CR6A, VIACR, 0x00, BIT6); viafb_write_reg_mask(CR6A, VIACR, 0x00, BIT7); viafb_write_reg_mask(CR6A, VIACR, BIT6, BIT6); } void viafb_set_dpa_gfx(int output_interface, struct GFX_DPA_SETTING\ *p_gfx_dpa_setting) { switch (output_interface) { case INTERFACE_DVP0: { /* DVP0 Clock Polarity and Adjust: */ viafb_write_reg_mask(CR96, VIACR, p_gfx_dpa_setting->DVP0, 0x0F); /* DVP0 Clock and Data Pads Driving: */ viafb_write_reg_mask(SR1E, VIASR, p_gfx_dpa_setting->DVP0ClockDri_S, BIT2); viafb_write_reg_mask(SR2A, VIASR, p_gfx_dpa_setting->DVP0ClockDri_S1, BIT4); viafb_write_reg_mask(SR1B, VIASR, p_gfx_dpa_setting->DVP0DataDri_S, BIT1); viafb_write_reg_mask(SR2A, VIASR, p_gfx_dpa_setting->DVP0DataDri_S1, BIT5); break; } case INTERFACE_DVP1: { /* DVP1 Clock Polarity and Adjust: */ viafb_write_reg_mask(CR9B, VIACR, p_gfx_dpa_setting->DVP1, 0x0F); /* DVP1 Clock and Data Pads Driving: */ viafb_write_reg_mask(SR65, VIASR, p_gfx_dpa_setting->DVP1Driving, 0x0F); break; } case INTERFACE_DFP_HIGH: { viafb_write_reg_mask(CR97, VIACR, p_gfx_dpa_setting->DFPHigh, 0x0F); break; } case INTERFACE_DFP_LOW: { viafb_write_reg_mask(CR99, VIACR, p_gfx_dpa_setting->DFPLow, 0x0F); break; } case INTERFACE_DFP: { viafb_write_reg_mask(CR97, VIACR, p_gfx_dpa_setting->DFPHigh, 0x0F); viafb_write_reg_mask(CR99, VIACR, p_gfx_dpa_setting->DFPLow, 0x0F); break; } } } void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, const struct fb_videomode *mode) { var->pixclock = mode->pixclock; var->xres = mode->xres; var->yres = mode->yres; var->left_margin = mode->left_margin; var->right_margin = mode->right_margin; var->hsync_len = mode->hsync_len; var->upper_margin = mode->upper_margin; var->lower_margin = mode->lower_margin; var->vsync_len = mode->vsync_len; var->sync = mode->sync; }
gpl-2.0
shengdie/simon_kernel_l01f_kk
drivers/video/via/lcd.c
4989
31781
/* * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/via-core.h> #include <linux/via_i2c.h> #include "global.h" #define viafb_compact_res(x, y) (((x)<<16)|(y)) /* CLE266 Software Power Sequence */ /* {Mask}, {Data}, {Delay} */ static const int PowerSequenceOn[3][3] = { {0x10, 0x08, 0x06}, {0x10, 0x08, 0x06}, {0x19, 0x1FE, 0x01} }; static const int PowerSequenceOff[3][3] = { {0x06, 0x08, 0x10}, {0x00, 0x00, 0x00}, {0xD2, 0x19, 0x01} }; static struct _lcd_scaling_factor lcd_scaling_factor = { /* LCD Horizontal Scaling Factor Register */ {LCD_HOR_SCALING_FACTOR_REG_NUM, {{CR9F, 0, 1}, {CR77, 0, 7}, {CR79, 4, 5} } }, /* LCD Vertical Scaling Factor Register */ {LCD_VER_SCALING_FACTOR_REG_NUM, {{CR79, 3, 3}, {CR78, 0, 7}, {CR79, 6, 7} } } }; static struct _lcd_scaling_factor lcd_scaling_factor_CLE = { /* LCD Horizontal Scaling Factor Register */ {LCD_HOR_SCALING_FACTOR_REG_NUM_CLE, {{CR77, 0, 7}, {CR79, 4, 5} } }, /* LCD Vertical Scaling Factor Register */ {LCD_VER_SCALING_FACTOR_REG_NUM_CLE, {{CR78, 0, 7}, {CR79, 6, 7} } } }; static bool lvds_identify_integratedlvds(void); static void __devinit fp_id_to_vindex(int panel_id); static int lvds_register_read(int index); static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres, int panel_vres); static void lcd_patch_skew_dvp0(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info); static void lcd_patch_skew_dvp1(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info); static void lcd_patch_skew(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info); static void integrated_lvds_disable(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info); static void integrated_lvds_enable(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info); static void lcd_powersequence_off(void); static void lcd_powersequence_on(void); static void fill_lcd_format(void); static void check_diport_of_integrated_lvds( struct lvds_chip_information *plvds_chip_info, struct lvds_setting_information *plvds_setting_info); static inline bool check_lvds_chip(int device_id_subaddr, int device_id) { return lvds_register_read(device_id_subaddr) == device_id; } void __devinit viafb_init_lcd_size(void) { DEBUG_MSG(KERN_INFO "viafb_init_lcd_size()\n"); fp_id_to_vindex(viafb_lcd_panel_id); viaparinfo->lvds_setting_info2->lcd_panel_hres = viaparinfo->lvds_setting_info->lcd_panel_hres; viaparinfo->lvds_setting_info2->lcd_panel_vres = viaparinfo->lvds_setting_info->lcd_panel_vres; viaparinfo->lvds_setting_info2->device_lcd_dualedge = 
viaparinfo->lvds_setting_info->device_lcd_dualedge; viaparinfo->lvds_setting_info2->LCDDithering = viaparinfo->lvds_setting_info->LCDDithering; } static bool lvds_identify_integratedlvds(void) { if (viafb_display_hardware_layout == HW_LAYOUT_LCD_EXTERNAL_LCD2) { /* Two dual channel LCD (Internal LVDS + External LVDS): */ /* If we have an external LVDS, such as VT1636, we should have its chip ID already. */ if (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) { viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name = INTEGRATED_LVDS; DEBUG_MSG(KERN_INFO "Support two dual channel LVDS! " "(Internal LVDS + External LVDS)\n"); } else { viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = INTEGRATED_LVDS; DEBUG_MSG(KERN_INFO "Not found external LVDS, " "so can't support two dual channel LVDS!\n"); } } else if (viafb_display_hardware_layout == HW_LAYOUT_LCD1_LCD2) { /* Two single channel LCD (Internal LVDS + Internal LVDS): */ viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = INTEGRATED_LVDS; viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name = INTEGRATED_LVDS; DEBUG_MSG(KERN_INFO "Support two single channel LVDS! " "(Internal LVDS + Internal LVDS)\n"); } else if (viafb_display_hardware_layout != HW_LAYOUT_DVI_ONLY) { /* If we have found external LVDS, just use it, otherwise, we will use internal LVDS as default. */ if (!viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) { viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = INTEGRATED_LVDS; DEBUG_MSG(KERN_INFO "Found Integrated LVDS!\n"); } } else { viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = NON_LVDS_TRANSMITTER; DEBUG_MSG(KERN_INFO "Do not support LVDS!\n"); return false; } return true; } bool __devinit viafb_lvds_trasmitter_identify(void) { if (viafb_lvds_identify_vt1636(VIA_PORT_31)) { viaparinfo->chip_info->lvds_chip_info.i2c_port = VIA_PORT_31; DEBUG_MSG(KERN_INFO "Found VIA VT1636 LVDS on port i2c 0x31\n"); } else { if (viafb_lvds_identify_vt1636(VIA_PORT_2C)) { viaparinfo->chip_info->lvds_chip_info.i2c_port = VIA_PORT_2C; DEBUG_MSG(KERN_INFO "Found VIA VT1636 LVDS on port gpio 0x2c\n"); } } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CX700) lvds_identify_integratedlvds(); if (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) return true; /* Check for VT1631: */ viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = VT1631_LVDS; viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr = VT1631_LVDS_I2C_ADDR; if (check_lvds_chip(VT1631_DEVICE_ID_REG, VT1631_DEVICE_ID)) { DEBUG_MSG(KERN_INFO "\n VT1631 LVDS ! 
\n"); DEBUG_MSG(KERN_INFO "\n %2d", viaparinfo->chip_info->lvds_chip_info.lvds_chip_name); DEBUG_MSG(KERN_INFO "\n %2d", viaparinfo->chip_info->lvds_chip_info.lvds_chip_name); return true; } viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = NON_LVDS_TRANSMITTER; viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr = VT1631_LVDS_I2C_ADDR; return false; } static void __devinit fp_id_to_vindex(int panel_id) { DEBUG_MSG(KERN_INFO "fp_get_panel_id()\n"); if (panel_id > LCD_PANEL_ID_MAXIMUM) viafb_lcd_panel_id = panel_id = viafb_read_reg(VIACR, CR3F) & 0x0F; switch (panel_id) { case 0x0: viaparinfo->lvds_setting_info->lcd_panel_hres = 640; viaparinfo->lvds_setting_info->lcd_panel_vres = 480; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x1: viaparinfo->lvds_setting_info->lcd_panel_hres = 800; viaparinfo->lvds_setting_info->lcd_panel_vres = 600; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x2: viaparinfo->lvds_setting_info->lcd_panel_hres = 1024; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x3: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x4: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 1024; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x5: viaparinfo->lvds_setting_info->lcd_panel_hres = 1400; viaparinfo->lvds_setting_info->lcd_panel_vres = 1050; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x6: viaparinfo->lvds_setting_info->lcd_panel_hres = 1600; viaparinfo->lvds_setting_info->lcd_panel_vres = 1200; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x8: viaparinfo->lvds_setting_info->lcd_panel_hres = 800; viaparinfo->lvds_setting_info->lcd_panel_vres = 480; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x9: viaparinfo->lvds_setting_info->lcd_panel_hres = 1024; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0xA: viaparinfo->lvds_setting_info->lcd_panel_hres = 1024; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0xB: viaparinfo->lvds_setting_info->lcd_panel_hres = 1024; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0xC: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0xD: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 1024; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; 
viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0xE: viaparinfo->lvds_setting_info->lcd_panel_hres = 1400; viaparinfo->lvds_setting_info->lcd_panel_vres = 1050; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0xF: viaparinfo->lvds_setting_info->lcd_panel_hres = 1600; viaparinfo->lvds_setting_info->lcd_panel_vres = 1200; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0x10: viaparinfo->lvds_setting_info->lcd_panel_hres = 1366; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0x11: viaparinfo->lvds_setting_info->lcd_panel_hres = 1024; viaparinfo->lvds_setting_info->lcd_panel_vres = 600; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x12: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x13: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 800; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x14: viaparinfo->lvds_setting_info->lcd_panel_hres = 1360; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0x15: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0x16: viaparinfo->lvds_setting_info->lcd_panel_hres = 480; viaparinfo->lvds_setting_info->lcd_panel_vres = 640; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x17: /* OLPC XO-1.5 panel */ viaparinfo->lvds_setting_info->lcd_panel_hres = 1200; viaparinfo->lvds_setting_info->lcd_panel_vres = 900; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 0; break; default: viaparinfo->lvds_setting_info->lcd_panel_hres = 800; viaparinfo->lvds_setting_info->lcd_panel_vres = 600; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; } } static int lvds_register_read(int index) { u8 data; viafb_i2c_readbyte(VIA_PORT_2C, (u8) viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr, (u8) index, &data); return data; } static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres, int panel_vres) { int reg_value = 0; int viafb_load_reg_num; struct io_register *reg = NULL; DEBUG_MSG(KERN_INFO "load_lcd_scaling()!!\n"); /* LCD Scaling Enable */ viafb_write_reg_mask(CR79, VIACR, 0x07, BIT0 + BIT1 + BIT2); /* Check if expansion for horizontal */ if (set_hres < panel_hres) { /* Load Horizontal Scaling Factor */ switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_CLE266: case UNICHROME_K400: reg_value = CLE266_LCD_HOR_SCF_FORMULA(set_hres, panel_hres); viafb_load_reg_num = lcd_scaling_factor_CLE.lcd_hor_scaling_factor. 
reg_num; reg = lcd_scaling_factor_CLE.lcd_hor_scaling_factor.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); break; case UNICHROME_K800: case UNICHROME_PM800: case UNICHROME_CN700: case UNICHROME_CX700: case UNICHROME_K8M890: case UNICHROME_P4M890: case UNICHROME_P4M900: case UNICHROME_CN750: case UNICHROME_VX800: case UNICHROME_VX855: case UNICHROME_VX900: reg_value = K800_LCD_HOR_SCF_FORMULA(set_hres, panel_hres); /* Horizontal scaling enabled */ viafb_write_reg_mask(CRA2, VIACR, 0xC0, BIT7 + BIT6); viafb_load_reg_num = lcd_scaling_factor.lcd_hor_scaling_factor.reg_num; reg = lcd_scaling_factor.lcd_hor_scaling_factor.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); break; } DEBUG_MSG(KERN_INFO "Horizontal Scaling value = %d", reg_value); } else { /* Horizontal scaling disabled */ viafb_write_reg_mask(CRA2, VIACR, 0x00, BIT7); } /* Check if expansion for vertical */ if (set_vres < panel_vres) { /* Load Vertical Scaling Factor */ switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_CLE266: case UNICHROME_K400: reg_value = CLE266_LCD_VER_SCF_FORMULA(set_vres, panel_vres); viafb_load_reg_num = lcd_scaling_factor_CLE.lcd_ver_scaling_factor. reg_num; reg = lcd_scaling_factor_CLE.lcd_ver_scaling_factor.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); break; case UNICHROME_K800: case UNICHROME_PM800: case UNICHROME_CN700: case UNICHROME_CX700: case UNICHROME_K8M890: case UNICHROME_P4M890: case UNICHROME_P4M900: case UNICHROME_CN750: case UNICHROME_VX800: case UNICHROME_VX855: case UNICHROME_VX900: reg_value = K800_LCD_VER_SCF_FORMULA(set_vres, panel_vres); /* Vertical scaling enabled */ viafb_write_reg_mask(CRA2, VIACR, 0x08, BIT3); viafb_load_reg_num = lcd_scaling_factor.lcd_ver_scaling_factor.reg_num; reg = lcd_scaling_factor.lcd_ver_scaling_factor.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); break; } DEBUG_MSG(KERN_INFO "Vertical Scaling value = %d", reg_value); } else { /* Vertical scaling disabled */ viafb_write_reg_mask(CRA2, VIACR, 0x00, BIT3); } } static void via_pitch_alignment_patch_lcd(int iga_path, int hres, int bpp) { unsigned char cr13, cr35, cr65, cr66, cr67; unsigned long dwScreenPitch = 0; unsigned long dwPitch; dwPitch = hres * (bpp >> 3); if (dwPitch & 0x1F) { dwScreenPitch = ((dwPitch + 31) & ~31) >> 3; if (iga_path == IGA2) { if (bpp > 8) { cr66 = (unsigned char)(dwScreenPitch & 0xFF); viafb_write_reg(CR66, VIACR, cr66); cr67 = viafb_read_reg(VIACR, CR67) & 0xFC; cr67 |= (unsigned char)((dwScreenPitch & 0x300) >> 8); viafb_write_reg(CR67, VIACR, cr67); } /* Fetch Count */ cr67 = viafb_read_reg(VIACR, CR67) & 0xF3; cr67 |= (unsigned char)((dwScreenPitch & 0x600) >> 7); viafb_write_reg(CR67, VIACR, cr67); cr65 = (unsigned char)((dwScreenPitch >> 1) & 0xFF); cr65 += 2; viafb_write_reg(CR65, VIACR, cr65); } else { if (bpp > 8) { cr13 = (unsigned char)(dwScreenPitch & 0xFF); viafb_write_reg(CR13, VIACR, cr13); cr35 = viafb_read_reg(VIACR, CR35) & 0x1F; cr35 |= (unsigned char)((dwScreenPitch & 0x700) >> 3); viafb_write_reg(CR35, VIACR, cr35); } } } } static void lcd_patch_skew_dvp0(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { if (VT1636_LVDS == plvds_chip_info->lvds_chip_name) { switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_P4M900: viafb_vt1636_patch_skew_on_vt3364(plvds_setting_info, plvds_chip_info); break; case UNICHROME_P4M890: viafb_vt1636_patch_skew_on_vt3327(plvds_setting_info, plvds_chip_info); break; } } } static void 
lcd_patch_skew_dvp1(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { if (VT1636_LVDS == plvds_chip_info->lvds_chip_name) { switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_CX700: viafb_vt1636_patch_skew_on_vt3324(plvds_setting_info, plvds_chip_info); break; } } } static void lcd_patch_skew(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { DEBUG_MSG(KERN_INFO "lcd_patch_skew\n"); switch (plvds_chip_info->output_interface) { case INTERFACE_DVP0: lcd_patch_skew_dvp0(plvds_setting_info, plvds_chip_info); break; case INTERFACE_DVP1: lcd_patch_skew_dvp1(plvds_setting_info, plvds_chip_info); break; case INTERFACE_DFP_LOW: if (UNICHROME_P4M900 == viaparinfo->chip_info->gfx_chip_name) { viafb_write_reg_mask(CR99, VIACR, 0x08, BIT0 + BIT1 + BIT2 + BIT3); } break; } } /* LCD Set Mode */ void viafb_lcd_set_mode(const struct fb_var_screeninfo *var, u16 cxres, u16 cyres, struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { int set_iga = plvds_setting_info->iga_path; int mode_bpp = var->bits_per_pixel; int set_hres = cxres ? cxres : var->xres; int set_vres = cyres ? cyres : var->yres; int panel_hres = plvds_setting_info->lcd_panel_hres; int panel_vres = plvds_setting_info->lcd_panel_vres; u32 clock; struct display_timing timing; struct fb_var_screeninfo panel_var; const struct fb_videomode *mode_crt_table, *panel_crt_table; DEBUG_MSG(KERN_INFO "viafb_lcd_set_mode!!\n"); /* Get mode table */ mode_crt_table = viafb_get_best_mode(set_hres, set_vres, 60); /* Get panel table Pointer */ panel_crt_table = viafb_get_best_mode(panel_hres, panel_vres, 60); viafb_fill_var_timing_info(&panel_var, panel_crt_table); DEBUG_MSG(KERN_INFO "bellow viafb_lcd_set_mode!!\n"); if (VT1636_LVDS == plvds_chip_info->lvds_chip_name) viafb_init_lvds_vt1636(plvds_setting_info, plvds_chip_info); clock = PICOS2KHZ(panel_crt_table->pixclock) * 1000; plvds_setting_info->vclk = clock; if (set_iga == IGA2 && (set_hres < panel_hres || set_vres < panel_vres) && plvds_setting_info->display_method == LCD_EXPANDSION) { timing = var_to_timing(&panel_var, panel_hres, panel_vres); load_lcd_scaling(set_hres, set_vres, panel_hres, panel_vres); } else { timing = var_to_timing(&panel_var, set_hres, set_vres); if (set_iga == IGA2) /* disable scaling */ via_write_reg_mask(VIACR, 0x79, 0x00, BIT0 + BIT1 + BIT2); } if (set_iga == IGA1) via_set_primary_timing(&timing); else if (set_iga == IGA2) via_set_secondary_timing(&timing); /* Fetch count for IGA2 only */ viafb_load_fetch_count_reg(set_hres, mode_bpp / 8, set_iga); if ((viaparinfo->chip_info->gfx_chip_name != UNICHROME_CLE266) && (viaparinfo->chip_info->gfx_chip_name != UNICHROME_K400)) viafb_load_FIFO_reg(set_iga, set_hres, set_vres); fill_lcd_format(); viafb_set_vclock(clock, set_iga); lcd_patch_skew(plvds_setting_info, plvds_chip_info); /* If K8M800, enable LCD Prefetch Mode. 
*/ if ((viaparinfo->chip_info->gfx_chip_name == UNICHROME_K800) || (UNICHROME_K8M890 == viaparinfo->chip_info->gfx_chip_name)) viafb_write_reg_mask(CR6A, VIACR, 0x01, BIT0); /* Patch for non 32bit alignment mode */ via_pitch_alignment_patch_lcd(plvds_setting_info->iga_path, set_hres, var->bits_per_pixel); } static void integrated_lvds_disable(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { bool turn_off_first_powersequence = false; bool turn_off_second_powersequence = false; if (INTERFACE_LVDS0LVDS1 == plvds_chip_info->output_interface) turn_off_first_powersequence = true; if (INTERFACE_LVDS0 == plvds_chip_info->output_interface) turn_off_first_powersequence = true; if (INTERFACE_LVDS1 == plvds_chip_info->output_interface) turn_off_second_powersequence = true; if (turn_off_second_powersequence) { /* Use second power sequence control: */ /* Turn off power sequence. */ viafb_write_reg_mask(CRD4, VIACR, 0, BIT1); /* Turn off back light. */ viafb_write_reg_mask(CRD3, VIACR, 0xC0, BIT6 + BIT7); } if (turn_off_first_powersequence) { /* Use first power sequence control: */ /* Turn off power sequence. */ viafb_write_reg_mask(CR6A, VIACR, 0, BIT3); /* Turn off back light. */ viafb_write_reg_mask(CR91, VIACR, 0xC0, BIT6 + BIT7); } /* Power off LVDS channel. */ switch (plvds_chip_info->output_interface) { case INTERFACE_LVDS0: { viafb_write_reg_mask(CRD2, VIACR, 0x80, BIT7); break; } case INTERFACE_LVDS1: { viafb_write_reg_mask(CRD2, VIACR, 0x40, BIT6); break; } case INTERFACE_LVDS0LVDS1: { viafb_write_reg_mask(CRD2, VIACR, 0xC0, BIT6 + BIT7); break; } } } static void integrated_lvds_enable(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { DEBUG_MSG(KERN_INFO "integrated_lvds_enable, out_interface:%d\n", plvds_chip_info->output_interface); if (plvds_setting_info->lcd_mode == LCD_SPWG) viafb_write_reg_mask(CRD2, VIACR, 0x00, BIT0 + BIT1); else viafb_write_reg_mask(CRD2, VIACR, 0x03, BIT0 + BIT1); switch (plvds_chip_info->output_interface) { case INTERFACE_LVDS0LVDS1: case INTERFACE_LVDS0: /* Use first power sequence control: */ /* Use hardware control power sequence. */ viafb_write_reg_mask(CR91, VIACR, 0, BIT0); /* Turn on back light. */ viafb_write_reg_mask(CR91, VIACR, 0, BIT6 + BIT7); /* Turn on hardware power sequence. */ viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3); break; case INTERFACE_LVDS1: /* Use second power sequence control: */ /* Use hardware control power sequence. */ viafb_write_reg_mask(CRD3, VIACR, 0, BIT0); /* Turn on back light. */ viafb_write_reg_mask(CRD3, VIACR, 0, BIT6 + BIT7); /* Turn on hardware power sequence. */ viafb_write_reg_mask(CRD4, VIACR, 0x02, BIT1); break; } /* Power on LVDS channel. 
*/ switch (plvds_chip_info->output_interface) { case INTERFACE_LVDS0: { viafb_write_reg_mask(CRD2, VIACR, 0, BIT7); break; } case INTERFACE_LVDS1: { viafb_write_reg_mask(CRD2, VIACR, 0, BIT6); break; } case INTERFACE_LVDS0LVDS1: { viafb_write_reg_mask(CRD2, VIACR, 0, BIT6 + BIT7); break; } } } void viafb_lcd_disable(void) { if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) { lcd_powersequence_off(); /* DI1 pad off */ viafb_write_reg_mask(SR1E, VIASR, 0x00, 0x30); } else if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CX700) { if (viafb_LCD2_ON && (INTEGRATED_LVDS == viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name)) integrated_lvds_disable(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info2); if (INTEGRATED_LVDS == viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) integrated_lvds_disable(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info); if (VT1636_LVDS == viaparinfo->chip_info-> lvds_chip_info.lvds_chip_name) viafb_disable_lvds_vt1636(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info); } else if (VT1636_LVDS == viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) { viafb_disable_lvds_vt1636(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info); } else { /* Backlight off */ viafb_write_reg_mask(SR3D, VIASR, 0x00, 0x20); /* 24 bit DI data path off */ viafb_write_reg_mask(CR91, VIACR, 0x80, 0x80); } /* Disable expansion bit */ viafb_write_reg_mask(CR79, VIACR, 0x00, 0x01); /* Simultaneous disabled */ viafb_write_reg_mask(CR6B, VIACR, 0x00, 0x08); } static void set_lcd_output_path(int set_iga, int output_interface) { switch (output_interface) { case INTERFACE_DFP: if ((UNICHROME_K8M890 == viaparinfo->chip_info->gfx_chip_name) || (UNICHROME_P4M890 == viaparinfo->chip_info->gfx_chip_name)) viafb_write_reg_mask(CR97, VIACR, 0x84, BIT7 + BIT2 + BIT1 + BIT0); case INTERFACE_DVP0: case INTERFACE_DVP1: case INTERFACE_DFP_HIGH: case INTERFACE_DFP_LOW: if (set_iga == IGA2) viafb_write_reg(CR91, VIACR, 0x00); break; } } void viafb_lcd_enable(void) { viafb_write_reg_mask(CR6B, VIACR, 0x00, BIT3); viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3); set_lcd_output_path(viaparinfo->lvds_setting_info->iga_path, viaparinfo->chip_info->lvds_chip_info.output_interface); if (viafb_LCD2_ON) set_lcd_output_path(viaparinfo->lvds_setting_info2->iga_path, viaparinfo->chip_info-> lvds_chip_info2.output_interface); if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) { /* DI1 pad on */ viafb_write_reg_mask(SR1E, VIASR, 0x30, 0x30); lcd_powersequence_on(); } else if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CX700) { if (viafb_LCD2_ON && (INTEGRATED_LVDS == viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name)) integrated_lvds_enable(viaparinfo->lvds_setting_info2, &viaparinfo->chip_info->lvds_chip_info2); if (INTEGRATED_LVDS == viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) integrated_lvds_enable(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info); if (VT1636_LVDS == viaparinfo->chip_info-> lvds_chip_info.lvds_chip_name) viafb_enable_lvds_vt1636(viaparinfo-> lvds_setting_info, &viaparinfo->chip_info-> lvds_chip_info); } else if (VT1636_LVDS == viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) { viafb_enable_lvds_vt1636(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info); } else { /* Backlight on */ viafb_write_reg_mask(SR3D, VIASR, 0x20, 0x20); /* 24 bit DI data path on */ viafb_write_reg_mask(CR91, VIACR, 0x00, 0x80); /* LCD enabled */ 
viafb_write_reg_mask(CR6A, VIACR, 0x48, 0x48); } } static void lcd_powersequence_off(void) { int i, mask, data; /* Software control power sequence */ viafb_write_reg_mask(CR91, VIACR, 0x11, 0x11); for (i = 0; i < 3; i++) { mask = PowerSequenceOff[0][i]; data = PowerSequenceOff[1][i] & mask; viafb_write_reg_mask(CR91, VIACR, (u8) data, (u8) mask); udelay(PowerSequenceOff[2][i]); } /* Disable LCD */ viafb_write_reg_mask(CR6A, VIACR, 0x00, 0x08); } static void lcd_powersequence_on(void) { int i, mask, data; /* Software control power sequence */ viafb_write_reg_mask(CR91, VIACR, 0x11, 0x11); /* Enable LCD */ viafb_write_reg_mask(CR6A, VIACR, 0x08, 0x08); for (i = 0; i < 3; i++) { mask = PowerSequenceOn[0][i]; data = PowerSequenceOn[1][i] & mask; viafb_write_reg_mask(CR91, VIACR, (u8) data, (u8) mask); udelay(PowerSequenceOn[2][i]); } udelay(1); } static void fill_lcd_format(void) { u8 bdithering = 0, bdual = 0; if (viaparinfo->lvds_setting_info->device_lcd_dualedge) bdual = BIT4; if (viaparinfo->lvds_setting_info->LCDDithering) bdithering = BIT0; /* Dual & Dithering */ viafb_write_reg_mask(CR88, VIACR, (bdithering | bdual), BIT4 + BIT0); } static void check_diport_of_integrated_lvds( struct lvds_chip_information *plvds_chip_info, struct lvds_setting_information *plvds_setting_info) { /* Determine LCD DI Port by hardware layout. */ switch (viafb_display_hardware_layout) { case HW_LAYOUT_LCD_ONLY: { if (plvds_setting_info->device_lcd_dualedge) { plvds_chip_info->output_interface = INTERFACE_LVDS0LVDS1; } else { plvds_chip_info->output_interface = INTERFACE_LVDS0; } break; } case HW_LAYOUT_DVI_ONLY: { plvds_chip_info->output_interface = INTERFACE_NONE; break; } case HW_LAYOUT_LCD1_LCD2: case HW_LAYOUT_LCD_EXTERNAL_LCD2: { plvds_chip_info->output_interface = INTERFACE_LVDS0LVDS1; break; } case HW_LAYOUT_LCD_DVI: { plvds_chip_info->output_interface = INTERFACE_LVDS1; break; } default: { plvds_chip_info->output_interface = INTERFACE_LVDS1; break; } } DEBUG_MSG(KERN_INFO "Display Hardware Layout: 0x%x, LCD DI Port: 0x%x\n", viafb_display_hardware_layout, plvds_chip_info->output_interface); } void __devinit viafb_init_lvds_output_interface(struct lvds_chip_information *plvds_chip_info, struct lvds_setting_information *plvds_setting_info) { if (INTERFACE_NONE != plvds_chip_info->output_interface) { /*Do nothing, lcd port is specified by module parameter */ return; } switch (plvds_chip_info->lvds_chip_name) { case VT1636_LVDS: switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_CX700: plvds_chip_info->output_interface = INTERFACE_DVP1; break; case UNICHROME_CN700: plvds_chip_info->output_interface = INTERFACE_DFP_LOW; break; default: plvds_chip_info->output_interface = INTERFACE_DVP0; break; } break; case INTEGRATED_LVDS: check_diport_of_integrated_lvds(plvds_chip_info, plvds_setting_info); break; default: switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_K8M890: case UNICHROME_P4M900: case UNICHROME_P4M890: plvds_chip_info->output_interface = INTERFACE_DFP_LOW; break; default: plvds_chip_info->output_interface = INTERFACE_DFP; break; } break; } } bool viafb_lcd_get_mobile_state(bool *mobile) { unsigned char __iomem *romptr, *tableptr, *biosptr; u8 core_base; /* Rom address */ const u32 romaddr = 0x000C0000; u16 start_pattern; biosptr = ioremap(romaddr, 0x10000); start_pattern = readw(biosptr); /* Compare pattern */ if (start_pattern == 0xAA55) { /* Get the start of Table */ /* 0x1B means BIOS offset position */ romptr = biosptr + 0x1B; tableptr = biosptr + readw(romptr); /* Get 
the start of biosver structure */ /* 18 means BIOS version position. */ romptr = tableptr + 18; romptr = biosptr + readw(romptr); /* The offset should be 44, but the actual image is three chars short. */ /* pRom += 44; */ romptr += 41; core_base = readb(romptr); if (core_base & 0x8) *mobile = false; else *mobile = true; /* release memory */ iounmap(biosptr); return true; } else { iounmap(biosptr); return false; } }
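/* * Minimal illustrative sketch, not part of the original lcd.c: the scaling decision inside viafb_lcd_set_mode() above can be read as "scale only when the requested mode is smaller than the physical panel, the LCD is driven by IGA2 and expansion mode is selected". The helper name below is an assumption introduced purely for illustration; it reuses only fields and constants already shown in this file. */
static bool viafb_lcd_mode_needs_scaling(int set_hres, int set_vres,
	const struct lvds_setting_information *info)
{
	/* Same condition as the if () guarding load_lcd_scaling() in viafb_lcd_set_mode(). */
	return info->iga_path == IGA2 &&
		(set_hres < info->lcd_panel_hres ||
		 set_vres < info->lcd_panel_vres) &&
		info->display_method == LCD_EXPANDSION;
}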
gpl-2.0
leehz/android_kernel_samsung_ms013g
drivers/zorro/zorro-driver.c
5245
4554
/* * Zorro Driver Services * * Copyright (C) 2003 Geert Uytterhoeven * * Loosely based on drivers/pci/pci-driver.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/module.h> #include <linux/zorro.h> /** * zorro_match_device - Tell if a Zorro device structure has a matching * Zorro device id structure * @ids: array of Zorro device id structures to search in * @dev: the Zorro device structure to match against * * Used by a driver to check whether a Zorro device present in the * system is in its list of supported devices. Returns the matching * zorro_device_id structure or %NULL if there is no match. */ const struct zorro_device_id * zorro_match_device(const struct zorro_device_id *ids, const struct zorro_dev *z) { while (ids->id) { if (ids->id == ZORRO_WILDCARD || ids->id == z->id) return ids; ids++; } return NULL; } EXPORT_SYMBOL(zorro_match_device); static int zorro_device_probe(struct device *dev) { int error = 0; struct zorro_driver *drv = to_zorro_driver(dev->driver); struct zorro_dev *z = to_zorro_dev(dev); if (!z->driver && drv->probe) { const struct zorro_device_id *id; id = zorro_match_device(drv->id_table, z); if (id) error = drv->probe(z, id); if (error >= 0) { z->driver = drv; error = 0; } } return error; } static int zorro_device_remove(struct device *dev) { struct zorro_dev *z = to_zorro_dev(dev); struct zorro_driver *drv = to_zorro_driver(dev->driver); if (drv) { if (drv->remove) drv->remove(z); z->driver = NULL; } return 0; } /** * zorro_register_driver - register a new Zorro driver * @drv: the driver structure to register * * Adds the driver structure to the list of registered drivers. * Returns zero or a negative error value. */ int zorro_register_driver(struct zorro_driver *drv) { /* initialize common driver fields */ drv->driver.name = drv->name; drv->driver.bus = &zorro_bus_type; /* register with core */ return driver_register(&drv->driver); } EXPORT_SYMBOL(zorro_register_driver); /** * zorro_unregister_driver - unregister a zorro driver * @drv: the driver structure to unregister * * Deletes the driver structure from the list of registered Zorro drivers, * gives it a chance to clean up by calling its remove() function for * each device it was responsible for, and marks those devices as * driverless. */ void zorro_unregister_driver(struct zorro_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL(zorro_unregister_driver); /** * zorro_bus_match - Tell if a Zorro device matches a driver's id table * @dev: the Zorro device structure to match against * @drv: the driver whose id table is searched * * Used by the driver core to check whether a Zorro device present in the * system is in a driver's list of supported devices. Returns 1 if there * is a match and 0 otherwise. 
*/ static int zorro_bus_match(struct device *dev, struct device_driver *drv) { struct zorro_dev *z = to_zorro_dev(dev); struct zorro_driver *zorro_drv = to_zorro_driver(drv); const struct zorro_device_id *ids = zorro_drv->id_table; if (!ids) return 0; while (ids->id) { if (ids->id == ZORRO_WILDCARD || ids->id == z->id) return 1; ids++; } return 0; } static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env) { #ifdef CONFIG_HOTPLUG struct zorro_dev *z; if (!dev) return -ENODEV; z = to_zorro_dev(dev); if (!z) return -ENODEV; if (add_uevent_var(env, "ZORRO_ID=%08X", z->id) || add_uevent_var(env, "ZORRO_SLOT_NAME=%s", dev_name(dev)) || add_uevent_var(env, "ZORRO_SLOT_ADDR=%04X", z->slotaddr) || add_uevent_var(env, "MODALIAS=" ZORRO_DEVICE_MODALIAS_FMT, z->id)) return -ENOMEM; return 0; #else /* !CONFIG_HOTPLUG */ return -ENODEV; #endif /* !CONFIG_HOTPLUG */ } struct bus_type zorro_bus_type = { .name = "zorro", .match = zorro_bus_match, .uevent = zorro_uevent, .probe = zorro_device_probe, .remove = zorro_device_remove, }; EXPORT_SYMBOL(zorro_bus_type); static int __init zorro_driver_init(void) { return bus_register(&zorro_bus_type); } postcore_initcall(zorro_driver_init);
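/* * Minimal illustrative sketch, not part of zorro-driver.c: a skeleton client of the registration API documented above. The driver name, device-id table and probe/remove bodies are assumptions made up for illustration (a real driver would list concrete board IDs rather than ZORRO_WILDCARD), and the snippet assumes the <linux/zorro.h>, <linux/module.h> and <linux/init.h> includes already present above. */
static const struct zorro_device_id example_zorro_ids[] = {
	{ ZORRO_WILDCARD },	/* match any board, for illustration only */
	{ 0 }			/* terminator: zorro_match_device() stops at id == 0 */
};

static int example_zorro_probe(struct zorro_dev *z,
			       const struct zorro_device_id *ent)
{
	dev_info(&z->dev, "example driver bound to Zorro device %08x\n", z->id);
	return 0;		/* success: zorro_device_probe() records us as z->driver */
}

static void example_zorro_remove(struct zorro_dev *z)
{
	dev_info(&z->dev, "example driver unbound\n");
}

static struct zorro_driver example_zorro_driver = {
	.name		= "example_zorro",
	.id_table	= example_zorro_ids,
	.probe		= example_zorro_probe,
	.remove		= example_zorro_remove,
};

static int __init example_zorro_init(void)
{
	/* Hooks the driver into zorro_bus_type; matching devices are probed. */
	return zorro_register_driver(&example_zorro_driver);
}

static void __exit example_zorro_exit(void)
{
	/* Detaches from all bound devices via zorro_device_remove(). */
	zorro_unregister_driver(&example_zorro_driver);
}

module_init(example_zorro_init);
module_exit(example_zorro_exit);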
gpl-2.0