repo_name
string
path
string
copies
string
size
string
content
string
license
string
NovaFusion/android_kernel_samsung_arubaslim
mm/cleancache.c
4514
6643
/* * Cleancache frontend * * This code provides the generic "frontend" layer to call a matching * "backend" driver implementation of cleancache. See * Documentation/vm/cleancache.txt for more information. * * Copyright (C) 2009-2010 Oracle Corp. All rights reserved. * Author: Dan Magenheimer * * This work is licensed under the terms of the GNU GPL, version 2. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/exportfs.h> #include <linux/mm.h> #include <linux/debugfs.h> #include <linux/cleancache.h> /* * This global enablement flag may be read thousands of times per second * by cleancache_get/put/invalidate even on systems where cleancache_ops * is not claimed (e.g. cleancache is config'ed on but remains * disabled), so is preferred to the slower alternative: a function * call that checks a non-global. */ int cleancache_enabled __read_mostly; EXPORT_SYMBOL(cleancache_enabled); /* * cleancache_ops is set by cleancache_ops_register to contain the pointers * to the cleancache "backend" implementation functions. */ static struct cleancache_ops cleancache_ops __read_mostly; /* * Counters available via /sys/kernel/debug/frontswap (if debugfs is * properly configured. These are for information only so are not protected * against increment races. 
*/ static u64 cleancache_succ_gets; static u64 cleancache_failed_gets; static u64 cleancache_puts; static u64 cleancache_invalidates; /* * register operations for cleancache, returning previous thus allowing * detection of multiple backends and possible nesting */ struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops) { struct cleancache_ops old = cleancache_ops; cleancache_ops = *ops; cleancache_enabled = 1; return old; } EXPORT_SYMBOL(cleancache_register_ops); /* Called by a cleancache-enabled filesystem at time of mount */ void __cleancache_init_fs(struct super_block *sb) { sb->cleancache_poolid = (*cleancache_ops.init_fs)(PAGE_SIZE); } EXPORT_SYMBOL(__cleancache_init_fs); /* Called by a cleancache-enabled clustered filesystem at time of mount */ void __cleancache_init_shared_fs(char *uuid, struct super_block *sb) { sb->cleancache_poolid = (*cleancache_ops.init_shared_fs)(uuid, PAGE_SIZE); } EXPORT_SYMBOL(__cleancache_init_shared_fs); /* * If the filesystem uses exportable filehandles, use the filehandle as * the key, else use the inode number. */ static int cleancache_get_key(struct inode *inode, struct cleancache_filekey *key) { int (*fhfn)(struct dentry *, __u32 *fh, int *, int); int len = 0, maxlen = CLEANCACHE_KEY_MAX; struct super_block *sb = inode->i_sb; key->u.ino = inode->i_ino; if (sb->s_export_op != NULL) { fhfn = sb->s_export_op->encode_fh; if (fhfn) { struct dentry d; d.d_inode = inode; len = (*fhfn)(&d, &key->u.fh[0], &maxlen, 0); if (len <= 0 || len == 255) return -1; if (maxlen > CLEANCACHE_KEY_MAX) return -1; } } return 0; } /* * "Get" data from cleancache associated with the poolid/inode/index * that were specified when the data was put to cleanache and, if * successful, use it to fill the specified page with data and return 0. * The pageframe is unchanged and returns -1 if the get fails. * Page must be locked by caller. 
*/ int __cleancache_get_page(struct page *page) { int ret = -1; int pool_id; struct cleancache_filekey key = { .u.key = { 0 } }; VM_BUG_ON(!PageLocked(page)); pool_id = page->mapping->host->i_sb->cleancache_poolid; if (pool_id < 0) goto out; if (cleancache_get_key(page->mapping->host, &key) < 0) goto out; ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page); if (ret == 0) cleancache_succ_gets++; else cleancache_failed_gets++; out: return ret; } EXPORT_SYMBOL(__cleancache_get_page); /* * "Put" data from a page to cleancache and associate it with the * (previously-obtained per-filesystem) poolid and the page's, * inode and page index. Page must be locked. Note that a put_page * always "succeeds", though a subsequent get_page may succeed or fail. */ void __cleancache_put_page(struct page *page) { int pool_id; struct cleancache_filekey key = { .u.key = { 0 } }; VM_BUG_ON(!PageLocked(page)); pool_id = page->mapping->host->i_sb->cleancache_poolid; if (pool_id >= 0 && cleancache_get_key(page->mapping->host, &key) >= 0) { (*cleancache_ops.put_page)(pool_id, key, page->index, page); cleancache_puts++; } } EXPORT_SYMBOL(__cleancache_put_page); /* * Invalidate any data from cleancache associated with the poolid and the * page's inode and page index so that a subsequent "get" will fail. */ void __cleancache_invalidate_page(struct address_space *mapping, struct page *page) { /* careful... page->mapping is NULL sometimes when this is called */ int pool_id = mapping->host->i_sb->cleancache_poolid; struct cleancache_filekey key = { .u.key = { 0 } }; if (pool_id >= 0) { VM_BUG_ON(!PageLocked(page)); if (cleancache_get_key(mapping->host, &key) >= 0) { (*cleancache_ops.invalidate_page)(pool_id, key, page->index); cleancache_invalidates++; } } } EXPORT_SYMBOL(__cleancache_invalidate_page); /* * Invalidate all data from cleancache associated with the poolid and the * mappings's inode so that all subsequent gets to this poolid/inode * will fail. 
*/ void __cleancache_invalidate_inode(struct address_space *mapping) { int pool_id = mapping->host->i_sb->cleancache_poolid; struct cleancache_filekey key = { .u.key = { 0 } }; if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0) (*cleancache_ops.invalidate_inode)(pool_id, key); } EXPORT_SYMBOL(__cleancache_invalidate_inode); /* * Called by any cleancache-enabled filesystem at time of unmount; * note that pool_id is surrendered and may be reutrned by a subsequent * cleancache_init_fs or cleancache_init_shared_fs */ void __cleancache_invalidate_fs(struct super_block *sb) { if (sb->cleancache_poolid >= 0) { int old_poolid = sb->cleancache_poolid; sb->cleancache_poolid = -1; (*cleancache_ops.invalidate_fs)(old_poolid); } } EXPORT_SYMBOL(__cleancache_invalidate_fs); static int __init init_cleancache(void) { #ifdef CONFIG_DEBUG_FS struct dentry *root = debugfs_create_dir("cleancache", NULL); if (root == NULL) return -ENXIO; debugfs_create_u64("succ_gets", S_IRUGO, root, &cleancache_succ_gets); debugfs_create_u64("failed_gets", S_IRUGO, root, &cleancache_failed_gets); debugfs_create_u64("puts", S_IRUGO, root, &cleancache_puts); debugfs_create_u64("invalidates", S_IRUGO, root, &cleancache_invalidates); #endif return 0; } module_init(init_cleancache)
gpl-2.0
CelerityEDS/linux-imx
sound/soc/fsl/mpc5200_psc_i2s.c
5026
6537
/* * Freescale MPC5200 PSC in I2S mode * ALSA SoC Digital Audio Interface (DAI) driver * * Copyright (C) 2008 Secret Lab Technologies Ltd. * Copyright (C) 2009 Jon Smirl, Digispeaker */ #include <linux/module.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <asm/mpc52xx_psc.h> #include "mpc5200_dma.h" /** * PSC_I2S_RATES: sample rates supported by the I2S * * This driver currently only supports the PSC running in I2S slave mode, * which means the codec determines the sample rate. Therefore, we tell * ALSA that we support all rates and let the codec driver decide what rates * are really supported. */ #define PSC_I2S_RATES (SNDRV_PCM_RATE_5512 | SNDRV_PCM_RATE_8000_192000 | \ SNDRV_PCM_RATE_CONTINUOUS) /** * PSC_I2S_FORMATS: audio formats supported by the PSC I2S mode */ #define PSC_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | \ SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE) static int psc_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai); u32 mode; dev_dbg(psc_dma->dev, "%s(substream=%p) p_size=%i p_bytes=%i" " periods=%i buffer_size=%i buffer_bytes=%i\n", __func__, substream, params_period_size(params), params_period_bytes(params), params_periods(params), params_buffer_size(params), params_buffer_bytes(params)); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: mode = MPC52xx_PSC_SICR_SIM_CODEC_8; break; case SNDRV_PCM_FORMAT_S16_BE: mode = MPC52xx_PSC_SICR_SIM_CODEC_16; break; case SNDRV_PCM_FORMAT_S24_BE: mode = MPC52xx_PSC_SICR_SIM_CODEC_24; break; case SNDRV_PCM_FORMAT_S32_BE: mode = MPC52xx_PSC_SICR_SIM_CODEC_32; break; default: dev_dbg(psc_dma->dev, "invalid format\n"); return -EINVAL; } out_be32(&psc_dma->psc_regs->sicr, psc_dma->sicr | mode); return 
0; } /** * psc_i2s_set_sysclk: set the clock frequency and direction * * This function is called by the machine driver to tell us what the clock * frequency and direction are. * * Currently, we only support operating as a clock slave (SND_SOC_CLOCK_IN), * and we don't care about the frequency. Return an error if the direction * is not SND_SOC_CLOCK_IN. * * @clk_id: reserved, should be zero * @freq: the frequency of the given clock ID, currently ignored * @dir: SND_SOC_CLOCK_IN (clock slave) or SND_SOC_CLOCK_OUT (clock master) */ static int psc_i2s_set_sysclk(struct snd_soc_dai *cpu_dai, int clk_id, unsigned int freq, int dir) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(cpu_dai); dev_dbg(psc_dma->dev, "psc_i2s_set_sysclk(cpu_dai=%p, dir=%i)\n", cpu_dai, dir); return (dir == SND_SOC_CLOCK_IN) ? 0 : -EINVAL; } /** * psc_i2s_set_fmt: set the serial format. * * This function is called by the machine driver to tell us what serial * format to use. * * This driver only supports I2S mode. Return an error if the format is * not SND_SOC_DAIFMT_I2S. * * @format: one of SND_SOC_DAIFMT_xxx */ static int psc_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int format) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(cpu_dai); dev_dbg(psc_dma->dev, "psc_i2s_set_fmt(cpu_dai=%p, format=%i)\n", cpu_dai, format); return (format == SND_SOC_DAIFMT_I2S) ? 
0 : -EINVAL; } /* --------------------------------------------------------------------- * ALSA SoC Bindings * * - Digital Audio Interface (DAI) template * - create/destroy dai hooks */ /** * psc_i2s_dai_template: template CPU Digital Audio Interface */ static const struct snd_soc_dai_ops psc_i2s_dai_ops = { .hw_params = psc_i2s_hw_params, .set_sysclk = psc_i2s_set_sysclk, .set_fmt = psc_i2s_set_fmt, }; static struct snd_soc_dai_driver psc_i2s_dai[] = {{ .playback = { .channels_min = 2, .channels_max = 2, .rates = PSC_I2S_RATES, .formats = PSC_I2S_FORMATS, }, .capture = { .channels_min = 2, .channels_max = 2, .rates = PSC_I2S_RATES, .formats = PSC_I2S_FORMATS, }, .ops = &psc_i2s_dai_ops, } }; /* --------------------------------------------------------------------- * OF platform bus binding code: * - Probe/remove operations * - OF device match table */ static int __devinit psc_i2s_of_probe(struct platform_device *op) { int rc; struct psc_dma *psc_dma; struct mpc52xx_psc __iomem *regs; rc = snd_soc_register_dais(&op->dev, psc_i2s_dai, ARRAY_SIZE(psc_i2s_dai)); if (rc != 0) { pr_err("Failed to register DAI\n"); return rc; } psc_dma = dev_get_drvdata(&op->dev); regs = psc_dma->psc_regs; /* Configure the serial interface mode; defaulting to CODEC8 mode */ psc_dma->sicr = MPC52xx_PSC_SICR_DTS1 | MPC52xx_PSC_SICR_I2S | MPC52xx_PSC_SICR_CLKPOL; out_be32(&psc_dma->psc_regs->sicr, psc_dma->sicr | MPC52xx_PSC_SICR_SIM_CODEC_8); /* Check for the codec handle. If it is not present then we * are done */ if (!of_get_property(op->dev.of_node, "codec-handle", NULL)) return 0; /* Due to errata in the dma mode; need to line up enabling * the transmitter with a transition on the frame sync * line */ /* first make sure it is low */ while ((in_8(&regs->ipcr_acr.ipcr) & 0x80) != 0) ; /* then wait for the transition to high */ while ((in_8(&regs->ipcr_acr.ipcr) & 0x80) == 0) ; /* Finally, enable the PSC. * Receiver must always be enabled; even when we only want * transmit. 
(see 15.3.2.3 of MPC5200B User's Guide) */ /* Go */ out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); return 0; } static int __devexit psc_i2s_of_remove(struct platform_device *op) { snd_soc_unregister_dais(&op->dev, ARRAY_SIZE(psc_i2s_dai)); return 0; } /* Match table for of_platform binding */ static struct of_device_id psc_i2s_match[] __devinitdata = { { .compatible = "fsl,mpc5200-psc-i2s", }, { .compatible = "fsl,mpc5200b-psc-i2s", }, {} }; MODULE_DEVICE_TABLE(of, psc_i2s_match); static struct platform_driver psc_i2s_driver = { .probe = psc_i2s_of_probe, .remove = __devexit_p(psc_i2s_of_remove), .driver = { .name = "mpc5200-psc-i2s", .owner = THIS_MODULE, .of_match_table = psc_i2s_match, }, }; module_platform_driver(psc_i2s_driver); MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); MODULE_DESCRIPTION("Freescale MPC5200 PSC in I2S mode ASoC Driver"); MODULE_LICENSE("GPL");
gpl-2.0
OMAP4-AOSP/android_kernel_samsung_espresso
net/irda/irlap_frame.c
8354
36783
/********************************************************************* * * Filename: irlap_frame.c * Version: 1.0 * Description: Build and transmit IrLAP frames * Status: Stable * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Tue Aug 19 10:27:26 1997 * Modified at: Wed Jan 5 08:59:04 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>, * All Rights Reserved. * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/skbuff.h> #include <linux/if.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/irda.h> #include <linux/slab.h> #include <net/pkt_sched.h> #include <net/sock.h> #include <asm/byteorder.h> #include <net/irda/irda.h> #include <net/irda/irda_device.h> #include <net/irda/irlap.h> #include <net/irda/wrapper.h> #include <net/irda/timer.h> #include <net/irda/irlap_frame.h> #include <net/irda/qos.h> static void irlap_send_i_frame(struct irlap_cb *self, struct sk_buff *skb, int command); /* * Function irlap_insert_info (self, skb) * * Insert minimum turnaround time and speed information into the skb. We * need to do this since it's per packet relevant information. Safe to * have this function inlined since it's only called from one place */ static inline void irlap_insert_info(struct irlap_cb *self, struct sk_buff *skb) { struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb; /* * Insert MTT (min. 
turn time) and speed into skb, so that the * device driver knows which settings to use */ cb->magic = LAP_MAGIC; cb->mtt = self->mtt_required; cb->next_speed = self->speed; /* Reset */ self->mtt_required = 0; /* * Delay equals negotiated BOFs count, plus the number of BOFs to * force the negotiated minimum turnaround time */ cb->xbofs = self->bofs_count; cb->next_xbofs = self->next_bofs; cb->xbofs_delay = self->xbofs_delay; /* Reset XBOF's delay (used only for getting min turn time) */ self->xbofs_delay = 0; /* Put the correct xbofs value for the next packet */ self->bofs_count = self->next_bofs; } /* * Function irlap_queue_xmit (self, skb) * * A little wrapper for dev_queue_xmit, so we can insert some common * code into it. */ void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb) { /* Some common init stuff */ skb->dev = self->netdev; skb_reset_mac_header(skb); skb_reset_network_header(skb); skb_reset_transport_header(skb); skb->protocol = htons(ETH_P_IRDA); skb->priority = TC_PRIO_BESTEFFORT; irlap_insert_info(self, skb); if (unlikely(self->mode & IRDA_MODE_MONITOR)) { IRDA_DEBUG(3, "%s(): %s is in monitor mode\n", __func__, self->netdev->name); dev_kfree_skb(skb); return; } dev_queue_xmit(skb); } /* * Function irlap_send_snrm_cmd (void) * * Transmits a connect SNRM command frame */ void irlap_send_snrm_frame(struct irlap_cb *self, struct qos_info *qos) { struct sk_buff *tx_skb; struct snrm_frame *frame; int ret; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Allocate frame */ tx_skb = alloc_skb(sizeof(struct snrm_frame) + IRLAP_NEGOCIATION_PARAMS_LEN, GFP_ATOMIC); if (!tx_skb) return; frame = (struct snrm_frame *) skb_put(tx_skb, 2); /* Insert connection address field */ if (qos) frame->caddr = CMD_FRAME | CBROADCAST; else frame->caddr = CMD_FRAME | self->caddr; /* Insert control field */ frame->control = SNRM_CMD | PF_BIT; /* * If we are establishing a connection then insert QoS parameters */ if (qos) { 
skb_put(tx_skb, 9); /* 25 left */ frame->saddr = cpu_to_le32(self->saddr); frame->daddr = cpu_to_le32(self->daddr); frame->ncaddr = self->caddr; ret = irlap_insert_qos_negotiation_params(self, tx_skb); if (ret < 0) { dev_kfree_skb(tx_skb); return; } } irlap_queue_xmit(self, tx_skb); } /* * Function irlap_recv_snrm_cmd (skb, info) * * Received SNRM (Set Normal Response Mode) command frame * */ static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info) { struct snrm_frame *frame; if (pskb_may_pull(skb,sizeof(struct snrm_frame))) { frame = (struct snrm_frame *) skb->data; /* Copy the new connection address ignoring the C/R bit */ info->caddr = frame->ncaddr & 0xFE; /* Check if the new connection address is valid */ if ((info->caddr == 0x00) || (info->caddr == 0xfe)) { IRDA_DEBUG(3, "%s(), invalid connection address!\n", __func__); return; } /* Copy peer device address */ info->daddr = le32_to_cpu(frame->saddr); info->saddr = le32_to_cpu(frame->daddr); /* Only accept if addressed directly to us */ if (info->saddr != self->saddr) { IRDA_DEBUG(2, "%s(), not addressed to us!\n", __func__); return; } irlap_do_event(self, RECV_SNRM_CMD, skb, info); } else { /* Signal that this SNRM frame does not contain and I-field */ irlap_do_event(self, RECV_SNRM_CMD, skb, NULL); } } /* * Function irlap_send_ua_response_frame (qos) * * Send UA (Unnumbered Acknowledgement) frame * */ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos) { struct sk_buff *tx_skb; struct ua_frame *frame; int ret; IRDA_DEBUG(2, "%s() <%ld>\n", __func__, jiffies); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Allocate frame */ tx_skb = alloc_skb(sizeof(struct ua_frame) + IRLAP_NEGOCIATION_PARAMS_LEN, GFP_ATOMIC); if (!tx_skb) return; frame = (struct ua_frame *) skb_put(tx_skb, 10); /* Build UA response */ frame->caddr = self->caddr; frame->control = UA_RSP | PF_BIT; frame->saddr = 
cpu_to_le32(self->saddr); frame->daddr = cpu_to_le32(self->daddr); /* Should we send QoS negotiation parameters? */ if (qos) { ret = irlap_insert_qos_negotiation_params(self, tx_skb); if (ret < 0) { dev_kfree_skb(tx_skb); return; } } irlap_queue_xmit(self, tx_skb); } /* * Function irlap_send_dm_frame (void) * * Send disconnected mode (DM) frame * */ void irlap_send_dm_frame( struct irlap_cb *self) { struct sk_buff *tx_skb = NULL; struct dm_frame *frame; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); tx_skb = alloc_skb(sizeof(struct dm_frame), GFP_ATOMIC); if (!tx_skb) return; frame = (struct dm_frame *)skb_put(tx_skb, 2); if (self->state == LAP_NDM) frame->caddr = CBROADCAST; else frame->caddr = self->caddr; frame->control = DM_RSP | PF_BIT; irlap_queue_xmit(self, tx_skb); } /* * Function irlap_send_disc_frame (void) * * Send disconnect (DISC) frame * */ void irlap_send_disc_frame(struct irlap_cb *self) { struct sk_buff *tx_skb = NULL; struct disc_frame *frame; IRDA_DEBUG(3, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); tx_skb = alloc_skb(sizeof(struct disc_frame), GFP_ATOMIC); if (!tx_skb) return; frame = (struct disc_frame *)skb_put(tx_skb, 2); frame->caddr = self->caddr | CMD_FRAME; frame->control = DISC_CMD | PF_BIT; irlap_queue_xmit(self, tx_skb); } /* * Function irlap_send_discovery_xid_frame (S, s, command) * * Build and transmit a XID (eXchange station IDentifier) discovery * frame. 
*/ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s, __u8 command, discovery_t *discovery) { struct sk_buff *tx_skb = NULL; struct xid_frame *frame; __u32 bcast = BROADCAST; __u8 *info; IRDA_DEBUG(4, "%s(), s=%d, S=%d, command=%d\n", __func__, s, S, command); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(discovery != NULL, return;); tx_skb = alloc_skb(sizeof(struct xid_frame) + IRLAP_DISCOVERY_INFO_LEN, GFP_ATOMIC); if (!tx_skb) return; skb_put(tx_skb, 14); frame = (struct xid_frame *) tx_skb->data; if (command) { frame->caddr = CBROADCAST | CMD_FRAME; frame->control = XID_CMD | PF_BIT; } else { frame->caddr = CBROADCAST; frame->control = XID_RSP | PF_BIT; } frame->ident = XID_FORMAT; frame->saddr = cpu_to_le32(self->saddr); if (command) frame->daddr = cpu_to_le32(bcast); else frame->daddr = cpu_to_le32(discovery->data.daddr); switch (S) { case 1: frame->flags = 0x00; break; case 6: frame->flags = 0x01; break; case 8: frame->flags = 0x02; break; case 16: frame->flags = 0x03; break; default: frame->flags = 0x02; break; } frame->slotnr = s; frame->version = 0x00; /* * Provide info for final slot only in commands, and for all * responses. Send the second byte of the hint only if the * EXTENSION bit is set in the first byte. 
*/ if (!command || (frame->slotnr == 0xff)) { int len; if (discovery->data.hints[0] & HINT_EXTENSION) { info = skb_put(tx_skb, 2); info[0] = discovery->data.hints[0]; info[1] = discovery->data.hints[1]; } else { info = skb_put(tx_skb, 1); info[0] = discovery->data.hints[0]; } info = skb_put(tx_skb, 1); info[0] = discovery->data.charset; len = IRDA_MIN(discovery->name_len, skb_tailroom(tx_skb)); info = skb_put(tx_skb, len); memcpy(info, discovery->data.info, len); } irlap_queue_xmit(self, tx_skb); } /* * Function irlap_recv_discovery_xid_rsp (skb, info) * * Received a XID discovery response * */ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info) { struct xid_frame *xid; discovery_t *discovery = NULL; __u8 *discovery_info; char *text; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { IRDA_ERROR("%s: frame too short!\n", __func__); return; } xid = (struct xid_frame *) skb->data; info->daddr = le32_to_cpu(xid->saddr); info->saddr = le32_to_cpu(xid->daddr); /* Make sure frame is addressed to us */ if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", __func__); return; } if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { IRDA_WARNING("%s: kmalloc failed!\n", __func__); return; } discovery->data.daddr = info->daddr; discovery->data.saddr = self->saddr; discovery->timestamp = jiffies; IRDA_DEBUG(4, "%s(), daddr=%08x\n", __func__, discovery->data.daddr); discovery_info = skb_pull(skb, sizeof(struct xid_frame)); /* Get info returned from peer */ discovery->data.hints[0] = discovery_info[0]; if (discovery_info[0] & HINT_EXTENSION) { IRDA_DEBUG(4, "EXTENSION\n"); discovery->data.hints[1] = discovery_info[1]; discovery->data.charset = discovery_info[2]; text = (char *) &discovery_info[3]; } else { 
discovery->data.hints[1] = 0; discovery->data.charset = discovery_info[1]; text = (char *) &discovery_info[2]; } /* * Terminate info string, should be safe since this is where the * FCS bytes resides. */ skb->data[skb->len] = '\0'; strncpy(discovery->data.info, text, NICKNAME_MAX_LEN); discovery->name_len = strlen(discovery->data.info); info->discovery = discovery; irlap_do_event(self, RECV_DISCOVERY_XID_RSP, skb, info); } /* * Function irlap_recv_discovery_xid_cmd (skb, info) * * Received a XID discovery command * */ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info) { struct xid_frame *xid; discovery_t *discovery = NULL; __u8 *discovery_info; char *text; if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { IRDA_ERROR("%s: frame too short!\n", __func__); return; } xid = (struct xid_frame *) skb->data; info->daddr = le32_to_cpu(xid->saddr); info->saddr = le32_to_cpu(xid->daddr); /* Make sure frame is addressed to us */ if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", __func__); return; } switch (xid->flags & 0x03) { case 0x00: info->S = 1; break; case 0x01: info->S = 6; break; case 0x02: info->S = 8; break; case 0x03: info->S = 16; break; default: /* Error!! */ return; } info->s = xid->slotnr; discovery_info = skb_pull(skb, sizeof(struct xid_frame)); /* * Check if last frame */ if (info->s == 0xff) { /* Check if things are sane at this point... */ if((discovery_info == NULL) || !pskb_may_pull(skb, 3)) { IRDA_ERROR("%s: discovery frame too short!\n", __func__); return; } /* * We now have some discovery info to deliver! 
*/ discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC); if (!discovery) { IRDA_WARNING("%s: unable to malloc!\n", __func__); return; } discovery->data.daddr = info->daddr; discovery->data.saddr = self->saddr; discovery->timestamp = jiffies; discovery->data.hints[0] = discovery_info[0]; if (discovery_info[0] & HINT_EXTENSION) { discovery->data.hints[1] = discovery_info[1]; discovery->data.charset = discovery_info[2]; text = (char *) &discovery_info[3]; } else { discovery->data.hints[1] = 0; discovery->data.charset = discovery_info[1]; text = (char *) &discovery_info[2]; } /* * Terminate string, should be safe since this is where the * FCS bytes resides. */ skb->data[skb->len] = '\0'; strncpy(discovery->data.info, text, NICKNAME_MAX_LEN); discovery->name_len = strlen(discovery->data.info); info->discovery = discovery; } else info->discovery = NULL; irlap_do_event(self, RECV_DISCOVERY_XID_CMD, skb, info); } /* * Function irlap_send_rr_frame (self, command) * * Build and transmit RR (Receive Ready) frame. Notice that it is currently * only possible to send RR frames with the poll bit set. */ void irlap_send_rr_frame(struct irlap_cb *self, int command) { struct sk_buff *tx_skb; struct rr_frame *frame; tx_skb = alloc_skb(sizeof(struct rr_frame), GFP_ATOMIC); if (!tx_skb) return; frame = (struct rr_frame *)skb_put(tx_skb, 2); frame->caddr = self->caddr; frame->caddr |= (command) ? CMD_FRAME : 0; frame->control = RR | PF_BIT | (self->vr << 5); irlap_queue_xmit(self, tx_skb); } /* * Function irlap_send_rd_frame (self) * * Request disconnect. Used by a secondary station to request the * disconnection of the link. 
*/ void irlap_send_rd_frame(struct irlap_cb *self) { struct sk_buff *tx_skb; struct rd_frame *frame; tx_skb = alloc_skb(sizeof(struct rd_frame), GFP_ATOMIC); if (!tx_skb) return; frame = (struct rd_frame *)skb_put(tx_skb, 2); frame->caddr = self->caddr; frame->caddr = RD_RSP | PF_BIT; irlap_queue_xmit(self, tx_skb); } /* * Function irlap_recv_rr_frame (skb, info) * * Received RR (Receive Ready) frame from peer station, no harm in * making it inline since its called only from one single place * (irlap_driver_rcv). */ static inline void irlap_recv_rr_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { info->nr = skb->data[1] >> 5; /* Check if this is a command or a response frame */ if (command) irlap_do_event(self, RECV_RR_CMD, skb, info); else irlap_do_event(self, RECV_RR_RSP, skb, info); } /* * Function irlap_recv_rnr_frame (self, skb, info) * * Received RNR (Receive Not Ready) frame from peer station * */ static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { info->nr = skb->data[1] >> 5; IRDA_DEBUG(4, "%s(), nr=%d, %ld\n", __func__, info->nr, jiffies); if (command) irlap_do_event(self, RECV_RNR_CMD, skb, info); else irlap_do_event(self, RECV_RNR_RSP, skb, info); } static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { IRDA_DEBUG(0, "%s()\n", __func__); info->nr = skb->data[1] >> 5; /* Check if this is a command or a response frame */ if (command) irlap_do_event(self, RECV_REJ_CMD, skb, info); else irlap_do_event(self, RECV_REJ_RSP, skb, info); } static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { IRDA_DEBUG(0, "%s()\n", __func__); info->nr = skb->data[1] >> 5; /* Check if this is a command or a response frame */ if (command) irlap_do_event(self, RECV_SREJ_CMD, skb, info); else irlap_do_event(self, RECV_SREJ_RSP, skb, info); } static void 
irlap_recv_disc_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { IRDA_DEBUG(2, "%s()\n", __func__); /* Check if this is a command or a response frame */ if (command) irlap_do_event(self, RECV_DISC_CMD, skb, info); else irlap_do_event(self, RECV_RD_RSP, skb, info); } /* * Function irlap_recv_ua_frame (skb, frame) * * Received UA (Unnumbered Acknowledgement) frame * */ static inline void irlap_recv_ua_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info) { irlap_do_event(self, RECV_UA_RSP, skb, info); } /* * Function irlap_send_data_primary(self, skb) * * Send I-frames as the primary station but without the poll bit set * */ void irlap_send_data_primary(struct irlap_cb *self, struct sk_buff *skb) { struct sk_buff *tx_skb; if (skb->data[1] == I_FRAME) { /* * Insert frame sequence number (Vs) in control field before * inserting into transmit window queue. */ skb->data[1] = I_FRAME | (self->vs << 1); /* * Insert frame in store, in case of retransmissions * Increase skb reference count, see irlap_do_event() */ skb_get(skb); skb_queue_tail(&self->wx_list, skb); /* Copy buffer */ tx_skb = skb_clone(skb, GFP_ATOMIC); if (tx_skb == NULL) { return; } self->vs = (self->vs + 1) % 8; self->ack_required = FALSE; self->window -= 1; irlap_send_i_frame( self, tx_skb, CMD_FRAME); } else { IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __func__); irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); self->window -= 1; } } /* * Function irlap_send_data_primary_poll (self, skb) * * Send I(nformation) frame as primary with poll bit set */ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb) { struct sk_buff *tx_skb; int transmission_time; /* Stop P timer */ del_timer(&self->poll_timer); /* Is this reliable or unreliable data? */ if (skb->data[1] == I_FRAME) { /* * Insert frame sequence number (Vs) in control field before * inserting into transmit window queue. 
*/ skb->data[1] = I_FRAME | (self->vs << 1); /* * Insert frame in store, in case of retransmissions * Increase skb reference count, see irlap_do_event() */ skb_get(skb); skb_queue_tail(&self->wx_list, skb); /* Copy buffer */ tx_skb = skb_clone(skb, GFP_ATOMIC); if (tx_skb == NULL) { return; } /* * Set poll bit if necessary. We do this to the copied * skb, since retransmitted need to set or clear the poll * bit depending on when they are sent. */ tx_skb->data[1] |= PF_BIT; self->vs = (self->vs + 1) % 8; self->ack_required = FALSE; irlap_next_state(self, LAP_NRM_P); irlap_send_i_frame(self, tx_skb, CMD_FRAME); } else { IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __func__); if (self->ack_required) { irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); irlap_next_state(self, LAP_NRM_P); irlap_send_rr_frame(self, CMD_FRAME); self->ack_required = FALSE; } else { skb->data[1] |= PF_BIT; irlap_next_state(self, LAP_NRM_P); irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); } } /* How much time we took for transmission of all frames. * We don't know, so let assume we used the full window. Jean II */ transmission_time = self->final_timeout; /* Reset parameter so that we can fill next window */ self->window = self->window_size; #ifdef CONFIG_IRDA_DYNAMIC_WINDOW /* Remove what we have not used. Just do a prorata of the * bytes left in window to window capacity. * See max_line_capacities[][] in qos.c for details. Jean II */ transmission_time -= (self->final_timeout * self->bytes_left / self->line_capacity); IRDA_DEBUG(4, "%s() adjusting transmission_time : ft=%d, bl=%d, lc=%d -> tt=%d\n", __func__, self->final_timeout, self->bytes_left, self->line_capacity, transmission_time); /* We are allowed to transmit a maximum number of bytes again. */ self->bytes_left = self->line_capacity; #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* * The network layer has a intermediate buffer between IrLAP * and the IrDA driver which can contain 8 frames. 
So, even * though IrLAP is currently sending the *last* frame of the * tx-window, the driver most likely has only just started * sending the *first* frame of the same tx-window. * I.e. we are always at the very beginning of or Tx window. * Now, we are supposed to set the final timer from the end * of our tx-window to let the other peer reply. So, we need * to add extra time to compensate for the fact that we * are really at the start of tx-window, otherwise the final timer * might expire before he can answer... * Jean II */ irlap_start_final_timer(self, self->final_timeout + transmission_time); /* * The clever amongst you might ask why we do this adjustement * only here, and not in all the other cases in irlap_event.c. * In all those other case, we only send a very short management * frame (few bytes), so the adjustement would be lost in the * noise... * The exception of course is irlap_resend_rejected_frame(). * Jean II */ } /* * Function irlap_send_data_secondary_final (self, skb) * * Send I(nformation) frame as secondary with final bit set * */ void irlap_send_data_secondary_final(struct irlap_cb *self, struct sk_buff *skb) { struct sk_buff *tx_skb = NULL; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); /* Is this reliable or unreliable data? */ if (skb->data[1] == I_FRAME) { /* * Insert frame sequence number (Vs) in control field before * inserting into transmit window queue. 
*/ skb->data[1] = I_FRAME | (self->vs << 1); /* * Insert frame in store, in case of retransmissions * Increase skb reference count, see irlap_do_event() */ skb_get(skb); skb_queue_tail(&self->wx_list, skb); tx_skb = skb_clone(skb, GFP_ATOMIC); if (tx_skb == NULL) { return; } tx_skb->data[1] |= PF_BIT; self->vs = (self->vs + 1) % 8; self->ack_required = FALSE; irlap_send_i_frame(self, tx_skb, RSP_FRAME); } else { if (self->ack_required) { irlap_send_ui_frame(self, skb_get(skb), self->caddr, RSP_FRAME); irlap_send_rr_frame(self, RSP_FRAME); self->ack_required = FALSE; } else { skb->data[1] |= PF_BIT; irlap_send_ui_frame(self, skb_get(skb), self->caddr, RSP_FRAME); } } self->window = self->window_size; #ifdef CONFIG_IRDA_DYNAMIC_WINDOW /* We are allowed to transmit a maximum number of bytes again. */ self->bytes_left = self->line_capacity; #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ irlap_start_wd_timer(self, self->wd_timeout); } /* * Function irlap_send_data_secondary (self, skb) * * Send I(nformation) frame as secondary without final bit set * */ void irlap_send_data_secondary(struct irlap_cb *self, struct sk_buff *skb) { struct sk_buff *tx_skb = NULL; /* Is this reliable or unreliable data? */ if (skb->data[1] == I_FRAME) { /* * Insert frame sequence number (Vs) in control field before * inserting into transmit window queue. */ skb->data[1] = I_FRAME | (self->vs << 1); /* * Insert frame in store, in case of retransmissions * Increase skb reference count, see irlap_do_event() */ skb_get(skb); skb_queue_tail(&self->wx_list, skb); tx_skb = skb_clone(skb, GFP_ATOMIC); if (tx_skb == NULL) { return; } self->vs = (self->vs + 1) % 8; self->ack_required = FALSE; self->window -= 1; irlap_send_i_frame(self, tx_skb, RSP_FRAME); } else { irlap_send_ui_frame(self, skb_get(skb), self->caddr, RSP_FRAME); self->window -= 1; } } /* * Function irlap_resend_rejected_frames (nr) * * Resend frames which has not been acknowledged. 
Should be safe to * traverse the list without locking it since this function will only be * called from interrupt context (BH) */ void irlap_resend_rejected_frames(struct irlap_cb *self, int command) { struct sk_buff *tx_skb; struct sk_buff *skb; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Resend unacknowledged frame(s) */ skb_queue_walk(&self->wx_list, skb) { irlap_wait_min_turn_around(self, &self->qos_tx); /* We copy the skb to be retransmitted since we will have to * modify it. Cloning will confuse packet sniffers */ /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ tx_skb = skb_copy(skb, GFP_ATOMIC); if (!tx_skb) { IRDA_DEBUG(0, "%s(), unable to copy\n", __func__); return; } /* Clear old Nr field + poll bit */ tx_skb->data[1] &= 0x0f; /* * Set poll bit on the last frame retransmitted */ if (skb_queue_is_last(&self->wx_list, skb)) tx_skb->data[1] |= PF_BIT; /* Set p/f bit */ else tx_skb->data[1] &= ~PF_BIT; /* Clear p/f bit */ irlap_send_i_frame(self, tx_skb, command); } #if 0 /* Not yet */ /* * We can now fill the window with additional data frames */ while (!skb_queue_empty(&self->txq)) { IRDA_DEBUG(0, "%s(), sending additional frames!\n", __func__); if (self->window > 0) { skb = skb_dequeue( &self->txq); IRDA_ASSERT(skb != NULL, return;); /* * If send window > 1 then send frame with pf * bit cleared */ if ((self->window > 1) && !skb_queue_empty(&self->txq)) { irlap_send_data_primary(self, skb); } else { irlap_send_data_primary_poll(self, skb); } kfree_skb(skb); } } #endif } void irlap_resend_rejected_frame(struct irlap_cb *self, int command) { struct sk_buff *tx_skb; struct sk_buff *skb; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Resend unacknowledged frame(s) */ skb = skb_peek(&self->wx_list); if (skb != NULL) { irlap_wait_min_turn_around(self, &self->qos_tx); /* We copy the skb to be retransmitted since we will have to * modify it. 
Cloning will confuse packet sniffers */ /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ tx_skb = skb_copy(skb, GFP_ATOMIC); if (!tx_skb) { IRDA_DEBUG(0, "%s(), unable to copy\n", __func__); return; } /* Clear old Nr field + poll bit */ tx_skb->data[1] &= 0x0f; /* Set poll/final bit */ tx_skb->data[1] |= PF_BIT; /* Set p/f bit */ irlap_send_i_frame(self, tx_skb, command); } } /* * Function irlap_send_ui_frame (self, skb, command) * * Contruct and transmit an Unnumbered Information (UI) frame * */ void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb, __u8 caddr, int command) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); /* Insert connection address */ skb->data[0] = caddr | ((command) ? CMD_FRAME : 0); irlap_queue_xmit(self, skb); } /* * Function irlap_send_i_frame (skb) * * Contruct and transmit Information (I) frame */ static void irlap_send_i_frame(struct irlap_cb *self, struct sk_buff *skb, int command) { /* Insert connection address */ skb->data[0] = self->caddr; skb->data[0] |= (command) ? CMD_FRAME : 0; /* Insert next to receive (Vr) */ skb->data[1] |= (self->vr << 5); /* insert nr */ irlap_queue_xmit(self, skb); } /* * Function irlap_recv_i_frame (skb, frame) * * Receive and parse an I (Information) frame, no harm in making it inline * since it's called only from one single place (irlap_driver_rcv). 
*/ static inline void irlap_recv_i_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { info->nr = skb->data[1] >> 5; /* Next to receive */ info->pf = skb->data[1] & PF_BIT; /* Final bit */ info->ns = (skb->data[1] >> 1) & 0x07; /* Next to send */ /* Check if this is a command or a response frame */ if (command) irlap_do_event(self, RECV_I_CMD, skb, info); else irlap_do_event(self, RECV_I_RSP, skb, info); } /* * Function irlap_recv_ui_frame (self, skb, info) * * Receive and parse an Unnumbered Information (UI) frame * */ static void irlap_recv_ui_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info) { IRDA_DEBUG( 4, "%s()\n", __func__); info->pf = skb->data[1] & PF_BIT; /* Final bit */ irlap_do_event(self, RECV_UI_FRAME, skb, info); } /* * Function irlap_recv_frmr_frame (skb, frame) * * Received Frame Reject response. * */ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info) { __u8 *frame; int w, x, y, z; IRDA_DEBUG(0, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(info != NULL, return;); if (!pskb_may_pull(skb, 4)) { IRDA_ERROR("%s: frame too short!\n", __func__); return; } frame = skb->data; info->nr = frame[2] >> 5; /* Next to receive */ info->pf = frame[2] & PF_BIT; /* Final bit */ info->ns = (frame[2] >> 1) & 0x07; /* Next to send */ w = frame[3] & 0x01; x = frame[3] & 0x02; y = frame[3] & 0x04; z = frame[3] & 0x08; if (w) { IRDA_DEBUG(0, "Rejected control field is undefined or not " "implemented.\n"); } if (x) { IRDA_DEBUG(0, "Rejected control field was invalid because it " "contained a non permitted I field.\n"); } if (y) { IRDA_DEBUG(0, "Received I field exceeded the maximum negotiated " "for the existing connection or exceeded the maximum " "this station supports if no connection exists.\n"); } if (z) { IRDA_DEBUG(0, "Rejected control field 
control field contained an " "invalid Nr count.\n"); } irlap_do_event(self, RECV_FRMR_RSP, skb, info); } /* * Function irlap_send_test_frame (self, daddr) * * Send a test frame response * */ void irlap_send_test_frame(struct irlap_cb *self, __u8 caddr, __u32 daddr, struct sk_buff *cmd) { struct sk_buff *tx_skb; struct test_frame *frame; __u8 *info; tx_skb = alloc_skb(cmd->len + sizeof(struct test_frame), GFP_ATOMIC); if (!tx_skb) return; /* Broadcast frames must include saddr and daddr fields */ if (caddr == CBROADCAST) { frame = (struct test_frame *) skb_put(tx_skb, sizeof(struct test_frame)); /* Insert the swapped addresses */ frame->saddr = cpu_to_le32(self->saddr); frame->daddr = cpu_to_le32(daddr); } else frame = (struct test_frame *) skb_put(tx_skb, LAP_ADDR_HEADER + LAP_CTRL_HEADER); frame->caddr = caddr; frame->control = TEST_RSP | PF_BIT; /* Copy info */ info = skb_put(tx_skb, cmd->len); memcpy(info, cmd->data, cmd->len); /* Return to sender */ irlap_wait_min_turn_around(self, &self->qos_tx); irlap_queue_xmit(self, tx_skb); } /* * Function irlap_recv_test_frame (self, skb) * * Receive a test frame * */ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { struct test_frame *frame; IRDA_DEBUG(2, "%s()\n", __func__); if (!pskb_may_pull(skb, sizeof(*frame))) { IRDA_ERROR("%s: frame too short!\n", __func__); return; } frame = (struct test_frame *) skb->data; /* Broadcast frames must carry saddr and daddr fields */ if (info->caddr == CBROADCAST) { if (skb->len < sizeof(struct test_frame)) { IRDA_DEBUG(0, "%s() test frame too short!\n", __func__); return; } /* Read and swap addresses */ info->daddr = le32_to_cpu(frame->saddr); info->saddr = le32_to_cpu(frame->daddr); /* Make sure frame is addressed to us */ if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { return; } } if (command) irlap_do_event(self, RECV_TEST_CMD, skb, info); else irlap_do_event(self, RECV_TEST_RSP, skb, info); } 
/* * Function irlap_driver_rcv (skb, netdev, ptype) * * Called when a frame is received. Dispatches the right receive function * for processing of the frame. * * Note on skb management : * After calling the higher layers of the IrDA stack, we always * kfree() the skb, which drop the reference count (and potentially * destroy it). * If a higher layer of the stack want to keep the skb around (to put * in a queue or pass it to the higher layer), it will need to use * skb_get() to keep a reference on it. This is usually done at the * LMP level in irlmp.c. * Jean II */ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { struct irlap_info info; struct irlap_cb *self; int command; __u8 control; int ret = -1; if (!net_eq(dev_net(dev), &init_net)) goto out; /* FIXME: should we get our own field? */ self = (struct irlap_cb *) dev->atalk_ptr; /* If the net device is down, then IrLAP is gone! */ if (!self || self->magic != LAP_MAGIC) goto err; /* We are no longer an "old" protocol, so we need to handle * share and non linear skbs. This should never happen, so * we don't need to be clever about it. Jean II */ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { IRDA_ERROR("%s: can't clone shared skb!\n", __func__); goto err; } /* Check if frame is large enough for parsing */ if (!pskb_may_pull(skb, 2)) { IRDA_ERROR("%s: frame too short!\n", __func__); goto err; } command = skb->data[0] & CMD_FRAME; info.caddr = skb->data[0] & CBROADCAST; info.pf = skb->data[1] & PF_BIT; info.control = skb->data[1] & ~PF_BIT; /* Mask away poll/final bit */ control = info.control; /* First we check if this frame has a valid connection address */ if ((info.caddr != self->caddr) && (info.caddr != CBROADCAST)) { IRDA_DEBUG(0, "%s(), wrong connection address!\n", __func__); goto out; } /* * Optimize for the common case and check if the frame is an * I(nformation) frame. 
Only I-frames have bit 0 set to 0 */ if (~control & 0x01) { irlap_recv_i_frame(self, skb, &info, command); goto out; } /* * We now check is the frame is an S(upervisory) frame. Only * S-frames have bit 0 set to 1 and bit 1 set to 0 */ if (~control & 0x02) { /* * Received S(upervisory) frame, check which frame type it is * only the first nibble is of interest */ switch (control & 0x0f) { case RR: irlap_recv_rr_frame(self, skb, &info, command); break; case RNR: irlap_recv_rnr_frame(self, skb, &info, command); break; case REJ: irlap_recv_rej_frame(self, skb, &info, command); break; case SREJ: irlap_recv_srej_frame(self, skb, &info, command); break; default: IRDA_WARNING("%s: Unknown S-frame %02x received!\n", __func__, info.control); break; } goto out; } /* * This must be a C(ontrol) frame */ switch (control) { case XID_RSP: irlap_recv_discovery_xid_rsp(self, skb, &info); break; case XID_CMD: irlap_recv_discovery_xid_cmd(self, skb, &info); break; case SNRM_CMD: irlap_recv_snrm_cmd(self, skb, &info); break; case DM_RSP: irlap_do_event(self, RECV_DM_RSP, skb, &info); break; case DISC_CMD: /* And RD_RSP since they have the same value */ irlap_recv_disc_frame(self, skb, &info, command); break; case TEST_CMD: irlap_recv_test_frame(self, skb, &info, command); break; case UA_RSP: irlap_recv_ua_frame(self, skb, &info); break; case FRMR_RSP: irlap_recv_frmr_frame(self, skb, &info); break; case UI_FRAME: irlap_recv_ui_frame(self, skb, &info); break; default: IRDA_WARNING("%s: Unknown frame %02x received!\n", __func__, info.control); break; } out: ret = 0; err: /* Always drop our reference on the skb */ dev_kfree_skb(skb); return ret; }
gpl-2.0
Savaged-Zen/Savaged-Zen
drivers/pci/pcie/aer/aer_inject.c
163
12036
/* * PCIe AER software error injection support. * * Debuging PCIe AER code is quite difficult because it is hard to * trigger various real hardware errors. Software based error * injection can fake almost all kinds of errors with the help of a * user space helper tool aer-inject, which can be gotten from: * http://www.kernel.org/pub/linux/utils/pci/aer-inject/ * * Copyright 2009 Intel Corporation. * Huang Ying <ying.huang@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/stddef.h> #include "aerdrv.h" struct aer_error_inj { u8 bus; u8 dev; u8 fn; u32 uncor_status; u32 cor_status; u32 header_log0; u32 header_log1; u32 header_log2; u32 header_log3; u16 domain; }; struct aer_error { struct list_head list; u16 domain; unsigned int bus; unsigned int devfn; int pos_cap_err; u32 uncor_status; u32 cor_status; u32 header_log0; u32 header_log1; u32 header_log2; u32 header_log3; u32 root_status; u32 source_id; }; struct pci_bus_ops { struct list_head list; struct pci_bus *bus; struct pci_ops *ops; }; static LIST_HEAD(einjected); static LIST_HEAD(pci_bus_ops_list); /* Protect einjected and pci_bus_ops_list */ static DEFINE_SPINLOCK(inject_lock); static void aer_error_init(struct aer_error *err, u16 domain, unsigned int bus, unsigned int devfn, int pos_cap_err) { INIT_LIST_HEAD(&err->list); err->domain = domain; err->bus = bus; err->devfn = devfn; err->pos_cap_err = pos_cap_err; } /* inject_lock must be held before calling */ static struct aer_error *__find_aer_error(u16 domain, unsigned int bus, unsigned int devfn) { struct aer_error *err; list_for_each_entry(err, &einjected, list) { if (domain == err->domain && bus == 
err->bus && devfn == err->devfn) return err; } return NULL; } /* inject_lock must be held before calling */ static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev) { int domain = pci_domain_nr(dev->bus); if (domain < 0) return NULL; return __find_aer_error((u16)domain, dev->bus->number, dev->devfn); } /* inject_lock must be held before calling */ static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus) { struct pci_bus_ops *bus_ops; list_for_each_entry(bus_ops, &pci_bus_ops_list, list) { if (bus_ops->bus == bus) return bus_ops->ops; } return NULL; } static struct pci_bus_ops *pci_bus_ops_pop(void) { unsigned long flags; struct pci_bus_ops *bus_ops = NULL; spin_lock_irqsave(&inject_lock, flags); if (list_empty(&pci_bus_ops_list)) bus_ops = NULL; else { struct list_head *lh = pci_bus_ops_list.next; list_del(lh); bus_ops = list_entry(lh, struct pci_bus_ops, list); } spin_unlock_irqrestore(&inject_lock, flags); return bus_ops; } static u32 *find_pci_config_dword(struct aer_error *err, int where, int *prw1cs) { int rw1cs = 0; u32 *target = NULL; if (err->pos_cap_err == -1) return NULL; switch (where - err->pos_cap_err) { case PCI_ERR_UNCOR_STATUS: target = &err->uncor_status; rw1cs = 1; break; case PCI_ERR_COR_STATUS: target = &err->cor_status; rw1cs = 1; break; case PCI_ERR_HEADER_LOG: target = &err->header_log0; break; case PCI_ERR_HEADER_LOG+4: target = &err->header_log1; break; case PCI_ERR_HEADER_LOG+8: target = &err->header_log2; break; case PCI_ERR_HEADER_LOG+12: target = &err->header_log3; break; case PCI_ERR_ROOT_STATUS: target = &err->root_status; rw1cs = 1; break; case PCI_ERR_ROOT_ERR_SRC: target = &err->source_id; break; } if (prw1cs) *prw1cs = rw1cs; return target; } static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { u32 *sim; struct aer_error *err; unsigned long flags; struct pci_ops *ops; int domain; spin_lock_irqsave(&inject_lock, flags); if (size != sizeof(u32)) goto out; domain = 
pci_domain_nr(bus); if (domain < 0) goto out; err = __find_aer_error((u16)domain, bus->number, devfn); if (!err) goto out; sim = find_pci_config_dword(err, where, NULL); if (sim) { *val = *sim; spin_unlock_irqrestore(&inject_lock, flags); return 0; } out: ops = __find_pci_bus_ops(bus); spin_unlock_irqrestore(&inject_lock, flags); return ops->read(bus, devfn, where, size, val); } int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { u32 *sim; struct aer_error *err; unsigned long flags; int rw1cs; struct pci_ops *ops; int domain; spin_lock_irqsave(&inject_lock, flags); if (size != sizeof(u32)) goto out; domain = pci_domain_nr(bus); if (domain < 0) goto out; err = __find_aer_error((u16)domain, bus->number, devfn); if (!err) goto out; sim = find_pci_config_dword(err, where, &rw1cs); if (sim) { if (rw1cs) *sim ^= val; else *sim = val; spin_unlock_irqrestore(&inject_lock, flags); return 0; } out: ops = __find_pci_bus_ops(bus); spin_unlock_irqrestore(&inject_lock, flags); return ops->write(bus, devfn, where, size, val); } static struct pci_ops pci_ops_aer = { .read = pci_read_aer, .write = pci_write_aer, }; static void pci_bus_ops_init(struct pci_bus_ops *bus_ops, struct pci_bus *bus, struct pci_ops *ops) { INIT_LIST_HEAD(&bus_ops->list); bus_ops->bus = bus; bus_ops->ops = ops; } static int pci_bus_set_aer_ops(struct pci_bus *bus) { struct pci_ops *ops; struct pci_bus_ops *bus_ops; unsigned long flags; bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL); if (!bus_ops) return -ENOMEM; ops = pci_bus_set_ops(bus, &pci_ops_aer); spin_lock_irqsave(&inject_lock, flags); if (ops == &pci_ops_aer) goto out; pci_bus_ops_init(bus_ops, bus, ops); list_add(&bus_ops->list, &pci_bus_ops_list); bus_ops = NULL; out: spin_unlock_irqrestore(&inject_lock, flags); kfree(bus_ops); return 0; } static struct pci_dev *pcie_find_root_port(struct pci_dev *dev) { while (1) { if (!pci_is_pcie(dev)) break; if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) return dev; if 
(!dev->bus->self) break; dev = dev->bus->self; } return NULL; } static int find_aer_device_iter(struct device *device, void *data) { struct pcie_device **result = data; struct pcie_device *pcie_dev; if (device->bus == &pcie_port_bus_type) { pcie_dev = to_pcie_device(device); if (pcie_dev->service & PCIE_PORT_SERVICE_AER) { *result = pcie_dev; return 1; } } return 0; } static int find_aer_device(struct pci_dev *dev, struct pcie_device **result) { return device_for_each_child(&dev->dev, result, find_aer_device_iter); } static int aer_inject(struct aer_error_inj *einj) { struct aer_error *err, *rperr; struct aer_error *err_alloc = NULL, *rperr_alloc = NULL; struct pci_dev *dev, *rpdev; struct pcie_device *edev; unsigned long flags; unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); int pos_cap_err, rp_pos_cap_err; u32 sever, cor_mask, uncor_mask; int ret = 0; dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn); if (!dev) return -ENODEV; rpdev = pcie_find_root_port(dev); if (!rpdev) { ret = -ENOTTY; goto out_put; } pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (!pos_cap_err) { ret = -ENOTTY; goto out_put; } pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever); pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask); pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, &uncor_mask); rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR); if (!rp_pos_cap_err) { ret = -ENOTTY; goto out_put; } err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL); if (!err_alloc) { ret = -ENOMEM; goto out_put; } rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL); if (!rperr_alloc) { ret = -ENOMEM; goto out_put; } spin_lock_irqsave(&inject_lock, flags); err = __find_aer_error_by_dev(dev); if (!err) { err = err_alloc; err_alloc = NULL; aer_error_init(err, einj->domain, einj->bus, devfn, pos_cap_err); list_add(&err->list, &einjected); } err->uncor_status |= einj->uncor_status; 
err->cor_status |= einj->cor_status; err->header_log0 = einj->header_log0; err->header_log1 = einj->header_log1; err->header_log2 = einj->header_log2; err->header_log3 = einj->header_log3; if (einj->cor_status && !(einj->cor_status & ~cor_mask)) { ret = -EINVAL; printk(KERN_WARNING "The correctable error(s) is masked " "by device\n"); spin_unlock_irqrestore(&inject_lock, flags); goto out_put; } if (einj->uncor_status && !(einj->uncor_status & ~uncor_mask)) { ret = -EINVAL; printk(KERN_WARNING "The uncorrectable error(s) is masked " "by device\n"); spin_unlock_irqrestore(&inject_lock, flags); goto out_put; } rperr = __find_aer_error_by_dev(rpdev); if (!rperr) { rperr = rperr_alloc; rperr_alloc = NULL; aer_error_init(rperr, pci_domain_nr(rpdev->bus), rpdev->bus->number, rpdev->devfn, rp_pos_cap_err); list_add(&rperr->list, &einjected); } if (einj->cor_status) { if (rperr->root_status & PCI_ERR_ROOT_COR_RCV) rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV; else rperr->root_status |= PCI_ERR_ROOT_COR_RCV; rperr->source_id &= 0xffff0000; rperr->source_id |= (einj->bus << 8) | devfn; } if (einj->uncor_status) { if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV) rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV; if (sever & einj->uncor_status) { rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV; if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)) rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL; } else rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV; rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV; rperr->source_id &= 0x0000ffff; rperr->source_id |= ((einj->bus << 8) | devfn) << 16; } spin_unlock_irqrestore(&inject_lock, flags); ret = pci_bus_set_aer_ops(dev->bus); if (ret) goto out_put; ret = pci_bus_set_aer_ops(rpdev->bus); if (ret) goto out_put; if (find_aer_device(rpdev, &edev)) { if (!get_service_data(edev)) { printk(KERN_WARNING "AER service is not initialized\n"); ret = -EINVAL; goto out_put; } aer_irq(-1, edev); } else ret = -EINVAL; out_put: kfree(err_alloc); 
kfree(rperr_alloc); pci_dev_put(dev); return ret; } static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf, size_t usize, loff_t *off) { struct aer_error_inj einj; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (usize < offsetof(struct aer_error_inj, domain) || usize > sizeof(einj)) return -EINVAL; memset(&einj, 0, sizeof(einj)); if (copy_from_user(&einj, ubuf, usize)) return -EFAULT; ret = aer_inject(&einj); return ret ? ret : usize; } static const struct file_operations aer_inject_fops = { .write = aer_inject_write, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice aer_inject_device = { .minor = MISC_DYNAMIC_MINOR, .name = "aer_inject", .fops = &aer_inject_fops, }; static int __init aer_inject_init(void) { return misc_register(&aer_inject_device); } static void __exit aer_inject_exit(void) { struct aer_error *err, *err_next; unsigned long flags; struct pci_bus_ops *bus_ops; misc_deregister(&aer_inject_device); while ((bus_ops = pci_bus_ops_pop())) { pci_bus_set_ops(bus_ops->bus, bus_ops->ops); kfree(bus_ops); } spin_lock_irqsave(&inject_lock, flags); list_for_each_entry_safe(err, err_next, &einjected, list) { list_del(&err->list); kfree(err); } spin_unlock_irqrestore(&inject_lock, flags); } module_init(aer_inject_init); module_exit(aer_inject_exit); MODULE_DESCRIPTION("PCIe AER software error injector"); MODULE_LICENSE("GPL");
gpl-2.0
getitnowmarketing/mecha_2.6.32
sound/pci/ice1712/maya44.c
419
21072
/* * ALSA driver for ICEnsemble VT1724 (Envy24HT) * * Lowlevel functions for ESI Maya44 cards * * Copyright (c) 2009 Takashi Iwai <tiwai@suse.de> * Based on the patches by Rainer Zimmermann <mail@lightshed.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/slab.h> #include <linux/io.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/tlv.h> #include "ice1712.h" #include "envy24ht.h" #include "maya44.h" /* WM8776 register indexes */ #define WM8776_REG_HEADPHONE_L 0x00 #define WM8776_REG_HEADPHONE_R 0x01 #define WM8776_REG_HEADPHONE_MASTER 0x02 #define WM8776_REG_DAC_ATTEN_L 0x03 #define WM8776_REG_DAC_ATTEN_R 0x04 #define WM8776_REG_DAC_ATTEN_MASTER 0x05 #define WM8776_REG_DAC_PHASE 0x06 #define WM8776_REG_DAC_CONTROL 0x07 #define WM8776_REG_DAC_MUTE 0x08 #define WM8776_REG_DAC_DEEMPH 0x09 #define WM8776_REG_DAC_IF_CONTROL 0x0a #define WM8776_REG_ADC_IF_CONTROL 0x0b #define WM8776_REG_MASTER_MODE_CONTROL 0x0c #define WM8776_REG_POWERDOWN 0x0d #define WM8776_REG_ADC_ATTEN_L 0x0e #define WM8776_REG_ADC_ATTEN_R 0x0f #define WM8776_REG_ADC_ALC1 0x10 #define WM8776_REG_ADC_ALC2 0x11 #define WM8776_REG_ADC_ALC3 0x12 #define WM8776_REG_ADC_NOISE_GATE 0x13 #define WM8776_REG_ADC_LIMITER 0x14 #define WM8776_REG_ADC_MUX 0x15 #define 
WM8776_REG_OUTPUT_MUX 0x16 #define WM8776_REG_RESET 0x17 #define WM8776_NUM_REGS 0x18 /* clock ratio identifiers for snd_wm8776_set_rate() */ #define WM8776_CLOCK_RATIO_128FS 0 #define WM8776_CLOCK_RATIO_192FS 1 #define WM8776_CLOCK_RATIO_256FS 2 #define WM8776_CLOCK_RATIO_384FS 3 #define WM8776_CLOCK_RATIO_512FS 4 #define WM8776_CLOCK_RATIO_768FS 5 enum { WM_VOL_HP, WM_VOL_DAC, WM_VOL_ADC, WM_NUM_VOLS }; enum { WM_SW_DAC, WM_SW_BYPASS, WM_NUM_SWITCHES }; struct snd_wm8776 { unsigned char addr; unsigned short regs[WM8776_NUM_REGS]; unsigned char volumes[WM_NUM_VOLS][2]; unsigned int switch_bits; }; struct snd_maya44 { struct snd_ice1712 *ice; struct snd_wm8776 wm[2]; struct mutex mutex; }; /* write the given register and save the data to the cache */ static void wm8776_write(struct snd_ice1712 *ice, struct snd_wm8776 *wm, unsigned char reg, unsigned short val) { /* * WM8776 registers are up to 9 bits wide, bit 8 is placed in the LSB * of the address field */ snd_vt1724_write_i2c(ice, wm->addr, (reg << 1) | ((val >> 8) & 1), val & 0xff); wm->regs[reg] = val; } /* * update the given register with and/or mask and save the data to the cache */ static int wm8776_write_bits(struct snd_ice1712 *ice, struct snd_wm8776 *wm, unsigned char reg, unsigned short mask, unsigned short val) { val |= wm->regs[reg] & ~mask; if (val != wm->regs[reg]) { wm8776_write(ice, wm, reg, val); return 1; } return 0; } /* * WM8776 volume controls */ struct maya_vol_info { unsigned int maxval; /* volume range: 0..maxval */ unsigned char regs[2]; /* left and right registers */ unsigned short mask; /* value mask */ unsigned short offset; /* zero-value offset */ unsigned short mute; /* mute bit */ unsigned short update; /* update bits */ unsigned char mux_bits[2]; /* extra bits for ADC mute */ }; static struct maya_vol_info vol_info[WM_NUM_VOLS] = { [WM_VOL_HP] = { .maxval = 80, .regs = { WM8776_REG_HEADPHONE_L, WM8776_REG_HEADPHONE_R }, .mask = 0x7f, .offset = 0x30, .mute = 0x00, .update = 0x180, 
/* update and zero-cross enable */ }, [WM_VOL_DAC] = { .maxval = 255, .regs = { WM8776_REG_DAC_ATTEN_L, WM8776_REG_DAC_ATTEN_R }, .mask = 0xff, .offset = 0x01, .mute = 0x00, .update = 0x100, /* zero-cross enable */ }, [WM_VOL_ADC] = { .maxval = 91, .regs = { WM8776_REG_ADC_ATTEN_L, WM8776_REG_ADC_ATTEN_R }, .mask = 0xff, .offset = 0xa5, .mute = 0xa5, .update = 0x100, /* update */ .mux_bits = { 0x80, 0x40 }, /* ADCMUX bits */ }, }; /* * dB tables */ /* headphone output: mute, -73..+6db (1db step) */ static const DECLARE_TLV_DB_SCALE(db_scale_hp, -7400, 100, 1); /* DAC output: mute, -127..0db (0.5db step) */ static const DECLARE_TLV_DB_SCALE(db_scale_dac, -12750, 50, 1); /* ADC gain: mute, -21..+24db (0.5db step) */ static const DECLARE_TLV_DB_SCALE(db_scale_adc, -2100, 50, 1); static int maya_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { unsigned int idx = kcontrol->private_value; struct maya_vol_info *vol = &vol_info[idx]; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = vol->maxval; return 0; } static int maya_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol); struct snd_wm8776 *wm = &chip->wm[snd_ctl_get_ioff(kcontrol, &ucontrol->id)]; unsigned int idx = kcontrol->private_value; mutex_lock(&chip->mutex); ucontrol->value.integer.value[0] = wm->volumes[idx][0]; ucontrol->value.integer.value[1] = wm->volumes[idx][1]; mutex_unlock(&chip->mutex); return 0; } static int maya_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol); struct snd_wm8776 *wm = &chip->wm[snd_ctl_get_ioff(kcontrol, &ucontrol->id)]; unsigned int idx = kcontrol->private_value; struct maya_vol_info *vol = &vol_info[idx]; unsigned int val, data; int ch, changed = 0; mutex_lock(&chip->mutex); for (ch = 0; ch < 2; ch++) { val = 
ucontrol->value.integer.value[ch]; if (val > vol->maxval) val = vol->maxval; if (val == wm->volumes[idx][ch]) continue; if (!val) data = vol->mute; else data = (val - 1) + vol->offset; data |= vol->update; changed |= wm8776_write_bits(chip->ice, wm, vol->regs[ch], vol->mask | vol->update, data); if (vol->mux_bits[ch]) wm8776_write_bits(chip->ice, wm, WM8776_REG_ADC_MUX, vol->mux_bits[ch], val ? 0 : vol->mux_bits[ch]); wm->volumes[idx][ch] = val; } mutex_unlock(&chip->mutex); return changed; } /* * WM8776 switch controls */ #define COMPOSE_SW_VAL(idx, reg, mask) ((idx) | ((reg) << 8) | ((mask) << 16)) #define GET_SW_VAL_IDX(val) ((val) & 0xff) #define GET_SW_VAL_REG(val) (((val) >> 8) & 0xff) #define GET_SW_VAL_MASK(val) (((val) >> 16) & 0xff) #define maya_sw_info snd_ctl_boolean_mono_info static int maya_sw_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol); struct snd_wm8776 *wm = &chip->wm[snd_ctl_get_ioff(kcontrol, &ucontrol->id)]; unsigned int idx = GET_SW_VAL_IDX(kcontrol->private_value); ucontrol->value.integer.value[0] = (wm->switch_bits >> idx) & 1; return 0; } static int maya_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol); struct snd_wm8776 *wm = &chip->wm[snd_ctl_get_ioff(kcontrol, &ucontrol->id)]; unsigned int idx = GET_SW_VAL_IDX(kcontrol->private_value); unsigned int mask, val; int changed; mutex_lock(&chip->mutex); mask = 1 << idx; wm->switch_bits &= ~mask; val = ucontrol->value.integer.value[0]; if (val) wm->switch_bits |= mask; mask = GET_SW_VAL_MASK(kcontrol->private_value); changed = wm8776_write_bits(chip->ice, wm, GET_SW_VAL_REG(kcontrol->private_value), mask, val ? 
mask : 0);
	mutex_unlock(&chip->mutex);
	return changed;
}

/*
 * GPIO pins (known ones for maya44)
 */
#define GPIO_PHANTOM_OFF	2
#define GPIO_MIC_RELAY		4
#define GPIO_SPDIF_IN_INV	5
#define GPIO_MUST_BE_0		7

/*
 * GPIO switch controls
 */

/* pack a GPIO bit number and an active-low flag into kcontrol private_value */
#define COMPOSE_GPIO_VAL(shift, inv)	((shift) | ((inv) << 8))
#define GET_GPIO_VAL_SHIFT(val)		((val) & 0xff)
#define GET_GPIO_VAL_INV(val)		(((val) >> 8) & 1)

/*
 * Read-modify-write the masked GPIO bits to @bits.
 * Returns 1 if the GPIO state actually changed, 0 if it already held
 * the requested value (used directly as the kcontrol "changed" result).
 */
static int maya_set_gpio_bits(struct snd_ice1712 *ice, unsigned int mask,
			      unsigned int bits)
{
	unsigned int data;
	data = snd_ice1712_gpio_read(ice);
	if ((data & mask) == bits)
		return 0;
	snd_ice1712_gpio_write(ice, (data & ~mask) | bits);
	return 1;
}

#define maya_gpio_sw_info	snd_ctl_boolean_mono_info

/* report one GPIO bit as a boolean control, inverted when the inv flag is set */
static int maya_gpio_sw_get(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol);
	unsigned int shift = GET_GPIO_VAL_SHIFT(kcontrol->private_value);
	unsigned int val;

	val = (snd_ice1712_gpio_read(chip->ice) >> shift) & 1;
	if (GET_GPIO_VAL_INV(kcontrol->private_value))
		val = !val;
	ucontrol->value.integer.value[0] = val;
	return 0;
}

/* set one GPIO bit from a boolean control, serialized by the chip mutex */
static int maya_gpio_sw_put(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol);
	unsigned int shift = GET_GPIO_VAL_SHIFT(kcontrol->private_value);
	unsigned int val, mask;
	int changed;

	mutex_lock(&chip->mutex);
	mask = 1 << shift;
	val = ucontrol->value.integer.value[0];
	if (GET_GPIO_VAL_INV(kcontrol->private_value))
		val = !val;
	val = val ?
mask : 0; changed = maya_set_gpio_bits(chip->ice, mask, val); mutex_unlock(&chip->mutex); return changed; } /* * capture source selection */ /* known working input slots (0-4) */ #define MAYA_LINE_IN 1 /* in-2 */ #define MAYA_MIC_IN 4 /* in-5 */ static void wm8776_select_input(struct snd_maya44 *chip, int idx, int line) { wm8776_write_bits(chip->ice, &chip->wm[idx], WM8776_REG_ADC_MUX, 0x1f, 1 << line); } static int maya_rec_src_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[] = { "Line", "Mic" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = ARRAY_SIZE(texts); if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int maya_rec_src_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol); int sel; if (snd_ice1712_gpio_read(chip->ice) & (1 << GPIO_MIC_RELAY)) sel = 1; else sel = 0; ucontrol->value.enumerated.item[0] = sel; return 0; } static int maya_rec_src_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol); int sel = ucontrol->value.enumerated.item[0]; int changed; mutex_lock(&chip->mutex); changed = maya_set_gpio_bits(chip->ice, GPIO_MIC_RELAY, sel ? GPIO_MIC_RELAY : 0); wm8776_select_input(chip, 0, sel ? MAYA_MIC_IN : MAYA_LINE_IN); mutex_unlock(&chip->mutex); return changed; } /* * Maya44 routing switch settings have different meanings than the standard * ice1724 switches as defined in snd_vt1724_pro_route_info (ice1724.c). 
*/ static int maya_pb_route_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[] = { "PCM Out", /* 0 */ "Input 1", "Input 2", "Input 3", "Input 4" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = ARRAY_SIZE(texts); if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int maya_pb_route_shift(int idx) { static const unsigned char shift[10] = { 8, 20, 0, 3, 11, 23, 14, 26, 17, 29 }; return shift[idx % 10]; } static int maya_pb_route_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol); int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); ucontrol->value.enumerated.item[0] = snd_ice1724_get_route_val(chip->ice, maya_pb_route_shift(idx)); return 0; } static int maya_pb_route_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol); int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); return snd_ice1724_put_route_val(chip->ice, ucontrol->value.enumerated.item[0], maya_pb_route_shift(idx)); } /* * controls to be added */ static struct snd_kcontrol_new maya_controls[] __devinitdata = { { .name = "Crossmix Playback Volume", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .info = maya_vol_info, .get = maya_vol_get, .put = maya_vol_put, .tlv = { .p = db_scale_hp }, .private_value = WM_VOL_HP, .count = 2, }, { .name = "PCM Playback Volume", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .info = maya_vol_info, .get = maya_vol_get, .put = maya_vol_put, .tlv = { .p = db_scale_dac }, .private_value = WM_VOL_DAC, .count = 2, }, { .name = "Line 
Capture Volume", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .info = maya_vol_info, .get = maya_vol_get, .put = maya_vol_put, .tlv = { .p = db_scale_adc }, .private_value = WM_VOL_ADC, .count = 2, }, { .name = "PCM Playback Switch", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .info = maya_sw_info, .get = maya_sw_get, .put = maya_sw_put, .private_value = COMPOSE_SW_VAL(WM_SW_DAC, WM8776_REG_OUTPUT_MUX, 0x01), .count = 2, }, { .name = "Bypass Playback Switch", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .info = maya_sw_info, .get = maya_sw_get, .put = maya_sw_put, .private_value = COMPOSE_SW_VAL(WM_SW_BYPASS, WM8776_REG_OUTPUT_MUX, 0x04), .count = 2, }, { .name = "Capture Source", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .info = maya_rec_src_info, .get = maya_rec_src_get, .put = maya_rec_src_put, }, { .name = "Mic Phantom Power Switch", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .info = maya_gpio_sw_info, .get = maya_gpio_sw_get, .put = maya_gpio_sw_put, .private_value = COMPOSE_GPIO_VAL(GPIO_PHANTOM_OFF, 1), }, { .name = "SPDIF Capture Switch", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .info = maya_gpio_sw_info, .get = maya_gpio_sw_get, .put = maya_gpio_sw_put, .private_value = COMPOSE_GPIO_VAL(GPIO_SPDIF_IN_INV, 1), }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "H/W Playback Route", .info = maya_pb_route_info, .get = maya_pb_route_get, .put = maya_pb_route_put, .count = 4, /* FIXME: do controls 5-9 have any meaning? 
*/
	},
};

/*
 * Register every entry of maya_controls[] with the card.
 * Returns 0 on success or the first negative error from snd_ctl_add().
 */
static int __devinit maya44_add_controls(struct snd_ice1712 *ice)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(maya_controls); i++) {
		err = snd_ctl_add(ice->card, snd_ctl_new1(&maya_controls[i],
							  ice->spec));
		if (err < 0)
			return err;
	}
	return 0;
}

/*
 * initialize a wm8776 chip
 */
static void __devinit wm8776_init(struct snd_ice1712 *ice,
				  struct snd_wm8776 *wm, unsigned int addr)
{
	/* register/value pairs, terminated by the 0xff sentinel */
	static const unsigned short inits_wm8776[] = {
		0x02, 0x100, /* R2: headphone L+R muted + update */
		0x05, 0x100, /* R5: DAC output L+R muted + update */
		0x06, 0x000, /* R6: DAC output phase normal */
		0x07, 0x091, /* R7: DAC enable zero cross detection, normal output */
		0x08, 0x000, /* R8: DAC soft mute off */
		0x09, 0x000, /* R9: no deemph, DAC zero detect disabled */
		0x0a, 0x022, /* R10: DAC I2C mode, std polarities, 24bit */
		0x0b, 0x022, /* R11: ADC I2C mode, std polarities, 24bit,
			      * highpass filter enabled
			      */
		0x0c, 0x042, /* R12: ADC+DAC slave, ADC+DAC 44,1kHz */
		0x0d, 0x000, /* R13: all power up */
		0x0e, 0x100, /* R14: ADC left muted,
			      * enable zero cross detection
			      */
		0x0f, 0x100, /* R15: ADC right muted,
			      * enable zero cross detection
			      */
			     /* R16: ALC...*/
		0x11, 0x000, /* R17: disable ALC */
			     /* R18: ALC...*/
			     /* R19: noise gate...*/
		0x15, 0x000, /* R21: ADC input mux init, mute all inputs */
		0x16, 0x001, /* R22: output mux, select DAC */
		0xff, 0xff
	};

	const unsigned short *ptr;
	unsigned char reg;
	unsigned short data;

	wm->addr = addr;
	/* enable DAC output; mute bypass, aux & all inputs */
	wm->switch_bits = (1 << WM_SW_DAC);

	/* stream the init table to the codec, two shorts per write */
	ptr = inits_wm8776;
	while (*ptr != 0xff) {
		reg = *ptr++;
		data = *ptr++;
		wm8776_write(ice, wm, reg, data);
	}
}

/*
 * change the rate on the WM8776 codecs.
 * this assumes that the VT17xx's rate is changed by the calling function.
 * NOTE: even though the WM8776's are running in slave mode and rate
 * selection is automatic, we need to call snd_wm8776_set_rate() here
 * to make sure some flags are set correctly.
 */
static void set_rate(struct snd_ice1712 *ice, unsigned int rate)
{
	struct snd_maya44 *chip = ice->spec;
	unsigned int ratio, adc_ratio, val;
	int i;

	/*
	 * NOTE(review): the rates[] constraint list below advertises 64000,
	 * but there is no 64000 case here, so that rate would trip
	 * snd_BUG() — confirm against the upstream driver.
	 */
	switch (rate) {
	case 192000:
		ratio = WM8776_CLOCK_RATIO_128FS;
		break;
	case 176400:
		ratio = WM8776_CLOCK_RATIO_128FS;
		break;
	case 96000:
		ratio = WM8776_CLOCK_RATIO_256FS;
		break;
	case 88200:
		ratio = WM8776_CLOCK_RATIO_384FS;
		break;
	case 48000:
		ratio = WM8776_CLOCK_RATIO_512FS;
		break;
	case 44100:
		ratio = WM8776_CLOCK_RATIO_512FS;
		break;
	case 32000:
		ratio = WM8776_CLOCK_RATIO_768FS;
		break;
	case 0:
		/* no hint - S/PDIF input is master, simply return */
		return;
	default:
		snd_BUG();
		return;
	}

	/*
	 * this currently sets the same rate for ADC and DAC, but limits
	 * ADC rate to 256X (96kHz). For 256X mode (96kHz), this sets ADC
	 * oversampling to 64x, as recommended by WM8776 datasheet.
	 * Setting the rate is not really necessary in slave mode.
	 */
	adc_ratio = ratio;
	if (adc_ratio < WM8776_CLOCK_RATIO_256FS)
		adc_ratio = WM8776_CLOCK_RATIO_256FS;

	val = adc_ratio;
	if (adc_ratio == WM8776_CLOCK_RATIO_256FS)
		val |= 8;
	val |= ratio << 4;

	/* same master-mode-control value for both codecs */
	mutex_lock(&chip->mutex);
	for (i = 0; i < 2; i++)
		wm8776_write_bits(ice, &chip->wm[i],
				  WM8776_REG_MASTER_MODE_CONTROL,
				  0x180, val);
	mutex_unlock(&chip->mutex);
}

/*
 * supported sample rates (to override the default one)
 */

static unsigned int rates[] = {
	32000, 44100, 48000, 64000, 88200, 96000, 176400, 192000
};

/* playback rates: 32..192 kHz */
static struct snd_pcm_hw_constraint_list dac_rates = {
	.count = ARRAY_SIZE(rates),
	.list = rates,
	.mask = 0
};

/*
 * chip addresses on I2C bus
 */
static unsigned char wm8776_addr[2] __devinitdata = {
	0x34, 0x36, /* codec 0 & 1 */
};

/*
 * initialize the chip
 */
static int __devinit maya44_init(struct snd_ice1712 *ice)
{
	int i;
	struct snd_maya44 *chip;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;
	mutex_init(&chip->mutex);
	chip->ice = ice;
	ice->spec = chip;

	/* initialise codecs */
	ice->num_total_dacs = 4;
	ice->num_total_adcs = 4;
	ice->akm_codecs = 0;

	for (i = 0; i < 2; i++) {
		wm8776_init(ice, &chip->wm[i], wm8776_addr[i]);
		wm8776_select_input(chip, i, MAYA_LINE_IN);
	}

	/* set card specific rates */
	ice->hw_rates = &dac_rates;

	/* register change rate notifier */
	ice->gpio.set_pro_rate = set_rate;

	/* RDMA1 (2nd input channel) is used for ADC by default */
	ice->force_rdma1 = 1;

	/* have an own routing control */
	ice->own_routing = 1;

	return 0;
}

/*
 * Maya44 boards don't provide the EEPROM data except for the vendor IDs,
 * hence the driver needs to set it up properly.
 */

static unsigned char maya44_eeprom[] __devinitdata = {
	[ICE_EEP2_SYSCONF]     = 0x45,
		/* clock xin1=49.152MHz, mpu401, 2 stereo ADCs+DACs */
	[ICE_EEP2_ACLINK]      = 0x80,
		/* I2S */
	[ICE_EEP2_I2S]         = 0xf8,
		/* vol, 96k, 24bit, 192k */
	[ICE_EEP2_SPDIF]       = 0xc3,
		/* enable spdif out, spdif out supp, spdif-in, ext spdif out */
	[ICE_EEP2_GPIO_DIR]    = 0xff,
	[ICE_EEP2_GPIO_DIR1]   = 0xff,
	[ICE_EEP2_GPIO_DIR2]   = 0xff,
	[ICE_EEP2_GPIO_MASK]   = 0/*0x9f*/,
	[ICE_EEP2_GPIO_MASK1]  = 0/*0xff*/,
	[ICE_EEP2_GPIO_MASK2]  = 0/*0x7f*/,
	[ICE_EEP2_GPIO_STATE]  = (1 << GPIO_PHANTOM_OFF) |
					(1 << GPIO_SPDIF_IN_INV),
	[ICE_EEP2_GPIO_STATE1] = 0x00,
	[ICE_EEP2_GPIO_STATE2] = 0x00,
};

/* entry point */
struct snd_ice1712_card_info snd_vt1724_maya44_cards[] __devinitdata = {
	{
		.subvendor = VT1724_SUBDEVICE_MAYA44,
		.name = "ESI Maya44",
		.model = "maya44",
		.chip_init = maya44_init,
		.build_controls = maya44_add_controls,
		.eeprom_size = sizeof(maya44_eeprom),
		.eeprom_data = maya44_eeprom,
	},
	{ } /* terminator */
};
gpl-2.0
simone201/neak-gs3-jb2
drivers/gpu/drm/exynos/exynos_mixer.c
419
28673
/* * Copyright (C) 2011 Samsung Electronics Co.Ltd * Authors: * Seung-Woo Kim <sw0312.kim@samsung.com> * Inki Dae <inki.dae@samsung.com> * Joonyoung Shim <jy0922.shim@samsung.com> * * Based on drivers/media/video/s5p-tv/mixer_reg.c * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include "drmP.h" #include "regs-mixer.h" #include "regs-vp.h" #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/pm_runtime.h> #include <linux/clk.h> #include <linux/regulator/consumer.h> #include <drm/exynos_drm.h> #include "exynos_drm_drv.h" #include "exynos_drm_hdmi.h" #define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev)) struct hdmi_win_data { dma_addr_t dma_addr; void __iomem *vaddr; dma_addr_t chroma_dma_addr; void __iomem *chroma_vaddr; uint32_t pixel_format; unsigned int bpp; unsigned int crtc_x; unsigned int crtc_y; unsigned int crtc_width; unsigned int crtc_height; unsigned int fb_x; unsigned int fb_y; unsigned int fb_width; unsigned int fb_height; unsigned int src_width; unsigned int src_height; unsigned int mode_width; unsigned int mode_height; unsigned int scan_flags; }; struct mixer_resources { int irq; void __iomem *mixer_regs; void __iomem *vp_regs; spinlock_t reg_slock; struct clk *mixer; struct clk *vp; struct clk *sclk_mixer; struct clk *sclk_hdmi; struct clk *sclk_dac; }; struct mixer_context { struct device *dev; int pipe; bool interlace; bool powered; u32 int_en; struct mutex mixer_mutex; struct mixer_resources mixer_res; struct hdmi_win_data win_data[MIXER_WIN_NR]; }; static const u8 filter_y_horiz_tap8[] = { 0, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, 0, 0, 0, 0, 2, 4, 5, 6, 6, 6, 6, 6, 5, 5, 4, 3, 2, 1, 1, 0, -6, -12, -16, -18, -20, -21, -20, -20, -18, -16, -13, -10, -8, -5, -2, 127, 126, 125, 121, 114, 107, 99, 89, 79, 68, 57, 46, 35, 25, 16, 8, }; static const u8 filter_y_vert_tap4[] = { 0, -3, -6, -8, -8, -8, -8, -7, -6, -5, -4, -3, -2, -1, -1, 0, 127, 126, 124, 118, 111, 102, 92, 81, 70, 59, 48, 37, 27, 19, 11, 5, 0, 5, 11, 19, 27, 37, 48, 59, 70, 81, 92, 102, 111, 118, 124, 126, 0, 0, -1, -1, -2, -3, -4, -5, -6, -7, -8, -8, -8, -8, -6, -3, }; static const u8 filter_cr_horiz_tap4[] = { 0, -3, -6, -8, -8, -8, -8, -7, -6, -5, -4, -3, -2, -1, -1, 0, 127, 126, 124, 118, 111, 102, 92, 81, 70, 59, 48, 37, 27, 19, 11, 5, }; static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id) { return readl(res->vp_regs + reg_id); } static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id, u32 val) { writel(val, res->vp_regs + reg_id); } static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id, u32 val, u32 mask) { u32 old = vp_reg_read(res, reg_id); val = (val & mask) | (old & ~mask); writel(val, res->vp_regs + reg_id); } static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id) { return readl(res->mixer_regs + reg_id); } static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id, u32 val) { writel(val, res->mixer_regs + reg_id); } static inline void mixer_reg_writemask(struct mixer_resources *res, u32 reg_id, u32 val, u32 mask) { u32 old = mixer_reg_read(res, reg_id); val = (val & mask) | (old & ~mask); writel(val, res->mixer_regs + reg_id); } static void mixer_regs_dump(struct mixer_context *ctx) { #define DUMPREG(reg_id) \ do { \ DRM_DEBUG_KMS(#reg_id " = %08x\n", \ (u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \ } while (0) DUMPREG(MXR_STATUS); DUMPREG(MXR_CFG); DUMPREG(MXR_INT_EN); DUMPREG(MXR_INT_STATUS); DUMPREG(MXR_LAYER_CFG); DUMPREG(MXR_VIDEO_CFG); DUMPREG(MXR_GRAPHIC0_CFG); DUMPREG(MXR_GRAPHIC0_BASE); 
DUMPREG(MXR_GRAPHIC0_SPAN); DUMPREG(MXR_GRAPHIC0_WH); DUMPREG(MXR_GRAPHIC0_SXY); DUMPREG(MXR_GRAPHIC0_DXY); DUMPREG(MXR_GRAPHIC1_CFG); DUMPREG(MXR_GRAPHIC1_BASE); DUMPREG(MXR_GRAPHIC1_SPAN); DUMPREG(MXR_GRAPHIC1_WH); DUMPREG(MXR_GRAPHIC1_SXY); DUMPREG(MXR_GRAPHIC1_DXY); #undef DUMPREG } static void vp_regs_dump(struct mixer_context *ctx) { #define DUMPREG(reg_id) \ do { \ DRM_DEBUG_KMS(#reg_id " = %08x\n", \ (u32) readl(ctx->mixer_res.vp_regs + reg_id)); \ } while (0) DUMPREG(VP_ENABLE); DUMPREG(VP_SRESET); DUMPREG(VP_SHADOW_UPDATE); DUMPREG(VP_FIELD_ID); DUMPREG(VP_MODE); DUMPREG(VP_IMG_SIZE_Y); DUMPREG(VP_IMG_SIZE_C); DUMPREG(VP_PER_RATE_CTRL); DUMPREG(VP_TOP_Y_PTR); DUMPREG(VP_BOT_Y_PTR); DUMPREG(VP_TOP_C_PTR); DUMPREG(VP_BOT_C_PTR); DUMPREG(VP_ENDIAN_MODE); DUMPREG(VP_SRC_H_POSITION); DUMPREG(VP_SRC_V_POSITION); DUMPREG(VP_SRC_WIDTH); DUMPREG(VP_SRC_HEIGHT); DUMPREG(VP_DST_H_POSITION); DUMPREG(VP_DST_V_POSITION); DUMPREG(VP_DST_WIDTH); DUMPREG(VP_DST_HEIGHT); DUMPREG(VP_H_RATIO); DUMPREG(VP_V_RATIO); #undef DUMPREG } static inline void vp_filter_set(struct mixer_resources *res, int reg_id, const u8 *data, unsigned int size) { /* assure 4-byte align */ BUG_ON(size & 3); for (; size; size -= 4, reg_id += 4, data += 4) { u32 val = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3]; vp_reg_write(res, reg_id, val); } } static void vp_default_filter(struct mixer_resources *res) { vp_filter_set(res, VP_POLY8_Y0_LL, filter_y_horiz_tap8, sizeof filter_y_horiz_tap8); vp_filter_set(res, VP_POLY4_Y0_LL, filter_y_vert_tap4, sizeof filter_y_vert_tap4); vp_filter_set(res, VP_POLY4_C0_LL, filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4); } static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) { struct mixer_resources *res = &ctx->mixer_res; /* block update on vsync */ mixer_reg_writemask(res, MXR_STATUS, enable ? MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); vp_reg_write(res, VP_SHADOW_UPDATE, enable ? 
VP_SHADOW_UPDATE_ENABLE : 0);
}

/* program interlace/progressive scan and the SD/HD standard from @height */
static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
{
	struct mixer_resources *res = &ctx->mixer_res;
	u32 val;

	/* choosing between interlace and progressive mode */
	val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
				MXR_CFG_SCAN_PROGRASSIVE);

	/* choosing between proper HD and SD mode */
	if (height == 480)
		val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
	else if (height == 576)
		val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
	else if (height == 720)
		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
	else if (height == 1080)
		val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
	else
		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD; /* fallback */

	mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
}

/*
 * Select the RGB range/format for the given mode height: SD modes
 * (480/576) use RGB601_0_255; every other height uses RGB709_16_235
 * and also loads the MXR_CM_COEFF_* color-matrix registers.
 * NOTE(review): the 720, 1080 and fallback branches are identical and
 * could be collapsed into one — confirm no height-specific coefficients
 * were intended.
 */
static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
{
	struct mixer_resources *res = &ctx->mixer_res;
	u32 val;

	if (height == 480) {
		val = MXR_CFG_RGB601_0_255;
	} else if (height == 576) {
		val = MXR_CFG_RGB601_0_255;
	} else if (height == 720) {
		val = MXR_CFG_RGB709_16_235;
		mixer_reg_write(res, MXR_CM_COEFF_Y,
				(1 << 30) | (94 << 20) | (314 << 10) |
				(32 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CB,
				(972 << 20) | (851 << 10) | (225 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CR,
				(225 << 20) | (820 << 10) | (1004 << 0));
	} else if (height == 1080) {
		val = MXR_CFG_RGB709_16_235;
		mixer_reg_write(res, MXR_CM_COEFF_Y,
				(1 << 30) | (94 << 20) | (314 << 10) |
				(32 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CB,
				(972 << 20) | (851 << 10) | (225 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CR,
				(225 << 20) | (820 << 10) | (1004 << 0));
	} else {
		val = MXR_CFG_RGB709_16_235;
		mixer_reg_write(res, MXR_CM_COEFF_Y,
				(1 << 30) | (94 << 20) | (314 << 10) |
				(32 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CB,
				(972 << 20) | (851 << 10) | (225 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CR,
				(225 << 20) | (820 << 10) | (1004 << 0));
	}

	mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
}

/* enable/disable one mixer layer: wins 0/1 = graphics, win 2 = video processor */
static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool
enable) { struct mixer_resources *res = &ctx->mixer_res; u32 val = enable ? ~0 : 0; switch (win) { case 0: mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE); break; case 1: mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE); break; case 2: vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_VP_ENABLE); break; } } static void mixer_run(struct mixer_context *ctx) { struct mixer_resources *res = &ctx->mixer_res; mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN); mixer_regs_dump(ctx); } static void vp_video_buffer(struct mixer_context *ctx, int win) { struct mixer_resources *res = &ctx->mixer_res; unsigned long flags; struct hdmi_win_data *win_data; unsigned int x_ratio, y_ratio; unsigned int buf_num; dma_addr_t luma_addr[2], chroma_addr[2]; bool tiled_mode = false; bool crcb_mode = false; u32 val; win_data = &ctx->win_data[win]; switch (win_data->pixel_format) { case DRM_FORMAT_NV12MT: tiled_mode = true; case DRM_FORMAT_NV12M: crcb_mode = false; buf_num = 2; break; /* TODO: single buffer format NV12, NV21 */ default: /* ignore pixel format at disable time */ if (!win_data->dma_addr) break; DRM_ERROR("pixel format for vp is wrong [%d].\n", win_data->pixel_format); return; } /* scaling feature: (src << 16) / dst */ x_ratio = (win_data->src_width << 16) / win_data->crtc_width; y_ratio = (win_data->src_height << 16) / win_data->crtc_height; if (buf_num == 2) { luma_addr[0] = win_data->dma_addr; chroma_addr[0] = win_data->chroma_dma_addr; } else { luma_addr[0] = win_data->dma_addr; chroma_addr[0] = win_data->dma_addr + (win_data->fb_width * win_data->fb_height); } if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) { ctx->interlace = true; if (tiled_mode) { luma_addr[1] = luma_addr[0] + 0x40; chroma_addr[1] = chroma_addr[0] + 0x40; } else { luma_addr[1] = luma_addr[0] + win_data->fb_width; chroma_addr[1] = chroma_addr[0] + win_data->fb_width; } } else { ctx->interlace = false; luma_addr[1] = 0; 
chroma_addr[1] = 0; } spin_lock_irqsave(&res->reg_slock, flags); mixer_vsync_set_update(ctx, false); /* interlace or progressive scan mode */ val = (ctx->interlace ? ~0 : 0); vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP); /* setup format */ val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12); val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR); vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK); /* setting size of input image */ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) | VP_IMG_VSIZE(win_data->fb_height)); /* chroma height has to reduced by 2 to avoid chroma distorions */ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) | VP_IMG_VSIZE(win_data->fb_height / 2)); vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width); vp_reg_write(res, VP_SRC_HEIGHT, win_data->src_height); vp_reg_write(res, VP_SRC_H_POSITION, VP_SRC_H_POSITION_VAL(win_data->fb_x)); vp_reg_write(res, VP_SRC_V_POSITION, win_data->fb_y); vp_reg_write(res, VP_DST_WIDTH, win_data->crtc_width); vp_reg_write(res, VP_DST_H_POSITION, win_data->crtc_x); if (ctx->interlace) { vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height / 2); vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y / 2); } else { vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height); vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y); } vp_reg_write(res, VP_H_RATIO, x_ratio); vp_reg_write(res, VP_V_RATIO, y_ratio); vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE); /* set buffer address to vp */ vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]); vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]); vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]); vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]); mixer_cfg_scan(ctx, win_data->mode_height); mixer_cfg_rgb_fmt(ctx, win_data->mode_height); mixer_cfg_layer(ctx, win, true); mixer_run(ctx); mixer_vsync_set_update(ctx, true); spin_unlock_irqrestore(&res->reg_slock, flags); vp_regs_dump(ctx); } static void mixer_graph_buffer(struct 
mixer_context *ctx, int win) { struct mixer_resources *res = &ctx->mixer_res; unsigned long flags; struct hdmi_win_data *win_data; unsigned int x_ratio, y_ratio; unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset; dma_addr_t dma_addr; unsigned int fmt; u32 val; win_data = &ctx->win_data[win]; #define RGB565 4 #define ARGB1555 5 #define ARGB4444 6 #define ARGB8888 7 switch (win_data->bpp) { case 16: fmt = ARGB4444; break; case 32: fmt = ARGB8888; break; default: fmt = ARGB8888; } /* 2x scaling feature */ x_ratio = 0; y_ratio = 0; dst_x_offset = win_data->crtc_x; dst_y_offset = win_data->crtc_y; /* converting dma address base and source offset */ dma_addr = win_data->dma_addr + (win_data->fb_x * win_data->bpp >> 3) + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3); src_x_offset = 0; src_y_offset = 0; if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) ctx->interlace = true; else ctx->interlace = false; spin_lock_irqsave(&res->reg_slock, flags); mixer_vsync_set_update(ctx, false); /* setup format */ mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win), MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK); /* setup geometry */ mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width); val = MXR_GRP_WH_WIDTH(win_data->crtc_width); val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height); val |= MXR_GRP_WH_H_SCALE(x_ratio); val |= MXR_GRP_WH_V_SCALE(y_ratio); mixer_reg_write(res, MXR_GRAPHIC_WH(win), val); /* setup offsets in source image */ val = MXR_GRP_SXY_SX(src_x_offset); val |= MXR_GRP_SXY_SY(src_y_offset); mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val); /* setup offsets in display image */ val = MXR_GRP_DXY_DX(dst_x_offset); val |= MXR_GRP_DXY_DY(dst_y_offset); mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val); /* set buffer address to mixer */ mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr); mixer_cfg_scan(ctx, win_data->mode_height); mixer_cfg_rgb_fmt(ctx, win_data->mode_height); mixer_cfg_layer(ctx, win, true); mixer_run(ctx); 
mixer_vsync_set_update(ctx, true); spin_unlock_irqrestore(&res->reg_slock, flags); } static void vp_win_reset(struct mixer_context *ctx) { struct mixer_resources *res = &ctx->mixer_res; int tries = 100; vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING); for (tries = 100; tries; --tries) { /* waiting until VP_SRESET_PROCESSING is 0 */ if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING) break; mdelay(10); } WARN(tries == 0, "failed to reset Video Processor\n"); } static void mixer_win_reset(struct mixer_context *ctx) { struct mixer_resources *res = &ctx->mixer_res; unsigned long flags; u32 val; /* value stored to register */ spin_lock_irqsave(&res->reg_slock, flags); mixer_vsync_set_update(ctx, false); mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK); /* set output in RGB888 mode */ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK); /* 16 beat burst in DMA */ mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST, MXR_STATUS_BURST_MASK); /* setting default layer priority: layer1 > layer0 > video * because typical usage scenario would be * layer1 - OSD * layer0 - framebuffer * video - video overlay */ val = MXR_LAYER_CFG_GRP1_VAL(3); val |= MXR_LAYER_CFG_GRP0_VAL(2); val |= MXR_LAYER_CFG_VP_VAL(1); mixer_reg_write(res, MXR_LAYER_CFG, val); /* setting background color */ mixer_reg_write(res, MXR_BG_COLOR0, 0x008080); mixer_reg_write(res, MXR_BG_COLOR1, 0x008080); mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); /* setting graphical layers */ val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ val |= MXR_GRP_CFG_WIN_BLEND_EN; val |= MXR_GRP_CFG_BLEND_PRE_MUL; val |= MXR_GRP_CFG_PIXEL_BLEND_EN; val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */ /* the same configuration for both layers */ mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val); mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); /* setting video layers */ val = MXR_GRP_CFG_ALPHA_VAL(0); mixer_reg_write(res, MXR_VIDEO_CFG, val); /* configuration of Video 
Processor Registers */ vp_win_reset(ctx); vp_default_filter(res); /* disable all layers */ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE); mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE); mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE); mixer_vsync_set_update(ctx, true); spin_unlock_irqrestore(&res->reg_slock, flags); } static void mixer_poweron(struct mixer_context *ctx) { struct mixer_resources *res = &ctx->mixer_res; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); mutex_lock(&ctx->mixer_mutex); if (ctx->powered) { mutex_unlock(&ctx->mixer_mutex); return; } ctx->powered = true; mutex_unlock(&ctx->mixer_mutex); pm_runtime_get_sync(ctx->dev); clk_enable(res->mixer); clk_enable(res->vp); clk_enable(res->sclk_mixer); mixer_reg_write(res, MXR_INT_EN, ctx->int_en); mixer_win_reset(ctx); } static void mixer_poweroff(struct mixer_context *ctx) { struct mixer_resources *res = &ctx->mixer_res; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); mutex_lock(&ctx->mixer_mutex); if (!ctx->powered) goto out; mutex_unlock(&ctx->mixer_mutex); ctx->int_en = mixer_reg_read(res, MXR_INT_EN); clk_disable(res->mixer); clk_disable(res->vp); clk_disable(res->sclk_mixer); pm_runtime_put_sync(ctx->dev); mutex_lock(&ctx->mixer_mutex); ctx->powered = false; out: mutex_unlock(&ctx->mixer_mutex); } static int mixer_enable_vblank(void *ctx, int pipe) { struct mixer_context *mixer_ctx = ctx; struct mixer_resources *res = &mixer_ctx->mixer_res; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); mixer_ctx->pipe = pipe; /* enable vsync interrupt */ mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC, MXR_INT_EN_VSYNC); return 0; } static void mixer_disable_vblank(void *ctx) { struct mixer_context *mixer_ctx = ctx; struct mixer_resources *res = &mixer_ctx->mixer_res; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); /* disable vsync interrupt */ mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); } static void mixer_dpms(void *ctx, int mode) { struct mixer_context 
*mixer_ctx = ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); switch (mode) { case DRM_MODE_DPMS_ON: mixer_poweron(mixer_ctx); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: mixer_poweroff(mixer_ctx); break; default: DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode); break; } } static void mixer_win_mode_set(void *ctx, struct exynos_drm_overlay *overlay) { struct mixer_context *mixer_ctx = ctx; struct hdmi_win_data *win_data; int win; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); if (!overlay) { DRM_ERROR("overlay is NULL\n"); return; } DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n", overlay->fb_width, overlay->fb_height, overlay->fb_x, overlay->fb_y, overlay->crtc_width, overlay->crtc_height, overlay->crtc_x, overlay->crtc_y); win = overlay->zpos; if (win == DEFAULT_ZPOS) win = MIXER_DEFAULT_WIN; if (win < 0 || win > MIXER_WIN_NR) { DRM_ERROR("mixer window[%d] is wrong\n", win); return; } win_data = &mixer_ctx->win_data[win]; win_data->dma_addr = overlay->dma_addr[0]; win_data->vaddr = overlay->vaddr[0]; win_data->chroma_dma_addr = overlay->dma_addr[1]; win_data->chroma_vaddr = overlay->vaddr[1]; win_data->pixel_format = overlay->pixel_format; win_data->bpp = overlay->bpp; win_data->crtc_x = overlay->crtc_x; win_data->crtc_y = overlay->crtc_y; win_data->crtc_width = overlay->crtc_width; win_data->crtc_height = overlay->crtc_height; win_data->fb_x = overlay->fb_x; win_data->fb_y = overlay->fb_y; win_data->fb_width = overlay->fb_width; win_data->fb_height = overlay->fb_height; win_data->src_width = overlay->src_width; win_data->src_height = overlay->src_height; win_data->mode_width = overlay->mode_width; win_data->mode_height = overlay->mode_height; win_data->scan_flags = overlay->scan_flag; } static void mixer_win_commit(void *ctx, int win) { struct mixer_context *mixer_ctx = ctx; DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); if (win > 1) vp_video_buffer(mixer_ctx, win); else 
mixer_graph_buffer(mixer_ctx, win);
}

/*
 * exynos_mixer_ops.win_disable callback: clear the window's enable bit in
 * MXR_CFG under the register spinlock, bracketed by vsync-update
 * disable/enable so the change latches atomically.
 */
static void mixer_win_disable(void *ctx, int win)
{
	struct mixer_context *mixer_ctx = ctx;
	struct mixer_resources *res = &mixer_ctx->mixer_res;
	unsigned long flags;

	DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);

	spin_lock_irqsave(&res->reg_slock, flags);
	mixer_vsync_set_update(mixer_ctx, false);

	mixer_cfg_layer(mixer_ctx, win, false);

	mixer_vsync_set_update(mixer_ctx, true);
	spin_unlock_irqrestore(&res->reg_slock, flags);
}

/* Callback table registered with the common exynos HDMI layer. */
static struct exynos_mixer_ops mixer_ops = {
	/* manager */
	.enable_vblank		= mixer_enable_vblank,
	.disable_vblank		= mixer_disable_vblank,
	.dpms			= mixer_dpms,
	/* overlay */
	.win_mode_set		= mixer_win_mode_set,
	.win_commit		= mixer_win_commit,
	.win_disable		= mixer_win_disable,
};

/* for pageflip event */
/*
 * Complete all pending pageflip events for the given CRTC: timestamp each
 * matching event, hand it to its file's event queue, wake the waiter, and
 * drop the vblank reference if one is held.  Runs under event_lock; called
 * from the vsync interrupt path.
 */
static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
{
	struct exynos_drm_private *dev_priv = drm_dev->dev_private;
	struct drm_pending_vblank_event *e, *t;
	struct timeval now;
	unsigned long flags;
	bool is_checked = false;

	spin_lock_irqsave(&drm_dev->event_lock, flags);

	list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
			base.link) {
		/* if event's pipe isn't same as crtc then ignore it. */
		if (crtc != e->pipe)
			continue;

		is_checked = true;
		do_gettimeofday(&now);
		e->event.sequence = 0;
		e->event.tv_sec = now.tv_sec;
		e->event.tv_usec = now.tv_usec;

		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
	}

	if (is_checked)
		/*
		 * call drm_vblank_put only in case that drm_vblank_get was
		 * called.
		 */
		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
			drm_vblank_put(drm_dev, crtc);

	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}

/*
 * Mixer interrupt handler.  On VSYNC: for interlaced scanout, bail out
 * unless the shadow base-address registers have caught up with the
 * programmed ones (i.e. the flip has actually latched); then report the
 * vblank and finish pending pageflips.  Finally acknowledge the interrupt
 * (VSYNC uses a distinct clear bit).
 */
static irqreturn_t mixer_irq_handler(int irq, void *arg)
{
	struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
	struct mixer_resources *res = &ctx->mixer_res;
	u32 val, base, shadow;

	spin_lock(&res->reg_slock);

	/* read interrupt status for handling and clearing flags for VSYNC */
	val = mixer_reg_read(res, MXR_INT_STATUS);

	/* handling VSYNC */
	if (val & MXR_INT_STATUS_VSYNC) {
		/* interlace scan need to check shadow register */
		if (ctx->interlace) {
			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
			shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
			if (base != shadow)
				goto out;

			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
			shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
			if (base != shadow)
				goto out;
		}

		drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
		mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
	}

out:
	/* clear interrupts */
	/* NOTE(review): '~val & MXR_INT_EN_VSYNC' gates the VSYNC-clear
	 * rewrite on the VSYNC bit being *unset* in the status we read -
	 * confirm against the MXR_INT_STATUS bit layout that this is the
	 * intended condition. */
	if (~val & MXR_INT_EN_VSYNC) {
		/* vsync interrupt use different bit for read and clear */
		val &= ~MXR_INT_EN_VSYNC;
		val |= MXR_INT_CLEAR_VSYNC;
	}
	mixer_reg_write(res, MXR_INT_STATUS, val);

	spin_unlock(&res->reg_slock);

	return IRQ_HANDLED;
}

/*
 * Acquire every resource the mixer needs: five clocks, the "mxr" and "vp"
 * register windows, and the mixer IRQ.  On failure, unwinds in reverse
 * order via the fail_* labels (clocks are released unconditionally at
 * 'fail' with IS_ERR_OR_NULL guards since clk_get may not have run yet).
 * Returns 0 on success, negative errno on failure.
 */
static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
				 struct platform_device *pdev)
{
	struct mixer_context *mixer_ctx = ctx->ctx;
	struct device *dev = &pdev->dev;
	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
	struct resource *res;
	int ret;

	spin_lock_init(&mixer_res->reg_slock);

	mixer_res->mixer = clk_get(dev, "mixer");
	if (IS_ERR_OR_NULL(mixer_res->mixer)) {
		dev_err(dev, "failed to get clock 'mixer'\n");
		ret = -ENODEV;
		goto fail;
	}
	mixer_res->vp = clk_get(dev, "vp");
	if (IS_ERR_OR_NULL(mixer_res->vp)) {
		dev_err(dev, "failed to get clock 'vp'\n");
		ret = -ENODEV;
		goto fail;
	}
	mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
	if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
		dev_err(dev, "failed to get clock 'sclk_mixer'\n");
		ret = -ENODEV;
		goto fail;
	}
	mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
	if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
		dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
		ret = -ENODEV;
		goto fail;
	}
	mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
	if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
		dev_err(dev, "failed to get clock 'sclk_dac'\n");
		ret = -ENODEV;
		goto fail;
	}
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
	if (res == NULL) {
		dev_err(dev, "get memory resource failed.\n");
		ret = -ENXIO;
		goto fail;
	}

	/* feed the mixer's special clock from the HDMI clock */
	clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);

	mixer_res->mixer_regs = ioremap(res->start, resource_size(res));
	if (mixer_res->mixer_regs == NULL) {
		dev_err(dev, "register mapping failed.\n");
		ret = -ENXIO;
		goto fail;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
	if (res == NULL) {
		dev_err(dev, "get memory resource failed.\n");
		ret = -ENXIO;
		goto fail_mixer_regs;
	}

	mixer_res->vp_regs = ioremap(res->start, resource_size(res));
	if (mixer_res->vp_regs == NULL) {
		dev_err(dev, "register mapping failed.\n");
		ret = -ENXIO;
		goto fail_mixer_regs;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
	if (res == NULL) {
		dev_err(dev, "get interrupt resource failed.\n");
		ret = -ENXIO;
		goto fail_vp_regs;
	}

	ret = request_irq(res->start, mixer_irq_handler, 0, "drm_mixer", ctx);
	if (ret) {
		dev_err(dev, "request interrupt failed.\n");
		goto fail_vp_regs;
	}
	mixer_res->irq = res->start;

	return 0;

fail_vp_regs:
	iounmap(mixer_res->vp_regs);

fail_mixer_regs:
	iounmap(mixer_res->mixer_regs);

fail:
	if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
		clk_put(mixer_res->sclk_dac);
	if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
		clk_put(mixer_res->sclk_hdmi);
	if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
		clk_put(mixer_res->sclk_mixer);
	if (!IS_ERR_OR_NULL(mixer_res->vp))
		clk_put(mixer_res->vp);
	if (!IS_ERR_OR_NULL(mixer_res->mixer))
clk_put(mixer_res->mixer); return ret; } static void mixer_resources_cleanup(struct mixer_context *ctx) { struct mixer_resources *res = &ctx->mixer_res; free_irq(res->irq, ctx); iounmap(res->vp_regs); iounmap(res->mixer_regs); } static int __devinit mixer_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct exynos_drm_hdmi_context *drm_hdmi_ctx; struct mixer_context *ctx; int ret; dev_info(dev, "probe start\n"); drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL); if (!drm_hdmi_ctx) { DRM_ERROR("failed to allocate common hdmi context.\n"); return -ENOMEM; } ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { DRM_ERROR("failed to alloc mixer context.\n"); kfree(drm_hdmi_ctx); return -ENOMEM; } mutex_init(&ctx->mixer_mutex); ctx->dev = &pdev->dev; drm_hdmi_ctx->ctx = (void *)ctx; platform_set_drvdata(pdev, drm_hdmi_ctx); /* acquire resources: regs, irqs, clocks */ ret = mixer_resources_init(drm_hdmi_ctx, pdev); if (ret) goto fail; /* register specific callback point to common hdmi. */ exynos_mixer_ops_register(&mixer_ops); pm_runtime_enable(dev); return 0; fail: dev_info(dev, "probe failed\n"); return ret; } static int mixer_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct exynos_drm_hdmi_context *drm_hdmi_ctx = platform_get_drvdata(pdev); struct mixer_context *ctx = drm_hdmi_ctx->ctx; dev_info(dev, "remove successful\n"); pm_runtime_disable(&pdev->dev); mixer_resources_cleanup(ctx); return 0; } #ifdef CONFIG_PM_SLEEP static int mixer_suspend(struct device *dev) { struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev); struct mixer_context *ctx = drm_hdmi_ctx->ctx; mixer_poweroff(ctx); return 0; } #endif static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL); struct platform_driver mixer_driver = { .driver = { .name = "s5p-mixer", .owner = THIS_MODULE, .pm = &mixer_pm_ops, }, .probe = mixer_probe, .remove = __devexit_p(mixer_remove), };
gpl-2.0
bgill55/kernel_htc-mecha
arch/arm/kernel/kprobes-decode.c
675
48455
/* * arch/arm/kernel/kprobes-decode.c * * Copyright (C) 2006, 2007 Motorola Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * We do not have hardware single-stepping on ARM, This * effort is further complicated by the ARM not having a * "next PC" register. Instructions that change the PC * can't be safely single-stepped in a MP environment, so * we have a lot of work to do: * * In the prepare phase: * *) If it is an instruction that does anything * with the CPU mode, we reject it for a kprobe. * (This is out of laziness rather than need. The * instructions could be simulated.) * * *) Otherwise, decode the instruction rewriting its * registers to take fixed, ordered registers and * setting a handler for it to run the instruction. * * In the execution phase by an instruction's handler: * * *) If the PC is written to by the instruction, the * instruction must be fully simulated in software. * If it is a conditional instruction, the handler * will use insn[0] to copy its condition code to * set r0 to 1 and insn[1] to "mov pc, lr" to return. * * *) Otherwise, a modified form of the instruction is * directly executed. Its handler calls the * instruction in insn[0]. In insn[1] is a * "mov pc, lr" to return. * * Before calling, load up the reordered registers * from the original instruction's registers. If one * of the original input registers is the PC, compute * and adjust the appropriate input register. * * After call completes, copy the output registers to * the original instruction's original registers. 
* * We don't use a real breakpoint instruction since that * would have us in the kernel go from SVC mode to SVC * mode losing the link register. Instead we use an * undefined instruction. To simplify processing, the * undefined instruction used for kprobes must be reserved * exclusively for kprobes use. * * TODO: ifdef out some instruction decoding based on architecture. */ #include <linux/kernel.h> #include <linux/kprobes.h> #define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit))))) #define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25) #define PSR_fs (PSR_f|PSR_s) #define KPROBE_RETURN_INSTRUCTION 0xe1a0f00e /* mov pc, lr */ #define SET_R0_TRUE_INSTRUCTION 0xe3a00001 /* mov r0, #1 */ #define truecc_insn(insn) (((insn) & 0xf0000000) | \ (SET_R0_TRUE_INSTRUCTION & 0x0fffffff)) typedef long (insn_0arg_fn_t)(void); typedef long (insn_1arg_fn_t)(long); typedef long (insn_2arg_fn_t)(long, long); typedef long (insn_3arg_fn_t)(long, long, long); typedef long (insn_4arg_fn_t)(long, long, long, long); typedef long long (insn_llret_0arg_fn_t)(void); typedef long long (insn_llret_3arg_fn_t)(long, long, long); typedef long long (insn_llret_4arg_fn_t)(long, long, long, long); union reg_pair { long long dr; #ifdef __LITTLE_ENDIAN struct { long r0, r1; }; #else struct { long r1, r0; }; #endif }; /* * For STR and STM instructions, an ARM core may choose to use either * a +8 or a +12 displacement from the current instruction's address. * Whichever value is chosen for a given core, it must be the same for * both instructions and may not change. This function measures it. 
*/ static int str_pc_offset; static void __init find_str_pc_offset(void) { int addr, scratch, ret; __asm__ ( "sub %[ret], pc, #4 \n\t" "str pc, %[addr] \n\t" "ldr %[scr], %[addr] \n\t" "sub %[ret], %[scr], %[ret] \n\t" : [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr)); str_pc_offset = ret; } /* * The insnslot_?arg_r[w]flags() functions below are to keep the * msr -> *fn -> mrs instruction sequences indivisible so that * the state of the CPSR flags aren't inadvertently modified * just before or just after the call. */ static inline long __kprobes insnslot_0arg_rflags(long cpsr, insn_0arg_fn_t *fn) { register long ret asm("r0"); __asm__ __volatile__ ( "msr cpsr_fs, %[cpsr] \n\t" "mov lr, pc \n\t" "mov pc, %[fn] \n\t" : "=r" (ret) : [cpsr] "r" (cpsr), [fn] "r" (fn) : "lr", "cc" ); return ret; } static inline long long __kprobes insnslot_llret_0arg_rflags(long cpsr, insn_llret_0arg_fn_t *fn) { register long ret0 asm("r0"); register long ret1 asm("r1"); union reg_pair fnr; __asm__ __volatile__ ( "msr cpsr_fs, %[cpsr] \n\t" "mov lr, pc \n\t" "mov pc, %[fn] \n\t" : "=r" (ret0), "=r" (ret1) : [cpsr] "r" (cpsr), [fn] "r" (fn) : "lr", "cc" ); fnr.r0 = ret0; fnr.r1 = ret1; return fnr.dr; } static inline long __kprobes insnslot_1arg_rflags(long r0, long cpsr, insn_1arg_fn_t *fn) { register long rr0 asm("r0") = r0; register long ret asm("r0"); __asm__ __volatile__ ( "msr cpsr_fs, %[cpsr] \n\t" "mov lr, pc \n\t" "mov pc, %[fn] \n\t" : "=r" (ret) : "0" (rr0), [cpsr] "r" (cpsr), [fn] "r" (fn) : "lr", "cc" ); return ret; } static inline long __kprobes insnslot_2arg_rflags(long r0, long r1, long cpsr, insn_2arg_fn_t *fn) { register long rr0 asm("r0") = r0; register long rr1 asm("r1") = r1; register long ret asm("r0"); __asm__ __volatile__ ( "msr cpsr_fs, %[cpsr] \n\t" "mov lr, pc \n\t" "mov pc, %[fn] \n\t" : "=r" (ret) : "0" (rr0), "r" (rr1), [cpsr] "r" (cpsr), [fn] "r" (fn) : "lr", "cc" ); return ret; } static inline long __kprobes insnslot_3arg_rflags(long r0, long r1, 
long r2, long cpsr, insn_3arg_fn_t *fn) { register long rr0 asm("r0") = r0; register long rr1 asm("r1") = r1; register long rr2 asm("r2") = r2; register long ret asm("r0"); __asm__ __volatile__ ( "msr cpsr_fs, %[cpsr] \n\t" "mov lr, pc \n\t" "mov pc, %[fn] \n\t" : "=r" (ret) : "0" (rr0), "r" (rr1), "r" (rr2), [cpsr] "r" (cpsr), [fn] "r" (fn) : "lr", "cc" ); return ret; } static inline long long __kprobes insnslot_llret_3arg_rflags(long r0, long r1, long r2, long cpsr, insn_llret_3arg_fn_t *fn) { register long rr0 asm("r0") = r0; register long rr1 asm("r1") = r1; register long rr2 asm("r2") = r2; register long ret0 asm("r0"); register long ret1 asm("r1"); union reg_pair fnr; __asm__ __volatile__ ( "msr cpsr_fs, %[cpsr] \n\t" "mov lr, pc \n\t" "mov pc, %[fn] \n\t" : "=r" (ret0), "=r" (ret1) : "0" (rr0), "r" (rr1), "r" (rr2), [cpsr] "r" (cpsr), [fn] "r" (fn) : "lr", "cc" ); fnr.r0 = ret0; fnr.r1 = ret1; return fnr.dr; } static inline long __kprobes insnslot_4arg_rflags(long r0, long r1, long r2, long r3, long cpsr, insn_4arg_fn_t *fn) { register long rr0 asm("r0") = r0; register long rr1 asm("r1") = r1; register long rr2 asm("r2") = r2; register long rr3 asm("r3") = r3; register long ret asm("r0"); __asm__ __volatile__ ( "msr cpsr_fs, %[cpsr] \n\t" "mov lr, pc \n\t" "mov pc, %[fn] \n\t" : "=r" (ret) : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3), [cpsr] "r" (cpsr), [fn] "r" (fn) : "lr", "cc" ); return ret; } static inline long __kprobes insnslot_1arg_rwflags(long r0, long *cpsr, insn_1arg_fn_t *fn) { register long rr0 asm("r0") = r0; register long ret asm("r0"); long oldcpsr = *cpsr; long newcpsr; __asm__ __volatile__ ( "msr cpsr_fs, %[oldcpsr] \n\t" "mov lr, pc \n\t" "mov pc, %[fn] \n\t" "mrs %[newcpsr], cpsr \n\t" : "=r" (ret), [newcpsr] "=r" (newcpsr) : "0" (rr0), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) : "lr", "cc" ); *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); return ret; } static inline long __kprobes insnslot_2arg_rwflags(long r0, long r1, long *cpsr, 
insn_2arg_fn_t *fn) { register long rr0 asm("r0") = r0; register long rr1 asm("r1") = r1; register long ret asm("r0"); long oldcpsr = *cpsr; long newcpsr; __asm__ __volatile__ ( "msr cpsr_fs, %[oldcpsr] \n\t" "mov lr, pc \n\t" "mov pc, %[fn] \n\t" "mrs %[newcpsr], cpsr \n\t" : "=r" (ret), [newcpsr] "=r" (newcpsr) : "0" (rr0), "r" (rr1), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) : "lr", "cc" ); *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); return ret; } static inline long __kprobes insnslot_3arg_rwflags(long r0, long r1, long r2, long *cpsr, insn_3arg_fn_t *fn) { register long rr0 asm("r0") = r0; register long rr1 asm("r1") = r1; register long rr2 asm("r2") = r2; register long ret asm("r0"); long oldcpsr = *cpsr; long newcpsr; __asm__ __volatile__ ( "msr cpsr_fs, %[oldcpsr] \n\t" "mov lr, pc \n\t" "mov pc, %[fn] \n\t" "mrs %[newcpsr], cpsr \n\t" : "=r" (ret), [newcpsr] "=r" (newcpsr) : "0" (rr0), "r" (rr1), "r" (rr2), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) : "lr", "cc" ); *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); return ret; } static inline long __kprobes insnslot_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr, insn_4arg_fn_t *fn) { register long rr0 asm("r0") = r0; register long rr1 asm("r1") = r1; register long rr2 asm("r2") = r2; register long rr3 asm("r3") = r3; register long ret asm("r0"); long oldcpsr = *cpsr; long newcpsr; __asm__ __volatile__ ( "msr cpsr_fs, %[oldcpsr] \n\t" "mov lr, pc \n\t" "mov pc, %[fn] \n\t" "mrs %[newcpsr], cpsr \n\t" : "=r" (ret), [newcpsr] "=r" (newcpsr) : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) : "lr", "cc" ); *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); return ret; } static inline long long __kprobes insnslot_llret_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr, insn_llret_4arg_fn_t *fn) { register long rr0 asm("r0") = r0; register long rr1 asm("r1") = r1; register long rr2 asm("r2") = r2; register long rr3 asm("r3") = r3; register long ret0 
asm("r0"); register long ret1 asm("r1"); long oldcpsr = *cpsr; long newcpsr; union reg_pair fnr; __asm__ __volatile__ ( "msr cpsr_fs, %[oldcpsr] \n\t" "mov lr, pc \n\t" "mov pc, %[fn] \n\t" "mrs %[newcpsr], cpsr \n\t" : "=r" (ret0), "=r" (ret1), [newcpsr] "=r" (newcpsr) : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) : "lr", "cc" ); *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); fnr.r0 = ret0; fnr.r1 = ret1; return fnr.dr; } /* * To avoid the complications of mimicing single-stepping on a * processor without a Next-PC or a single-step mode, and to * avoid having to deal with the side-effects of boosting, we * simulate or emulate (almost) all ARM instructions. * * "Simulation" is where the instruction's behavior is duplicated in * C code. "Emulation" is where the original instruction is rewritten * and executed, often by altering its registers. * * By having all behavior of the kprobe'd instruction completed before * returning from the kprobe_handler(), all locks (scheduler and * interrupt) can safely be released. There is no need for secondary * breakpoints, no race with MP or preemptable kernels, nor having to * clean up resources counts at a later time impacting overall system * performance. By rewriting the instruction, only the minimum registers * need to be loaded and saved back optimizing performance. * * Calling the insnslot_*_rwflags version of a function doesn't hurt * anything even when the CPSR flags aren't updated by the * instruction. It's just a little slower in return for saving * a little space by not having a duplicate function that doesn't * update the flags. (The same optimization can be said for * instructions that do or don't perform register writeback) * Also, instructions can either read the flags, only write the * flags, or read and write the flags. To save combinations * rather than for sheer performance, flag functions just assume * read and write of flags. 
*/ static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; long iaddr = (long)p->addr; int disp = branch_displacement(insn); if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) return; if (insn & (1 << 24)) regs->ARM_lr = iaddr + 4; regs->ARM_pc = iaddr + 8 + disp; } static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs) { kprobe_opcode_t insn = p->opcode; long iaddr = (long)p->addr; int disp = branch_displacement(insn); regs->ARM_lr = iaddr + 4; regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2); regs->ARM_cpsr |= PSR_T_BIT; } static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rm = insn & 0xf; long rmv = regs->uregs[rm]; if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) return; if (insn & (1 << 5)) regs->ARM_lr = (long)p->addr + 4; regs->ARM_pc = rmv & ~0x1; regs->ARM_cpsr &= ~PSR_T_BIT; if (rmv & 0x1) regs->ARM_cpsr |= PSR_T_BIT; } static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rn = (insn >> 16) & 0xf; int lbit = insn & (1 << 20); int wbit = insn & (1 << 21); int ubit = insn & (1 << 23); int pbit = insn & (1 << 24); long *addr = (long *)regs->uregs[rn]; int reg_bit_vector; int reg_count; if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) return; reg_count = 0; reg_bit_vector = insn & 0xffff; while (reg_bit_vector) { reg_bit_vector &= (reg_bit_vector - 1); ++reg_count; } if (!ubit) addr -= reg_count; addr += (!pbit == !ubit); reg_bit_vector = insn & 0xffff; while (reg_bit_vector) { int reg = __ffs(reg_bit_vector); reg_bit_vector &= (reg_bit_vector - 1); if (lbit) regs->uregs[reg] = *addr++; else *addr++ = regs->uregs[reg]; } if (wbit) { if (!ubit) addr -= reg_count; 
addr -= (!pbit == !ubit); regs->uregs[rn] = (long)addr; } } static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) return; regs->ARM_pc = (long)p->addr + str_pc_offset; simulate_ldm1stm1(p, regs); regs->ARM_pc = (long)p->addr + 4; } static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs) { regs->uregs[12] = regs->uregs[13]; } static void __kprobes emulate_ldcstc(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rn = (insn >> 16) & 0xf; long rnv = regs->uregs[rn]; /* Save Rn in case of writeback. */ regs->uregs[rn] = insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn); } static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs) { insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; /* rm may be invalid, don't care. */ /* Not following the C calling convention here, so need asm(). */ __asm__ __volatile__ ( "ldr r0, %[rn] \n\t" "ldr r1, %[rm] \n\t" "msr cpsr_fs, %[cpsr]\n\t" "mov lr, pc \n\t" "mov pc, %[i_fn] \n\t" "str r0, %[rn] \n\t" /* in case of writeback */ "str r2, %[rd0] \n\t" "str r3, %[rd1] \n\t" : [rn] "+m" (regs->uregs[rn]), [rd0] "=m" (regs->uregs[rd]), [rd1] "=m" (regs->uregs[rd+1]) : [rm] "m" (regs->uregs[rm]), [cpsr] "r" (regs->ARM_cpsr), [i_fn] "r" (i_fn) : "r0", "r1", "r2", "r3", "lr", "cc" ); } static void __kprobes emulate_strd(struct kprobe *p, struct pt_regs *regs) { insn_4arg_fn_t *i_fn = (insn_4arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; long rnv = regs->uregs[rn]; long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. 
*/ regs->uregs[rn] = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd], regs->uregs[rd+1], regs->ARM_cpsr, i_fn); } static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs) { insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; union reg_pair fnr; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; long rdv; long rnv = regs->uregs[rn]; long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */ long cpsr = regs->ARM_cpsr; fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn); regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */ rdv = fnr.r1; if (rd == 15) { #if __LINUX_ARM_ARCH__ >= 5 cpsr &= ~PSR_T_BIT; if (rdv & 0x1) cpsr |= PSR_T_BIT; regs->ARM_cpsr = cpsr; rdv &= ~0x1; #else rdv &= ~0x2; #endif } regs->uregs[rd] = rdv; } static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs) { insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; long iaddr = (long)p->addr; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; long rdv = (rd == 15) ? iaddr + str_pc_offset : regs->uregs[rd]; long rnv = (rn == 15) ? iaddr + 8 : regs->uregs[rn]; long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */ /* Save Rn in case of writeback. 
*/ regs->uregs[rn] = insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn); } static void __kprobes emulate_mrrc(struct kprobe *p, struct pt_regs *regs) { insn_llret_0arg_fn_t *i_fn = (insn_llret_0arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; union reg_pair fnr; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; fnr.dr = insnslot_llret_0arg_rflags(regs->ARM_cpsr, i_fn); regs->uregs[rn] = fnr.r0; regs->uregs[rd] = fnr.r1; } static void __kprobes emulate_mcrr(struct kprobe *p, struct pt_regs *regs) { insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; long rnv = regs->uregs[rn]; long rdv = regs->uregs[rd]; insnslot_2arg_rflags(rnv, rdv, regs->ARM_cpsr, i_fn); } static void __kprobes emulate_sat(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rd = (insn >> 12) & 0xf; int rm = insn & 0xf; long rmv = regs->uregs[rm]; /* Writes Q flag */ regs->uregs[rd] = insnslot_1arg_rwflags(rmv, &regs->ARM_cpsr, i_fn); } static void __kprobes emulate_sel(struct kprobe *p, struct pt_regs *regs) { insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; long rnv = regs->uregs[rn]; long rmv = regs->uregs[rm]; /* Reads GE bits */ regs->uregs[rd] = insnslot_2arg_rflags(rnv, rmv, regs->ARM_cpsr, i_fn); } static void __kprobes emulate_none(struct kprobe *p, struct pt_regs *regs) { insn_0arg_fn_t *i_fn = (insn_0arg_fn_t *)&p->ainsn.insn[0]; insnslot_0arg_rflags(regs->ARM_cpsr, i_fn); } static void __kprobes emulate_rd12(struct kprobe *p, struct pt_regs *regs) { insn_0arg_fn_t *i_fn = (insn_0arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rd = (insn >> 12) & 0xf; regs->uregs[rd] = insnslot_0arg_rflags(regs->ARM_cpsr, i_fn); } static void 
__kprobes emulate_ird12(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int ird = (insn >> 12) & 0xf; insnslot_1arg_rflags(regs->uregs[ird], regs->ARM_cpsr, i_fn); } static void __kprobes emulate_rn16(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rn = (insn >> 16) & 0xf; long rnv = regs->uregs[rn]; insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn); } static void __kprobes emulate_rd12rm0(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rd = (insn >> 12) & 0xf; int rm = insn & 0xf; long rmv = regs->uregs[rm]; regs->uregs[rd] = insnslot_1arg_rflags(rmv, regs->ARM_cpsr, i_fn); } static void __kprobes emulate_rd12rn16rm0_rwflags(struct kprobe *p, struct pt_regs *regs) { insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; long rnv = regs->uregs[rn]; long rmv = regs->uregs[rm]; regs->uregs[rd] = insnslot_2arg_rwflags(rnv, rmv, &regs->ARM_cpsr, i_fn); } static void __kprobes emulate_rd16rn12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs) { insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rd = (insn >> 16) & 0xf; int rn = (insn >> 12) & 0xf; int rs = (insn >> 8) & 0xf; int rm = insn & 0xf; long rnv = regs->uregs[rn]; long rsv = regs->uregs[rs]; long rmv = regs->uregs[rm]; regs->uregs[rd] = insnslot_3arg_rwflags(rnv, rsv, rmv, &regs->ARM_cpsr, i_fn); } static void __kprobes emulate_rd16rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs) { insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rd = (insn >> 16) & 0xf; int rs = (insn >> 8) & 0xf; int rm = insn & 0xf; long rsv = 
regs->uregs[rs]; long rmv = regs->uregs[rm]; regs->uregs[rd] = insnslot_2arg_rwflags(rsv, rmv, &regs->ARM_cpsr, i_fn); } static void __kprobes emulate_rdhi16rdlo12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs) { insn_llret_4arg_fn_t *i_fn = (insn_llret_4arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; union reg_pair fnr; int rdhi = (insn >> 16) & 0xf; int rdlo = (insn >> 12) & 0xf; int rs = (insn >> 8) & 0xf; int rm = insn & 0xf; long rsv = regs->uregs[rs]; long rmv = regs->uregs[rm]; fnr.dr = insnslot_llret_4arg_rwflags(regs->uregs[rdhi], regs->uregs[rdlo], rsv, rmv, &regs->ARM_cpsr, i_fn); regs->uregs[rdhi] = fnr.r0; regs->uregs[rdlo] = fnr.r1; } static void __kprobes emulate_alu_imm_rflags(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn]; regs->uregs[rd] = insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn); } static void __kprobes emulate_alu_imm_rwflags(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn]; regs->uregs[rd] = insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn); } static void __kprobes emulate_alu_rflags(struct kprobe *p, struct pt_regs *regs) { insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; long ppc = (long)p->addr + 8; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; /* rn/rnv/rs/rsv may be */ int rs = (insn >> 8) & 0xf; /* invalid, don't care. */ int rm = insn & 0xf; long rnv = (rn == 15) ? ppc : regs->uregs[rn]; long rmv = (rm == 15) ? 
ppc : regs->uregs[rm]; long rsv = regs->uregs[rs]; regs->uregs[rd] = insnslot_3arg_rflags(rnv, rmv, rsv, regs->ARM_cpsr, i_fn); } static void __kprobes emulate_alu_rwflags(struct kprobe *p, struct pt_regs *regs) { insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; long ppc = (long)p->addr + 8; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; /* rn/rnv/rs/rsv may be */ int rs = (insn >> 8) & 0xf; /* invalid, don't care. */ int rm = insn & 0xf; long rnv = (rn == 15) ? ppc : regs->uregs[rn]; long rmv = (rm == 15) ? ppc : regs->uregs[rm]; long rsv = regs->uregs[rs]; regs->uregs[rd] = insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn); } static enum kprobe_insn __kprobes prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi) { int ibit = (insn & (1 << 26)) ? 25 : 22; insn &= 0xfff00fff; insn |= 0x00001000; /* Rn = r0, Rd = r1 */ if (insn & (1 << ibit)) { insn &= ~0xf; insn |= 2; /* Rm = r2 */ } asi->insn[0] = insn; asi->insn_handler = (insn & (1 << 20)) ? 
emulate_ldr : emulate_str;
	return INSN_GOOD;
}

/*
 * The prep_emulate_* helpers below rewrite the probed instruction's
 * register fields onto a fixed set of low registers (r0..r3) and install
 * the matching emulate_* handler, which loads/stores the real register
 * values around executing the rewritten instruction from the slot.
 */
static enum kprobe_insn __kprobes
prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	insn &= 0xffff0ff0;	/* Rd = r0, Rm = r0 */
	asi->insn[0] = insn;
	asi->insn_handler = emulate_rd12rm0;
	return INSN_GOOD;
}

static enum kprobe_insn __kprobes
prep_emulate_rd12(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	insn &= 0xffff0fff;	/* Rd = r0 */
	asi->insn[0] = insn;
	asi->insn_handler = emulate_rd12;
	return INSN_GOOD;
}

static enum kprobe_insn __kprobes
prep_emulate_rd12rn16rm0_wflags(kprobe_opcode_t insn,
				struct arch_specific_insn *asi)
{
	insn &= 0xfff00ff0;	/* Rd = r0, Rn = r0 */
	insn |= 0x00000001;	/* Rm = r1 */
	asi->insn[0] = insn;
	asi->insn_handler = emulate_rd12rn16rm0_rwflags;
	return INSN_GOOD;
}

static enum kprobe_insn __kprobes
prep_emulate_rd16rs8rm0_wflags(kprobe_opcode_t insn,
			       struct arch_specific_insn *asi)
{
	insn &= 0xfff0f0f0;	/* Rd = r0, Rs = r0 */
	insn |= 0x00000001;	/* Rm = r1 */
	asi->insn[0] = insn;
	asi->insn_handler = emulate_rd16rs8rm0_rwflags;
	return INSN_GOOD;
}

static enum kprobe_insn __kprobes
prep_emulate_rd16rn12rs8rm0_wflags(kprobe_opcode_t insn,
				   struct arch_specific_insn *asi)
{
	insn &= 0xfff000f0;	/* Rd = r0, Rn = r0 */
	insn |= 0x00000102;	/* Rs = r1, Rm = r2 */
	asi->insn[0] = insn;
	asi->insn_handler = emulate_rd16rn12rs8rm0_rwflags;
	return INSN_GOOD;
}

static enum kprobe_insn __kprobes
prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn,
				       struct arch_specific_insn *asi)
{
	insn &= 0xfff000f0;	/* RdHi = r0, RdLo = r1 */
	insn |= 0x00001203;	/* Rs = r2, Rm = r3 */
	asi->insn[0] = insn;
	asi->insn_handler = emulate_rdhi16rdlo12rs8rm0_rwflags;
	return INSN_GOOD;
}

/*
 * For the instruction masking and comparisons in all the "space_*"
 * functions below, Do _not_ rearrange the order of tests unless
 * you're very, very sure of what you are doing.
For the sake of * efficiency, the masks for some tests sometimes assume other test * have been done prior to them so the number of patterns to test * for an instruction set can be as broad as possible to reduce the * number of tests needed. */ static enum kprobe_insn __kprobes space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* CPS mmod == 1 : 1111 0001 0000 xx10 xxxx xxxx xx0x xxxx */ /* RFE : 1111 100x x0x1 xxxx xxxx 1010 xxxx xxxx */ /* SRS : 1111 100x x1x0 1101 xxxx 0101 xxxx xxxx */ if ((insn & 0xfff30020) == 0xf1020000 || (insn & 0xfe500f00) == 0xf8100a00 || (insn & 0xfe5f0f00) == 0xf84d0500) return INSN_REJECTED; /* PLD : 1111 01x1 x101 xxxx xxxx xxxx xxxx xxxx : */ if ((insn & 0xfd700000) == 0xf4500000) { insn &= 0xfff0ffff; /* Rn = r0 */ asi->insn[0] = insn; asi->insn_handler = emulate_rn16; return INSN_GOOD; } /* BLX(1) : 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx : */ if ((insn & 0xfe000000) == 0xfa000000) { asi->insn_handler = simulate_blx1; return INSN_GOOD_NO_SLOT; } /* SETEND : 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */ /* CDP2 : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ if ((insn & 0xffff00f0) == 0xf1010000 || (insn & 0xff000010) == 0xfe000000) { asi->insn[0] = insn; asi->insn_handler = emulate_none; return INSN_GOOD; } /* MCRR2 : 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */ /* MRRC2 : 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */ if ((insn & 0xffe00000) == 0xfc400000) { insn &= 0xfff00fff; /* Rn = r0 */ insn |= 0x00001000; /* Rd = r1 */ asi->insn[0] = insn; asi->insn_handler = (insn & (1 << 20)) ? 
emulate_mrrc : emulate_mcrr; return INSN_GOOD; } /* LDC2 : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ /* STC2 : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ if ((insn & 0xfe000000) == 0xfc000000) { insn &= 0xfff0ffff; /* Rn = r0 */ asi->insn[0] = insn; asi->insn_handler = emulate_ldcstc; return INSN_GOOD; } /* MCR2 : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ /* MRC2 : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ insn &= 0xffff0fff; /* Rd = r0 */ asi->insn[0] = insn; asi->insn_handler = (insn & (1 << 20)) ? emulate_rd12 : emulate_ird12; return INSN_GOOD; } static enum kprobe_insn __kprobes space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx xxx0 xxxx */ if ((insn & 0x0f900010) == 0x01000000) { /* BXJ : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */ /* MSR : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */ if ((insn & 0x0ff000f0) == 0x01200020 || (insn & 0x0fb000f0) == 0x01200000) return INSN_REJECTED; /* MRS : cccc 0001 0x00 xxxx xxxx xxxx 0000 xxxx */ if ((insn & 0x0fb00010) == 0x01000000) return prep_emulate_rd12(insn, asi); /* SMLALxy : cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */ if ((insn & 0x0ff00090) == 0x01400080) return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); /* SMULWy : cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */ /* SMULxy : cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */ if ((insn & 0x0ff000b0) == 0x012000a0 || (insn & 0x0ff00090) == 0x01600080) return prep_emulate_rd16rs8rm0_wflags(insn, asi); /* SMLAxy : cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx : Q */ /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 0x00 xxxx : Q */ return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); } /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx 0xx1 xxxx */ else if ((insn & 0x0f900090) == 0x01000010) { /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ if ((insn & 0xfff000f0) == 0xe1200070) return INSN_REJECTED; /* BLX(2) : cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */ /* BX : cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */ if ((insn & 0x0ff000d0) == 
0x01200010) { asi->insn[0] = truecc_insn(insn); asi->insn_handler = simulate_blx2bx; return INSN_GOOD; } /* CLZ : cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */ if ((insn & 0x0ff000f0) == 0x01600010) return prep_emulate_rd12rm0(insn, asi); /* QADD : cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx :Q */ /* QSUB : cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx :Q */ /* QDADD : cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx :Q */ /* QDSUB : cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx :Q */ return prep_emulate_rd12rn16rm0_wflags(insn, asi); } /* cccc 0000 xxxx xxxx xxxx xxxx xxxx 1001 xxxx */ else if ((insn & 0x0f000090) == 0x00000090) { /* MUL : cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx : */ /* MULS : cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx :cc */ /* MLA : cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx : */ /* MLAS : cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx :cc */ /* UMAAL : cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx : */ /* UMULL : cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx : */ /* UMULLS : cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx :cc */ /* UMLAL : cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx : */ /* UMLALS : cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx :cc */ /* SMULL : cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx : */ /* SMULLS : cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx :cc */ /* SMLAL : cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx : */ /* SMLALS : cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx :cc */ if ((insn & 0x0fe000f0) == 0x00000090) { return prep_emulate_rd16rs8rm0_wflags(insn, asi); } else if ((insn & 0x0fe000f0) == 0x00200090) { return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); } else { return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); } } /* cccc 000x xxxx xxxx xxxx xxxx xxxx 1xx1 xxxx */ else if ((insn & 0x0e000090) == 0x00000090) { /* SWP : cccc 0001 0000 xxxx xxxx xxxx 1001 xxxx */ /* SWPB : cccc 0001 0100 xxxx xxxx xxxx 1001 xxxx */ /* LDRD : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */ /* STRD : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */ /* STREX : cccc 0001 1000 xxxx xxxx xxxx 1001 xxxx */ /* LDREX : cccc 0001 
1001 xxxx xxxx xxxx 1001 xxxx */ /* LDRH : cccc 000x xxx1 xxxx xxxx xxxx 1011 xxxx */ /* STRH : cccc 000x xxx0 xxxx xxxx xxxx 1011 xxxx */ /* LDRSB : cccc 000x xxx1 xxxx xxxx xxxx 1101 xxxx */ /* LDRSH : cccc 000x xxx1 xxxx xxxx xxxx 1111 xxxx */ if ((insn & 0x0fb000f0) == 0x01000090) { /* SWP/SWPB */ return prep_emulate_rd12rn16rm0_wflags(insn, asi); } else if ((insn & 0x0e1000d0) == 0x00000d0) { /* STRD/LDRD */ insn &= 0xfff00fff; insn |= 0x00002000; /* Rn = r0, Rd = r2 */ if (insn & (1 << 22)) { /* I bit */ insn &= ~0xf; insn |= 1; /* Rm = r1 */ } asi->insn[0] = insn; asi->insn_handler = (insn & (1 << 5)) ? emulate_strd : emulate_ldrd; return INSN_GOOD; } return prep_emulate_ldr_str(insn, asi); } /* cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx xxxx */ /* * ALU op with S bit and Rd == 15 : * cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx */ if ((insn & 0x0e10f000) == 0x0010f000) return INSN_REJECTED; /* * "mov ip, sp" is the most common kprobe'd instruction by far. * Check and optimize for it explicitly. */ if (insn == 0xe1a0c00d) { asi->insn_handler = simulate_mov_ipsp; return INSN_GOOD_NO_SLOT; } /* * Data processing: Immediate-shift / Register-shift * ALU op : cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx * CPY : cccc 0001 1010 xxxx xxxx 0000 0000 xxxx * MOV : cccc 0001 101x xxxx xxxx xxxx xxxx xxxx * *S (bit 20) updates condition codes * ADC/SBC/RSC reads the C flag */ insn &= 0xfff00ff0; /* Rn = r0, Rd = r0 */ insn |= 0x00000001; /* Rm = r1 */ if (insn & 0x010) { insn &= 0xfffff0ff; /* register shift */ insn |= 0x00000200; /* Rs = r2 */ } asi->insn[0] = insn; asi->insn_handler = (insn & (1 << 20)) ? 
/* S-bit */ emulate_alu_rwflags : emulate_alu_rflags; return INSN_GOOD; } static enum kprobe_insn __kprobes space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx * Undef : cccc 0011 0x00 xxxx xxxx xxxx xxxx xxxx * ALU op with S bit and Rd == 15 : * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx */ if ((insn & 0x0f900000) == 0x03200000 || /* MSR & Undef */ (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */ return INSN_REJECTED; /* * Data processing: 32-bit Immediate * ALU op : cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx * MOV : cccc 0011 101x xxxx xxxx xxxx xxxx xxxx * *S (bit 20) updates condition codes * ADC/SBC/RSC reads the C flag */ insn &= 0xfff00fff; /* Rn = r0, Rd = r0 */ asi->insn[0] = insn; asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ emulate_alu_imm_rwflags : emulate_alu_imm_rflags; return INSN_GOOD; } static enum kprobe_insn __kprobes space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* SEL : cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx GE: !!! 
*/ if ((insn & 0x0ff000f0) == 0x068000b0) { insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */ insn |= 0x00000001; /* Rm = r1 */ asi->insn[0] = insn; asi->insn_handler = emulate_sel; return INSN_GOOD; } /* SSAT : cccc 0110 101x xxxx xxxx xxxx xx01 xxxx :Q */ /* USAT : cccc 0110 111x xxxx xxxx xxxx xx01 xxxx :Q */ /* SSAT16 : cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx :Q */ /* USAT16 : cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx :Q */ if ((insn & 0x0fa00030) == 0x06a00010 || (insn & 0x0fb000f0) == 0x06a00030) { insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */ asi->insn[0] = insn; asi->insn_handler = emulate_sat; return INSN_GOOD; } /* REV : cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */ /* REV16 : cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */ /* REVSH : cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */ if ((insn & 0x0ff00070) == 0x06b00030 || (insn & 0x0ff000f0) == 0x06f000b0) return prep_emulate_rd12rm0(insn, asi); /* SADD16 : cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx :GE */ /* SADDSUBX : cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx :GE */ /* SSUBADDX : cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx :GE */ /* SSUB16 : cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx :GE */ /* SADD8 : cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx :GE */ /* SSUB8 : cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx :GE */ /* QADD16 : cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx : */ /* QADDSUBX : cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx : */ /* QSUBADDX : cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx : */ /* QSUB16 : cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx : */ /* QADD8 : cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx : */ /* QSUB8 : cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx : */ /* SHADD16 : cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx : */ /* SHADDSUBX : cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx : */ /* SHSUBADDX : cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx : */ /* SHSUB16 : cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx : */ /* SHADD8 : cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx : */ /* SHSUB8 : cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx : */ /* UADD16 : cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx :GE 
*/ /* UADDSUBX : cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx :GE */ /* USUBADDX : cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx :GE */ /* USUB16 : cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx :GE */ /* UADD8 : cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx :GE */ /* USUB8 : cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx :GE */ /* UQADD16 : cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx : */ /* UQADDSUBX : cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx : */ /* UQSUBADDX : cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx : */ /* UQSUB16 : cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx : */ /* UQADD8 : cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx : */ /* UQSUB8 : cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx : */ /* UHADD16 : cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx : */ /* UHADDSUBX : cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx : */ /* UHSUBADDX : cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx : */ /* UHSUB16 : cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx : */ /* UHADD8 : cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx : */ /* UHSUB8 : cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx : */ /* PKHBT : cccc 0110 1000 xxxx xxxx xxxx x001 xxxx : */ /* PKHTB : cccc 0110 1000 xxxx xxxx xxxx x101 xxxx : */ /* SXTAB16 : cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx : */ /* SXTB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */ /* SXTAB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */ /* SXTAH : cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx : */ /* UXTAB16 : cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx : */ /* UXTAB : cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx : */ /* UXTAH : cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx : */ return prep_emulate_rd12rn16rm0_wflags(insn, asi); } static enum kprobe_insn __kprobes space_cccc_0111__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* Undef : cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */ if ((insn & 0x0ff000f0) == 0x03f000f0) return INSN_REJECTED; /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */ /* USAD8 : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */ if ((insn & 0x0ff000f0) == 0x07800010) return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); /* SMLALD : 
cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */ /* SMLSLD : cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */ if ((insn & 0x0ff00090) == 0x07400010) return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); /* SMLAD : cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx :Q */ /* SMLSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx :Q */ /* SMMLA : cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx : */ /* SMMLS : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx : */ if ((insn & 0x0ff00090) == 0x07000010 || (insn & 0x0ff000d0) == 0x07500010 || (insn & 0x0ff000d0) == 0x075000d0) return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); /* SMUSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx : */ /* SMUAD : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */ /* SMMUL : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx : */ return prep_emulate_rd16rs8rm0_wflags(insn, asi); } static enum kprobe_insn __kprobes space_cccc_01xx(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* LDR : cccc 01xx x0x1 xxxx xxxx xxxx xxxx xxxx */ /* LDRB : cccc 01xx x1x1 xxxx xxxx xxxx xxxx xxxx */ /* LDRBT : cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */ /* LDRT : cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */ /* STR : cccc 01xx x0x0 xxxx xxxx xxxx xxxx xxxx */ /* STRB : cccc 01xx x1x0 xxxx xxxx xxxx xxxx xxxx */ /* STRBT : cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */ /* STRT : cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */ return prep_emulate_ldr_str(insn, asi); } static enum kprobe_insn __kprobes space_cccc_100x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* LDM(2) : cccc 100x x101 xxxx 0xxx xxxx xxxx xxxx */ /* LDM(3) : cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */ if ((insn & 0x0e708000) == 0x85000000 || (insn & 0x0e508000) == 0x85010000) return INSN_REJECTED; /* LDM(1) : cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ /* STM(1) : cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */ asi->insn[0] = truecc_insn(insn); asi->insn_handler = ((insn & 0x108000) == 0x008000) ? 
/* STM & R15 */ simulate_stm1_pc : simulate_ldm1stm1; return INSN_GOOD; } static enum kprobe_insn __kprobes space_cccc_101x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* B : cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */ /* BL : cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */ asi->insn[0] = truecc_insn(insn); asi->insn_handler = simulate_bbl; return INSN_GOOD; } static enum kprobe_insn __kprobes space_cccc_1100_010x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* MCRR : cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */ /* MRRC : cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */ insn &= 0xfff00fff; insn |= 0x00001000; /* Rn = r0, Rd = r1 */ asi->insn[0] = insn; asi->insn_handler = (insn & (1 << 20)) ? emulate_mrrc : emulate_mcrr; return INSN_GOOD; } static enum kprobe_insn __kprobes space_cccc_110x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ insn &= 0xfff0ffff; /* Rn = r0 */ asi->insn[0] = insn; asi->insn_handler = emulate_ldcstc; return INSN_GOOD; } static enum kprobe_insn __kprobes space_cccc_111x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ /* SWI : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */ if ((insn & 0xfff000f0) == 0xe1200070 || (insn & 0x0f000000) == 0x0f000000) return INSN_REJECTED; /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ if ((insn & 0x0f000010) == 0x0e000000) { asi->insn[0] = insn; asi->insn_handler = emulate_none; return INSN_GOOD; } /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ insn &= 0xffff0fff; /* Rd = r0 */ asi->insn[0] = insn; asi->insn_handler = (insn & (1 << 20)) ? 
emulate_rd12 : emulate_ird12; return INSN_GOOD; } /* Return: * INSN_REJECTED If instruction is one not allowed to kprobe, * INSN_GOOD If instruction is supported and uses instruction slot, * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. * * For instructions we don't want to kprobe (INSN_REJECTED return result): * These are generally ones that modify the processor state making * them "hard" to simulate such as switches processor modes or * make accesses in alternate modes. Any of these could be simulated * if the work was put into it, but low return considering they * should also be very rare. */ enum kprobe_insn __kprobes arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) { asi->insn[1] = KPROBE_RETURN_INSTRUCTION; if ((insn & 0xf0000000) == 0xf0000000) { return space_1111(insn, asi); } else if ((insn & 0x0e000000) == 0x00000000) { return space_cccc_000x(insn, asi); } else if ((insn & 0x0e000000) == 0x02000000) { return space_cccc_001x(insn, asi); } else if ((insn & 0x0f000010) == 0x06000010) { return space_cccc_0110__1(insn, asi); } else if ((insn & 0x0f000010) == 0x07000010) { return space_cccc_0111__1(insn, asi); } else if ((insn & 0x0c000000) == 0x04000000) { return space_cccc_01xx(insn, asi); } else if ((insn & 0x0e000000) == 0x08000000) { return space_cccc_100x(insn, asi); } else if ((insn & 0x0e000000) == 0x0a000000) { return space_cccc_101x(insn, asi); } else if ((insn & 0x0fe00000) == 0x0c400000) { return space_cccc_1100_010x(insn, asi); } else if ((insn & 0x0e000000) == 0x0c400000) { return space_cccc_110x(insn, asi); } return space_cccc_111x(insn, asi); } void __init arm_kprobe_decode_init(void) { find_str_pc_offset(); } /* * All ARM instructions listed below. * * Instructions and their general purpose registers are given. * If a particular register may not use R15, it is prefixed with a "!". * If marked with a "*" means the value returned by reading R15 * is implementation defined. 
* * ADC/ADD/AND/BIC/CMN/CMP/EOR/MOV/MVN/ORR/RSB/RSC/SBC/SUB/TEQ * TST: Rd, Rn, Rm, !Rs * BX: Rm * BLX(2): !Rm * BX: Rm (R15 legal, but discouraged) * BXJ: !Rm, * CLZ: !Rd, !Rm * CPY: Rd, Rm * LDC/2,STC/2 immediate offset & unindex: Rn * LDC/2,STC/2 immediate pre/post-indexed: !Rn * LDM(1/3): !Rn, register_list * LDM(2): !Rn, !register_list * LDR,STR,PLD immediate offset: Rd, Rn * LDR,STR,PLD register offset: Rd, Rn, !Rm * LDR,STR,PLD scaled register offset: Rd, !Rn, !Rm * LDR,STR immediate pre/post-indexed: Rd, !Rn * LDR,STR register pre/post-indexed: Rd, !Rn, !Rm * LDR,STR scaled register pre/post-indexed: Rd, !Rn, !Rm * LDRB,STRB immediate offset: !Rd, Rn * LDRB,STRB register offset: !Rd, Rn, !Rm * LDRB,STRB scaled register offset: !Rd, !Rn, !Rm * LDRB,STRB immediate pre/post-indexed: !Rd, !Rn * LDRB,STRB register pre/post-indexed: !Rd, !Rn, !Rm * LDRB,STRB scaled register pre/post-indexed: !Rd, !Rn, !Rm * LDRT,LDRBT,STRBT immediate pre/post-indexed: !Rd, !Rn * LDRT,LDRBT,STRBT register pre/post-indexed: !Rd, !Rn, !Rm * LDRT,LDRBT,STRBT scaled register pre/post-indexed: !Rd, !Rn, !Rm * LDRH/SH/SB/D,STRH/SH/SB/D immediate offset: !Rd, Rn * LDRH/SH/SB/D,STRH/SH/SB/D register offset: !Rd, Rn, !Rm * LDRH/SH/SB/D,STRH/SH/SB/D immediate pre/post-indexed: !Rd, !Rn * LDRH/SH/SB/D,STRH/SH/SB/D register pre/post-indexed: !Rd, !Rn, !Rm * LDREX: !Rd, !Rn * MCR/2: !Rd * MCRR/2,MRRC/2: !Rd, !Rn * MLA: !Rd, !Rn, !Rm, !Rs * MOV: Rd * MRC/2: !Rd (if Rd==15, only changes cond codes, not the register) * MRS,MSR: !Rd * MUL: !Rd, !Rm, !Rs * PKH{BT,TB}: !Rd, !Rn, !Rm * QDADD,[U]QADD/16/8/SUBX: !Rd, !Rm, !Rn * QDSUB,[U]QSUB/16/8/ADDX: !Rd, !Rm, !Rn * REV/16/SH: !Rd, !Rm * RFE: !Rn * {S,U}[H]ADD{16,8,SUBX},{S,U}[H]SUB{16,8,ADDX}: !Rd, !Rn, !Rm * SEL: !Rd, !Rn, !Rm * SMLA<x><y>,SMLA{D,W<y>},SMLSD,SMML{A,S}: !Rd, !Rn, !Rm, !Rs * SMLAL<x><y>,SMLA{D,LD},SMLSLD,SMMULL,SMULW<y>: !RdHi, !RdLo, !Rm, !Rs * SMMUL,SMUAD,SMUL<x><y>,SMUSD: !Rd, !Rm, !Rs * SSAT/16: !Rd, !Rm * STM(1/2): !Rn, 
register_list* (R15 in reg list not recommended) * STRT immediate pre/post-indexed: Rd*, !Rn * STRT register pre/post-indexed: Rd*, !Rn, !Rm * STRT scaled register pre/post-indexed: Rd*, !Rn, !Rm * STREX: !Rd, !Rn, !Rm * SWP/B: !Rd, !Rn, !Rm * {S,U}XTA{B,B16,H}: !Rd, !Rn, !Rm * {S,U}XT{B,B16,H}: !Rd, !Rm * UM{AA,LA,UL}L: !RdHi, !RdLo, !Rm, !Rs * USA{D8,A8,T,T16}: !Rd, !Rm, !Rs * * May transfer control by writing R15 (possible mode changes or alternate * mode accesses marked by "*"): * ALU op (* with s-bit), B, BL, BKPT, BLX(1/2), BX, BXJ, CPS*, CPY, * LDM(1), LDM(2/3)*, LDR, MOV, RFE*, SWI* * * Instructions that do not take general registers, nor transfer control: * CDP/2, SETEND, SRS* */
gpl-2.0
VorkTeam/vorkKernel-DESIRE
arch/arm/mach-realview/platsmp.c
1443
5189
/* * linux/arch/arm/mach-realview/platsmp.c * * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/jiffies.h> #include <linux/smp.h> #include <linux/io.h> #include <asm/cacheflush.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/localtimer.h> #include <asm/unified.h> #include <mach/board-eb.h> #include <mach/board-pb11mp.h> #include <mach/board-pbx.h> #include <asm/smp_scu.h> #include "core.h" extern void realview_secondary_startup(void); /* * control for which core is the next to come out of the secondary * boot "holding pen" */ volatile int __cpuinitdata pen_release = -1; static void __iomem *scu_base_addr(void) { if (machine_is_realview_eb_mp()) return __io_address(REALVIEW_EB11MP_SCU_BASE); else if (machine_is_realview_pb11mp()) return __io_address(REALVIEW_TC11MP_SCU_BASE); else if (machine_is_realview_pbx() && (core_tile_pbx11mp() || core_tile_pbxa9mp())) return __io_address(REALVIEW_PBX_TILE_SCU_BASE); else return (void __iomem *)0; } static inline unsigned int get_core_count(void) { void __iomem *scu_base = scu_base_addr(); if (scu_base) return scu_get_core_count(scu_base); return 1; } static DEFINE_SPINLOCK(boot_lock); void __cpuinit platform_secondary_init(unsigned int cpu) { trace_hardirqs_off(); /* * if any interrupts are already enabled for the primary * core (e.g. timer irq), then they will not have been enabled * for us: do so */ gic_cpu_init(0, gic_cpu_base_addr); /* * let the primary processor know we're out of the * pen, then head off into the C entry point */ pen_release = -1; smp_wmb(); /* * Synchronise with the boot thread. 
*/ spin_lock(&boot_lock); spin_unlock(&boot_lock); } int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) { unsigned long timeout; /* * set synchronisation state between this boot processor * and the secondary one */ spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from * the holding pen - release it, then wait for it to flag * that it has been released by resetting pen_release. * * Note that "pen_release" is the hardware CPU ID, whereas * "cpu" is Linux's internal ID. */ pen_release = cpu; flush_cache_all(); /* * XXX * * This is a later addition to the booting protocol: the * bootMonitor now puts secondary cores into WFI, so * poke_milo() no longer gets the cores moving; we need * to send a soft interrupt to wake the secondary core. * Use smp_cross_call() for this, since there's little * point duplicating the code here */ smp_cross_call(cpumask_of(cpu)); timeout = jiffies + (1 * HZ); while (time_before(jiffies, timeout)) { smp_rmb(); if (pen_release == -1) break; udelay(10); } /* * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } static void __init poke_milo(void) { /* nobody is to be released from the pen yet */ pen_release = -1; /* * Write the address of secondary startup into the system-wide flags * register. The BootMonitor waits for this register to become * non-zero. */ __raw_writel(BSYM(virt_to_phys(realview_secondary_startup)), __io_address(REALVIEW_SYS_FLAGSSET)); mb(); } /* * Initialise the CPU possible map early - this describes the CPUs * which may be present or become present in the system. 
*/ void __init smp_init_cpus(void) { unsigned int i, ncores = get_core_count(); for (i = 0; i < ncores; i++) set_cpu_possible(i, true); } void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int ncores = get_core_count(); unsigned int cpu = smp_processor_id(); int i; /* sanity check */ if (ncores == 0) { printk(KERN_ERR "Realview: strange CM count of 0? Default to 1\n"); ncores = 1; } if (ncores > NR_CPUS) { printk(KERN_WARNING "Realview: no. of cores (%d) greater than configured " "maximum of %d - clipping\n", ncores, NR_CPUS); ncores = NR_CPUS; } smp_store_cpu_info(cpu); /* * are we trying to boot more cores than exist? */ if (max_cpus > ncores) max_cpus = ncores; /* * Initialise the present map, which describes the set of CPUs * actually populated at the present time. */ for (i = 0; i < max_cpus; i++) set_cpu_present(i, true); /* * Initialise the SCU if there are more than one CPU and let * them know where to start. Note that, on modern versions of * MILO, the "poke" doesn't actually do anything until each * individual core is sent a soft interrupt to get it out of * WFI */ if (max_cpus > 1) { /* * Enable the local timer or broadcast device for the * boot CPU, but only if we have more than one CPU. */ percpu_timer_setup(); scu_enable(scu_base_addr()); poke_milo(); } }
gpl-2.0
Thrive-Hackers/Ubuntu-Touch-Kernel
lib/raid6/recov.c
3235
3571
/* -*- linux-c -*- ------------------------------------------------------- * * * Copyright 2002 H. Peter Anvin - All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, Inc., 53 Temple Place Ste 330, * Boston MA 02111-1307, USA; either version 2 of the License, or * (at your option) any later version; incorporated herein by reference. * * ----------------------------------------------------------------------- */ /* * raid6/recov.c * * RAID-6 data recovery in dual failure mode. In single failure mode, * use the RAID-5 algorithm (or, in the case of Q failure, just reconstruct * the syndrome.) */ #include <linux/raid/pq.h> /* Recover two failed data blocks. */ void raid6_2data_recov(int disks, size_t bytes, int faila, int failb, void **ptrs) { u8 *p, *q, *dp, *dq; u8 px, qx, db; const u8 *pbmul; /* P multiplier table for B data */ const u8 *qmul; /* Q multiplier table (for both) */ p = (u8 *)ptrs[disks-2]; q = (u8 *)ptrs[disks-1]; /* Compute syndrome with zero for the missing data pages Use the dead data pages as temporary storage for delta p and delta q */ dp = (u8 *)ptrs[faila]; ptrs[faila] = (void *)raid6_empty_zero_page; ptrs[disks-2] = dp; dq = (u8 *)ptrs[failb]; ptrs[failb] = (void *)raid6_empty_zero_page; ptrs[disks-1] = dq; raid6_call.gen_syndrome(disks, bytes, ptrs); /* Restore pointer table */ ptrs[faila] = dp; ptrs[failb] = dq; ptrs[disks-2] = p; ptrs[disks-1] = q; /* Now, pick the proper data tables */ pbmul = raid6_gfmul[raid6_gfexi[failb-faila]]; qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]]; /* Now do it... 
*/ while ( bytes-- ) { px = *p ^ *dp; qx = qmul[*q ^ *dq]; *dq++ = db = pbmul[px] ^ qx; /* Reconstructed B */ *dp++ = db ^ px; /* Reconstructed A */ p++; q++; } } EXPORT_SYMBOL_GPL(raid6_2data_recov); /* Recover failure of one data block plus the P block */ void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs) { u8 *p, *q, *dq; const u8 *qmul; /* Q multiplier table */ p = (u8 *)ptrs[disks-2]; q = (u8 *)ptrs[disks-1]; /* Compute syndrome with zero for the missing data page Use the dead data page as temporary storage for delta q */ dq = (u8 *)ptrs[faila]; ptrs[faila] = (void *)raid6_empty_zero_page; ptrs[disks-1] = dq; raid6_call.gen_syndrome(disks, bytes, ptrs); /* Restore pointer table */ ptrs[faila] = dq; ptrs[disks-1] = q; /* Now, pick the proper data tables */ qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]]; /* Now do it... */ while ( bytes-- ) { *p++ ^= *dq = qmul[*q ^ *dq]; q++; dq++; } } EXPORT_SYMBOL_GPL(raid6_datap_recov); #ifndef __KERNEL__ /* Testing only */ /* Recover two failed blocks. */ void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs) { if ( faila > failb ) { int tmp = faila; faila = failb; failb = tmp; } if ( failb == disks-1 ) { if ( faila == disks-2 ) { /* P+Q failure. Just rebuild the syndrome. */ raid6_call.gen_syndrome(disks, bytes, ptrs); } else { /* data+Q failure. Reconstruct data from P, then rebuild syndrome. */ /* NOT IMPLEMENTED - equivalent to RAID-5 */ } } else { if ( failb == disks-2 ) { /* data+P failure. */ raid6_datap_recov(disks, bytes, faila, ptrs); } else { /* data+data failure. */ raid6_2data_recov(disks, bytes, faila, failb, ptrs); } } } #endif
gpl-2.0
Drgravy/g3stock
arch/powerpc/kernel/signal_64.c
3491
14863
/* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Derived from "arch/i386/kernel/signal.c" * Copyright (C) 1991, 1992 Linus Torvalds * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/elf.h> #include <linux/ptrace.h> #include <linux/ratelimit.h> #include <asm/sigcontext.h> #include <asm/ucontext.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/unistd.h> #include <asm/cacheflush.h> #include <asm/syscalls.h> #include <asm/vdso.h> #include <asm/switch_to.h> #include "signal.h" #define DEBUG_SIG 0 #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs)) #define FP_REGS_SIZE sizeof(elf_fpregset_t) #define TRAMP_TRACEBACK 3 #define TRAMP_SIZE 6 /* * When we have signals to deliver, we set up on the user stack, * going down from the original stack pointer: * 1) a rt_sigframe struct which contains the ucontext * 2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller * frame for the signal handler. */ struct rt_sigframe { /* sys_rt_sigreturn requires the ucontext be the first field */ struct ucontext uc; unsigned long _unused[2]; unsigned int tramp[TRAMP_SIZE]; struct siginfo __user *pinfo; void __user *puc; struct siginfo info; /* 64 bit ABI allows for 288 bytes below sp before decrementing it. 
*/ char abigap[288]; } __attribute__ ((aligned (16))); static const char fmt32[] = KERN_INFO \ "%s[%d]: bad frame in %s: %08lx nip %08lx lr %08lx\n"; static const char fmt64[] = KERN_INFO \ "%s[%d]: bad frame in %s: %016lx nip %016lx lr %016lx\n"; /* * Set up the sigcontext for the signal frame. */ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int signr, sigset_t *set, unsigned long handler, int ctx_has_vsx_region) { /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the * process never used altivec yet (MSR_VEC is zero in pt_regs of * the context). This is very important because we must ensure we * don't lose the VRSAVE content that may have been set prior to * the process doing its first vector operation * Userland shall check AT_HWCAP to know wether it can rely on the * v_regs pointer or not */ #ifdef CONFIG_ALTIVEC elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)(((unsigned long)sc->vmx_reserve + 15) & ~0xful); #endif unsigned long msr = regs->msr; long err = 0; flush_fp_to_thread(current); #ifdef CONFIG_ALTIVEC err |= __put_user(v_regs, &sc->v_regs); /* save altivec registers */ if (current->thread.used_vr) { flush_altivec_to_thread(current); /* Copy 33 vec registers (vr0..31 and vscr) to the stack */ err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128)); /* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg) * contains valid data. */ msr |= MSR_VEC; } /* We always copy to/from vrsave, it's 0 if we don't have or don't * use altivec. */ err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); #else /* CONFIG_ALTIVEC */ err |= __put_user(0, &sc->v_regs); #endif /* CONFIG_ALTIVEC */ flush_fp_to_thread(current); /* copy fpr regs and fpscr */ err |= copy_fpr_to_user(&sc->fp_regs, current); #ifdef CONFIG_VSX /* * Copy VSX low doubleword to local buffer for formatting, * then out to userspace. Update v_regs to point after the * VMX data. 
*/ if (current->thread.used_vsr && ctx_has_vsx_region) { __giveup_vsx(current); v_regs += ELF_NVRREG; err |= copy_vsx_to_user(v_regs, current); /* set MSR_VSX in the MSR value in the frame to * indicate that sc->vs_reg) contains valid data. */ msr |= MSR_VSX; } #endif /* CONFIG_VSX */ err |= __put_user(&sc->gp_regs, &sc->regs); WARN_ON(!FULL_REGS(regs)); err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE); err |= __put_user(msr, &sc->gp_regs[PT_MSR]); err |= __put_user(signr, &sc->signal); err |= __put_user(handler, &sc->handler); if (set != NULL) err |= __put_user(set->sig[0], &sc->oldmask); return err; } /* * Restore the sigcontext from the signal frame. */ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig, struct sigcontext __user *sc) { #ifdef CONFIG_ALTIVEC elf_vrreg_t __user *v_regs; #endif unsigned long err = 0; unsigned long save_r13 = 0; unsigned long msr; #ifdef CONFIG_VSX int i; #endif /* If this is not a signal return, we preserve the TLS in r13 */ if (!sig) save_r13 = regs->gpr[13]; /* copy the GPRs */ err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr)); err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]); /* get MSR separately, transfer the LE bit if doing signal return */ err |= __get_user(msr, &sc->gp_regs[PT_MSR]); if (sig) regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]); err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]); err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]); err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]); err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]); /* skip SOFTE */ regs->trap = 0; err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); if (!sig) regs->gpr[13] = save_r13; if (set != NULL) err |= __get_user(set->sig[0], &sc->oldmask); /* * Do this before updating the thread state in * 
current->thread.fpr/vr. That way, if we get preempted * and another task grabs the FPU/Altivec, it won't be * tempted to save the current CPU state into the thread_struct * and corrupt what we are writing there. */ discard_lazy_cpu_state(); /* * Force reload of FP/VEC. * This has to be done before copying stuff into current->thread.fpr/vr * for the reasons explained in the previous comment. */ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX); #ifdef CONFIG_ALTIVEC err |= __get_user(v_regs, &sc->v_regs); if (err) return err; if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128))) return -EFAULT; /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ if (v_regs != 0 && (msr & MSR_VEC) != 0) err |= __copy_from_user(current->thread.vr, v_regs, 33 * sizeof(vector128)); else if (current->thread.used_vr) memset(current->thread.vr, 0, 33 * sizeof(vector128)); /* Always get VRSAVE back */ if (v_regs != 0) err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); else current->thread.vrsave = 0; #endif /* CONFIG_ALTIVEC */ /* restore floating point */ err |= copy_fpr_from_user(current, &sc->fp_regs); #ifdef CONFIG_VSX /* * Get additional VSX data. Update v_regs to point after the * VMX data. Copy VSX low doubleword from userspace to local * buffer for formatting, then into the taskstruct. 
*/ v_regs += ELF_NVRREG; if ((msr & MSR_VSX) != 0) err |= copy_vsx_from_user(current, v_regs); else for (i = 0; i < 32 ; i++) current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; #endif return err; } /* * Setup the trampoline code on the stack */ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp) { int i; long err = 0; /* addi r1, r1, __SIGNAL_FRAMESIZE # Pop the dummy stackframe */ err |= __put_user(0x38210000UL | (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]); /* li r0, __NR_[rt_]sigreturn| */ err |= __put_user(0x38000000UL | (syscall & 0xffff), &tramp[1]); /* sc */ err |= __put_user(0x44000002UL, &tramp[2]); /* Minimal traceback info */ for (i=TRAMP_TRACEBACK; i < TRAMP_SIZE ;i++) err |= __put_user(0, &tramp[i]); if (!err) flush_icache_range((unsigned long) &tramp[0], (unsigned long) &tramp[TRAMP_SIZE]); return err; } /* * Userspace code may pass a ucontext which doesn't include VSX added * at the end. We need to check for this case. */ #define UCONTEXTSIZEWITHOUTVSX \ (sizeof(struct ucontext) - 32*sizeof(long)) /* * Handle {get,set,swap}_context operations */ int sys_swapcontext(struct ucontext __user *old_ctx, struct ucontext __user *new_ctx, long ctx_size, long r6, long r7, long r8, struct pt_regs *regs) { unsigned char tmp; sigset_t set; unsigned long new_msr = 0; int ctx_has_vsx_region = 0; if (new_ctx && get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR])) return -EFAULT; /* * Check that the context is not smaller than the original * size (with VMX but without VSX) */ if (ctx_size < UCONTEXTSIZEWITHOUTVSX) return -EINVAL; /* * If the new context state sets the MSR VSX bits but * it doesn't provide VSX state. */ if ((ctx_size < sizeof(struct ucontext)) && (new_msr & MSR_VSX)) return -EINVAL; /* Does the context have enough room to store VSX data? 
*/ if (ctx_size >= sizeof(struct ucontext)) ctx_has_vsx_region = 1; if (old_ctx != NULL) { if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size) || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0, ctx_has_vsx_region) || __copy_to_user(&old_ctx->uc_sigmask, &current->blocked, sizeof(sigset_t))) return -EFAULT; } if (new_ctx == NULL) return 0; if (!access_ok(VERIFY_READ, new_ctx, ctx_size) || __get_user(tmp, (u8 __user *) new_ctx) || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1)) return -EFAULT; /* * If we get a fault copying the context into the kernel's * image of the user's registers, we can't just return -EFAULT * because the user's registers will be corrupted. For instance * the NIP value may have been updated but not some of the * other registers. Given that we have done the access_ok * and successfully read the first and last bytes of the region * above, this should only happen in an out-of-memory situation * or if another thread unmaps the region containing the context. * We kill the task with a SIGSEGV in this situation. */ if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set))) do_exit(SIGSEGV); restore_sigmask(&set); if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext)) do_exit(SIGSEGV); /* This returns like rt_sigreturn */ set_thread_flag(TIF_RESTOREALL); return 0; } /* * Do a signal return; undo the signal stack. 
*/ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, unsigned long r8, struct pt_regs *regs) { struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1]; sigset_t set; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; if (!access_ok(VERIFY_READ, uc, sizeof(*uc))) goto badframe; if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) goto badframe; restore_sigmask(&set); if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext)) goto badframe; /* do_sigaltstack expects a __user pointer and won't modify * what's in there anyway */ do_sigaltstack(&uc->uc_stack, NULL, regs->gpr[1]); set_thread_flag(TIF_RESTOREALL); return 0; badframe: #if DEBUG_SIG printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n", regs, uc, &uc->uc_mcontext); #endif if (show_unhandled_signals) printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, current->comm, current->pid, "rt_sigreturn", (long)uc, regs->nip, regs->link); force_sig(SIGSEGV, current); return 0; } int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { /* Handler is *really* a pointer to the function descriptor for * the signal routine. The first entry in the function * descriptor is the entry address of signal and the second * entry is the TOC value we need to use. */ func_descr_t __user *funct_desc_ptr; struct rt_sigframe __user *frame; unsigned long newsp = 0; long err = 0; frame = get_sigframe(ka, regs, sizeof(*frame), 0); if (unlikely(frame == NULL)) goto badframe; err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user(&frame->uc, &frame->puc); err |= copy_siginfo_to_user(&frame->info, info); if (err) goto badframe; /* Create the ucontext. 
*/ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->gpr[1]), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr, NULL, (unsigned long)ka->sa.sa_handler, 1); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto badframe; /* Make sure signal handler doesn't get spurious FP exceptions */ current->thread.fpscr.val = 0; /* Set up to return from userspace. */ if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp; } else { err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); if (err) goto badframe; regs->link = (unsigned long) &frame->tramp[0]; } funct_desc_ptr = (func_descr_t __user *) ka->sa.sa_handler; /* Allocate a dummy caller frame for the signal handler. */ newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE; err |= put_user(regs->gpr[1], (unsigned long __user *)newsp); /* Set up "regs" so we "return" to the signal handler. */ err |= get_user(regs->nip, &funct_desc_ptr->entry); /* enter the signal handler in big-endian mode */ regs->msr &= ~MSR_LE; regs->gpr[1] = newsp; err |= get_user(regs->gpr[2], &funct_desc_ptr->toc); regs->gpr[3] = signr; regs->result = 0; if (ka->sa.sa_flags & SA_SIGINFO) { err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo); err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc); regs->gpr[6] = (unsigned long) frame; } else { regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext; } if (err) goto badframe; return 1; badframe: #if DEBUG_SIG printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); #endif if (show_unhandled_signals) printk_ratelimited(regs->msr & MSR_64BIT ? 
fmt64 : fmt32, current->comm, current->pid, "setup_rt_frame", (long)frame, regs->nip, regs->link); force_sigsegv(signr, current); return 0; }
gpl-2.0
C457/android_kernel_kyocera_c5155
drivers/video/omap/lcd_palmte.c
4259
2845
/* * LCD panel support for the Palm Tungsten E * * Original version : Romain Goyet <r.goyet@gmail.com> * Current version : Laurent Gonzalez <palmte.linux@free.fr> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> #include <plat/fpga.h> #include "omapfb.h" static int palmte_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) { return 0; } static void palmte_panel_cleanup(struct lcd_panel *panel) { } static int palmte_panel_enable(struct lcd_panel *panel) { return 0; } static void palmte_panel_disable(struct lcd_panel *panel) { } static unsigned long palmte_panel_get_caps(struct lcd_panel *panel) { return 0; } struct lcd_panel palmte_panel = { .name = "palmte", .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC | OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE | OMAP_LCDC_HSVS_OPPOSITE, .data_lines = 16, .bpp = 8, .pixel_clock = 12000, .x_res = 320, .y_res = 320, .hsw = 4, .hfp = 8, .hbp = 28, .vsw = 1, .vfp = 8, .vbp = 7, .pcd = 0, .init = palmte_panel_init, .cleanup = palmte_panel_cleanup, .enable = palmte_panel_enable, .disable = palmte_panel_disable, .get_caps = palmte_panel_get_caps, }; static int palmte_panel_probe(struct platform_device *pdev) { omapfb_register_panel(&palmte_panel); return 0; } static int 
palmte_panel_remove(struct platform_device *pdev) { return 0; } static int palmte_panel_suspend(struct platform_device *pdev, pm_message_t mesg) { return 0; } static int palmte_panel_resume(struct platform_device *pdev) { return 0; } struct platform_driver palmte_panel_driver = { .probe = palmte_panel_probe, .remove = palmte_panel_remove, .suspend = palmte_panel_suspend, .resume = palmte_panel_resume, .driver = { .name = "lcd_palmte", .owner = THIS_MODULE, }, }; static int __init palmte_panel_drv_init(void) { return platform_driver_register(&palmte_panel_driver); } static void __exit palmte_panel_drv_cleanup(void) { platform_driver_unregister(&palmte_panel_driver); } module_init(palmte_panel_drv_init); module_exit(palmte_panel_drv_cleanup);
gpl-2.0
CyanogenMod/android_kernel_htc_msm8960
arch/arm/mach-shmobile/setup-r8a7779.c
4771
7618
/* * r8a7779 processor support * * Copyright (C) 2011 Renesas Solutions Corp. * Copyright (C) 2011 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/io.h> #include <linux/serial_sci.h> #include <linux/sh_intc.h> #include <linux/sh_timer.h> #include <mach/hardware.h> #include <mach/irqs.h> #include <mach/r8a7779.h> #include <mach/common.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/mach/map.h> #include <asm/hardware/cache-l2x0.h> static struct map_desc r8a7779_io_desc[] __initdata = { /* 2M entity map for 0xf0000000 (MPCORE) */ { .virtual = 0xf0000000, .pfn = __phys_to_pfn(0xf0000000), .length = SZ_2M, .type = MT_DEVICE_NONSHARED }, /* 16M entity map for 0xfexxxxxx (DMAC-S/HPBREG/INTC2/LRAM/DBSC) */ { .virtual = 0xfe000000, .pfn = __phys_to_pfn(0xfe000000), .length = SZ_16M, .type = MT_DEVICE_NONSHARED }, }; void __init r8a7779_map_io(void) { iotable_init(r8a7779_io_desc, ARRAY_SIZE(r8a7779_io_desc)); } static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe40000, .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, 
.irqs = { gic_spi(88), gic_spi(88), gic_spi(88), gic_spi(88) }, }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .dev = { .platform_data = &scif0_platform_data, }, }; static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffe41000, .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { gic_spi(89), gic_spi(89), gic_spi(89), gic_spi(89) }, }; static struct platform_device scif1_device = { .name = "sh-sci", .id = 1, .dev = { .platform_data = &scif1_platform_data, }, }; static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffe42000, .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { gic_spi(90), gic_spi(90), gic_spi(90), gic_spi(90) }, }; static struct platform_device scif2_device = { .name = "sh-sci", .id = 2, .dev = { .platform_data = &scif2_platform_data, }, }; static struct plat_sci_port scif3_platform_data = { .mapbase = 0xffe43000, .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { gic_spi(91), gic_spi(91), gic_spi(91), gic_spi(91) }, }; static struct platform_device scif3_device = { .name = "sh-sci", .id = 3, .dev = { .platform_data = &scif3_platform_data, }, }; static struct plat_sci_port scif4_platform_data = { .mapbase = 0xffe44000, .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { gic_spi(92), gic_spi(92), gic_spi(92), gic_spi(92) }, }; static struct platform_device scif4_device = { .name = "sh-sci", .id = 4, .dev = { .platform_data = &scif4_platform_data, }, }; static struct plat_sci_port scif5_platform_data = { .mapbase = 0xffe45000, .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, .scbrr_algo_id = 
SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { gic_spi(93), gic_spi(93), gic_spi(93), gic_spi(93) }, }; static struct platform_device scif5_device = { .name = "sh-sci", .id = 5, .dev = { .platform_data = &scif5_platform_data, }, }; /* TMU */ static struct sh_timer_config tmu00_platform_data = { .name = "TMU00", .channel_offset = 0x4, .timer_bit = 0, .clockevent_rating = 200, }; static struct resource tmu00_resources[] = { [0] = { .name = "TMU00", .start = 0xffd80008, .end = 0xffd80013, .flags = IORESOURCE_MEM, }, [1] = { .start = gic_spi(32), .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu00_device = { .name = "sh_tmu", .id = 0, .dev = { .platform_data = &tmu00_platform_data, }, .resource = tmu00_resources, .num_resources = ARRAY_SIZE(tmu00_resources), }; static struct sh_timer_config tmu01_platform_data = { .name = "TMU01", .channel_offset = 0x10, .timer_bit = 1, .clocksource_rating = 200, }; static struct resource tmu01_resources[] = { [0] = { .name = "TMU01", .start = 0xffd80014, .end = 0xffd8001f, .flags = IORESOURCE_MEM, }, [1] = { .start = gic_spi(33), .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu01_device = { .name = "sh_tmu", .id = 1, .dev = { .platform_data = &tmu01_platform_data, }, .resource = tmu01_resources, .num_resources = ARRAY_SIZE(tmu01_resources), }; static struct platform_device *r8a7779_early_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &scif4_device, &scif5_device, &tmu00_device, &tmu01_device, }; static struct platform_device *r8a7779_late_devices[] __initdata = { }; void __init r8a7779_add_standard_devices(void) { #ifdef CONFIG_CACHE_L2X0 /* Early BRESP enable, Shared attribute override enable, 64K*16way */ l2x0_init((void __iomem __force *)(0xf0100000), 0x40470000, 0x82000fff); #endif r8a7779_pm_init(); r8a7779_init_pm_domain(&r8a7779_sh4a); r8a7779_init_pm_domain(&r8a7779_sgx); r8a7779_init_pm_domain(&r8a7779_vdp1); r8a7779_init_pm_domain(&r8a7779_impx3); 
platform_add_devices(r8a7779_early_devices, ARRAY_SIZE(r8a7779_early_devices)); platform_add_devices(r8a7779_late_devices, ARRAY_SIZE(r8a7779_late_devices)); } /* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */ void __init __weak r8a7779_register_twd(void) { } static void __init r8a7779_earlytimer_init(void) { r8a7779_clock_init(); shmobile_earlytimer_init(); r8a7779_register_twd(); } void __init r8a7779_add_early_devices(void) { early_platform_add_devices(r8a7779_early_devices, ARRAY_SIZE(r8a7779_early_devices)); /* Early serial console setup is not included here due to * memory map collisions. The SCIF serial ports in r8a7779 * are difficult to entity map 1:1 due to collision with the * virtual memory range used by the coherent DMA code on ARM. * * Anyone wanting to debug early can remove UPF_IOREMAP from * the sh-sci serial console platform data, adjust mapbase * to a static M:N virt:phys mapping that needs to be added to * the mappings passed with iotable_init() above. * * Then add a call to shmobile_setup_console() from this function. * * As a final step pass earlyprint=sh-sci.2,115200 on the kernel * command line in case of the marzen board. */ /* override timer setup with soc-specific code */ shmobile_timer.init = r8a7779_earlytimer_init; }
gpl-2.0
Dalton-West/kernel-lge-msm8226
kernel/irq/proc.c
6563
11497
/* * linux/kernel/irq/proc.c * * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar * * This file contains the /proc/irq/ handling code. */ #include <linux/irq.h> #include <linux/gfp.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include "internals.h" static struct proc_dir_entry *root_irq_dir; #ifdef CONFIG_SMP static int show_irq_affinity(int type, struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long)m->private); const struct cpumask *mask = desc->irq_data.affinity; #ifdef CONFIG_GENERIC_PENDING_IRQ if (irqd_is_setaffinity_pending(&desc->irq_data)) mask = desc->pending_mask; #endif if (type) seq_cpumask_list(m, mask); else seq_cpumask(m, mask); seq_putc(m, '\n'); return 0; } static int irq_affinity_hint_proc_show(struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long)m->private); unsigned long flags; cpumask_var_t mask; if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; raw_spin_lock_irqsave(&desc->lock, flags); if (desc->affinity_hint) cpumask_copy(mask, desc->affinity_hint); raw_spin_unlock_irqrestore(&desc->lock, flags); seq_cpumask(m, mask); seq_putc(m, '\n'); free_cpumask_var(mask); return 0; } #ifndef is_affinity_mask_valid #define is_affinity_mask_valid(val) 1 #endif int no_irq_affinity; static int irq_affinity_proc_show(struct seq_file *m, void *v) { return show_irq_affinity(0, m, v); } static int irq_affinity_list_proc_show(struct seq_file *m, void *v) { return show_irq_affinity(1, m, v); } static ssize_t write_irq_affinity(int type, struct file *file, const char __user *buffer, size_t count, loff_t *pos) { unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; cpumask_var_t new_value; int err; if (!irq_can_set_affinity(irq) || no_irq_affinity) return -EIO; if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) return -ENOMEM; if (type) err = cpumask_parselist_user(buffer, count, new_value); else err = 
cpumask_parse_user(buffer, count, new_value); if (err) goto free_cpumask; if (!is_affinity_mask_valid(new_value)) { err = -EINVAL; goto free_cpumask; } /* * Do not allow disabling IRQs completely - it's a too easy * way to make the system unusable accidentally :-) At least * one online CPU still has to be targeted. */ if (!cpumask_intersects(new_value, cpu_online_mask)) { /* Special case for empty set - allow the architecture code to set default SMP affinity. */ err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count; } else { irq_set_affinity(irq, new_value); err = count; } free_cpumask: free_cpumask_var(new_value); return err; } static ssize_t irq_affinity_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { return write_irq_affinity(0, file, buffer, count, pos); } static ssize_t irq_affinity_list_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { return write_irq_affinity(1, file, buffer, count, pos); } static int irq_affinity_proc_open(struct inode *inode, struct file *file) { return single_open(file, irq_affinity_proc_show, PDE(inode)->data); } static int irq_affinity_list_proc_open(struct inode *inode, struct file *file) { return single_open(file, irq_affinity_list_proc_show, PDE(inode)->data); } static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file) { return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data); } static const struct file_operations irq_affinity_proc_fops = { .open = irq_affinity_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = irq_affinity_proc_write, }; static const struct file_operations irq_affinity_hint_proc_fops = { .open = irq_affinity_hint_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations irq_affinity_list_proc_fops = { .open = irq_affinity_list_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, 
.write = irq_affinity_list_proc_write, }; static int default_affinity_show(struct seq_file *m, void *v) { seq_cpumask(m, irq_default_affinity); seq_putc(m, '\n'); return 0; } static ssize_t default_affinity_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { cpumask_var_t new_value; int err; if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) return -ENOMEM; err = cpumask_parse_user(buffer, count, new_value); if (err) goto out; if (!is_affinity_mask_valid(new_value)) { err = -EINVAL; goto out; } /* * Do not allow disabling IRQs completely - it's a too easy * way to make the system unusable accidentally :-) At least * one online CPU still has to be targeted. */ if (!cpumask_intersects(new_value, cpu_online_mask)) { err = -EINVAL; goto out; } cpumask_copy(irq_default_affinity, new_value); err = count; out: free_cpumask_var(new_value); return err; } static int default_affinity_open(struct inode *inode, struct file *file) { return single_open(file, default_affinity_show, PDE(inode)->data); } static const struct file_operations default_affinity_proc_fops = { .open = default_affinity_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = default_affinity_write, }; static int irq_node_proc_show(struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long) m->private); seq_printf(m, "%d\n", desc->irq_data.node); return 0; } static int irq_node_proc_open(struct inode *inode, struct file *file) { return single_open(file, irq_node_proc_show, PDE(inode)->data); } static const struct file_operations irq_node_proc_fops = { .open = irq_node_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif static int irq_spurious_proc_show(struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long) m->private); seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n", desc->irq_count, desc->irqs_unhandled, jiffies_to_msecs(desc->last_unhandled)); return 0; } static int 
irq_spurious_proc_open(struct inode *inode, struct file *file) { return single_open(file, irq_spurious_proc_show, PDE(inode)->data); } static const struct file_operations irq_spurious_proc_fops = { .open = irq_spurious_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #define MAX_NAMELEN 128 static int name_unique(unsigned int irq, struct irqaction *new_action) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; unsigned long flags; int ret = 1; raw_spin_lock_irqsave(&desc->lock, flags); for (action = desc->action ; action; action = action->next) { if ((action != new_action) && action->name && !strcmp(new_action->name, action->name)) { ret = 0; break; } } raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; } void register_handler_proc(unsigned int irq, struct irqaction *action) { char name [MAX_NAMELEN]; struct irq_desc *desc = irq_to_desc(irq); if (!desc->dir || action->dir || !action->name || !name_unique(irq, action)) return; memset(name, 0, MAX_NAMELEN); snprintf(name, MAX_NAMELEN, "%s", action->name); /* create /proc/irq/1234/handler/ */ action->dir = proc_mkdir(name, desc->dir); } #undef MAX_NAMELEN #define MAX_NAMELEN 10 void register_irq_proc(unsigned int irq, struct irq_desc *desc) { char name [MAX_NAMELEN]; if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) return; memset(name, 0, MAX_NAMELEN); sprintf(name, "%d", irq); /* create /proc/irq/1234 */ desc->dir = proc_mkdir(name, root_irq_dir); if (!desc->dir) return; #ifdef CONFIG_SMP /* create /proc/irq/<irq>/smp_affinity */ proc_create_data("smp_affinity", 0600, desc->dir, &irq_affinity_proc_fops, (void *)(long)irq); /* create /proc/irq/<irq>/affinity_hint */ proc_create_data("affinity_hint", 0400, desc->dir, &irq_affinity_hint_proc_fops, (void *)(long)irq); /* create /proc/irq/<irq>/smp_affinity_list */ proc_create_data("smp_affinity_list", 0600, desc->dir, &irq_affinity_list_proc_fops, (void *)(long)irq); 
proc_create_data("node", 0444, desc->dir, &irq_node_proc_fops, (void *)(long)irq); #endif proc_create_data("spurious", 0444, desc->dir, &irq_spurious_proc_fops, (void *)(long)irq); } void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { char name [MAX_NAMELEN]; if (!root_irq_dir || !desc->dir) return; #ifdef CONFIG_SMP remove_proc_entry("smp_affinity", desc->dir); remove_proc_entry("affinity_hint", desc->dir); remove_proc_entry("smp_affinity_list", desc->dir); remove_proc_entry("node", desc->dir); #endif remove_proc_entry("spurious", desc->dir); memset(name, 0, MAX_NAMELEN); sprintf(name, "%u", irq); remove_proc_entry(name, root_irq_dir); } #undef MAX_NAMELEN void unregister_handler_proc(unsigned int irq, struct irqaction *action) { if (action->dir) { struct irq_desc *desc = irq_to_desc(irq); remove_proc_entry(action->dir->name, desc->dir); } } static void register_default_affinity_proc(void) { #ifdef CONFIG_SMP proc_create("irq/default_smp_affinity", 0600, NULL, &default_affinity_proc_fops); #endif } void init_irq_proc(void) { unsigned int irq; struct irq_desc *desc; /* create /proc/irq */ root_irq_dir = proc_mkdir("irq", NULL); if (!root_irq_dir) return; register_default_affinity_proc(); /* * Create entries for all existing IRQs. 
*/ for_each_irq_desc(irq, desc) { if (!desc) continue; register_irq_proc(irq, desc); } } #ifdef CONFIG_GENERIC_IRQ_SHOW int __weak arch_show_interrupts(struct seq_file *p, int prec) { return 0; } #ifndef ACTUAL_NR_IRQS # define ACTUAL_NR_IRQS nr_irqs #endif int show_interrupts(struct seq_file *p, void *v) { static int prec; unsigned long flags, any_count = 0; int i = *(loff_t *) v, j; struct irqaction *action; struct irq_desc *desc; if (i > ACTUAL_NR_IRQS) return 0; if (i == ACTUAL_NR_IRQS) return arch_show_interrupts(p, prec); /* print header and calculate the width of the first column */ if (i == 0) { for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) j *= 10; seq_printf(p, "%*s", prec + 8, ""); for_each_online_cpu(j) seq_printf(p, "CPU%-8d", j); seq_putc(p, '\n'); } desc = irq_to_desc(i); if (!desc) return 0; raw_spin_lock_irqsave(&desc->lock, flags); for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); action = desc->action; if (!action && !any_count) goto out; seq_printf(p, "%*d: ", prec, i); for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); if (desc->irq_data.chip) { if (desc->irq_data.chip->irq_print_chip) desc->irq_data.chip->irq_print_chip(&desc->irq_data, p); else if (desc->irq_data.chip->name) seq_printf(p, " %8s", desc->irq_data.chip->name); else seq_printf(p, " %8s", "-"); } else { seq_printf(p, " %8s", "None"); } #ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge"); #endif if (desc->name) seq_printf(p, "-%-8s", desc->name); if (action) { seq_printf(p, " %s", action->name); while ((action = action->next) != NULL) seq_printf(p, ", %s", action->name); } seq_putc(p, '\n'); out: raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } #endif
gpl-2.0
penhoi/linux-3.13.11.lbrpmu
arch/powerpc/platforms/cell/axon_msi.c
7331
11916
/* * Copyright 2007, Michael Ellerman, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/msi.h> #include <linux/export.h> #include <linux/of_platform.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <asm/dcr.h> #include <asm/machdep.h> #include <asm/prom.h> /* * MSIC registers, specified as offsets from dcr_base */ #define MSIC_CTRL_REG 0x0 /* Base Address registers specify FIFO location in BE memory */ #define MSIC_BASE_ADDR_HI_REG 0x3 #define MSIC_BASE_ADDR_LO_REG 0x4 /* Hold the read/write offsets into the FIFO */ #define MSIC_READ_OFFSET_REG 0x5 #define MSIC_WRITE_OFFSET_REG 0x6 /* MSIC control register flags */ #define MSIC_CTRL_ENABLE 0x0001 #define MSIC_CTRL_FIFO_FULL_ENABLE 0x0002 #define MSIC_CTRL_IRQ_ENABLE 0x0008 #define MSIC_CTRL_FULL_STOP_ENABLE 0x0010 /* * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB. * Currently we're using a 64KB FIFO size. */ #define MSIC_FIFO_SIZE_SHIFT 16 #define MSIC_FIFO_SIZE_BYTES (1 << MSIC_FIFO_SIZE_SHIFT) /* * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits * 8-9 of the MSIC control reg. */ #define MSIC_CTRL_FIFO_SIZE (((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300) /* * We need to mask the read/write offsets to make sure they stay within * the bounds of the FIFO. Also they should always be 16-byte aligned. 
*/ #define MSIC_FIFO_SIZE_MASK ((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu) /* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */ #define MSIC_FIFO_ENTRY_SIZE 0x10 struct axon_msic { struct irq_domain *irq_domain; __le32 *fifo_virt; dma_addr_t fifo_phys; dcr_host_t dcr_host; u32 read_offset; #ifdef DEBUG u32 __iomem *trigger; #endif }; #ifdef DEBUG void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic); #else static inline void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic) { } #endif static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) { pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n); dcr_write(msic->dcr_host, dcr_n, val); } static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct axon_msic *msic = irq_get_handler_data(irq); u32 write_offset, msi; int idx; int retry = 0; write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG); pr_devel("axon_msi: original write_offset 0x%x\n", write_offset); /* write_offset doesn't wrap properly, so we have to mask it */ write_offset &= MSIC_FIFO_SIZE_MASK; while (msic->read_offset != write_offset && retry < 100) { idx = msic->read_offset / sizeof(__le32); msi = le32_to_cpu(msic->fifo_virt[idx]); msi &= 0xFFFF; pr_devel("axon_msi: woff %x roff %x msi %x\n", write_offset, msic->read_offset, msi); if (msi < nr_irqs && irq_get_chip_data(msi) == msic) { generic_handle_irq(msi); msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); } else { /* * Reading the MSIC_WRITE_OFFSET_REG does not * reliably flush the outstanding DMA to the * FIFO buffer. Here we were reading stale * data, so we need to retry. 
*/ udelay(1); retry++; pr_devel("axon_msi: invalid irq 0x%x!\n", msi); continue; } if (retry) { pr_devel("axon_msi: late irq 0x%x, retry %d\n", msi, retry); retry = 0; } msic->read_offset += MSIC_FIFO_ENTRY_SIZE; msic->read_offset &= MSIC_FIFO_SIZE_MASK; } if (retry) { printk(KERN_WARNING "axon_msi: irq timed out\n"); msic->read_offset += MSIC_FIFO_ENTRY_SIZE; msic->read_offset &= MSIC_FIFO_SIZE_MASK; } chip->irq_eoi(&desc->irq_data); } static struct axon_msic *find_msi_translator(struct pci_dev *dev) { struct irq_domain *irq_domain; struct device_node *dn, *tmp; const phandle *ph; struct axon_msic *msic = NULL; dn = of_node_get(pci_device_to_OF_node(dev)); if (!dn) { dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); return NULL; } for (; dn; dn = of_get_next_parent(dn)) { ph = of_get_property(dn, "msi-translator", NULL); if (ph) break; } if (!ph) { dev_dbg(&dev->dev, "axon_msi: no msi-translator property found\n"); goto out_error; } tmp = dn; dn = of_find_node_by_phandle(*ph); of_node_put(tmp); if (!dn) { dev_dbg(&dev->dev, "axon_msi: msi-translator doesn't point to a node\n"); goto out_error; } irq_domain = irq_find_host(dn); if (!irq_domain) { dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n", dn->full_name); goto out_error; } msic = irq_domain->host_data; out_error: of_node_put(dn); return msic; } static int axon_msi_check_device(struct pci_dev *dev, int nvec, int type) { if (!find_msi_translator(dev)) return -ENODEV; return 0; } static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg) { struct device_node *dn; struct msi_desc *entry; int len; const u32 *prop; dn = of_node_get(pci_device_to_OF_node(dev)); if (!dn) { dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); return -ENODEV; } entry = list_first_entry(&dev->msi_list, struct msi_desc, list); for (; dn; dn = of_get_next_parent(dn)) { if (entry->msi_attrib.is_64) { prop = of_get_property(dn, "msi-address-64", &len); if (prop) break; } prop = of_get_property(dn, 
"msi-address-32", &len); if (prop) break; } if (!prop) { dev_dbg(&dev->dev, "axon_msi: no msi-address-(32|64) properties found\n"); return -ENOENT; } switch (len) { case 8: msg->address_hi = prop[0]; msg->address_lo = prop[1]; break; case 4: msg->address_hi = 0; msg->address_lo = prop[0]; break; default: dev_dbg(&dev->dev, "axon_msi: malformed msi-address-(32|64) property\n"); of_node_put(dn); return -EINVAL; } of_node_put(dn); return 0; } static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { unsigned int virq, rc; struct msi_desc *entry; struct msi_msg msg; struct axon_msic *msic; msic = find_msi_translator(dev); if (!msic) return -ENODEV; rc = setup_msi_msg_address(dev, &msg); if (rc) return rc; list_for_each_entry(entry, &dev->msi_list, list) { virq = irq_create_direct_mapping(msic->irq_domain); if (virq == NO_IRQ) { dev_warn(&dev->dev, "axon_msi: virq allocation failed!\n"); return -1; } dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); irq_set_msi_desc(virq, entry); msg.data = virq; write_msi_msg(virq, &msg); } return 0; } static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) { struct msi_desc *entry; dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n"); list_for_each_entry(entry, &dev->msi_list, list) { if (entry->irq == NO_IRQ) continue; irq_set_msi_desc(entry->irq, NULL); irq_dispose_mapping(entry->irq); } } static struct irq_chip msic_irq_chip = { .irq_mask = mask_msi_irq, .irq_unmask = unmask_msi_irq, .irq_shutdown = mask_msi_irq, .name = "AXON-MSI", }; static int msic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_data(virq, h->host_data); irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); return 0; } static const struct irq_domain_ops msic_host_ops = { .map = msic_host_map, }; static void axon_msi_shutdown(struct platform_device *device) { struct axon_msic *msic = dev_get_drvdata(&device->dev); u32 tmp; pr_devel("axon_msi: disabling %s\n", 
msic->irq_domain->of_node->full_name); tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; msic_dcr_write(msic, MSIC_CTRL_REG, tmp); } static int axon_msi_probe(struct platform_device *device) { struct device_node *dn = device->dev.of_node; struct axon_msic *msic; unsigned int virq; int dcr_base, dcr_len; pr_devel("axon_msi: setting up dn %s\n", dn->full_name); msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL); if (!msic) { printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n", dn->full_name); goto out; } dcr_base = dcr_resource_start(dn, 0); dcr_len = dcr_resource_len(dn, 0); if (dcr_base == 0 || dcr_len == 0) { printk(KERN_ERR "axon_msi: couldn't parse dcr properties on %s\n", dn->full_name); goto out_free_msic; } msic->dcr_host = dcr_map(dn, dcr_base, dcr_len); if (!DCR_MAP_OK(msic->dcr_host)) { printk(KERN_ERR "axon_msi: dcr_map failed for %s\n", dn->full_name); goto out_free_msic; } msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, &msic->fifo_phys, GFP_KERNEL); if (!msic->fifo_virt) { printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n", dn->full_name); goto out_free_msic; } virq = irq_of_parse_and_map(dn, 0); if (virq == NO_IRQ) { printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n", dn->full_name); goto out_free_fifo; } memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */ msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic); if (!msic->irq_domain) { printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n", dn->full_name); goto out_free_fifo; } irq_set_handler_data(virq, msic); irq_set_chained_handler(virq, axon_msi_cascade); pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); /* Enable the MSIC hardware */ msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32); msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG, msic->fifo_phys & 0xFFFFFFFF); 
msic_dcr_write(msic, MSIC_CTRL_REG, MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE | MSIC_CTRL_FIFO_SIZE); msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG) & MSIC_FIFO_SIZE_MASK; dev_set_drvdata(&device->dev, msic); ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs; ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs; ppc_md.msi_check_device = axon_msi_check_device; axon_msi_debug_setup(dn, msic); printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name); return 0; out_free_fifo: dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt, msic->fifo_phys); out_free_msic: kfree(msic); out: return -1; } static const struct of_device_id axon_msi_device_id[] = { { .compatible = "ibm,axon-msic" }, {} }; static struct platform_driver axon_msi_driver = { .probe = axon_msi_probe, .shutdown = axon_msi_shutdown, .driver = { .name = "axon-msi", .owner = THIS_MODULE, .of_match_table = axon_msi_device_id, }, }; static int __init axon_msi_init(void) { return platform_driver_register(&axon_msi_driver); } subsys_initcall(axon_msi_init); #ifdef DEBUG static int msic_set(void *data, u64 val) { struct axon_msic *msic = data; out_le32(msic->trigger, val); return 0; } static int msic_get(void *data, u64 *val) { *val = 0; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n"); void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic) { char name[8]; u64 addr; addr = of_translate_address(dn, of_get_property(dn, "reg", NULL)); if (addr == OF_BAD_ADDR) { pr_devel("axon_msi: couldn't translate reg property\n"); return; } msic->trigger = ioremap(addr, 0x4); if (!msic->trigger) { pr_devel("axon_msi: ioremap failed\n"); return; } snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn)); if (!debugfs_create_file(name, 0600, powerpc_debugfs_root, msic, &fops_msic)) { pr_devel("axon_msi: debugfs_create_file failed!\n"); return; } } #endif /* DEBUG */
gpl-2.0
TeamBliss-Devices/kernel_xiaomi_cancro
arch/alpha/kernel/err_marvel.c
11939
37488
/* * linux/arch/alpha/kernel/err_marvel.c * * Copyright (C) 2001 Jeff Wiedemeier (Compaq Computer Corporation) * */ #include <linux/init.h> #include <linux/pci.h> #include <linux/sched.h> #include <asm/io.h> #include <asm/console.h> #include <asm/core_marvel.h> #include <asm/hwrpb.h> #include <asm/smp.h> #include <asm/err_common.h> #include <asm/err_ev7.h> #include "err_impl.h" #include "proto.h" static void marvel_print_680_frame(struct ev7_lf_subpackets *lf_subpackets) { #ifdef CONFIG_VERBOSE_MCHECK struct ev7_pal_environmental_subpacket *env; struct { int type; char *name; } ev_packets[] = { { EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE, "Ambient Temperature" }, { EL_TYPE__PAL__ENV__AIRMOVER_FAN, "AirMover / Fan" }, { EL_TYPE__PAL__ENV__VOLTAGE, "Voltage" }, { EL_TYPE__PAL__ENV__INTRUSION, "Intrusion" }, { EL_TYPE__PAL__ENV__POWER_SUPPLY, "Power Supply" }, { EL_TYPE__PAL__ENV__LAN, "LAN" }, { EL_TYPE__PAL__ENV__HOT_PLUG, "Hot Plug" }, { 0, NULL } }; int i; for (i = 0; ev_packets[i].type != 0; i++) { env = lf_subpackets->env[ev7_lf_env_index(ev_packets[i].type)]; if (!env) continue; printk("%s**%s event (cabinet %d, drawer %d)\n", err_print_prefix, ev_packets[i].name, env->cabinet, env->drawer); printk("%s Module Type: 0x%x - Unit ID 0x%x - " "Condition 0x%x\n", err_print_prefix, env->module_type, env->unit_id, env->condition); } #endif /* CONFIG_VERBOSE_MCHECK */ } static int marvel_process_680_frame(struct ev7_lf_subpackets *lf_subpackets, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; int i; for (i = ev7_lf_env_index(EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE); i <= ev7_lf_env_index(EL_TYPE__PAL__ENV__HOT_PLUG); i++) { if (lf_subpackets->env[i]) status = MCHK_DISPOSITION_REPORT; } if (print) marvel_print_680_frame(lf_subpackets); return status; } #ifdef CONFIG_VERBOSE_MCHECK static void marvel_print_err_cyc(u64 err_cyc) { static char *packet_desc[] = { "No Error", "UNKNOWN", "1 cycle (1 or 2 flit packet)", "2 cycles (3 flit packet)", "9 cycles (18 flit 
packet)", "10 cycles (19 flit packet)", "UNKNOWN", "UNKNOWN", "UNKNOWN" }; #define IO7__ERR_CYC__ODD_FLT (1UL << 0) #define IO7__ERR_CYC__EVN_FLT (1UL << 1) #define IO7__ERR_CYC__PACKET__S (6) #define IO7__ERR_CYC__PACKET__M (0x7) #define IO7__ERR_CYC__LOC (1UL << 5) #define IO7__ERR_CYC__CYCLE__S (2) #define IO7__ERR_CYC__CYCLE__M (0x7) printk("%s Packet In Error: %s\n" "%s Error in %s, cycle %lld%s%s\n", err_print_prefix, packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)], err_print_prefix, (err_cyc & IO7__ERR_CYC__LOC) ? "DATA" : "HEADER", EXTRACT(err_cyc, IO7__ERR_CYC__CYCLE), (err_cyc & IO7__ERR_CYC__ODD_FLT) ? " [ODD Flit]": "", (err_cyc & IO7__ERR_CYC__EVN_FLT) ? " [Even Flit]": ""); } static void marvel_print_po7_crrct_sym(u64 crrct_sym) { #define IO7__PO7_CRRCT_SYM__SYN__S (0) #define IO7__PO7_CRRCT_SYM__SYN__M (0x7f) #define IO7__PO7_CRRCT_SYM__ERR_CYC__S (7) /* ERR_CYC + ODD_FLT + EVN_FLT */ #define IO7__PO7_CRRCT_SYM__ERR_CYC__M (0x1ff) printk("%s Correctable Error Symptoms:\n" "%s Syndrome: 0x%llx\n", err_print_prefix, err_print_prefix, EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__SYN)); marvel_print_err_cyc(EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__ERR_CYC)); } static void marvel_print_po7_uncrr_sym(u64 uncrr_sym, u64 valid_mask) { static char *clk_names[] = { "_h[0]", "_h[1]", "_n[0]", "_n[1]" }; static char *clk_decode[] = { "No Error", "One extra rising edge", "Two extra rising edges", "Lost one clock" }; static char *port_names[] = { "Port 0", "Port 1", "Port 2", "Port 3", "Unknown Port", "Unknown Port", "Unknown Port", "Port 7" }; int scratch, i; #define IO7__PO7_UNCRR_SYM__SYN__S (0) #define IO7__PO7_UNCRR_SYM__SYN__M (0x7f) #define IO7__PO7_UNCRR_SYM__ERR_CYC__S (7) /* ERR_CYC + ODD_FLT... */ #define IO7__PO7_UNCRR_SYM__ERR_CYC__M (0x1ff) /* ... 
+ EVN_FLT */ #define IO7__PO7_UNCRR_SYM__CLK__S (16) #define IO7__PO7_UNCRR_SYM__CLK__M (0xff) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ (1UL << 24) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO (1UL << 25) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO (1UL << 26) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK (1UL << 27) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK (1UL << 28) #define IO7__PO7_UNCRR_SYM__OVF__READIO (1UL << 29) #define IO7__PO7_UNCRR_SYM__OVF__WRITEIO (1UL << 30) #define IO7__PO7_UNCRR_SYM__OVF__FWD (1UL << 31) #define IO7__PO7_UNCRR_SYM__VICTIM_SP__S (32) #define IO7__PO7_UNCRR_SYM__VICTIM_SP__M (0xff) #define IO7__PO7_UNCRR_SYM__DETECT_SP__S (40) #define IO7__PO7_UNCRR_SYM__DETECT_SP__M (0xff) #define IO7__PO7_UNCRR_SYM__STRV_VTR__S (48) #define IO7__PO7_UNCRR_SYM__STRV_VTR__M (0x3ff) #define IO7__STRV_VTR__LSI__INTX__S (0) #define IO7__STRV_VTR__LSI__INTX__M (0x3) #define IO7__STRV_VTR__LSI__SLOT__S (2) #define IO7__STRV_VTR__LSI__SLOT__M (0x7) #define IO7__STRV_VTR__LSI__BUS__S (5) #define IO7__STRV_VTR__LSI__BUS__M (0x3) #define IO7__STRV_VTR__MSI__INTNUM__S (0) #define IO7__STRV_VTR__MSI__INTNUM__M (0x1ff) #define IO7__STRV_VTR__IS_MSI (1UL << 9) printk("%s Uncorrectable Error Symptoms:\n", err_print_prefix); uncrr_sym &= valid_mask; if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__SYN)) printk("%s Syndrome: 0x%llx\n", err_print_prefix, EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__SYN)); if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__ERR_CYC)) marvel_print_err_cyc(EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__ERR_CYC)); scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__CLK); for (i = 0; i < 4; i++, scratch >>= 2) { if (scratch & 0x3) printk("%s Clock %s: %s\n", err_print_prefix, clk_names[i], clk_decode[scratch & 0x3]); } if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ) printk("%s REQ Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO) printk("%s RIO Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & 
IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO) printk("%s WIO Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK) printk("%s BLK Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK) printk("%s NBK Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__READIO) printk("%s Read I/O Buffer Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__WRITEIO) printk("%s Write I/O Buffer Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__FWD) printk("%s FWD Buffer Overflow\n", err_print_prefix); if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__VICTIM_SP))) { int lost = scratch & (1UL << 4); scratch &= ~lost; for (i = 0; i < 8; i++, scratch >>= 1) { if (!(scratch & 1)) continue; printk("%s Error Response sent to %s", err_print_prefix, port_names[i]); } if (lost) printk("%s Lost Error sent somewhere else\n", err_print_prefix); } if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__DETECT_SP))) { for (i = 0; i < 8; i++, scratch >>= 1) { if (!(scratch & 1)) continue; printk("%s Error Reported by %s", err_print_prefix, port_names[i]); } } if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__STRV_VTR)) { char starvation_message[80]; scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__STRV_VTR); if (scratch & IO7__STRV_VTR__IS_MSI) sprintf(starvation_message, "MSI Interrupt 0x%x", EXTRACT(scratch, IO7__STRV_VTR__MSI__INTNUM)); else sprintf(starvation_message, "LSI INT%c for Bus:Slot (%d:%d)\n", 'A' + EXTRACT(scratch, IO7__STRV_VTR__LSI__INTX), EXTRACT(scratch, IO7__STRV_VTR__LSI__BUS), EXTRACT(scratch, IO7__STRV_VTR__LSI__SLOT)); printk("%s Starvation Int Trigger By: %s\n", err_print_prefix, starvation_message); } } static void marvel_print_po7_ugbge_sym(u64 ugbge_sym) { char opcode_str[10]; #define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__S (6) #define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__M (0xfffffffful) #define 
IO7__PO7_UGBGE_SYM__UPH_OPCODE__S (40) #define IO7__PO7_UGBGE_SYM__UPH_OPCODE__M (0xff) #define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__S (48) #define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__M (0xf) #define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__S (52) #define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__M (0x7ff) #define IO7__PO7_UGBGE_SYM__VALID (1UL << 63) if (!(ugbge_sym & IO7__PO7_UGBGE_SYM__VALID)) return; switch(EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) { case 0x51: sprintf(opcode_str, "Wr32"); break; case 0x50: sprintf(opcode_str, "WrQW"); break; case 0x54: sprintf(opcode_str, "WrIPR"); break; case 0xD8: sprintf(opcode_str, "Victim"); break; case 0xC5: sprintf(opcode_str, "BlkIO"); break; default: sprintf(opcode_str, "0x%llx\n", EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)); break; } printk("%s Up Hose Garbage Symptom:\n" "%s Source Port: %lld - Dest PID: %lld - OpCode: %s\n", err_print_prefix, err_print_prefix, EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT), EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_DEST_PID), opcode_str); if (0xC5 != EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) printk("%s Packet Offset 0x%08llx\n", err_print_prefix, EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_PKT_OFF)); } static void marvel_print_po7_err_sum(struct ev7_pal_io_subpacket *io) { u64 uncrr_sym_valid = 0; #define IO7__PO7_ERRSUM__CR_SBE (1UL << 32) #define IO7__PO7_ERRSUM__CR_SBE2 (1UL << 33) #define IO7__PO7_ERRSUM__CR_PIO_WBYTE (1UL << 34) #define IO7__PO7_ERRSUM__CR_CSR_NXM (1UL << 35) #define IO7__PO7_ERRSUM__CR_RPID_ACV (1UL << 36) #define IO7__PO7_ERRSUM__CR_RSP_NXM (1UL << 37) #define IO7__PO7_ERRSUM__CR_ERR_RESP (1UL << 38) #define IO7__PO7_ERRSUM__CR_CLK_DERR (1UL << 39) #define IO7__PO7_ERRSUM__CR_DAT_DBE (1UL << 40) #define IO7__PO7_ERRSUM__CR_DAT_GRBG (1UL << 41) #define IO7__PO7_ERRSUM__MAF_TO (1UL << 42) #define IO7__PO7_ERRSUM__UGBGE (1UL << 43) #define IO7__PO7_ERRSUM__UN_MAF_LOST (1UL << 44) #define IO7__PO7_ERRSUM__UN_PKT_OVF (1UL << 45) #define 
IO7__PO7_ERRSUM__UN_CDT_OVF (1UL << 46) #define IO7__PO7_ERRSUM__UN_DEALLOC (1UL << 47) #define IO7__PO7_ERRSUM__BH_CDT_TO (1UL << 51) #define IO7__PO7_ERRSUM__BH_CLK_HDR (1UL << 52) #define IO7__PO7_ERRSUM__BH_DBE_HDR (1UL << 53) #define IO7__PO7_ERRSUM__BH_GBG_HDR (1UL << 54) #define IO7__PO7_ERRSUM__BH_BAD_CMD (1UL << 55) #define IO7__PO7_ERRSUM__HLT_INT (1UL << 56) #define IO7__PO7_ERRSUM__HP_INT (1UL << 57) #define IO7__PO7_ERRSUM__CRD_INT (1UL << 58) #define IO7__PO7_ERRSUM__STV_INT (1UL << 59) #define IO7__PO7_ERRSUM__HRD_INT (1UL << 60) #define IO7__PO7_ERRSUM__BH_SUM (1UL << 61) #define IO7__PO7_ERRSUM__ERR_LST (1UL << 62) #define IO7__PO7_ERRSUM__ERR_VALID (1UL << 63) #define IO7__PO7_ERRSUM__ERR_MASK (IO7__PO7_ERRSUM__ERR_VALID | \ IO7__PO7_ERRSUM__CR_SBE) /* * Single bit errors aren't covered by ERR_VALID. */ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE) { printk("%s %sSingle Bit Error(s) detected/corrected\n", err_print_prefix, (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE2) ? "Multiple " : ""); marvel_print_po7_crrct_sym(io->po7_crrct_sym); } /* * Neither are the interrupt status bits */ if (io->po7_error_sum & IO7__PO7_ERRSUM__HLT_INT) printk("%s Halt Interrupt posted", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__HP_INT) { printk("%s Hot Plug Event Interrupt posted", err_print_prefix); uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP); } if (io->po7_error_sum & IO7__PO7_ERRSUM__CRD_INT) printk("%s Correctable Error Interrupt posted", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__STV_INT) { printk("%s Starvation Interrupt posted", err_print_prefix); uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__STRV_VTR); } if (io->po7_error_sum & IO7__PO7_ERRSUM__HRD_INT) { printk("%s Hard Error Interrupt posted", err_print_prefix); uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP); } /* * Everything else is valid only with ERR_VALID, so skip to the end * (uncrr_sym check) unless ERR_VALID is set. 
*/ if (!(io->po7_error_sum & IO7__PO7_ERRSUM__ERR_VALID)) goto check_uncrr_sym; /* * Since ERR_VALID is set, VICTIM_SP in uncrr_sym is valid. * For bits [29:0] to also be valid, the following bits must * not be set: * CR_PIO_WBYTE CR_CSR_NXM CR_RSP_NXM * CR_ERR_RESP MAF_TO */ uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__VICTIM_SP); if (!(io->po7_error_sum & (IO7__PO7_ERRSUM__CR_PIO_WBYTE | IO7__PO7_ERRSUM__CR_CSR_NXM | IO7__PO7_ERRSUM__CR_RSP_NXM | IO7__PO7_ERRSUM__CR_ERR_RESP | IO7__PO7_ERRSUM__MAF_TO))) uncrr_sym_valid |= 0x3ffffffful; if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_PIO_WBYTE) printk("%s Write byte into IO7 CSR\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CSR_NXM) printk("%s PIO to non-existent CSR\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RPID_ACV) printk("%s Bus Requester PID (Access Violation)\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RSP_NXM) printk("%s Received NXM response from EV7\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_ERR_RESP) printk("%s Received ERROR RESPONSE\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CLK_DERR) printk("%s Clock error on data flit\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_DBE) printk("%s Double Bit Error Data Error Detected\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_GRBG) printk("%s Garbage Encoding Detected on the data\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__UGBGE) { printk("%s Garbage Encoding sent up hose\n", err_print_prefix); marvel_print_po7_ugbge_sym(io->po7_ugbge_sym); } if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_MAF_LOST) printk("%s Orphan response (unexpected response)\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_PKT_OVF) printk("%s Down hose packet overflow\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_CDT_OVF) printk("%s Down hose credit overflow\n", 
err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_DEALLOC) printk("%s Unexpected or bad dealloc field\n", err_print_prefix); /* * The black hole events. */ if (io->po7_error_sum & IO7__PO7_ERRSUM__MAF_TO) printk("%s BLACK HOLE: Timeout for all responses\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CDT_TO) printk("%s BLACK HOLE: Credit Timeout\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CLK_HDR) printk("%s BLACK HOLE: Clock check on header\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_DBE_HDR) printk("%s BLACK HOLE: Uncorrectable Error on header\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_GBG_HDR) printk("%s BLACK HOLE: Garbage on header\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_BAD_CMD) printk("%s BLACK HOLE: Bad EV7 command\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__ERR_LST) printk("%s Lost Error\n", err_print_prefix); printk("%s Failing Packet:\n" "%s Cycle 1: %016llx\n" "%s Cycle 2: %016llx\n", err_print_prefix, err_print_prefix, io->po7_err_pkt0, err_print_prefix, io->po7_err_pkt1); /* * If there are any valid bits in UNCRR sym for this err, * print UNCRR_SYM as well. 
*/ check_uncrr_sym: if (uncrr_sym_valid) marvel_print_po7_uncrr_sym(io->po7_uncrr_sym, uncrr_sym_valid); } static void marvel_print_pox_tlb_err(u64 tlb_err) { static char *tlb_errors[] = { "No Error", "North Port Signaled Error fetching TLB entry", "PTE invalid or UCC or GBG error on this entry", "Address did not hit any DMA window" }; #define IO7__POX_TLBERR__ERR_VALID (1UL << 63) #define IO7__POX_TLBERR__ERRCODE__S (0) #define IO7__POX_TLBERR__ERRCODE__M (0x3) #define IO7__POX_TLBERR__ERR_TLB_PTR__S (3) #define IO7__POX_TLBERR__ERR_TLB_PTR__M (0x7) #define IO7__POX_TLBERR__FADDR__S (6) #define IO7__POX_TLBERR__FADDR__M (0x3fffffffffful) if (!(tlb_err & IO7__POX_TLBERR__ERR_VALID)) return; printk("%s TLB Error on index 0x%llx:\n" "%s - %s\n" "%s - Addr: 0x%016llx\n", err_print_prefix, EXTRACT(tlb_err, IO7__POX_TLBERR__ERR_TLB_PTR), err_print_prefix, tlb_errors[EXTRACT(tlb_err, IO7__POX_TLBERR__ERRCODE)], err_print_prefix, EXTRACT(tlb_err, IO7__POX_TLBERR__FADDR) << 6); } static void marvel_print_pox_spl_cmplt(u64 spl_cmplt) { char message[80]; #define IO7__POX_SPLCMPLT__MESSAGE__S (0) #define IO7__POX_SPLCMPLT__MESSAGE__M (0x0fffffffful) #define IO7__POX_SPLCMPLT__SOURCE_BUS__S (40) #define IO7__POX_SPLCMPLT__SOURCE_BUS__M (0xfful) #define IO7__POX_SPLCMPLT__SOURCE_DEV__S (35) #define IO7__POX_SPLCMPLT__SOURCE_DEV__M (0x1ful) #define IO7__POX_SPLCMPLT__SOURCE_FUNC__S (32) #define IO7__POX_SPLCMPLT__SOURCE_FUNC__M (0x07ul) #define IO7__POX_SPLCMPLT__MSG_CLASS__S (28) #define IO7__POX_SPLCMPLT__MSG_CLASS__M (0xf) #define IO7__POX_SPLCMPLT__MSG_INDEX__S (20) #define IO7__POX_SPLCMPLT__MSG_INDEX__M (0xff) #define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__S (20) #define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__M (0xfff) #define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__S (12) #define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__M (0x7f) #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__S (0) #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff) printk("%s Split Completion Error:\n" "%s Source 
(Bus:Dev:Func): %lld:%lld:%lld\n", err_print_prefix, err_print_prefix, EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS), EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_DEV), EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_FUNC)); switch(EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MSG_CLASSINDEX)) { case 0x000: sprintf(message, "Normal completion"); break; case 0x100: sprintf(message, "Bridge - Master Abort"); break; case 0x101: sprintf(message, "Bridge - Target Abort"); break; case 0x102: sprintf(message, "Bridge - Uncorrectable Write Data Error"); break; case 0x200: sprintf(message, "Byte Count Out of Range"); break; case 0x201: sprintf(message, "Uncorrectable Split Write Data Error"); break; default: sprintf(message, "%08llx\n", EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MESSAGE)); break; } printk("%s Message: %s\n", err_print_prefix, message); } static void marvel_print_pox_trans_sum(u64 trans_sum) { static const char * const pcix_cmd[] = { "Interrupt Acknowledge", "Special Cycle", "I/O Read", "I/O Write", "Reserved", "Reserved / Device ID Message", "Memory Read", "Memory Write", "Reserved / Alias to Memory Read Block", "Reserved / Alias to Memory Write Block", "Configuration Read", "Configuration Write", "Memory Read Multiple / Split Completion", "Dual Address Cycle", "Memory Read Line / Memory Read Block", "Memory Write and Invalidate / Memory Write Block" }; #define IO7__POX_TRANSUM__PCI_ADDR__S (0) #define IO7__POX_TRANSUM__PCI_ADDR__M (0x3fffffffffffful) #define IO7__POX_TRANSUM__DAC (1UL << 50) #define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__S (52) #define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__M (0xf) #define IO7__POX_TRANSUM__PCIX_CMD__S (56) #define IO7__POX_TRANSUM__PCIX_CMD__M (0xf) #define IO7__POX_TRANSUM__ERR_VALID (1UL << 63) if (!(trans_sum & IO7__POX_TRANSUM__ERR_VALID)) return; printk("%s Transaction Summary:\n" "%s Command: 0x%llx - %s\n" "%s Address: 0x%016llx%s\n" "%s PCI-X Master Slot: 0x%llx\n", err_print_prefix, err_print_prefix, EXTRACT(trans_sum, 
IO7__POX_TRANSUM__PCIX_CMD), pcix_cmd[EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD)], err_print_prefix, EXTRACT(trans_sum, IO7__POX_TRANSUM__PCI_ADDR), (trans_sum & IO7__POX_TRANSUM__DAC) ? " (DAC)" : "", err_print_prefix, EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_MASTER_SLOT)); } static void marvel_print_pox_err(u64 err_sum, struct ev7_pal_io_one_port *port) { #define IO7__POX_ERRSUM__AGP_REQQ_OVFL (1UL << 4) #define IO7__POX_ERRSUM__AGP_SYNC_ERR (1UL << 5) #define IO7__POX_ERRSUM__MRETRY_TO (1UL << 6) #define IO7__POX_ERRSUM__PCIX_UX_SPL (1UL << 7) #define IO7__POX_ERRSUM__PCIX_SPLIT_TO (1UL << 8) #define IO7__POX_ERRSUM__PCIX_DISCARD_SPL (1UL << 9) #define IO7__POX_ERRSUM__DMA_RD_TO (1UL << 10) #define IO7__POX_ERRSUM__CSR_NXM_RD (1UL << 11) #define IO7__POX_ERRSUM__CSR_NXM_WR (1UL << 12) #define IO7__POX_ERRSUM__DMA_TO (1UL << 13) #define IO7__POX_ERRSUM__ALL_MABORTS (1UL << 14) #define IO7__POX_ERRSUM__MABORT (1UL << 15) #define IO7__POX_ERRSUM__MABORT_MASK (IO7__POX_ERRSUM__ALL_MABORTS|\ IO7__POX_ERRSUM__MABORT) #define IO7__POX_ERRSUM__PT_TABORT (1UL << 16) #define IO7__POX_ERRSUM__PM_TABORT (1UL << 17) #define IO7__POX_ERRSUM__TABORT_MASK (IO7__POX_ERRSUM__PT_TABORT | \ IO7__POX_ERRSUM__PM_TABORT) #define IO7__POX_ERRSUM__SERR (1UL << 18) #define IO7__POX_ERRSUM__ADDRERR_STB (1UL << 19) #define IO7__POX_ERRSUM__DETECTED_SERR (1UL << 20) #define IO7__POX_ERRSUM__PERR (1UL << 21) #define IO7__POX_ERRSUM__DATAERR_STB_NIOW (1UL << 22) #define IO7__POX_ERRSUM__DETECTED_PERR (1UL << 23) #define IO7__POX_ERRSUM__PM_PERR (1UL << 24) #define IO7__POX_ERRSUM__PT_SCERROR (1UL << 26) #define IO7__POX_ERRSUM__HUNG_BUS (1UL << 28) #define IO7__POX_ERRSUM__UPE_ERROR__S (51) #define IO7__POX_ERRSUM__UPE_ERROR__M (0xffUL) #define IO7__POX_ERRSUM__UPE_ERROR GEN_MASK(IO7__POX_ERRSUM__UPE_ERROR) #define IO7__POX_ERRSUM__TLB_ERR (1UL << 59) #define IO7__POX_ERRSUM__ERR_VALID (1UL << 63) #define IO7__POX_ERRSUM__TRANS_SUM__MASK (IO7__POX_ERRSUM__MRETRY_TO | \ 
IO7__POX_ERRSUM__PCIX_UX_SPL | \ IO7__POX_ERRSUM__PCIX_SPLIT_TO | \ IO7__POX_ERRSUM__DMA_TO | \ IO7__POX_ERRSUM__MABORT_MASK | \ IO7__POX_ERRSUM__TABORT_MASK | \ IO7__POX_ERRSUM__SERR | \ IO7__POX_ERRSUM__ADDRERR_STB | \ IO7__POX_ERRSUM__PERR | \ IO7__POX_ERRSUM__DATAERR_STB_NIOW |\ IO7__POX_ERRSUM__DETECTED_PERR | \ IO7__POX_ERRSUM__PM_PERR | \ IO7__POX_ERRSUM__PT_SCERROR | \ IO7__POX_ERRSUM__UPE_ERROR) if (!(err_sum & IO7__POX_ERRSUM__ERR_VALID)) return; /* * First the transaction summary errors */ if (err_sum & IO7__POX_ERRSUM__MRETRY_TO) printk("%s IO7 Master Retry Timeout expired\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PCIX_UX_SPL) printk("%s Unexpected Split Completion\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PCIX_SPLIT_TO) printk("%s IO7 Split Completion Timeout expired\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__DMA_TO) printk("%s Hung bus during DMA transaction\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__MABORT_MASK) printk("%s Master Abort\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PT_TABORT) printk("%s IO7 Asserted Target Abort\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PM_TABORT) printk("%s IO7 Received Target Abort\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__ADDRERR_STB) { printk("%s Address or PCI-X Attribute Parity Error\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__SERR) printk("%s IO7 Asserted SERR\n", err_print_prefix); } if (err_sum & IO7__POX_ERRSUM__PERR) { if (err_sum & IO7__POX_ERRSUM__DATAERR_STB_NIOW) printk("%s IO7 Detected Data Parity Error\n", err_print_prefix); else printk("%s Split Completion Response with " "Parity Error\n", err_print_prefix); } if (err_sum & IO7__POX_ERRSUM__DETECTED_PERR) printk("%s PERR detected\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PM_PERR) printk("%s PERR while IO7 is master\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PT_SCERROR) { printk("%s IO7 Received Split Completion Error message\n", 
err_print_prefix); marvel_print_pox_spl_cmplt(port->pox_spl_cmplt); } if (err_sum & IO7__POX_ERRSUM__UPE_ERROR) { unsigned int upe_error = EXTRACT(err_sum, IO7__POX_ERRSUM__UPE_ERROR); int i; static char *upe_errors[] = { "Parity Error on MSI write data", "MSI read (MSI window is write only", "TLB - Invalid WR transaction", "TLB - Invalid RD transaction", "DMA - WR error (see north port)", "DMA - RD error (see north port)", "PPR - WR error (see north port)", "PPR - RD error (see north port)" }; printk("%s UPE Error:\n", err_print_prefix); for (i = 0; i < 8; i++) { if (upe_error & (1 << i)) printk("%s %s\n", err_print_prefix, upe_errors[i]); } } /* * POx_TRANS_SUM, if appropriate. */ if (err_sum & IO7__POX_ERRSUM__TRANS_SUM__MASK) marvel_print_pox_trans_sum(port->pox_trans_sum); /* * Then TLB_ERR. */ if (err_sum & IO7__POX_ERRSUM__TLB_ERR) { printk("%s TLB ERROR\n", err_print_prefix); marvel_print_pox_tlb_err(port->pox_tlb_err); } /* * And the single bit status errors. */ if (err_sum & IO7__POX_ERRSUM__AGP_REQQ_OVFL) printk("%s AGP Request Queue Overflow\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__AGP_SYNC_ERR) printk("%s AGP Sync Error\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PCIX_DISCARD_SPL) printk("%s Discarded split completion\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__DMA_RD_TO) printk("%s DMA Read Timeout\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__CSR_NXM_RD) printk("%s CSR NXM READ\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__CSR_NXM_WR) printk("%s CSR NXM WRITE\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__DETECTED_SERR) printk("%s SERR detected\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__HUNG_BUS) printk("%s HUNG BUS detected\n", err_print_prefix); } #endif /* CONFIG_VERBOSE_MCHECK */ static struct ev7_pal_io_subpacket * marvel_find_io7_with_error(struct ev7_lf_subpackets *lf_subpackets) { struct ev7_pal_io_subpacket *io = lf_subpackets->io; struct io7 *io7; int i; /* * Caller must 
provide the packet to fill */ if (!io) return NULL; /* * Fill the subpacket with the console's standard fill pattern */ memset(io, 0x55, sizeof(*io)); for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); ) { unsigned long err_sum = 0; err_sum |= io7->csrs->PO7_ERROR_SUM.csr; for (i = 0; i < IO7_NUM_PORTS; i++) { if (!io7->ports[i].enabled) continue; err_sum |= io7->ports[i].csrs->POx_ERR_SUM.csr; } /* * Is there at least one error? */ if (err_sum & (1UL << 63)) break; } /* * Did we find an IO7 with an error? */ if (!io7) return NULL; /* * We have an IO7 with an error. * * Fill in the IO subpacket. */ io->io_asic_rev = io7->csrs->IO_ASIC_REV.csr; io->io_sys_rev = io7->csrs->IO_SYS_REV.csr; io->io7_uph = io7->csrs->IO7_UPH.csr; io->hpi_ctl = io7->csrs->HPI_CTL.csr; io->crd_ctl = io7->csrs->CRD_CTL.csr; io->hei_ctl = io7->csrs->HEI_CTL.csr; io->po7_error_sum = io7->csrs->PO7_ERROR_SUM.csr; io->po7_uncrr_sym = io7->csrs->PO7_UNCRR_SYM.csr; io->po7_crrct_sym = io7->csrs->PO7_CRRCT_SYM.csr; io->po7_ugbge_sym = io7->csrs->PO7_UGBGE_SYM.csr; io->po7_err_pkt0 = io7->csrs->PO7_ERR_PKT[0].csr; io->po7_err_pkt1 = io7->csrs->PO7_ERR_PKT[1].csr; for (i = 0; i < IO7_NUM_PORTS; i++) { io7_ioport_csrs *csrs = io7->ports[i].csrs; if (!io7->ports[i].enabled) continue; io->ports[i].pox_err_sum = csrs->POx_ERR_SUM.csr; io->ports[i].pox_tlb_err = csrs->POx_TLB_ERR.csr; io->ports[i].pox_spl_cmplt = csrs->POx_SPL_COMPLT.csr; io->ports[i].pox_trans_sum = csrs->POx_TRANS_SUM.csr; io->ports[i].pox_first_err = csrs->POx_FIRST_ERR.csr; io->ports[i].pox_mult_err = csrs->POx_MULT_ERR.csr; io->ports[i].pox_dm_source = csrs->POx_DM_SOURCE.csr; io->ports[i].pox_dm_dest = csrs->POx_DM_DEST.csr; io->ports[i].pox_dm_size = csrs->POx_DM_SIZE.csr; io->ports[i].pox_dm_ctrl = csrs->POx_DM_CTRL.csr; /* * Ack this port's errors, if any. POx_ERR_SUM must be last. * * Most of the error registers get cleared and unlocked when * the associated bits in POx_ERR_SUM are cleared (by writing * 1). 
POx_TLB_ERR is an exception and must be explicitly * cleared. */ csrs->POx_TLB_ERR.csr = io->ports[i].pox_tlb_err; csrs->POx_ERR_SUM.csr = io->ports[i].pox_err_sum; mb(); csrs->POx_ERR_SUM.csr; } /* * Ack any port 7 error(s). */ io7->csrs->PO7_ERROR_SUM.csr = io->po7_error_sum; mb(); io7->csrs->PO7_ERROR_SUM.csr; /* * Correct the io7_pid. */ lf_subpackets->io_pid = io7->pe; return io; } static int marvel_process_io_error(struct ev7_lf_subpackets *lf_subpackets, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK struct ev7_pal_io_subpacket *io = lf_subpackets->io; int i; #endif /* CONFIG_VERBOSE_MCHECK */ #define MARVEL_IO_ERR_VALID(x) ((x) & (1UL << 63)) if (!lf_subpackets->logout || !lf_subpackets->io) return status; /* * The PALcode only builds an IO subpacket if there is a * locally connected IO7. In the cases of * 1) a uniprocessor kernel * 2) an mp kernel before the local secondary has called in * error interrupts are all directed to the primary processor. * In that case, we may not have an IO subpacket at all and, event * if we do, it may not be the right now. * * If the RBOX indicates an I/O error interrupt, make sure we have * the correct IO7 information. If we don't have an IO subpacket * or it's the wrong one, try to find the right one. * * RBOX I/O error interrupts are indicated by RBOX_INT<29> and * RBOX_INT<10>. */ if ((lf_subpackets->io->po7_error_sum & (1UL << 32)) || ((lf_subpackets->io->po7_error_sum | lf_subpackets->io->ports[0].pox_err_sum | lf_subpackets->io->ports[1].pox_err_sum | lf_subpackets->io->ports[2].pox_err_sum | lf_subpackets->io->ports[3].pox_err_sum) & (1UL << 63))) { /* * Either we have no IO subpacket or no error is * indicated in the one we do have. Try find the * one with the error. 
*/ if (!marvel_find_io7_with_error(lf_subpackets)) return status; } /* * We have an IO7 indicating an error - we're going to report it */ status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; printk("%s*Error occurred on IO7 at PID %u\n", err_print_prefix, lf_subpackets->io_pid); /* * Check port 7 first */ if (lf_subpackets->io->po7_error_sum & IO7__PO7_ERRSUM__ERR_MASK) { marvel_print_po7_err_sum(io); #if 0 printk("%s PORT 7 ERROR:\n" "%s PO7_ERROR_SUM: %016llx\n" "%s PO7_UNCRR_SYM: %016llx\n" "%s PO7_CRRCT_SYM: %016llx\n" "%s PO7_UGBGE_SYM: %016llx\n" "%s PO7_ERR_PKT0: %016llx\n" "%s PO7_ERR_PKT1: %016llx\n", err_print_prefix, err_print_prefix, io->po7_error_sum, err_print_prefix, io->po7_uncrr_sym, err_print_prefix, io->po7_crrct_sym, err_print_prefix, io->po7_ugbge_sym, err_print_prefix, io->po7_err_pkt0, err_print_prefix, io->po7_err_pkt1); #endif } /* * Then loop through the ports */ for (i = 0; i < IO7_NUM_PORTS; i++) { if (!MARVEL_IO_ERR_VALID(io->ports[i].pox_err_sum)) continue; printk("%s PID %u PORT %d POx_ERR_SUM: %016llx\n", err_print_prefix, lf_subpackets->io_pid, i, io->ports[i].pox_err_sum); marvel_print_pox_err(io->ports[i].pox_err_sum, &io->ports[i]); printk("%s [ POx_FIRST_ERR: %016llx ]\n", err_print_prefix, io->ports[i].pox_first_err); marvel_print_pox_err(io->ports[i].pox_first_err, &io->ports[i]); } #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int marvel_process_logout_frame(struct ev7_lf_subpackets *lf_subpackets, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; /* * I/O error? */ #define EV7__RBOX_INT__IO_ERROR__MASK 0x20000400ul if (lf_subpackets->logout && (lf_subpackets->logout->rbox_int & 0x20000400ul)) status = marvel_process_io_error(lf_subpackets, print); /* * Probing behind PCI-X bridges can cause machine checks on * Marvel when the probe is handled by the bridge as a split * completion transaction. The symptom is an ERROR_RESPONSE * to a CONFIG address. 
Since these errors will happen in * normal operation, dismiss them. * * Dismiss if: * C_STAT = 0x14 (Error Response) * C_STS<3> = 0 (C_ADDR valid) * C_ADDR<42> = 1 (I/O) * C_ADDR<31:22> = 111110xxb (PCI Config space) */ if (lf_subpackets->ev7 && (lf_subpackets->ev7->c_stat == 0x14) && !(lf_subpackets->ev7->c_sts & 0x8) && ((lf_subpackets->ev7->c_addr & 0x400ff000000ul) == 0x400fe000000ul)) status = MCHK_DISPOSITION_DISMISS; return status; } void marvel_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr; int (*process_frame)(struct ev7_lf_subpackets *, int) = NULL; struct ev7_lf_subpackets subpacket_collection = { NULL, }; struct ev7_pal_io_subpacket scratch_io_packet = { 0, }; struct ev7_lf_subpackets *lf_subpackets = NULL; int disposition = MCHK_DISPOSITION_UNKNOWN_ERROR; char *saved_err_prefix = err_print_prefix; char *error_type = NULL; /* * Sync the processor */ mb(); draina(); switch(vector) { case SCB_Q_SYSEVENT: process_frame = marvel_process_680_frame; error_type = "System Event"; break; case SCB_Q_SYSMCHK: process_frame = marvel_process_logout_frame; error_type = "System Uncorrectable Error"; break; case SCB_Q_SYSERR: process_frame = marvel_process_logout_frame; error_type = "System Correctable Error"; break; default: /* Don't know it - pass it up. */ ev7_machine_check(vector, la_ptr); return; } /* * A system event or error has occurred, handle it here. * * Any errors in the logout frame have already been cleared by the * PALcode, so just parse it. */ err_print_prefix = KERN_CRIT; /* * Parse the logout frame without printing first. If the only error(s) * found are classified as "dismissable", then just dismiss them and * don't print any message */ lf_subpackets = ev7_collect_logout_frame_subpackets(el_ptr, &subpacket_collection); if (process_frame && lf_subpackets && lf_subpackets->logout) { /* * We might not have the correct (or any) I/O subpacket. 
* [ See marvel_process_io_error() for explanation. ] * If we don't have one, point the io subpacket in * lf_subpackets at scratch_io_packet so that * marvel_find_io7_with_error() will have someplace to * store the info. */ if (!lf_subpackets->io) lf_subpackets->io = &scratch_io_packet; /* * Default io_pid to the processor reporting the error * [this will get changed in marvel_find_io7_with_error() * if a different one is needed] */ lf_subpackets->io_pid = lf_subpackets->logout->whami; /* * Evaluate the frames. */ disposition = process_frame(lf_subpackets, 0); } switch(disposition) { case MCHK_DISPOSITION_DISMISS: /* Nothing to do. */ break; case MCHK_DISPOSITION_REPORT: /* Recognized error, report it. */ printk("%s*%s (Vector 0x%x) reported on CPU %d\n", err_print_prefix, error_type, (unsigned int)vector, (int)smp_processor_id()); el_print_timestamp(&lf_subpackets->logout->timestamp); process_frame(lf_subpackets, 1); break; default: /* Unknown - dump the annotated subpackets. */ printk("%s*%s (Vector 0x%x) reported on CPU %d\n", err_print_prefix, error_type, (unsigned int)vector, (int)smp_processor_id()); el_process_subpacket(el_ptr); break; } err_print_prefix = saved_err_prefix; /* Release the logout frame. */ wrmces(0x7); mb(); } void __init marvel_register_error_handlers(void) { ev7_register_error_handlers(); }
gpl-2.0
keitaroht/SO
fs/logfs/compr.c
12707
1816
/*
 * fs/logfs/compr.c - compression routines
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/vmalloc.h>
#include <linux/zlib.h>

#define COMPR_LEVEL 3

/*
 * A single shared zlib stream (with its workspace allocated once at init)
 * is used for both directions; zstream_lock serializes all access to it.
 */
static DEFINE_MUTEX(zstream_lock);
static struct z_stream_s zstream;

/* Aim the shared stream at the caller's input/output buffers. */
static void zstream_set_buffers(void *in, void *out, size_t inlen,
				size_t outlen)
{
	zstream.next_in = in;
	zstream.avail_in = inlen;
	zstream.total_in = 0;
	zstream.next_out = out;
	zstream.avail_out = outlen;
	zstream.total_out = 0;
}

/*
 * Compress inlen bytes from @in into @out (capacity @outlen).
 * Returns the compressed length on success, -EIO on any zlib failure or
 * when the "compressed" result would not be smaller than the input.
 */
int logfs_compress(void *in, void *out, size_t inlen, size_t outlen)
{
	int ret = -EIO;

	mutex_lock(&zstream_lock);
	if (zlib_deflateInit(&zstream, COMPR_LEVEL) != Z_OK)
		goto out;

	zstream_set_buffers(in, out, inlen, outlen);

	/* Z_FINISH in one shot: anything but Z_STREAM_END is a failure. */
	if (zlib_deflate(&zstream, Z_FINISH) != Z_STREAM_END)
		goto out;
	if (zlib_deflateEnd(&zstream) != Z_OK)
		goto out;

	/* Only report success if compression actually shrank the data. */
	if (zstream.total_out < zstream.total_in)
		ret = zstream.total_out;
out:
	mutex_unlock(&zstream_lock);
	return ret;
}

/*
 * Decompress inlen bytes from @in into @out (capacity @outlen).
 * Returns 0 on success, -EIO on any zlib failure.
 */
int logfs_uncompress(void *in, void *out, size_t inlen, size_t outlen)
{
	int ret = -EIO;

	mutex_lock(&zstream_lock);
	if (zlib_inflateInit(&zstream) != Z_OK)
		goto out;

	zstream_set_buffers(in, out, inlen, outlen);

	if (zlib_inflate(&zstream, Z_FINISH) != Z_STREAM_END)
		goto out;
	if (zlib_inflateEnd(&zstream) != Z_OK)
		goto out;

	ret = 0;
out:
	mutex_unlock(&zstream_lock);
	return ret;
}

/*
 * Allocate a workspace large enough for either direction.
 * Returns 0 on success, -ENOMEM if vmalloc fails.
 */
int __init logfs_compr_init(void)
{
	size_t size = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			zlib_inflate_workspacesize());

	zstream.workspace = vmalloc(size);
	if (!zstream.workspace)
		return -ENOMEM;
	return 0;
}

/* Release the shared workspace allocated by logfs_compr_init(). */
void logfs_compr_exit(void)
{
	vfree(zstream.workspace);
}
gpl-2.0
Radium-Devices/Radium_taoshan
fs/logfs/compr.c
12707
1816
/*
 * fs/logfs/compr.c - compression routines
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/vmalloc.h>
#include <linux/zlib.h>

#define COMPR_LEVEL 3

/* One shared zlib stream for both directions; compr_mutex serializes it. */
static DEFINE_MUTEX(compr_mutex);
static struct z_stream_s stream;

/*
 * Compress @inlen bytes from @in into @out (capacity @outlen).
 * Returns the compressed length on success, -EIO on zlib failure or when
 * the result would not be smaller than the input.
 */
int logfs_compress(void *in, void *out, size_t inlen, size_t outlen)
{
	int err, ret;

	ret = -EIO;
	mutex_lock(&compr_mutex);
	err = zlib_deflateInit(&stream, COMPR_LEVEL);
	if (err != Z_OK)
		goto error;

	stream.next_in = in;
	stream.avail_in = inlen;
	stream.total_in = 0;
	stream.next_out = out;
	stream.avail_out = outlen;
	stream.total_out = 0;

	/* Single-shot deflate: Z_FINISH must yield Z_STREAM_END. */
	err = zlib_deflate(&stream, Z_FINISH);
	if (err != Z_STREAM_END)
		goto error;

	err = zlib_deflateEnd(&stream);
	if (err != Z_OK)
		goto error;

	/* Refuse "compression" that didn't actually shrink the data. */
	if (stream.total_out >= stream.total_in)
		goto error;

	ret = stream.total_out;
error:
	mutex_unlock(&compr_mutex);
	return ret;
}

/*
 * Decompress @inlen bytes from @in into @out (capacity @outlen).
 * Returns 0 on success, -EIO on any zlib failure.
 */
int logfs_uncompress(void *in, void *out, size_t inlen, size_t outlen)
{
	int err, ret;

	ret = -EIO;
	mutex_lock(&compr_mutex);
	err = zlib_inflateInit(&stream);
	if (err != Z_OK)
		goto error;

	stream.next_in = in;
	stream.avail_in = inlen;
	stream.total_in = 0;
	stream.next_out = out;
	stream.avail_out = outlen;
	stream.total_out = 0;

	err = zlib_inflate(&stream, Z_FINISH);
	if (err != Z_STREAM_END)
		goto error;

	err = zlib_inflateEnd(&stream);
	if (err != Z_OK)
		goto error;

	ret = 0;
error:
	mutex_unlock(&compr_mutex);
	return ret;
}

/*
 * Allocate one workspace big enough for either deflate or inflate.
 * Returns 0 on success, -ENOMEM if vmalloc fails.
 */
int __init logfs_compr_init(void)
{
	size_t size = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			zlib_inflate_workspacesize());
	stream.workspace = vmalloc(size);
	if (!stream.workspace)
		return -ENOMEM;
	return 0;
}

/* Free the workspace allocated by logfs_compr_init(). */
void logfs_compr_exit(void)
{
	vfree(stream.workspace);
}
gpl-2.0
sudosurootdev/gcc
gcc/testsuite/gcc.dg/cpp/tr-warn6.c
164
1789
/* Test for -Wtraditional warnings for stringification of macro args. Note, gcc should omit these warnings in system header files. By Kaveh R. Ghazi <ghazi@caip.rutgers.edu> 9/8/2000. */ /* { dg-do preprocess } */ /* { dg-options "-Wtraditional" } */ #define foo1(h) sdf "h3" fds "h" /* { dg-warning "macro argument \"h\" would be stringified" "traditional stringification" } */ #define foo2(h2) sdf "h2" fds "h3" /* { dg-warning "macro argument \"h2\" would be stringified" "traditional stringification" } */ #define foo3(h3) sdf "h2" fds "h3" /* { dg-warning "macro argument \"h3\" would be stringified" "traditional stringification" } */ #define foo4(h) sdf 'h3' fds 'h' /* { dg-warning "macro argument \"h\" would be stringified" "traditional stringification" } */ #define foo5(h2) sdf 'h2' fds 'h3' /* { dg-warning "macro argument \"h2\" would be stringified" "traditional stringification" } */ #define foo6(h3) sdf 'h2' fds 'h3' /* { dg-warning "macro argument \"h3\" would be stringified" "traditional stringification" } */ #define foo7(AA, hello, world, EEE) sdf "A B hello C,world,DhelloE F" fds EEE /* { dg-warning "macro argument \"hello\" would be stringified" "traditional stringification" } */ /* Catch the second warning from the above line. */ /* { dg-warning "macro argument \"world\" would be stringified" "traditional stringification second warning" { target *-*-* } 13 } */ # 19 "sys-header.h" 3 /* We are in system headers now, no -Wtraditional warnings should issue. */ #define bar1(h) sdf "h3" fds "h" #define bar2(h2) sdf "h2" fds "h3" #define bar3(h3) sdf "h2" fds "h3" #define bar4(h) sdf 'h3' fds 'h' #define bar5(h2) sdf 'h2' fds 'h3' #define bar6(h3) sdf 'h2' fds 'h3' #define bar7(AA, hello, world, EEE) sdf "A B hello C,world,DhelloE F" fds EEE
gpl-2.0
astarasikov/iconia-gnu-kernel
fs/jfs/jfs_imap.c
164
86344
/* * Copyright (C) International Business Machines Corp., 2000-2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * jfs_imap.c: inode allocation map manager * * Serialization: * Each AG has a simple lock which is used to control the serialization of * the AG level lists. This lock should be taken first whenever an AG * level list will be modified or accessed. * * Each IAG is locked by obtaining the buffer for the IAG page. * * There is also a inode lock for the inode map inode. A read lock needs to * be taken whenever an IAG is read from the map or the global level * information is read. A write lock needs to be taken whenever the global * level information is modified or an atomic operation needs to be used. * * If more than one IAG is read at one time, the read lock may not * be given up until all of the IAG's are read. Otherwise, a deadlock * may occur when trying to obtain the read lock while another thread * holding the read lock is waiting on the IAG already being held. * * The control page of the inode map is read into memory by diMount(). * Thereafter it should only be modified in memory and then it will be * written out when the filesystem is unmounted by diUnmount(). 
*/ #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include <linux/slab.h> #include "jfs_incore.h" #include "jfs_inode.h" #include "jfs_filsys.h" #include "jfs_dinode.h" #include "jfs_dmap.h" #include "jfs_imap.h" #include "jfs_metapage.h" #include "jfs_superblock.h" #include "jfs_debug.h" /* * imap locks */ /* iag free list lock */ #define IAGFREE_LOCK_INIT(imap) mutex_init(&imap->im_freelock) #define IAGFREE_LOCK(imap) mutex_lock(&imap->im_freelock) #define IAGFREE_UNLOCK(imap) mutex_unlock(&imap->im_freelock) /* per ag iag list locks */ #define AG_LOCK_INIT(imap,index) mutex_init(&(imap->im_aglock[index])) #define AG_LOCK(imap,agno) mutex_lock(&imap->im_aglock[agno]) #define AG_UNLOCK(imap,agno) mutex_unlock(&imap->im_aglock[agno]) /* * forward references */ static int diAllocAG(struct inomap *, int, bool, struct inode *); static int diAllocAny(struct inomap *, int, bool, struct inode *); static int diAllocBit(struct inomap *, struct iag *, int); static int diAllocExt(struct inomap *, int, struct inode *); static int diAllocIno(struct inomap *, int, struct inode *); static int diFindFree(u32, int); static int diNewExt(struct inomap *, struct iag *, int); static int diNewIAG(struct inomap *, int *, int, struct metapage **); static void duplicateIXtree(struct super_block *, s64, int, s64 *); static int diIAGRead(struct inomap * imap, int, struct metapage **); static int copy_from_dinode(struct dinode *, struct inode *); static void copy_to_dinode(struct dinode *, struct inode *); /* * NAME: diMount() * * FUNCTION: initialize the incore inode map control structures for * a fileset or aggregate init time. * * the inode map's control structure (dinomap) is * brought in from disk and placed in virtual memory. * * PARAMETERS: * ipimap - pointer to inode map inode for the aggregate or fileset. * * RETURN VALUES: * 0 - success * -ENOMEM - insufficient free virtual memory. * -EIO - i/o error. 
 */
int diMount(struct inode *ipimap)
{
	struct inomap *imap;
	struct metapage *mp;
	int index;
	struct dinomap_disk *dinom_le;

	/*
	 * allocate/initialize the in-memory inode map control structure
	 */
	/* allocate the in-memory inode map control structure. */
	imap = kmalloc(sizeof(struct inomap), GFP_KERNEL);
	if (imap == NULL) {
		jfs_err("diMount: kmalloc returned NULL!");
		return -ENOMEM;
	}

	/* read the on-disk inode map control structure. */
	mp = read_metapage(ipimap,
			   IMAPBLKNO << JFS_SBI(ipimap->i_sb)->l2nbperpage,
			   PSIZE, 0);
	if (mp == NULL) {
		kfree(imap);
		return -EIO;
	}

	/* copy the on-disk version to the in-memory version,
	 * converting each field from on-disk little-endian format.
	 */
	dinom_le = (struct dinomap_disk *) mp->data;
	imap->im_freeiag = le32_to_cpu(dinom_le->in_freeiag);
	imap->im_nextiag = le32_to_cpu(dinom_le->in_nextiag);
	atomic_set(&imap->im_numinos, le32_to_cpu(dinom_le->in_numinos));
	atomic_set(&imap->im_numfree, le32_to_cpu(dinom_le->in_numfree));
	imap->im_nbperiext = le32_to_cpu(dinom_le->in_nbperiext);
	imap->im_l2nbperiext = le32_to_cpu(dinom_le->in_l2nbperiext);
	/* per-allocation-group free inode/extent counts */
	for (index = 0; index < MAXAG; index++) {
		imap->im_agctl[index].inofree =
		    le32_to_cpu(dinom_le->in_agctl[index].inofree);
		imap->im_agctl[index].extfree =
		    le32_to_cpu(dinom_le->in_agctl[index].extfree);
		imap->im_agctl[index].numinos =
		    le32_to_cpu(dinom_le->in_agctl[index].numinos);
		imap->im_agctl[index].numfree =
		    le32_to_cpu(dinom_le->in_agctl[index].numfree);
	}

	/* release the buffer. */
	release_metapage(mp);

	/*
	 * allocate/initialize inode allocation map locks
	 */
	/* allocate and init iag free list lock */
	IAGFREE_LOCK_INIT(imap);

	/* allocate and init ag list locks */
	for (index = 0; index < MAXAG; index++) {
		AG_LOCK_INIT(imap, index);
	}

	/* bind the inode map inode and inode map control structure
	 * to each other.
	 */
	imap->im_ipimap = ipimap;
	JFS_IP(ipimap)->i_imap = imap;

	return (0);
}


/*
 * NAME:	diUnmount()
 *
 * FUNCTION:	write to disk the incore inode map control structures for
 *		a fileset or aggregate at unmount time.
 *
 * PARAMETERS:
 *	ipimap	- pointer to inode map inode for the aggregate or fileset.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOMEM	- insufficient free virtual memory.
 *	-EIO	- i/o error.
 */
int diUnmount(struct inode *ipimap, int mounterror)
{
	struct inomap *imap = JFS_IP(ipimap)->i_imap;

	/*
	 * update the on-disk inode map control structure
	 * (skipped if the mount failed or the fs is read-only)
	 */
	if (!(mounterror || isReadOnly(ipimap)))
		diSync(ipimap);

	/*
	 * Invalidate the page cache buffers
	 */
	truncate_inode_pages(ipimap->i_mapping, 0);

	/*
	 * free in-memory control structure
	 */
	kfree(imap);

	return (0);
}


/*
 * diSync() - write the in-core inode map control page (and any dirty
 * imap pages) back to disk.  Returns 0 on success, -EIO on failure to
 * get the control metapage.
 */
int diSync(struct inode *ipimap)
{
	struct dinomap_disk *dinom_le;
	struct inomap *imp = JFS_IP(ipimap)->i_imap;
	struct metapage *mp;
	int index;

	/*
	 * write imap global control page
	 */
	/* read the on-disk inode map control structure */
	mp = get_metapage(ipimap,
			  IMAPBLKNO << JFS_SBI(ipimap->i_sb)->l2nbperpage,
			  PSIZE, 0);
	if (mp == NULL) {
		jfs_err("diSync: get_metapage failed!");
		return -EIO;
	}

	/* copy the in-memory version to the on-disk version,
	 * converting each field to on-disk little-endian format.
	 */
	dinom_le = (struct dinomap_disk *) mp->data;
	dinom_le->in_freeiag = cpu_to_le32(imp->im_freeiag);
	dinom_le->in_nextiag = cpu_to_le32(imp->im_nextiag);
	dinom_le->in_numinos = cpu_to_le32(atomic_read(&imp->im_numinos));
	dinom_le->in_numfree = cpu_to_le32(atomic_read(&imp->im_numfree));
	dinom_le->in_nbperiext = cpu_to_le32(imp->im_nbperiext);
	dinom_le->in_l2nbperiext = cpu_to_le32(imp->im_l2nbperiext);
	/* per-allocation-group free inode/extent counts */
	for (index = 0; index < MAXAG; index++) {
		dinom_le->in_agctl[index].inofree =
		    cpu_to_le32(imp->im_agctl[index].inofree);
		dinom_le->in_agctl[index].extfree =
		    cpu_to_le32(imp->im_agctl[index].extfree);
		dinom_le->in_agctl[index].numinos =
		    cpu_to_le32(imp->im_agctl[index].numinos);
		dinom_le->in_agctl[index].numfree =
		    cpu_to_le32(imp->im_agctl[index].numfree);
	}

	/* write out the control structure */
	write_metapage(mp);

	/*
	 * write out dirty pages of imap
	 */
	filemap_write_and_wait(ipimap->i_mapping);

	diWriteSpecial(ipimap, 0);

	return (0);
}


/*
 * NAME:	diRead()
 *
 * FUNCTION:
initialize an incore inode from disk. * * on entry, the specifed incore inode should itself * specify the disk inode number corresponding to the * incore inode (i.e. i_number should be initialized). * * this routine handles incore inode initialization for * both "special" and "regular" inodes. special inodes * are those required early in the mount process and * require special handling since much of the file system * is not yet initialized. these "special" inodes are * identified by a NULL inode map inode pointer and are * actually initialized by a call to diReadSpecial(). * * for regular inodes, the iag describing the disk inode * is read from disk to determine the inode extent address * for the disk inode. with the inode extent address in * hand, the page of the extent that contains the disk * inode is read and the disk inode is copied to the * incore inode. * * PARAMETERS: * ip - pointer to incore inode to be initialized from disk. * * RETURN VALUES: * 0 - success * -EIO - i/o error. 
 *	-ENOMEM	- insufficient memory
 *
 */
int diRead(struct inode *ip)
{
	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
	int iagno, ino, extno, rc;
	struct inode *ipimap;
	struct dinode *dp;
	struct iag *iagp;
	struct metapage *mp;
	s64 blkno, agstart;
	struct inomap *imap;
	int block_offset;
	int inodes_left;
	unsigned long pageno;
	int rel_inode;

	jfs_info("diRead: ino = %ld", ip->i_ino);

	ipimap = sbi->ipimap;
	JFS_IP(ip)->ipimap = ipimap;

	/* determine the iag number for this inode (number) */
	iagno = INOTOIAG(ip->i_ino);

	/* read the iag (under the imap read lock) */
	imap = JFS_IP(ipimap)->i_imap;
	IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
	rc = diIAGRead(imap, iagno, &mp);
	IREAD_UNLOCK(ipimap);
	if (rc) {
		jfs_err("diRead: diIAGRead returned %d", rc);
		return (rc);
	}

	iagp = (struct iag *) mp->data;

	/* determine inode extent that holds the disk inode */
	ino = ip->i_ino & (INOSPERIAG - 1);
	extno = ino >> L2INOSPEREXT;

	/* extent not allocated or wrong size: the inode number is stale */
	if ((lengthPXD(&iagp->inoext[extno]) != imap->im_nbperiext) ||
	    (addressPXD(&iagp->inoext[extno]) == 0)) {
		release_metapage(mp);
		return -ESTALE;
	}

	/* get disk block number of the page within the inode extent
	 * that holds the disk inode.
	 */
	blkno = INOPBLK(&iagp->inoext[extno], ino, sbi->l2nbperpage);

	/* get the ag for the iag */
	agstart = le64_to_cpu(iagp->agstart);

	release_metapage(mp);

	rel_inode = (ino & (INOSPERPAGE - 1));
	pageno = blkno >> sbi->l2nbperpage;

	if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) {
		/*
		 * OS/2 didn't always align inode extents on page boundaries,
		 * so adjust page number / in-page index for the misalignment.
		 */
		inodes_left =
		     (sbi->nbperpage - block_offset) << sbi->l2niperblk;

		if (rel_inode < inodes_left)
			rel_inode += block_offset << sbi->l2niperblk;
		else {
			pageno += 1;
			rel_inode -= inodes_left;
		}
	}

	/* read the page of disk inode */
	mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1);
	if (!mp) {
		jfs_err("diRead: read_metapage failed");
		return -EIO;
	}

	/* locate the disk inode requested */
	dp = (struct dinode *) mp->data;
	dp += rel_inode;

	/* sanity-check the on-disk inode before copying it in */
	if (ip->i_ino != le32_to_cpu(dp->di_number)) {
		jfs_error(ip->i_sb, "diRead: i_ino != di_number");
		rc = -EIO;
	} else if (le32_to_cpu(dp->di_nlink) == 0)
		rc = -ESTALE;
	else
		/* copy the disk inode to the in-memory inode */
		rc = copy_from_dinode(dp, ip);

	release_metapage(mp);

	/* set the ag for the inode */
	JFS_IP(ip)->agno = BLKTOAG(agstart, sbi);
	JFS_IP(ip)->active_ag = -1;

	return (rc);
}


/*
 * NAME:	diReadSpecial()
 *
 * FUNCTION:	initialize a 'special' inode from disk.
 *
 *		this routines handles aggregate level inodes.  The
 *		inode cache cannot differentiate between the
 *		aggregate inodes and the filesystem inodes, so we
 *		handle these here.  We don't actually use the aggregate
 *		inode map, since these inodes are at a fixed location
 *		and in some cases the aggregate inode map isn't initialized
 *		yet.
 *
 * PARAMETERS:
 *	sb - filesystem superblock
 *	inum - aggregate inode number
 *	secondary - 1 if secondary aggregate inode table
 *
 * RETURN VALUES:
 *	new inode	- success
 *	NULL		- i/o error.
 */
struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	uint address;
	struct dinode *dp;
	struct inode *ip;
	struct metapage *mp;

	ip = new_inode(sb);
	if (ip == NULL) {
		jfs_err("diReadSpecial: new_inode returned NULL!");
		return ip;
	}

	/* pick the primary or secondary aggregate inode table (AIT) */
	if (secondary) {
		address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
		JFS_IP(ip)->ipimap = sbi->ipaimap2;
	} else {
		address = AITBL_OFF >> L2PSIZE;
		JFS_IP(ip)->ipimap = sbi->ipaimap;
	}

	ASSERT(inum < INOSPEREXT);

	ip->i_ino = inum;

	address += inum >> 3;	/* 8 inodes per 4K page */

	/* read the page of fixed disk inode (AIT) in raw mode */
	mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1);
	if (mp == NULL) {
		ip->i_nlink = 1;	/* Don't want iput() deleting it */
		iput(ip);
		return (NULL);
	}

	/* get the pointer to the disk inode of interest */
	dp = (struct dinode *) (mp->data);
	dp += inum % 8;		/* 8 inodes per 4K page */

	/* copy on-disk inode to in-memory inode */
	if ((copy_from_dinode(dp, ip)) != 0) {
		/* handle bad return by returning NULL for ip */
		ip->i_nlink = 1;	/* Don't want iput() deleting it */
		iput(ip);
		/* release the page */
		release_metapage(mp);
		return (NULL);
	}

	ip->i_mapping->a_ops = &jfs_metapage_aops;
	mapping_set_gfp_mask(ip->i_mapping, GFP_NOFS);

	/* Allocations to metadata inodes should not affect quotas */
	ip->i_flags |= S_NOQUOTA;

	if ((inum == FILESYSTEM_I) && (JFS_IP(ip)->ipimap == sbi->ipaimap)) {
		sbi->gengen = le32_to_cpu(dp->di_gengen);
		sbi->inostamp = le32_to_cpu(dp->di_inostamp);
	}

	/* release the page */
	release_metapage(mp);

	/*
	 * __mark_inode_dirty expects inodes to be hashed.  Since we don't
	 * want special inodes in the fileset inode space, we make them
	 * appear hashed, but do not put on any lists.  hlist_del()
	 * will work fine and require no locking.
	 */
	hlist_add_fake(&ip->i_hash);

	return (ip);
}

/*
 * NAME:	diWriteSpecial()
 *
 * FUNCTION:	Write the special inode to disk
 *
 * PARAMETERS:
 *	ip - special inode
 *	secondary - 1 if secondary aggregate inode table
 *
 * RETURN VALUES: none
 */
void diWriteSpecial(struct inode *ip, int secondary)
{
	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
	uint address;
	struct dinode *dp;
	ino_t inum = ip->i_ino;
	struct metapage *mp;

	/* pick the primary or secondary aggregate inode table (AIT) */
	if (secondary)
		address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
	else
		address = AITBL_OFF >> L2PSIZE;

	ASSERT(inum < INOSPEREXT);

	address += inum >> 3;	/* 8 inodes per 4K page */

	/* read the page of fixed disk inode (AIT) in raw mode */
	mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1);
	if (mp == NULL) {
		jfs_err("diWriteSpecial: failed to read aggregate inode "
			"extent!");
		return;
	}

	/* get the pointer to the disk inode of interest */
	dp = (struct dinode *) (mp->data);
	dp += inum % 8;		/* 8 inodes per 4K page */

	/* copy on-disk inode to in-memory inode */
	copy_to_dinode(dp, ip);
	memcpy(&dp->di_xtroot, &JFS_IP(ip)->i_xtroot, 288);

	if (inum == FILESYSTEM_I)
		dp->di_gengen = cpu_to_le32(sbi->gengen);

	/* write the page */
	write_metapage(mp);
}

/*
 * NAME:	diFreeSpecial()
 *
 * FUNCTION:	Free allocated space for special inode
 */
void diFreeSpecial(struct inode *ip)
{
	if (ip == NULL) {
		jfs_err("diFreeSpecial called with NULL ip!");
		return;
	}
	filemap_write_and_wait(ip->i_mapping);
	truncate_inode_pages(ip->i_mapping, 0);
	iput(ip);
}


/*
 * NAME:	diWrite()
 *
 * FUNCTION:	write the on-disk inode portion of the in-memory inode
 *		to its corresponding on-disk inode.
 *
 *	on entry, the specifed incore inode should itself
 *	specify the disk inode number corresponding to the
 *	incore inode (i.e. i_number should be initialized).
 *
 *	the inode contains the inode extent address for the disk
 *	inode.
 *	with the inode extent address in hand, the
 *	page of the extent that contains the disk inode is
 *	read and the disk inode portion of the incore inode
 *	is copied to the disk inode.
 *
 * PARAMETERS:
 *	tid	- transaction id
 *	ip	- pointer to incore inode to be written to the inode extent.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error.
 */
int diWrite(tid_t tid, struct inode *ip)
{
	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	int rc = 0;
	s32 ino;
	struct dinode *dp;
	s64 blkno;
	int block_offset;
	int inodes_left;
	struct metapage *mp;
	unsigned long pageno;
	int rel_inode;
	int dioffset;
	struct inode *ipimap;
	uint type;
	lid_t lid;
	struct tlock *ditlck, *tlck;
	struct linelock *dilinelock, *ilinelock;
	struct lv *lv;
	int n;

	ipimap = jfs_ip->ipimap;

	/* inode number relative to its iag */
	ino = ip->i_ino & (INOSPERIAG - 1);

	/* sanity-check the cached inode extent descriptor before trusting
	 * it to locate the on-disk inode.
	 */
	if (!addressPXD(&(jfs_ip->ixpxd)) ||
	    (lengthPXD(&(jfs_ip->ixpxd)) !=
	     JFS_IP(ipimap)->i_imap->im_nbperiext)) {
		jfs_error(ip->i_sb, "diWrite: ixpxd invalid");
		return -EIO;
	}

	/*
	 * read the page of disk inode containing the specified inode:
	 */
	/* compute the block address of the page */
	blkno = INOPBLK(&(jfs_ip->ixpxd), ino, sbi->l2nbperpage);

	rel_inode = (ino & (INOSPERPAGE - 1));
	pageno = blkno >> sbi->l2nbperpage;

	if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) {
		/*
		 * OS/2 didn't always align inode extents on page boundaries
		 */
		inodes_left =
		    (sbi->nbperpage - block_offset) << sbi->l2niperblk;
		if (rel_inode < inodes_left)
			rel_inode += block_offset << sbi->l2niperblk;
		else {
			pageno += 1;
			rel_inode -= inodes_left;
		}
	}
	/* read the page of disk inode */
      retry:
	mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1);
	if (!mp)
		return -EIO;

	/* get the pointer to the disk inode */
	dp = (struct dinode *) mp->data;
	dp += rel_inode;

	/* byte offset of this inode within its page, used below to build
	 * 128-byte slot numbers for the line locks.
	 */
	dioffset = (ino & (INOSPERPAGE - 1)) << L2DISIZE;

	/*
	 * acquire transaction lock on the on-disk inode;
	 * N.B. tlock is acquired on ipimap not ip;
	 *
	 * a NULL return means the lock could not be granted right now;
	 * re-read the metapage and try again.
	 */
	if ((ditlck =
	     txLock(tid, ipimap, mp, tlckINODE | tlckENTRY)) == NULL)
		goto retry;
	dilinelock = (struct linelock *) & ditlck->lock;

	/*
	 * copy btree root from in-memory inode to on-disk inode
	 *
	 * (tlock is taken from inline B+-tree root in in-memory
	 * inode when the B+-tree root is updated, which is pointed
	 * by jfs_ip->blid as well as being on tx tlock list)
	 *
	 * further processing of btree root is based on the copy
	 * in in-memory inode, where txLog() will log from, and,
	 * for xtree root, txUpdateMap() will update map and reset
	 * XAD_NEW bit;
	 */

	if (S_ISDIR(ip->i_mode) && (lid = jfs_ip->xtlid)) {
		/*
		 * This is the special xtree inside the directory for storing
		 * the directory table
		 */
		xtpage_t *p, *xp;
		xad_t *xad;

		jfs_ip->xtlid = 0;

		/* redirect the tlock at the btree root in this metapage */
		tlck = lid_to_tlock(lid);
		assert(tlck->type & tlckXTREE);
		tlck->type |= tlckBTROOT;
		tlck->mp = mp;
		ilinelock = (struct linelock *) & tlck->lock;

		/*
		 * copy xtree root from inode to dinode:
		 */
		p = &jfs_ip->i_xtroot;
		xp = (xtpage_t *) &dp->di_dirtable;
		lv = ilinelock->lv;
		for (n = 0; n < ilinelock->index; n++, lv++) {
			memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
			       lv->length << L2XTSLOTSIZE);
		}

		/* reset on-disk (metadata page) xtree XAD_NEW bit */
		xad = &xp->xad[XTENTRYSTART];
		for (n = XTENTRYSTART;
		     n < le16_to_cpu(xp->header.nextindex); n++, xad++)
			if (xad->flag & (XAD_NEW | XAD_EXTENDED))
				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
	}

	/* no pending btree-root update: skip straight to inline data */
	if ((lid = jfs_ip->blid) == 0)
		goto inlineData;
	jfs_ip->blid = 0;

	tlck = lid_to_tlock(lid);
	type = tlck->type;
	tlck->type |= tlckBTROOT;
	tlck->mp = mp;
	ilinelock = (struct linelock *) & tlck->lock;

	/*
	 *	regular file: 16 byte (XAD slot) granularity
	 */
	if (type & tlckXTREE) {
		xtpage_t *p, *xp;
		xad_t *xad;

		/*
		 * copy xtree root from inode to dinode:
		 */
		p = &jfs_ip->i_xtroot;
		xp = &dp->di_xtroot;
		lv = ilinelock->lv;
		for (n = 0; n < ilinelock->index; n++, lv++) {
			memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
			       lv->length << L2XTSLOTSIZE);
		}

		/* reset on-disk (metadata page) xtree XAD_NEW bit */
		xad = &xp->xad[XTENTRYSTART];
		for (n = XTENTRYSTART;
		     n < le16_to_cpu(xp->header.nextindex); n++, xad++)
			if (xad->flag & (XAD_NEW | XAD_EXTENDED))
				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
	}
	/*
	 *	directory: 32 byte (directory entry slot) granularity
	 */
	else if (type & tlckDTREE) {
		dtpage_t *p, *xp;

		/*
		 * copy dtree root from inode to dinode:
		 */
		p = (dtpage_t *) &jfs_ip->i_dtroot;
		xp = (dtpage_t *) & dp->di_dtroot;
		lv = ilinelock->lv;
		for (n = 0; n < ilinelock->index; n++, lv++) {
			memcpy(&xp->slot[lv->offset], &p->slot[lv->offset],
			       lv->length << L2DTSLOTSIZE);
		}
	} else {
		jfs_err("diWrite: UFO tlock");
	}

      inlineData:
	/*
	 * copy inline symlink from in-memory inode to on-disk inode
	 */
	if (S_ISLNK(ip->i_mode) && ip->i_size < IDATASIZE) {
		lv = & dilinelock->lv[dilinelock->index];
		lv->offset = (dioffset + 2 * 128) >> L2INODESLOTSIZE;
		lv->length = 2;
		memcpy(&dp->di_fastsymlink, jfs_ip->i_inline, IDATASIZE);
		dilinelock->index++;
	}
	/*
	 * copy inline data from in-memory inode to on-disk inode:
	 * 128 byte slot granularity
	 */
	if (test_cflag(COMMIT_Inlineea, ip)) {
		lv = & dilinelock->lv[dilinelock->index];
		lv->offset = (dioffset + 3 * 128) >> L2INODESLOTSIZE;
		lv->length = 1;
		memcpy(&dp->di_inlineea, jfs_ip->i_inline_ea, INODESLOTSIZE);
		dilinelock->index++;

		/* one-shot flag: the inline EA has now been flushed */
		clear_cflag(COMMIT_Inlineea, ip);
	}

	/*
	 *	lock/copy inode base: 128 byte slot granularity
	 */
	lv = & dilinelock->lv[dilinelock->index];
	lv->offset = dioffset >> L2INODESLOTSIZE;
	copy_to_dinode(dp, ip);
	if (test_and_clear_cflag(COMMIT_Dirtable, ip)) {
		lv->length = 2;
		/* NOTE(review): 96 is presumably sizeof the inline directory
		 * table — confirm against struct dinode/di_dirtable layout.
		 */
		memcpy(&dp->di_dirtable, &jfs_ip->i_dirtable, 96);
	} else
		lv->length = 1;
	dilinelock->index++;

	/* release the buffer holding the updated on-disk inode.
	 * the buffer will be later written by commit processing.
	 */
	write_metapage(mp);

	return (rc);
}

/*
 * NAME:	diFree(ip)
 *
 * FUNCTION:	free a specified inode from the inode working map
 *		for a fileset or aggregate.
 *
 *	if the inode to be freed represents the first (only)
 *	free inode within the iag, the iag will be placed on
 *	the ag free inode list.
 *
 *	freeing the inode will cause the inode extent to be
 *	freed if the inode is the only allocated inode within
 *	the extent.  in this case all the disk resource backing
 *	up the inode extent will be freed. in addition, the iag
 *	will be placed on the ag extent free list if the extent
 *	is the first free extent in the iag.  if freeing the
 *	extent also means that no free inodes will exist for
 *	the iag, the iag will also be removed from the ag free
 *	inode list.
 *
 *	the iag describing the inode will be freed if the extent
 *	is to be freed and it is the only backed extent within
 *	the iag.  in this case, the iag will be removed from the
 *	ag free extent list and ag free inode list and placed on
 *	the inode map's free iag list.
 *
 *	a careful update approach is used to provide consistency
 *	in the face of updates to multiple buffers.  under this
 *	approach, all required buffers are obtained before making
 *	any updates and are held until all updates are complete.
 *
 * PARAMETERS:
 *	ip	- inode to be freed.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error.
 */
int diFree(struct inode *ip)
{
	int rc;
	ino_t inum = ip->i_ino;
	struct iag *iagp, *aiagp, *biagp, *ciagp, *diagp;
	struct metapage *mp, *amp, *bmp, *cmp, *dmp;
	int iagno, ino, extno, bitno, sword, agno;
	int back, fwd;
	u32 bitmap, mask;
	struct inode *ipimap = JFS_SBI(ip->i_sb)->ipimap;
	struct inomap *imap = JFS_IP(ipimap)->i_imap;
	pxd_t freepxd;
	tid_t tid;
	struct inode *iplist[3];
	struct tlock *tlck;
	struct pxd_lock *pxdlock;

	/*
	 * This is just to suppress compiler warnings.  The same logic that
	 * references these variables is used to initialize them.
	 */
	aiagp = biagp = ciagp = diagp = NULL;

	/* get the iag number containing the inode.
	 */
	iagno = INOTOIAG(inum);

	/* make sure that the iag is contained within
	 * the map.
	 */
	if (iagno >= imap->im_nextiag) {
		print_hex_dump(KERN_ERR, "imap: ", DUMP_PREFIX_ADDRESS, 16, 4,
			       imap, 32, 0);
		jfs_error(ip->i_sb,
			  "diFree: inum = %d, iagno = %d, nextiag = %d",
			  (uint) inum, iagno, imap->im_nextiag);
		return -EIO;
	}

	/* get the allocation group for this ino.
	 */
	agno = JFS_IP(ip)->agno;

	/* Lock the AG specific inode map information
	 */
	AG_LOCK(imap, agno);

	/* Obtain read lock in imap inode.  Don't release it until we have
	 * read all of the IAG's that we are going to.
	 */
	IREAD_LOCK(ipimap, RDWRLOCK_IMAP);

	/* read the iag.
	 */
	if ((rc = diIAGRead(imap, iagno, &mp))) {
		IREAD_UNLOCK(ipimap);
		AG_UNLOCK(imap, agno);
		return (rc);
	}
	iagp = (struct iag *) mp->data;

	/* get the inode number and extent number of the inode within
	 * the iag and the inode number within the extent.
	 */
	ino = inum & (INOSPERIAG - 1);
	extno = ino >> L2INOSPEREXT;
	bitno = ino & (INOSPEREXT - 1);
	mask = HIGHORDER >> bitno;

	/* double-free detection: logged but deliberately non-fatal here */
	if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
		jfs_error(ip->i_sb,
			  "diFree: wmap shows inode already free");
	}

	if (!addressPXD(&iagp->inoext[extno])) {
		release_metapage(mp);
		IREAD_UNLOCK(ipimap);
		AG_UNLOCK(imap, agno);
		jfs_error(ip->i_sb, "diFree: invalid inoext");
		return -EIO;
	}

	/* compute the bitmap for the extent reflecting the freed inode.
	 */
	bitmap = le32_to_cpu(iagp->wmap[extno]) & ~mask;

	if (imap->im_agctl[agno].numfree > imap->im_agctl[agno].numinos) {
		release_metapage(mp);
		IREAD_UNLOCK(ipimap);
		AG_UNLOCK(imap, agno);
		jfs_error(ip->i_sb, "diFree: numfree > numinos");
		return -EIO;
	}

	/*
	 *	inode extent still has some inodes or below low water mark:
	 *		keep the inode extent;
	 *
	 * NOTE(review): 96 / 288 / 25% are tuning thresholds for keeping
	 * spare free inodes in the AG — presumably chosen empirically.
	 */
	if (bitmap ||
	    imap->im_agctl[agno].numfree < 96 ||
	    (imap->im_agctl[agno].numfree < 288 &&
	     (((imap->im_agctl[agno].numfree * 100) /
	       imap->im_agctl[agno].numinos) <= 25))) {
		/* if the iag currently has no free inodes (i.e.,
		 * the inode being freed is the first free inode of iag),
		 * insert the iag at head of the inode free list for the ag.
		 */
		if (iagp->nfreeinos == 0) {
			/* check if there are any iags on the ag inode
			 * free list.  if so, read the first one so that
			 * we can link the current iag onto the list at
			 * the head.
			 */
			if ((fwd = imap->im_agctl[agno].inofree) >= 0) {
				/* read the iag that currently is the head
				 * of the list.
				 */
				if ((rc = diIAGRead(imap, fwd, &amp))) {
					IREAD_UNLOCK(ipimap);
					AG_UNLOCK(imap, agno);
					release_metapage(mp);
					return (rc);
				}
				aiagp = (struct iag *) amp->data;

				/* make current head point back to the iag.
				 */
				aiagp->inofreeback = cpu_to_le32(iagno);

				write_metapage(amp);
			}

			/* iag points forward to current head and iag
			 * becomes the new head of the list.
			 */
			iagp->inofreefwd =
			    cpu_to_le32(imap->im_agctl[agno].inofree);
			iagp->inofreeback = cpu_to_le32(-1);
			imap->im_agctl[agno].inofree = iagno;
		}
		IREAD_UNLOCK(ipimap);

		/* update the free inode summary map for the extent if
		 * freeing the inode means the extent will now have free
		 * inodes (i.e., the inode being freed is the first free
		 * inode of extent),
		 */
		if (iagp->wmap[extno] == cpu_to_le32(ONES)) {
			sword = extno >> L2EXTSPERSUM;
			bitno = extno & (EXTSPERSUM - 1);
			iagp->inosmap[sword] &=
			    cpu_to_le32(~(HIGHORDER >> bitno));
		}

		/* update the bitmap.
		 */
		iagp->wmap[extno] = cpu_to_le32(bitmap);

		/* update the free inode counts at the iag, ag and
		 * map level.
		 */
		le32_add_cpu(&iagp->nfreeinos, 1);
		imap->im_agctl[agno].numfree += 1;
		atomic_inc(&imap->im_numfree);

		/* release the AG inode map lock
		 */
		AG_UNLOCK(imap, agno);

		/* write the iag */
		write_metapage(mp);

		return (0);
	}


	/*
	 *	inode extent has become free and above low water mark:
	 *		free the inode extent;
	 */

	/*
	 *	prepare to update iag list(s) (careful update step 1)
	 */
	amp = bmp = cmp = dmp = NULL;
	fwd = back = -1;

	/* check if the iag currently has no free extents.  if so,
	 * it will be placed on the head of the ag extent free list.
	 */
	if (iagp->nfreeexts == 0) {
		/* check if the ag extent free list has any iags.
		 * if so, read the iag at the head of the list now.
		 * this (head) iag will be updated later to reflect
		 * the addition of the current iag at the head of
		 * the list.
		 */
		if ((fwd = imap->im_agctl[agno].extfree) >= 0) {
			if ((rc = diIAGRead(imap, fwd, &amp)))
				goto error_out;
			aiagp = (struct iag *) amp->data;
		}
	} else {
		/* iag has free extents. check if the addition of a free
		 * extent will cause all extents to be free within this
		 * iag.  if so, the iag will be removed from the ag extent
		 * free list and placed on the inode map's free iag list.
		 */
		if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
			/* in preparation for removing the iag from the
			 * ag extent free list, read the iags preceding
			 * and following the iag on the ag extent free
			 * list.
			 */
			if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) {
				if ((rc = diIAGRead(imap, fwd, &amp)))
					goto error_out;
				aiagp = (struct iag *) amp->data;
			}

			if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) {
				if ((rc = diIAGRead(imap, back, &bmp)))
					goto error_out;
				biagp = (struct iag *) bmp->data;
			}
		}
	}

	/* remove the iag from the ag inode free list if freeing
	 * this extent cause the iag to have no free inodes.
	 */
	if (iagp->nfreeinos == cpu_to_le32(INOSPEREXT - 1)) {
		int inofreeback = le32_to_cpu(iagp->inofreeback);
		int inofreefwd = le32_to_cpu(iagp->inofreefwd);

		/* in preparation for removing the iag from the
		 * ag inode free list, read the iags preceding
		 * and following the iag on the ag inode free
		 * list.  before reading these iags, we must make
		 * sure that we already don't have them in hand
		 * from up above, since re-reading an iag (buffer)
		 * we are currently holding would cause a deadlock.
		 */
		if (inofreefwd >= 0) {
			if (inofreefwd == fwd)
				ciagp = (struct iag *) amp->data;
			else if (inofreefwd == back)
				ciagp = (struct iag *) bmp->data;
			else {
				if ((rc =
				     diIAGRead(imap, inofreefwd, &cmp)))
					goto error_out;
				ciagp = (struct iag *) cmp->data;
			}
			assert(ciagp != NULL);
		}

		if (inofreeback >= 0) {
			if (inofreeback == fwd)
				diagp = (struct iag *) amp->data;
			else if (inofreeback == back)
				diagp = (struct iag *) bmp->data;
			else {
				if ((rc =
				     diIAGRead(imap, inofreeback, &dmp)))
					goto error_out;
				diagp = (struct iag *) dmp->data;
			}
			assert(diagp != NULL);
		}
	}

	IREAD_UNLOCK(ipimap);

	/*
	 * invalidate any page of the inode extent freed from buffer cache;
	 */
	freepxd = iagp->inoext[extno];
	invalidate_pxd_metapages(ip, freepxd);

	/*
	 *	update iag list(s) (careful update step 2)
	 */
	/* add the iag to the ag extent free list if this is the
	 * first free extent for the iag.
	 */
	if (iagp->nfreeexts == 0) {
		if (fwd >= 0)
			aiagp->extfreeback = cpu_to_le32(iagno);

		iagp->extfreefwd =
		    cpu_to_le32(imap->im_agctl[agno].extfree);
		iagp->extfreeback = cpu_to_le32(-1);
		imap->im_agctl[agno].extfree = iagno;
	} else {
		/* remove the iag from the ag extent list if all extents
		 * are now free and place it on the inode map iag free list.
		 */
		if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
			if (fwd >= 0)
				aiagp->extfreeback = iagp->extfreeback;

			if (back >= 0)
				biagp->extfreefwd = iagp->extfreefwd;
			else
				imap->im_agctl[agno].extfree =
				    le32_to_cpu(iagp->extfreefwd);

			iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);

			IAGFREE_LOCK(imap);
			iagp->iagfree = cpu_to_le32(imap->im_freeiag);
			imap->im_freeiag = iagno;
			IAGFREE_UNLOCK(imap);
		}
	}

	/* remove the iag from the ag inode free list if freeing
	 * this extent causes the iag to have no free inodes.
	 */
	if (iagp->nfreeinos == cpu_to_le32(INOSPEREXT - 1)) {
		if ((int) le32_to_cpu(iagp->inofreefwd) >= 0)
			ciagp->inofreeback = iagp->inofreeback;

		if ((int) le32_to_cpu(iagp->inofreeback) >= 0)
			diagp->inofreefwd = iagp->inofreefwd;
		else
			imap->im_agctl[agno].inofree =
			    le32_to_cpu(iagp->inofreefwd);
		iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
	}

	/* update the inode extent address and working map
	 * to reflect the free extent.
	 * the permanent map should have been updated already
	 * for the inode being freed.
	 */
	if (iagp->pmap[extno] != 0) {
		jfs_error(ip->i_sb, "diFree: the pmap does not show inode free");
	}
	iagp->wmap[extno] = 0;
	PXDlength(&iagp->inoext[extno], 0);
	PXDaddress(&iagp->inoext[extno], 0);

	/* update the free extent and free inode summary maps
	 * to reflect the freed extent.
	 * the inode summary map is marked to indicate no inodes
	 * available for the freed extent.
	 */
	sword = extno >> L2EXTSPERSUM;
	bitno = extno & (EXTSPERSUM - 1);
	mask = HIGHORDER >> bitno;
	iagp->inosmap[sword] |= cpu_to_le32(mask);
	iagp->extsmap[sword] &= cpu_to_le32(~mask);

	/* update the number of free inodes and number of free extents
	 * for the iag.
	 */
	le32_add_cpu(&iagp->nfreeinos, -(INOSPEREXT - 1));
	le32_add_cpu(&iagp->nfreeexts, 1);

	/* update the number of free inodes and backed inodes
	 * at the ag and inode map level.
	 */
	imap->im_agctl[agno].numfree -= (INOSPEREXT - 1);
	imap->im_agctl[agno].numinos -= INOSPEREXT;
	atomic_sub(INOSPEREXT - 1, &imap->im_numfree);
	atomic_sub(INOSPEREXT, &imap->im_numinos);

	if (amp)
		write_metapage(amp);
	if (bmp)
		write_metapage(bmp);
	if (cmp)
		write_metapage(cmp);
	if (dmp)
		write_metapage(dmp);

	/*
	 * start transaction to update block allocation map
	 * for the inode extent freed;
	 *
	 * N.B. AG_LOCK is released and iag will be released below, and
	 * other thread may allocate inode from/reusing the ixad freed
	 * BUT with new/different backing inode extent from the extent
	 * to be freed by the transaction;
	 */
	tid = txBegin(ipimap->i_sb, COMMIT_FORCE);
	mutex_lock(&JFS_IP(ipimap)->commit_mutex);

	/* acquire tlock of the iag page of the freed ixad
	 * to force the page NOHOMEOK (even though no data is
	 * logged from the iag page) until NOREDOPAGE|FREEXTENT log
	 * for the free of the extent is committed;
	 * write FREEXTENT|NOREDOPAGE log record
	 * N.B. linelock is overlaid as freed extent descriptor;
	 */
	tlck = txLock(tid, ipimap, mp, tlckINODE | tlckFREE);
	pxdlock = (struct pxd_lock *) & tlck->lock;
	pxdlock->flag = mlckFREEPXD;
	pxdlock->pxd = freepxd;
	pxdlock->index = 1;

	write_metapage(mp);

	iplist[0] = ipimap;

	/*
	 * logredo needs the IAG number and IAG extent index in order
	 * to ensure that the IMap is consistent.  The least disruptive
	 * way to pass these values through to the transaction manager
	 * is in the iplist array.
	 *
	 * It's not pretty, but it works.
	 */
	iplist[1] = (struct inode *) (size_t)iagno;
	iplist[2] = (struct inode *) (size_t)extno;

	rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);

	txEnd(tid);
	mutex_unlock(&JFS_IP(ipimap)->commit_mutex);

	/* unlock the AG inode map information */
	AG_UNLOCK(imap, agno);

	return (0);

      error_out:
	IREAD_UNLOCK(ipimap);

	if (amp)
		release_metapage(amp);
	if (bmp)
		release_metapage(bmp);
	if (cmp)
		release_metapage(cmp);
	if (dmp)
		release_metapage(dmp);

	AG_UNLOCK(imap, agno);

	release_metapage(mp);

	return (rc);
}

/*
 * There are several places in the diAlloc* routines where we initialize
 * the inode.
 */
static inline void
diInitInode(struct inode *ip, int iagno, int ino, int extno,
	    struct iag * iagp)
{
	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);

	/* fileset-wide inode number = iag base + offset within iag */
	ip->i_ino = (iagno << L2INOSPERIAG) + ino;
	/* remember the backing inode extent for later diWrite/diFree */
	jfs_ip->ixpxd = iagp->inoext[extno];
	jfs_ip->agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
	jfs_ip->active_ag = -1;
}

/*
 * NAME:	diAlloc(pip,dir,ip)
 *
 * FUNCTION:	allocate a disk inode from the inode working map
 *		for a fileset or aggregate.
 *
 * PARAMETERS:
 *	pip	- pointer to incore inode for the parent inode.
 *	dir	- 'true' if the new disk inode is for a directory.
 *	ip	- pointer to a new inode
 *
 * RETURN VALUES:
 *	0	- success.
 *	-ENOSPC	- insufficient disk resources.
 *	-EIO	- i/o error.
 */
int diAlloc(struct inode *pip, bool dir, struct inode *ip)
{
	int rc, ino, iagno, addext, extno, bitno, sword;
	int nwords, rem, i, agno;
	u32 mask, inosmap, extsmap;
	struct inode *ipimap;
	struct metapage *mp;
	ino_t inum;
	struct iag *iagp;
	struct inomap *imap;

	/* get the pointers to the inode map inode and the
	 * corresponding imap control structure.
	 */
	ipimap = JFS_SBI(pip->i_sb)->ipimap;
	imap = JFS_IP(ipimap)->i_imap;
	JFS_IP(ip)->ipimap = ipimap;
	JFS_IP(ip)->fileset = FILESYSTEM_I;

	/* for a directory, the allocation policy is to start
	 * at the ag level using the preferred ag.
	 */
	if (dir) {
		agno = dbNextAG(JFS_SBI(pip->i_sb)->ipbmap);
		AG_LOCK(imap, agno);
		goto tryag;
	}

	/* for files, the policy starts off by trying to allocate from
	 * the same iag containing the parent disk inode:
	 * try to allocate the new disk inode close to the parent disk
	 * inode, using parent disk inode number + 1 as the allocation
	 * hint.  (we use a left-to-right policy to attempt to avoid
	 * moving backward on the disk.)  compute the hint within the
	 * file system and the iag.
	 */

	/* get the ag number of this iag */
	agno = JFS_IP(pip)->agno;

	if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
		/*
		 * There is an open file actively growing.  We want to
		 * allocate new inodes from a different ag to avoid
		 * fragmentation problems.
		 */
		agno = dbNextAG(JFS_SBI(pip->i_sb)->ipbmap);
		AG_LOCK(imap, agno);
		goto tryag;
	}

	inum = pip->i_ino + 1;
	ino = inum & (INOSPERIAG - 1);

	/* back off the hint if it is outside of the iag */
	if (ino == 0)
		inum = pip->i_ino;

	/* lock the AG inode map information */
	AG_LOCK(imap, agno);

	/* Get read lock on imap inode */
	IREAD_LOCK(ipimap, RDWRLOCK_IMAP);

	/* get the iag number and read the iag */
	iagno = INOTOIAG(inum);
	if ((rc = diIAGRead(imap, iagno, &mp))) {
		IREAD_UNLOCK(ipimap);
		AG_UNLOCK(imap, agno);
		return (rc);
	}
	iagp = (struct iag *) mp->data;

	/* determine if new inode extent is allowed to be added to the iag.
	 * new inode extent can be added to the iag if the ag
	 * has less than 32 free disk inodes and the iag has free extents.
	 */
	addext = (imap->im_agctl[agno].numfree < 32 && iagp->nfreeexts);

	/*
	 *	try to allocate from the IAG
	 */
	/* check if the inode may be allocated from the iag
	 * (i.e. the inode has free inodes or new extent can be added).
	 */
	if (iagp->nfreeinos || addext) {
		/* determine the extent number of the hint.
		 */
		extno = ino >> L2INOSPEREXT;

		/* check if the extent containing the hint has backed
		 * inodes.  if so, try to allocate within this extent.
		 */
		if (addressPXD(&iagp->inoext[extno])) {
			bitno = ino & (INOSPEREXT - 1);
			if ((bitno =
			     diFindFree(le32_to_cpu(iagp->wmap[extno]),
					bitno))
			    < INOSPEREXT) {
				ino = (extno << L2INOSPEREXT) + bitno;

				/* a free inode (bit) was found within this
				 * extent, so allocate it.
				 */
				rc = diAllocBit(imap, iagp, ino);
				IREAD_UNLOCK(ipimap);
				if (rc) {
					assert(rc == -EIO);
				} else {
					/* set the results of the allocation
					 * and write the iag.
					 */
					diInitInode(ip, iagno, ino, extno,
						    iagp);
					mark_metapage_dirty(mp);
				}
				release_metapage(mp);

				/* free the AG lock and return.
				 */
				AG_UNLOCK(imap, agno);
				return (rc);
			}

			if (!addext)
				extno =
				    (extno ==
				     EXTSPERIAG - 1) ? 0 : extno + 1;
		}

		/*
		 * no free inodes within the extent containing the hint.
		 *
		 * try to allocate from the backed extents following
		 * hint or, if appropriate (i.e. addext is true), allocate
		 * an extent of free inodes at or following the extent
		 * containing the hint.
		 *
		 * the free inode and free extent summary maps are used
		 * here, so determine the starting summary map position
		 * and the number of words we'll have to examine.  again,
		 * the approach is to allocate following the hint, so we
		 * might have to initially ignore prior bits of the summary
		 * map that represent extents prior to the extent containing
		 * the hint and later revisit these bits.
		 */
		bitno = extno & (EXTSPERSUM - 1);
		nwords = (bitno == 0) ? SMAPSZ : SMAPSZ + 1;
		sword = extno >> L2EXTSPERSUM;

		/* mask any prior bits for the starting words of the
		 * summary map.
		 */
		mask = ONES << (EXTSPERSUM - bitno);
		inosmap = le32_to_cpu(iagp->inosmap[sword]) | mask;
		extsmap = le32_to_cpu(iagp->extsmap[sword]) | mask;

		/* scan the free inode and free extent summary maps for
		 * free resources.
		 */
		for (i = 0; i < nwords; i++) {
			/* check if this word of the free inode summary
			 * map describes an extent with free inodes.
			 */
			if (~inosmap) {
				/* an extent with free inodes has been
				 * found. determine the extent number
				 * and the inode number within the extent.
				 */
				rem = diFindFree(inosmap, 0);
				extno = (sword << L2EXTSPERSUM) + rem;
				rem = diFindFree(le32_to_cpu
						 (iagp->wmap[extno]), 0);
				if (rem >= INOSPEREXT) {
					IREAD_UNLOCK(ipimap);
					release_metapage(mp);
					AG_UNLOCK(imap, agno);
					jfs_error(ip->i_sb,
						  "diAlloc: can't find free bit "
						  "in wmap");
					return -EIO;
				}

				/* determine the inode number within the
				 * iag and allocate the inode from the
				 * map.
				 */
				ino = (extno << L2INOSPEREXT) + rem;
				rc = diAllocBit(imap, iagp, ino);
				IREAD_UNLOCK(ipimap);
				if (rc)
					assert(rc == -EIO);
				else {
					/* set the results of the allocation
					 * and write the iag.
					 */
					diInitInode(ip, iagno, ino, extno,
						    iagp);
					mark_metapage_dirty(mp);
				}
				release_metapage(mp);

				/* free the AG lock and return.
				 */
				AG_UNLOCK(imap, agno);
				return (rc);

			}

			/* check if we may allocate an extent of free
			 * inodes and whether this word of the free
			 * extents summary map describes a free extent.
			 */
			if (addext && ~extsmap) {
				/* a free extent has been found.  determine
				 * the extent number.
				 */
				rem = diFindFree(extsmap, 0);
				extno = (sword << L2EXTSPERSUM) + rem;

				/* allocate an extent of free inodes.
				 */
				if ((rc = diNewExt(imap, iagp, extno))) {
					/* if there is no disk space for a
					 * new extent, try to allocate the
					 * disk inode from somewhere else.
					 */
					if (rc == -ENOSPC)
						break;

					assert(rc == -EIO);
				} else {
					/* set the results of the allocation
					 * and write the iag.
					 */
					diInitInode(ip, iagno,
						    extno << L2INOSPEREXT,
						    extno, iagp);
					mark_metapage_dirty(mp);
				}
				release_metapage(mp);
				/* free the imap inode & the AG lock & return.
				 */
				IREAD_UNLOCK(ipimap);
				AG_UNLOCK(imap, agno);
				return (rc);
			}

			/* move on to the next set of summary map words.
			 */
			sword = (sword == SMAPSZ - 1) ? 0 : sword + 1;
			inosmap = le32_to_cpu(iagp->inosmap[sword]);
			extsmap = le32_to_cpu(iagp->extsmap[sword]);
		}
	}
	/* unlock imap inode */
	IREAD_UNLOCK(ipimap);

	/* nothing doing in this iag, so release it. */
	release_metapage(mp);

      tryag:
	/*
	 * try to allocate anywhere within the same AG as the parent inode.
	 */
	rc = diAllocAG(imap, agno, dir, ip);

	AG_UNLOCK(imap, agno);

	if (rc != -ENOSPC)
		return (rc);

	/*
	 * try to allocate in any AG.
	 */
	return (diAllocAny(imap, agno, dir, ip));
}

/*
 * NAME:	diAllocAG(imap,agno,dir,ip)
 *
 * FUNCTION:	allocate a disk inode from the allocation group.
 *
 *		this routine first determines if a new extent of free
 *		inodes should be added for the allocation group, with
 *		the current request satisfied from this extent. if this
 *		is the case, an attempt will be made to do just that.  if
 *		this attempt fails or it has been determined that a new
 *		extent should not be added, an attempt is made to satisfy
 *		the request by allocating an existing (backed) free inode
 *		from the allocation group.
 *
 * PRE CONDITION: Already have the AG lock for this AG.
 *
 * PARAMETERS:
 *	imap	- pointer to inode map control structure.
 *	agno	- allocation group to allocate from.
 *	dir	- 'true' if the new disk inode is for a directory.
 *	ip	- pointer to the new inode to be filled in on successful return
 *		  with the disk inode number allocated, its extent address
 *		  and the start of the ag.
 *
 * RETURN VALUES:
 *	0	- success.
 *	-ENOSPC	- insufficient disk resources.
 *	-EIO	- i/o error.
 */
static int
diAllocAG(struct inomap * imap, int agno, bool dir, struct inode *ip)
{
	int rc, addext, numfree, numinos;

	/* get the number of free and the number of backed disk
	 * inodes currently within the ag.
	 */
	numfree = imap->im_agctl[agno].numfree;
	numinos = imap->im_agctl[agno].numinos;

	if (numfree > numinos) {
		jfs_error(ip->i_sb, "diAllocAG: numfree > numinos");
		return -EIO;
	}

	/* determine if we should allocate a new extent of free inodes
	 * within the ag: for directory inodes, add a new extent
	 * if there are a small number of free inodes or number of free
	 * inodes is a small percentage of the number of backed inodes.
	 *
	 * NOTE(review): 64 / 256 / 20% are tuning thresholds, presumably
	 * chosen empirically for directory locality.
	 */
	if (dir)
		addext = (numfree < 64 ||
			  (numfree < 256
			   && ((numfree * 100) / numinos) <= 20));
	else
		addext = (numfree == 0);

	/*
	 * try to allocate a new extent of free inodes.
	 */
	if (addext) {
		/* if free space is not available for this new extent, try
		 * below to allocate a free and existing (already backed)
		 * inode from the ag.
		 */
		if ((rc = diAllocExt(imap, agno, ip)) != -ENOSPC)
			return (rc);
	}

	/*
	 * try to allocate an existing free inode from the ag.
	 */
	return (diAllocIno(imap, agno, ip));
}

/*
 * NAME:	diAllocAny(imap,agno,dir,iap)
 *
 * FUNCTION:	allocate a disk inode from any other allocation group.
 *
 *		this routine is called when an allocation attempt within
 *		the primary allocation group has failed. if attempts to
 *		allocate an inode from any allocation group other than the
 *		specified primary group.
 *
 * PARAMETERS:
 *	imap	- pointer to inode map control structure.
 *	agno	- primary allocation group (to avoid).
 *	dir	- 'true' if the new disk inode is for a directory.
 *	ip	- pointer to a new inode to be filled in on successful return
 *		  with the disk inode number allocated, its extent address
 *		  and the start of the ag.
 *
 * RETURN VALUES:
 *	0	- success.
 *	-ENOSPC	- insufficient disk resources.
 *	-EIO	- i/o error.
 */
static int
diAllocAny(struct inomap * imap, int agno, bool dir, struct inode *ip)
{
	int ag, rc;
	int maxag = JFS_SBI(imap->im_ipimap->i_sb)->bmap->db_maxag;


	/* try to allocate from the ags following agno up to
	 * the maximum ag number.
	 */
	for (ag = agno + 1; ag <= maxag; ag++) {
		AG_LOCK(imap, ag);

		rc = diAllocAG(imap, ag, dir, ip);

		AG_UNLOCK(imap, ag);

		if (rc != -ENOSPC)
			return (rc);
	}

	/* try to allocate from the ags in front of agno.
	 */
	for (ag = 0; ag < agno; ag++) {
		AG_LOCK(imap, ag);

		rc = diAllocAG(imap, ag, dir, ip);

		AG_UNLOCK(imap, ag);

		if (rc != -ENOSPC)
			return (rc);
	}

	/* no free disk inodes.
	 */
	return -ENOSPC;
}


/*
 * NAME:	diAllocIno(imap,agno,ip)
 *
 * FUNCTION:	allocate a disk inode from the allocation group's free
 *		inode list, returning an error if this free list is
 *		empty (i.e. no iags on the list).
 *
 *		allocation occurs from the first iag on the list using
 *		the iag's free inode summary map to find the leftmost
 *		free inode in the iag.
 *
 * PRE CONDITION: Already have AG lock for this AG.
 *
 * PARAMETERS:
 *	imap	- pointer to inode map control structure.
 *	agno	- allocation group.
 *	ip	- pointer to new inode to be filled in on successful return
 *		  with the disk inode number allocated, its extent address
 *		  and the start of the ag.
 *
 * RETURN VALUES:
 *	0	- success.
 *	-ENOSPC	- insufficient disk resources.
 *	-EIO	- i/o error.
 */
static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
{
	int iagno, ino, rc, rem, extno, sword;
	struct metapage *mp;
	struct iag *iagp;

	/* check if there are iags on the ag's free inode list.
	 */
	if ((iagno = imap->im_agctl[agno].inofree) < 0)
		return -ENOSPC;

	/* obtain read lock on imap inode */
	IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);

	/* read the iag at the head of the list.
	 */
	if ((rc = diIAGRead(imap, iagno, &mp))) {
		IREAD_UNLOCK(imap->im_ipimap);
		return (rc);
	}
	iagp = (struct iag *) mp->data;

	/* better be free inodes in this iag if it is on the
	 * list.
	 */
	if (!iagp->nfreeinos) {
		IREAD_UNLOCK(imap->im_ipimap);
		release_metapage(mp);
		jfs_error(ip->i_sb,
			  "diAllocIno: nfreeinos = 0, but iag on freelist");
		return -EIO;
	}

	/* scan the free inode summary map to find an extent
	 * with free inodes.
	 */
	for (sword = 0;; sword++) {
		if (sword >= SMAPSZ) {
			IREAD_UNLOCK(imap->im_ipimap);
			release_metapage(mp);
			jfs_error(ip->i_sb,
				  "diAllocIno: free inode not found in summary map");
			return -EIO;
		}

		if (~iagp->inosmap[sword])
			break;
	}

	/* found a extent with free inodes. determine
	 * the extent number.
	 */
	rem = diFindFree(le32_to_cpu(iagp->inosmap[sword]), 0);
	if (rem >= EXTSPERSUM) {
		IREAD_UNLOCK(imap->im_ipimap);
		release_metapage(mp);
		jfs_error(ip->i_sb, "diAllocIno: no free extent found");
		return -EIO;
	}
	extno = (sword << L2EXTSPERSUM) + rem;

	/* find the first free inode in the extent.
	 */
	rem = diFindFree(le32_to_cpu(iagp->wmap[extno]), 0);
	if (rem >= INOSPEREXT) {
		IREAD_UNLOCK(imap->im_ipimap);
		release_metapage(mp);
		jfs_error(ip->i_sb, "diAllocIno: free inode not found");
		return -EIO;
	}

	/* compute the inode number within the iag.
	 */
	ino = (extno << L2INOSPEREXT) + rem;

	/* allocate the inode.
	 */
	rc = diAllocBit(imap, iagp, ino);
	IREAD_UNLOCK(imap->im_ipimap);
	if (rc) {
		release_metapage(mp);
		return (rc);
	}

	/* set the results of the allocation and write the iag.
	 */
	diInitInode(ip, iagno, ino, extno, iagp);
	write_metapage(mp);

	return (0);
}


/*
 * NAME:	diAllocExt(imap,agno,ip)
 *
 * FUNCTION:	add a new extent of free inodes to an iag, allocating
 *		an inode from this extent to satisfy the current allocation
 *		request.
 *
 *		this routine first tries to find an existing iag with free
 *		extents through the ag free extent list.  if list is not
 *		empty, the head of the list will be selected as the home
 *		of the new extent of free inodes.  otherwise (the list is
 *		empty), a new iag will be allocated for the ag to contain
 *		the extent.
 *
 *		once an iag has been selected, the free extent summary map
 *		is used to locate a free extent within the iag and diNewExt()
 *		is called to initialize the extent, with initialization
 *		including the allocation of the first inode of the extent
 *		for the purpose of satisfying this request.
 *
 * PARAMETERS:
 *	imap	- pointer to inode map control structure.
 *	agno	- allocation group number.
 *	ip	- pointer to new inode to be filled in on successful return
 *		  with the disk inode number allocated, its extent address
 *		  and the start of the ag.
 *
 * RETURN VALUES:
 *	0	- success.
 *	-ENOSPC	- insufficient disk resources.
 *	-EIO	- i/o error.
 */
static int diAllocExt(struct inomap * imap, int agno, struct inode *ip)
{
	int rem, iagno, sword, extno, rc;
	struct metapage *mp;
	struct iag *iagp;

	/* check if the ag has any iags with free extents.  if not,
	 * allocate a new iag for the ag.
	 */
	if ((iagno = imap->im_agctl[agno].extfree) < 0) {
		/* If successful, diNewIAG will obtain the read lock on the
		 * imap inode.
		 */
		if ((rc = diNewIAG(imap, &iagno, agno, &mp))) {
			return (rc);
		}
		iagp = (struct iag *) mp->data;

		/* set the ag number if this a brand new iag
		 */
		iagp->agstart =
		    cpu_to_le64(AGTOBLK(agno, imap->im_ipimap));
	} else {
		/* read the iag.
		 */
		IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);
		if ((rc = diIAGRead(imap, iagno, &mp))) {
			IREAD_UNLOCK(imap->im_ipimap);
			jfs_error(ip->i_sb, "diAllocExt: error reading iag");
			return rc;
		}
		iagp = (struct iag *) mp->data;
	}

	/* using the free extent summary map, find a free extent.
	 */
	for (sword = 0;; sword++) {
		if (sword >= SMAPSZ) {
			release_metapage(mp);
			IREAD_UNLOCK(imap->im_ipimap);
			jfs_error(ip->i_sb,
				  "diAllocExt: free ext summary map not found");
			return -EIO;
		}
		if (~iagp->extsmap[sword])
			break;
	}

	/* determine the extent number of the free extent.
	 */
	rem = diFindFree(le32_to_cpu(iagp->extsmap[sword]), 0);
	if (rem >= EXTSPERSUM) {
		release_metapage(mp);
		IREAD_UNLOCK(imap->im_ipimap);
		jfs_error(ip->i_sb, "diAllocExt: free extent not found");
		return -EIO;
	}
	extno = (sword << L2EXTSPERSUM) + rem;

	/* initialize the new extent.
	 */
	rc = diNewExt(imap, iagp, extno);
	IREAD_UNLOCK(imap->im_ipimap);
	if (rc) {
		/* something bad happened.  if a new iag was allocated,
		 * place it back on the inode map's iag free list, and
		 * clear the ag number information.
		 */
		if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
			IAGFREE_LOCK(imap);
			iagp->iagfree = cpu_to_le32(imap->im_freeiag);
			imap->im_freeiag = iagno;
			IAGFREE_UNLOCK(imap);
		}
		write_metapage(mp);
		return (rc);
	}

	/* set the results of the allocation and write the iag.
	 */
	diInitInode(ip, iagno, extno << L2INOSPEREXT, extno, iagp);

	write_metapage(mp);

	return (0);
}


/*
 * NAME:	diAllocBit(imap,iagp,ino)
 *
 * FUNCTION:	allocate a backed inode from an iag.
 *
 *		this routine performs the mechanics of allocating a
 *		specified inode from a backed extent.
 *
 *		if the inode to be allocated represents the last free
 *		inode within the iag, the iag will be removed from the
 *		ag free inode list.
 *
 *		a careful update approach is used to provide consistency
 *		in the face of updates to multiple buffers.  under this
 *		approach, all required buffers are obtained before making
 *		any updates and are held until all updates are complete.
 *
 * PRE CONDITION: Already have buffer lock on iagp.  Already have AG lock on
 *	this AG.  Must have read lock on imap inode.
 *
 * PARAMETERS:
 *	imap	- pointer to inode map control structure.
 *	iagp	- pointer to iag.
 *	ino	- inode number to be allocated within the iag.
 *
 * RETURN VALUES:
 *	0	- success.
* -ENOSPC - insufficient disk resources. * -EIO - i/o error. */ static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino) { int extno, bitno, agno, sword, rc; struct metapage *amp = NULL, *bmp = NULL; struct iag *aiagp = NULL, *biagp = NULL; u32 mask; /* check if this is the last free inode within the iag. * if so, it will have to be removed from the ag free * inode list, so get the iags preceeding and following * it on the list. */ if (iagp->nfreeinos == cpu_to_le32(1)) { if ((int) le32_to_cpu(iagp->inofreefwd) >= 0) { if ((rc = diIAGRead(imap, le32_to_cpu(iagp->inofreefwd), &amp))) return (rc); aiagp = (struct iag *) amp->data; } if ((int) le32_to_cpu(iagp->inofreeback) >= 0) { if ((rc = diIAGRead(imap, le32_to_cpu(iagp->inofreeback), &bmp))) { if (amp) release_metapage(amp); return (rc); } biagp = (struct iag *) bmp->data; } } /* get the ag number, extent number, inode number within * the extent. */ agno = BLKTOAG(le64_to_cpu(iagp->agstart), JFS_SBI(imap->im_ipimap->i_sb)); extno = ino >> L2INOSPEREXT; bitno = ino & (INOSPEREXT - 1); /* compute the mask for setting the map. */ mask = HIGHORDER >> bitno; /* the inode should be free and backed. */ if (((le32_to_cpu(iagp->pmap[extno]) & mask) != 0) || ((le32_to_cpu(iagp->wmap[extno]) & mask) != 0) || (addressPXD(&iagp->inoext[extno]) == 0)) { if (amp) release_metapage(amp); if (bmp) release_metapage(bmp); jfs_error(imap->im_ipimap->i_sb, "diAllocBit: iag inconsistent"); return -EIO; } /* mark the inode as allocated in the working map. */ iagp->wmap[extno] |= cpu_to_le32(mask); /* check if all inodes within the extent are now * allocated. if so, update the free inode summary * map to reflect this. */ if (iagp->wmap[extno] == cpu_to_le32(ONES)) { sword = extno >> L2EXTSPERSUM; bitno = extno & (EXTSPERSUM - 1); iagp->inosmap[sword] |= cpu_to_le32(HIGHORDER >> bitno); } /* if this was the last free inode in the iag, remove the * iag from the ag free inode list. 
*/ if (iagp->nfreeinos == cpu_to_le32(1)) { if (amp) { aiagp->inofreeback = iagp->inofreeback; write_metapage(amp); } if (bmp) { biagp->inofreefwd = iagp->inofreefwd; write_metapage(bmp); } else { imap->im_agctl[agno].inofree = le32_to_cpu(iagp->inofreefwd); } iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1); } /* update the free inode count at the iag, ag, inode * map levels. */ le32_add_cpu(&iagp->nfreeinos, -1); imap->im_agctl[agno].numfree -= 1; atomic_dec(&imap->im_numfree); return (0); } /* * NAME: diNewExt(imap,iagp,extno) * * FUNCTION: initialize a new extent of inodes for an iag, allocating * the first inode of the extent for use for the current * allocation request. * * disk resources are allocated for the new extent of inodes * and the inodes themselves are initialized to reflect their * existence within the extent (i.e. their inode numbers and * inode extent addresses are set) and their initial state * (mode and link count are set to zero). * * if the iag is new, it is not yet on an ag extent free list * but will now be placed on this list. * * if the allocation of the new extent causes the iag to * have no free extent, the iag will be removed from the * ag extent free list. * * if the iag has no free backed inodes, it will be placed * on the ag free inode list, since the addition of the new * extent will now cause it to have free inodes. * * a careful update approach is used to provide consistency * (i.e. list consistency) in the face of updates to multiple * buffers. under this approach, all required buffers are * obtained before making any updates and are held until all * updates are complete. * * PRE CONDITION: Already have buffer lock on iagp. Already have AG lock on * this AG. Must have read lock on imap inode. * * PARAMETERS: * imap - pointer to inode map control structure. * iagp - pointer to iag. * extno - extent number. * * RETURN VALUES: * 0 - success. * -ENOSPC - insufficient disk resources. * -EIO - i/o error. 
*/ static int diNewExt(struct inomap * imap, struct iag * iagp, int extno) { int agno, iagno, fwd, back, freei = 0, sword, rc; struct iag *aiagp = NULL, *biagp = NULL, *ciagp = NULL; struct metapage *amp, *bmp, *cmp, *dmp; struct inode *ipimap; s64 blkno, hint; int i, j; u32 mask; ino_t ino; struct dinode *dp; struct jfs_sb_info *sbi; /* better have free extents. */ if (!iagp->nfreeexts) { jfs_error(imap->im_ipimap->i_sb, "diNewExt: no free extents"); return -EIO; } /* get the inode map inode. */ ipimap = imap->im_ipimap; sbi = JFS_SBI(ipimap->i_sb); amp = bmp = cmp = NULL; /* get the ag and iag numbers for this iag. */ agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi); iagno = le32_to_cpu(iagp->iagnum); /* check if this is the last free extent within the * iag. if so, the iag must be removed from the ag * free extent list, so get the iags preceeding and * following the iag on this list. */ if (iagp->nfreeexts == cpu_to_le32(1)) { if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) { if ((rc = diIAGRead(imap, fwd, &amp))) return (rc); aiagp = (struct iag *) amp->data; } if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) { if ((rc = diIAGRead(imap, back, &bmp))) goto error_out; biagp = (struct iag *) bmp->data; } } else { /* the iag has free extents. if all extents are free * (as is the case for a newly allocated iag), the iag * must be added to the ag free extent list, so get * the iag at the head of the list in preparation for * adding this iag to this list. */ fwd = back = -1; if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) { if ((fwd = imap->im_agctl[agno].extfree) >= 0) { if ((rc = diIAGRead(imap, fwd, &amp))) goto error_out; aiagp = (struct iag *) amp->data; } } } /* check if the iag has no free inodes. if so, the iag * will have to be added to the ag free inode list, so get * the iag at the head of the list in preparation for * adding this iag to this list. in doing this, we must * check if we already have the iag at the head of * the list in hand. 
*/ if (iagp->nfreeinos == 0) { freei = imap->im_agctl[agno].inofree; if (freei >= 0) { if (freei == fwd) { ciagp = aiagp; } else if (freei == back) { ciagp = biagp; } else { if ((rc = diIAGRead(imap, freei, &cmp))) goto error_out; ciagp = (struct iag *) cmp->data; } if (ciagp == NULL) { jfs_error(imap->im_ipimap->i_sb, "diNewExt: ciagp == NULL"); rc = -EIO; goto error_out; } } } /* allocate disk space for the inode extent. */ if ((extno == 0) || (addressPXD(&iagp->inoext[extno - 1]) == 0)) hint = ((s64) agno << sbi->bmap->db_agl2size) - 1; else hint = addressPXD(&iagp->inoext[extno - 1]) + lengthPXD(&iagp->inoext[extno - 1]) - 1; if ((rc = dbAlloc(ipimap, hint, (s64) imap->im_nbperiext, &blkno))) goto error_out; /* compute the inode number of the first inode within the * extent. */ ino = (iagno << L2INOSPERIAG) + (extno << L2INOSPEREXT); /* initialize the inodes within the newly allocated extent a * page at a time. */ for (i = 0; i < imap->im_nbperiext; i += sbi->nbperpage) { /* get a buffer for this page of disk inodes. */ dmp = get_metapage(ipimap, blkno + i, PSIZE, 1); if (dmp == NULL) { rc = -EIO; goto error_out; } dp = (struct dinode *) dmp->data; /* initialize the inode number, mode, link count and * inode extent address. */ for (j = 0; j < INOSPERPAGE; j++, dp++, ino++) { dp->di_inostamp = cpu_to_le32(sbi->inostamp); dp->di_number = cpu_to_le32(ino); dp->di_fileset = cpu_to_le32(FILESYSTEM_I); dp->di_mode = 0; dp->di_nlink = 0; PXDaddress(&(dp->di_ixpxd), blkno); PXDlength(&(dp->di_ixpxd), imap->im_nbperiext); } write_metapage(dmp); } /* if this is the last free extent within the iag, remove the * iag from the ag free extent list. 
*/ if (iagp->nfreeexts == cpu_to_le32(1)) { if (fwd >= 0) aiagp->extfreeback = iagp->extfreeback; if (back >= 0) biagp->extfreefwd = iagp->extfreefwd; else imap->im_agctl[agno].extfree = le32_to_cpu(iagp->extfreefwd); iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1); } else { /* if the iag has all free extents (newly allocated iag), * add the iag to the ag free extent list. */ if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) { if (fwd >= 0) aiagp->extfreeback = cpu_to_le32(iagno); iagp->extfreefwd = cpu_to_le32(fwd); iagp->extfreeback = cpu_to_le32(-1); imap->im_agctl[agno].extfree = iagno; } } /* if the iag has no free inodes, add the iag to the * ag free inode list. */ if (iagp->nfreeinos == 0) { if (freei >= 0) ciagp->inofreeback = cpu_to_le32(iagno); iagp->inofreefwd = cpu_to_le32(imap->im_agctl[agno].inofree); iagp->inofreeback = cpu_to_le32(-1); imap->im_agctl[agno].inofree = iagno; } /* initialize the extent descriptor of the extent. */ PXDlength(&iagp->inoext[extno], imap->im_nbperiext); PXDaddress(&iagp->inoext[extno], blkno); /* initialize the working and persistent map of the extent. * the working map will be initialized such that * it indicates the first inode of the extent is allocated. */ iagp->wmap[extno] = cpu_to_le32(HIGHORDER); iagp->pmap[extno] = 0; /* update the free inode and free extent summary maps * for the extent to indicate the extent has free inodes * and no longer represents a free extent. */ sword = extno >> L2EXTSPERSUM; mask = HIGHORDER >> (extno & (EXTSPERSUM - 1)); iagp->extsmap[sword] |= cpu_to_le32(mask); iagp->inosmap[sword] &= cpu_to_le32(~mask); /* update the free inode and free extent counts for the * iag. */ le32_add_cpu(&iagp->nfreeinos, (INOSPEREXT - 1)); le32_add_cpu(&iagp->nfreeexts, -1); /* update the free and backed inode counts for the ag. */ imap->im_agctl[agno].numfree += (INOSPEREXT - 1); imap->im_agctl[agno].numinos += INOSPEREXT; /* update the free and backed inode counts for the inode map. 
*/ atomic_add(INOSPEREXT - 1, &imap->im_numfree); atomic_add(INOSPEREXT, &imap->im_numinos); /* write the iags. */ if (amp) write_metapage(amp); if (bmp) write_metapage(bmp); if (cmp) write_metapage(cmp); return (0); error_out: /* release the iags. */ if (amp) release_metapage(amp); if (bmp) release_metapage(bmp); if (cmp) release_metapage(cmp); return (rc); } /* * NAME: diNewIAG(imap,iagnop,agno) * * FUNCTION: allocate a new iag for an allocation group. * * first tries to allocate the iag from the inode map * iagfree list: * if the list has free iags, the head of the list is removed * and returned to satisfy the request. * if the inode map's iag free list is empty, the inode map * is extended to hold a new iag. this new iag is initialized * and returned to satisfy the request. * * PARAMETERS: * imap - pointer to inode map control structure. * iagnop - pointer to an iag number set with the number of the * newly allocated iag upon successful return. * agno - allocation group number. * bpp - Buffer pointer to be filled in with new IAG's buffer * * RETURN VALUES: * 0 - success. * -ENOSPC - insufficient disk resources. * -EIO - i/o error. * * serialization: * AG lock held on entry/exit; * write lock on the map is held inside; * read lock on the map is held on successful completion; * * note: new iag transaction: * . synchronously write iag; * . write log of xtree and inode of imap; * . commit; * . synchronous write of xtree (right to left, bottom to top); * . at start of logredo(): init in-memory imap with one additional iag page; * . 
at end of logredo(): re-read imap inode to determine * new imap size; */ static int diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp) { int rc; int iagno, i, xlen; struct inode *ipimap; struct super_block *sb; struct jfs_sb_info *sbi; struct metapage *mp; struct iag *iagp; s64 xaddr = 0; s64 blkno; tid_t tid; struct inode *iplist[1]; /* pick up pointers to the inode map and mount inodes */ ipimap = imap->im_ipimap; sb = ipimap->i_sb; sbi = JFS_SBI(sb); /* acquire the free iag lock */ IAGFREE_LOCK(imap); /* if there are any iags on the inode map free iag list, * allocate the iag from the head of the list. */ if (imap->im_freeiag >= 0) { /* pick up the iag number at the head of the list */ iagno = imap->im_freeiag; /* determine the logical block number of the iag */ blkno = IAGTOLBLK(iagno, sbi->l2nbperpage); } else { /* no free iags. the inode map will have to be extented * to include a new iag. */ /* acquire inode map lock */ IWRITE_LOCK(ipimap, RDWRLOCK_IMAP); if (ipimap->i_size >> L2PSIZE != imap->im_nextiag + 1) { IWRITE_UNLOCK(ipimap); IAGFREE_UNLOCK(imap); jfs_error(imap->im_ipimap->i_sb, "diNewIAG: ipimap->i_size is wrong"); return -EIO; } /* get the next avaliable iag number */ iagno = imap->im_nextiag; /* make sure that we have not exceeded the maximum inode * number limit. */ if (iagno > (MAXIAGS - 1)) { /* release the inode map lock */ IWRITE_UNLOCK(ipimap); rc = -ENOSPC; goto out; } /* * synchronously append new iag page. 
*/ /* determine the logical address of iag page to append */ blkno = IAGTOLBLK(iagno, sbi->l2nbperpage); /* Allocate extent for new iag page */ xlen = sbi->nbperpage; if ((rc = dbAlloc(ipimap, 0, (s64) xlen, &xaddr))) { /* release the inode map lock */ IWRITE_UNLOCK(ipimap); goto out; } /* * start transaction of update of the inode map * addressing structure pointing to the new iag page; */ tid = txBegin(sb, COMMIT_FORCE); mutex_lock(&JFS_IP(ipimap)->commit_mutex); /* update the inode map addressing structure to point to it */ if ((rc = xtInsert(tid, ipimap, 0, blkno, xlen, &xaddr, 0))) { txEnd(tid); mutex_unlock(&JFS_IP(ipimap)->commit_mutex); /* Free the blocks allocated for the iag since it was * not successfully added to the inode map */ dbFree(ipimap, xaddr, (s64) xlen); /* release the inode map lock */ IWRITE_UNLOCK(ipimap); goto out; } /* update the inode map's inode to reflect the extension */ ipimap->i_size += PSIZE; inode_add_bytes(ipimap, PSIZE); /* assign a buffer for the page */ mp = get_metapage(ipimap, blkno, PSIZE, 0); if (!mp) { /* * This is very unlikely since we just created the * extent, but let's try to handle it correctly */ xtTruncate(tid, ipimap, ipimap->i_size - PSIZE, COMMIT_PWMAP); txAbort(tid, 0); txEnd(tid); mutex_unlock(&JFS_IP(ipimap)->commit_mutex); /* release the inode map lock */ IWRITE_UNLOCK(ipimap); rc = -EIO; goto out; } iagp = (struct iag *) mp->data; /* init the iag */ memset(iagp, 0, sizeof(struct iag)); iagp->iagnum = cpu_to_le32(iagno); iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1); iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1); iagp->iagfree = cpu_to_le32(-1); iagp->nfreeinos = 0; iagp->nfreeexts = cpu_to_le32(EXTSPERIAG); /* initialize the free inode summary map (free extent * summary map initialization handled by bzero). 
*/ for (i = 0; i < SMAPSZ; i++) iagp->inosmap[i] = cpu_to_le32(ONES); /* * Write and sync the metapage */ flush_metapage(mp); /* * txCommit(COMMIT_FORCE) will synchronously write address * index pages and inode after commit in careful update order * of address index pages (right to left, bottom up); */ iplist[0] = ipimap; rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE); txEnd(tid); mutex_unlock(&JFS_IP(ipimap)->commit_mutex); duplicateIXtree(sb, blkno, xlen, &xaddr); /* update the next avaliable iag number */ imap->im_nextiag += 1; /* Add the iag to the iag free list so we don't lose the iag * if a failure happens now. */ imap->im_freeiag = iagno; /* Until we have logredo working, we want the imap inode & * control page to be up to date. */ diSync(ipimap); /* release the inode map lock */ IWRITE_UNLOCK(ipimap); } /* obtain read lock on map */ IREAD_LOCK(ipimap, RDWRLOCK_IMAP); /* read the iag */ if ((rc = diIAGRead(imap, iagno, &mp))) { IREAD_UNLOCK(ipimap); rc = -EIO; goto out; } iagp = (struct iag *) mp->data; /* remove the iag from the iag free list */ imap->im_freeiag = le32_to_cpu(iagp->iagfree); iagp->iagfree = cpu_to_le32(-1); /* set the return iag number and buffer pointer */ *iagnop = iagno; *mpp = mp; out: /* release the iag free lock */ IAGFREE_UNLOCK(imap); return (rc); } /* * NAME: diIAGRead() * * FUNCTION: get the buffer for the specified iag within a fileset * or aggregate inode map. * * PARAMETERS: * imap - pointer to inode map control structure. * iagno - iag number. * bpp - point to buffer pointer to be filled in on successful * exit. * * SERIALIZATION: * must have read lock on imap inode * (When called by diExtendFS, the filesystem is quiesced, therefore * the read lock is unnecessary.) * * RETURN VALUES: * 0 - success. * -EIO - i/o error. */ static int diIAGRead(struct inomap * imap, int iagno, struct metapage ** mpp) { struct inode *ipimap = imap->im_ipimap; s64 blkno; /* compute the logical block number of the iag. 
*/ blkno = IAGTOLBLK(iagno, JFS_SBI(ipimap->i_sb)->l2nbperpage); /* read the iag. */ *mpp = read_metapage(ipimap, blkno, PSIZE, 0); if (*mpp == NULL) { return -EIO; } return (0); } /* * NAME: diFindFree() * * FUNCTION: find the first free bit in a word starting at * the specified bit position. * * PARAMETERS: * word - word to be examined. * start - starting bit position. * * RETURN VALUES: * bit position of first free bit in the word or 32 if * no free bits were found. */ static int diFindFree(u32 word, int start) { int bitno; assert(start < 32); /* scan the word for the first free bit. */ for (word <<= start, bitno = start; bitno < 32; bitno++, word <<= 1) { if ((word & HIGHORDER) == 0) break; } return (bitno); } /* * NAME: diUpdatePMap() * * FUNCTION: Update the persistent map in an IAG for the allocation or * freeing of the specified inode. * * PRE CONDITIONS: Working map has already been updated for allocate. * * PARAMETERS: * ipimap - Incore inode map inode * inum - Number of inode to mark in permanent map * is_free - If 'true' indicates inode should be marked freed, otherwise * indicates inode should be marked allocated. 
* * RETURN VALUES: * 0 for success */ int diUpdatePMap(struct inode *ipimap, unsigned long inum, bool is_free, struct tblock * tblk) { int rc; struct iag *iagp; struct metapage *mp; int iagno, ino, extno, bitno; struct inomap *imap; u32 mask; struct jfs_log *log; int lsn, difft, diffp; unsigned long flags; imap = JFS_IP(ipimap)->i_imap; /* get the iag number containing the inode */ iagno = INOTOIAG(inum); /* make sure that the iag is contained within the map */ if (iagno >= imap->im_nextiag) { jfs_error(ipimap->i_sb, "diUpdatePMap: the iag is outside the map"); return -EIO; } /* read the iag */ IREAD_LOCK(ipimap, RDWRLOCK_IMAP); rc = diIAGRead(imap, iagno, &mp); IREAD_UNLOCK(ipimap); if (rc) return (rc); metapage_wait_for_io(mp); iagp = (struct iag *) mp->data; /* get the inode number and extent number of the inode within * the iag and the inode number within the extent. */ ino = inum & (INOSPERIAG - 1); extno = ino >> L2INOSPEREXT; bitno = ino & (INOSPEREXT - 1); mask = HIGHORDER >> bitno; /* * mark the inode free in persistent map: */ if (is_free) { /* The inode should have been allocated both in working * map and in persistent map; * the inode will be freed from working map at the release * of last reference release; */ if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) { jfs_error(ipimap->i_sb, "diUpdatePMap: inode %ld not marked as " "allocated in wmap!", inum); } if (!(le32_to_cpu(iagp->pmap[extno]) & mask)) { jfs_error(ipimap->i_sb, "diUpdatePMap: inode %ld not marked as " "allocated in pmap!", inum); } /* update the bitmap for the extent of the freed inode */ iagp->pmap[extno] &= cpu_to_le32(~mask); } /* * mark the inode allocated in persistent map: */ else { /* The inode should be already allocated in the working map * and should be free in persistent map; */ if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) { release_metapage(mp); jfs_error(ipimap->i_sb, "diUpdatePMap: the inode is not allocated in " "the working map"); return -EIO; } if 
((le32_to_cpu(iagp->pmap[extno]) & mask) != 0) { release_metapage(mp); jfs_error(ipimap->i_sb, "diUpdatePMap: the inode is not free in the " "persistent map"); return -EIO; } /* update the bitmap for the extent of the allocated inode */ iagp->pmap[extno] |= cpu_to_le32(mask); } /* * update iag lsn */ lsn = tblk->lsn; log = JFS_SBI(tblk->sb)->log; LOGSYNC_LOCK(log, flags); if (mp->lsn != 0) { /* inherit older/smaller lsn */ logdiff(difft, lsn, log); logdiff(diffp, mp->lsn, log); if (difft < diffp) { mp->lsn = lsn; /* move mp after tblock in logsync list */ list_move(&mp->synclist, &tblk->synclist); } /* inherit younger/larger clsn */ assert(mp->clsn); logdiff(difft, tblk->clsn, log); logdiff(diffp, mp->clsn, log); if (difft > diffp) mp->clsn = tblk->clsn; } else { mp->log = log; mp->lsn = lsn; /* insert mp after tblock in logsync list */ log->count++; list_add(&mp->synclist, &tblk->synclist); mp->clsn = tblk->clsn; } LOGSYNC_UNLOCK(log, flags); write_metapage(mp); return (0); } /* * diExtendFS() * * function: update imap for extendfs(); * * note: AG size has been increased s.t. each k old contiguous AGs are * coalesced into a new AG; */ int diExtendFS(struct inode *ipimap, struct inode *ipbmap) { int rc, rcx = 0; struct inomap *imap = JFS_IP(ipimap)->i_imap; struct iag *iagp = NULL, *hiagp = NULL; struct bmap *mp = JFS_SBI(ipbmap->i_sb)->bmap; struct metapage *bp, *hbp; int i, n, head; int numinos, xnuminos = 0, xnumfree = 0; s64 agstart; jfs_info("diExtendFS: nextiag:%d numinos:%d numfree:%d", imap->im_nextiag, atomic_read(&imap->im_numinos), atomic_read(&imap->im_numfree)); /* * reconstruct imap * * coalesce contiguous k (newAGSize/oldAGSize) AGs; * i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn; * note: new AG size = old AG size * (2**x). 
*/ /* init per AG control information im_agctl[] */ for (i = 0; i < MAXAG; i++) { imap->im_agctl[i].inofree = -1; imap->im_agctl[i].extfree = -1; imap->im_agctl[i].numinos = 0; /* number of backed inodes */ imap->im_agctl[i].numfree = 0; /* number of free backed inodes */ } /* * process each iag page of the map. * * rebuild AG Free Inode List, AG Free Inode Extent List; */ for (i = 0; i < imap->im_nextiag; i++) { if ((rc = diIAGRead(imap, i, &bp))) { rcx = rc; continue; } iagp = (struct iag *) bp->data; if (le32_to_cpu(iagp->iagnum) != i) { release_metapage(bp); jfs_error(ipimap->i_sb, "diExtendFs: unexpected value of iagnum"); return -EIO; } /* leave free iag in the free iag list */ if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) { release_metapage(bp); continue; } /* agstart that computes to the same ag is treated as same; */ agstart = le64_to_cpu(iagp->agstart); /* iagp->agstart = agstart & ~(mp->db_agsize - 1); */ n = agstart >> mp->db_agl2size; /* compute backed inodes */ numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts)) << L2INOSPEREXT; if (numinos > 0) { /* merge AG backed inodes */ imap->im_agctl[n].numinos += numinos; xnuminos += numinos; } /* if any backed free inodes, insert at AG free inode list */ if ((int) le32_to_cpu(iagp->nfreeinos) > 0) { if ((head = imap->im_agctl[n].inofree) == -1) { iagp->inofreefwd = cpu_to_le32(-1); iagp->inofreeback = cpu_to_le32(-1); } else { if ((rc = diIAGRead(imap, head, &hbp))) { rcx = rc; goto nextiag; } hiagp = (struct iag *) hbp->data; hiagp->inofreeback = iagp->iagnum; iagp->inofreefwd = cpu_to_le32(head); iagp->inofreeback = cpu_to_le32(-1); write_metapage(hbp); } imap->im_agctl[n].inofree = le32_to_cpu(iagp->iagnum); /* merge AG backed free inodes */ imap->im_agctl[n].numfree += le32_to_cpu(iagp->nfreeinos); xnumfree += le32_to_cpu(iagp->nfreeinos); } /* if any free extents, insert at AG free extent list */ if (le32_to_cpu(iagp->nfreeexts) > 0) { if ((head = imap->im_agctl[n].extfree) == -1) { iagp->extfreefwd 
= cpu_to_le32(-1); iagp->extfreeback = cpu_to_le32(-1); } else { if ((rc = diIAGRead(imap, head, &hbp))) { rcx = rc; goto nextiag; } hiagp = (struct iag *) hbp->data; hiagp->extfreeback = iagp->iagnum; iagp->extfreefwd = cpu_to_le32(head); iagp->extfreeback = cpu_to_le32(-1); write_metapage(hbp); } imap->im_agctl[n].extfree = le32_to_cpu(iagp->iagnum); } nextiag: write_metapage(bp); } if (xnuminos != atomic_read(&imap->im_numinos) || xnumfree != atomic_read(&imap->im_numfree)) { jfs_error(ipimap->i_sb, "diExtendFs: numinos or numfree incorrect"); return -EIO; } return rcx; } /* * duplicateIXtree() * * serialization: IWRITE_LOCK held on entry/exit * * note: shadow page with regular inode (rel.2); */ static void duplicateIXtree(struct super_block *sb, s64 blkno, int xlen, s64 *xaddr) { struct jfs_superblock *j_sb; struct buffer_head *bh; struct inode *ip; tid_t tid; /* if AIT2 ipmap2 is bad, do not try to update it */ if (JFS_SBI(sb)->mntflag & JFS_BAD_SAIT) /* s_flag */ return; ip = diReadSpecial(sb, FILESYSTEM_I, 1); if (ip == NULL) { JFS_SBI(sb)->mntflag |= JFS_BAD_SAIT; if (readSuper(sb, &bh)) return; j_sb = (struct jfs_superblock *)bh->b_data; j_sb->s_flag |= cpu_to_le32(JFS_BAD_SAIT); mark_buffer_dirty(bh); sync_dirty_buffer(bh); brelse(bh); return; } /* start transaction */ tid = txBegin(sb, COMMIT_FORCE); /* update the inode map addressing structure to point to it */ if (xtInsert(tid, ip, 0, blkno, xlen, xaddr, 0)) { JFS_SBI(sb)->mntflag |= JFS_BAD_SAIT; txAbort(tid, 1); goto cleanup; } /* update the inode map's inode to reflect the extension */ ip->i_size += PSIZE; inode_add_bytes(ip, PSIZE); txCommit(tid, 1, &ip, COMMIT_FORCE); cleanup: txEnd(tid); diFreeSpecial(ip); } /* * NAME: copy_from_dinode() * * FUNCTION: Copies inode info from disk inode to in-memory inode * * RETURN VALUES: * 0 - success * -ENOMEM - insufficient memory */ static int copy_from_dinode(struct dinode * dip, struct inode *ip) { struct jfs_inode_info *jfs_ip = JFS_IP(ip); struct 
jfs_sb_info *sbi = JFS_SBI(ip->i_sb); jfs_ip->fileset = le32_to_cpu(dip->di_fileset); jfs_ip->mode2 = le32_to_cpu(dip->di_mode); jfs_set_inode_flags(ip); ip->i_mode = le32_to_cpu(dip->di_mode) & 0xffff; if (sbi->umask != -1) { ip->i_mode = (ip->i_mode & ~0777) | (0777 & ~sbi->umask); /* For directories, add x permission if r is allowed by umask */ if (S_ISDIR(ip->i_mode)) { if (ip->i_mode & 0400) ip->i_mode |= 0100; if (ip->i_mode & 0040) ip->i_mode |= 0010; if (ip->i_mode & 0004) ip->i_mode |= 0001; } } ip->i_nlink = le32_to_cpu(dip->di_nlink); jfs_ip->saved_uid = le32_to_cpu(dip->di_uid); if (sbi->uid == -1) ip->i_uid = jfs_ip->saved_uid; else { ip->i_uid = sbi->uid; } jfs_ip->saved_gid = le32_to_cpu(dip->di_gid); if (sbi->gid == -1) ip->i_gid = jfs_ip->saved_gid; else { ip->i_gid = sbi->gid; } ip->i_size = le64_to_cpu(dip->di_size); ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec); ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec); ip->i_mtime.tv_sec = le32_to_cpu(dip->di_mtime.tv_sec); ip->i_mtime.tv_nsec = le32_to_cpu(dip->di_mtime.tv_nsec); ip->i_ctime.tv_sec = le32_to_cpu(dip->di_ctime.tv_sec); ip->i_ctime.tv_nsec = le32_to_cpu(dip->di_ctime.tv_nsec); ip->i_blocks = LBLK2PBLK(ip->i_sb, le64_to_cpu(dip->di_nblocks)); ip->i_generation = le32_to_cpu(dip->di_gen); jfs_ip->ixpxd = dip->di_ixpxd; /* in-memory pxd's are little-endian */ jfs_ip->acl = dip->di_acl; /* as are dxd's */ jfs_ip->ea = dip->di_ea; jfs_ip->next_index = le32_to_cpu(dip->di_next_index); jfs_ip->otime = le32_to_cpu(dip->di_otime.tv_sec); jfs_ip->acltype = le32_to_cpu(dip->di_acltype); if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode)) { jfs_ip->dev = le32_to_cpu(dip->di_rdev); ip->i_rdev = new_decode_dev(jfs_ip->dev); } if (S_ISDIR(ip->i_mode)) { memcpy(&jfs_ip->i_dirtable, &dip->di_dirtable, 384); } else if (S_ISREG(ip->i_mode) || S_ISLNK(ip->i_mode)) { memcpy(&jfs_ip->i_xtroot, &dip->di_xtroot, 288); } else memcpy(&jfs_ip->i_inline_ea, &dip->di_inlineea, 128); /* Zero the 
in-memory-only stuff */ jfs_ip->cflag = 0; jfs_ip->btindex = 0; jfs_ip->btorder = 0; jfs_ip->bxflag = 0; jfs_ip->blid = 0; jfs_ip->atlhead = 0; jfs_ip->atltail = 0; jfs_ip->xtlid = 0; return (0); } /* * NAME: copy_to_dinode() * * FUNCTION: Copies inode info from in-memory inode to disk inode */ static void copy_to_dinode(struct dinode * dip, struct inode *ip) { struct jfs_inode_info *jfs_ip = JFS_IP(ip); struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); dip->di_fileset = cpu_to_le32(jfs_ip->fileset); dip->di_inostamp = cpu_to_le32(sbi->inostamp); dip->di_number = cpu_to_le32(ip->i_ino); dip->di_gen = cpu_to_le32(ip->i_generation); dip->di_size = cpu_to_le64(ip->i_size); dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks)); dip->di_nlink = cpu_to_le32(ip->i_nlink); if (sbi->uid == -1) dip->di_uid = cpu_to_le32(ip->i_uid); else dip->di_uid = cpu_to_le32(jfs_ip->saved_uid); if (sbi->gid == -1) dip->di_gid = cpu_to_le32(ip->i_gid); else dip->di_gid = cpu_to_le32(jfs_ip->saved_gid); jfs_get_inode_flags(jfs_ip); /* * mode2 is only needed for storing the higher order bits. 
* Trust i_mode for the lower order ones */ if (sbi->umask == -1) dip->di_mode = cpu_to_le32((jfs_ip->mode2 & 0xffff0000) | ip->i_mode); else /* Leave the original permissions alone */ dip->di_mode = cpu_to_le32(jfs_ip->mode2); dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec); dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec); dip->di_ctime.tv_sec = cpu_to_le32(ip->i_ctime.tv_sec); dip->di_ctime.tv_nsec = cpu_to_le32(ip->i_ctime.tv_nsec); dip->di_mtime.tv_sec = cpu_to_le32(ip->i_mtime.tv_sec); dip->di_mtime.tv_nsec = cpu_to_le32(ip->i_mtime.tv_nsec); dip->di_ixpxd = jfs_ip->ixpxd; /* in-memory pxd's are little-endian */ dip->di_acl = jfs_ip->acl; /* as are dxd's */ dip->di_ea = jfs_ip->ea; dip->di_next_index = cpu_to_le32(jfs_ip->next_index); dip->di_otime.tv_sec = cpu_to_le32(jfs_ip->otime); dip->di_otime.tv_nsec = 0; dip->di_acltype = cpu_to_le32(jfs_ip->acltype); if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode)) dip->di_rdev = cpu_to_le32(jfs_ip->dev); }
gpl-2.0
Nihhaar/android_kernel_xiaomi_mocha
arch/arm/mach-imx/devices/platform-flexcan.c
2212
1716
/* * Copyright (C) 2010 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include "../hardware.h" #include "devices-common.h" #define imx_flexcan_data_entry_single(soc, _id, _hwid, _size) \ { \ .id = _id, \ .iobase = soc ## _CAN ## _hwid ## _BASE_ADDR, \ .iosize = _size, \ .irq = soc ## _INT_CAN ## _hwid, \ } #define imx_flexcan_data_entry(soc, _id, _hwid, _size) \ [_id] = imx_flexcan_data_entry_single(soc, _id, _hwid, _size) #ifdef CONFIG_SOC_IMX25 const struct imx_flexcan_data imx25_flexcan_data[] __initconst = { #define imx25_flexcan_data_entry(_id, _hwid) \ imx_flexcan_data_entry(MX25, _id, _hwid, SZ_16K) imx25_flexcan_data_entry(0, 1), imx25_flexcan_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX25 */ #ifdef CONFIG_SOC_IMX35 const struct imx_flexcan_data imx35_flexcan_data[] __initconst = { #define imx35_flexcan_data_entry(_id, _hwid) \ imx_flexcan_data_entry(MX35, _id, _hwid, SZ_16K) imx35_flexcan_data_entry(0, 1), imx35_flexcan_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX35 */ struct platform_device *__init imx_add_flexcan( const struct imx_flexcan_data *data, const struct flexcan_platform_data *pdata) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + data->iosize - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device("flexcan", data->id, res, ARRAY_SIZE(res), pdata, sizeof(*pdata)); }
gpl-2.0
NinjahMeh/android_kernel_huawei_angler
sound/pci/ice1712/prodigy192.c
2724
22593
/*
 *   ALSA driver for ICEnsemble VT1724 (Envy24HT)
 *
 *   Lowlevel functions for AudioTrak Prodigy 192 cards
 *   Supported IEC958 input from optional MI/ODI/O add-on card.
 *
 *   Specifics (SW, HW):
 *   -------------------
 *	* 49.5MHz crystal
 *	* SPDIF-OUT on the card:
 *	  - coax (through isolation transformer)/toslink supplied by
 *	      74HC04 gates - 3 in parallel
 *	  - output switched between on-board CD drive dig-out connector
 *	      and ice1724 SPDTX pin, using 74HC02 NOR gates, controlled
 *	      by GPIO20 (0 = CD dig-out, 1 = SPDTX)
 *	* SPDTX goes straight to MI/ODI/O card's SPDIF-OUT coax
 *
 *	* MI/ODI/O card: AK4114 based, used for iec958 input only
 *		- toslink input -> RX0
 *		- coax input -> RX1
 *		- 4wire protocol:
 *			AK4114		ICE1724
 *			------------------------------
 *			CDTO (pin 32) -- GPIO11 pin 86
 *			CDTI (pin 33) -- GPIO10 pin 77
 *			CCLK (pin 34) -- GPIO9  pin 76
 *			CSN  (pin 35) -- GPIO8  pin 75
 *		- output data Mode 7 (24bit, I2S, slave)
 *		- both MCKO1 and MCKO2 of ak4114 are fed to FPGA, which
 *		  outputs master clock to SPMCLKIN of ice1724.
 *		  Experimentally I found out that only a combination of
 *		  OCKS0=1, OCKS1=1 (128fs, 64fs output) and ice1724 -
 *		  VT1724_MT_I2S_MCLK_128X=0 (256fs input) yields correct
 *		  sampling rate.  That means the FPGA doubles the
 *		  MCK01 rate.
 *
 *	Copyright (c) 2003 Takashi Iwai <tiwai@suse.de>
 *	Copyright (c) 2003 Dimitromanolakis Apostolos <apostol@cs.utoronto.ca>
 *	Copyright (c) 2004 Kouichi ONO <co2b@ceres.dti.ne.jp>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>

#include "ice1712.h"
#include "envy24ht.h"
#include "prodigy192.h"
#include "stac946x.h"
#include <sound/tlv.h>

/* Per-card private data, hung off ice->spec. */
struct prodigy192_spec {
	struct ak4114 *ak4114;	/* NULL when no MI/ODI/O add-on is present */
	/* rate change needs atomic mute/unmute of all dacs*/
	struct mutex mute_mutex;
};

/* Write one STAC9460 codec register over the VT1724 I2C bus. */
static inline void stac9460_put(struct snd_ice1712 *ice, int reg,
				unsigned char val)
{
	snd_vt1724_write_i2c(ice, PRODIGY192_STAC9460_ADDR, reg, val);
}

/* Read one STAC9460 codec register over the VT1724 I2C bus. */
static inline unsigned char stac9460_get(struct snd_ice1712 *ice, int reg)
{
	return snd_vt1724_read_i2c(ice, PRODIGY192_STAC9460_ADDR, reg);
}

/*
 * DAC mute control
 */

/*
 * idx = STAC9460 volume register number, mute: 0 = mute, 1 = unmute
 *
 * Bit 7 of a volume register is the mute bit (1 = muted); the register
 * is only rewritten when the state actually changes.  Returns 1 when
 * the hardware register was modified, 0 otherwise.
 */
static int stac9460_dac_mute(struct snd_ice1712 *ice, int idx,
			     unsigned char mute)
{
	unsigned char new, old;
	int change;
	old = stac9460_get(ice, idx);
	new = (~mute << 7 & 0x80) | (old & ~0x80);
	change = (new != old);
	if (change)
		/*printk ("Volume register 0x%02x: 0x%02x\n", idx, new);*/
		stac9460_put(ice, idx, new);
	return change;
}

#define stac9460_dac_mute_info		snd_ctl_boolean_mono_info

/* Read back the mute state; private_value selects the master register. */
static int stac9460_dac_mute_get(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned char val;
	int idx;

	if (kcontrol->private_value)
		idx = STAC946X_MASTER_VOLUME;
	else
		idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id)
			+ STAC946X_LF_VOLUME;
	val = stac9460_get(ice, idx);
	ucontrol->value.integer.value[0] = (~val >> 7) & 0x1;
	return 0;
}

/* Change the mute state, serialized against rate-change mute/unmute. */
static int stac9460_dac_mute_put(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	struct prodigy192_spec *spec = ice->spec;
	int idx, change;

	if (kcontrol->private_value)
		idx = STAC946X_MASTER_VOLUME;
	else
		idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id)
			+ STAC946X_LF_VOLUME;
	/* due to possible conflicts with stac9460_set_rate_val, mutexing */
	mutex_lock(&spec->mute_mutex);
	/*
	printk(KERN_DEBUG "Mute put: reg 0x%02x, ctrl value: 0x%02x\n", idx,
	       ucontrol->value.integer.value[0]);
	*/
	change = stac9460_dac_mute(ice, idx, ucontrol->value.integer.value[0]);
	mutex_unlock(&spec->mute_mutex);
	return change;
}

/*
 * DAC volume attenuation mixer control
 */
static int stac9460_dac_vol_info(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;		/* mute */
	uinfo->value.integer.max = 0x7f;	/* 0dB */
	return 0;
}

/* Report volume; hardware stores attenuation, control reports gain. */
static int stac9460_dac_vol_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int idx;
	unsigned char vol;

	if (kcontrol->private_value)
		idx = STAC946X_MASTER_VOLUME;
	else
		idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id)
			+ STAC946X_LF_VOLUME;
	vol = stac9460_get(ice, idx) & 0x7f;
	/* invert: register 0x00 = 0dB, 0x7f = full attenuation */
	ucontrol->value.integer.value[0] = 0x7f - vol;
	return 0;
}

/* Set volume, preserving the mute bit (bit 7) of the register. */
static int stac9460_dac_vol_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int idx;
	unsigned char tmp, ovol, nvol;
	int change;

	if (kcontrol->private_value)
		idx = STAC946X_MASTER_VOLUME;
	else
		idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id)
			+ STAC946X_LF_VOLUME;
	nvol = ucontrol->value.integer.value[0];
	tmp = stac9460_get(ice, idx);
	ovol = 0x7f - (tmp & 0x7f);
	change = (ovol != nvol);
	if (change) {
		/* ovol kept only for the (commented-out) debug printk */
		ovol = (0x7f - nvol) | (tmp & 0x80);
		/*
		printk(KERN_DEBUG "DAC Volume: reg 0x%02x: 0x%02x\n",
		       idx, ovol);
		*/
		stac9460_put(ice, idx, (0x7f - nvol) | (tmp & 0x80));
	}
	return change;
}

/*
 * ADC mute control
 */
#define stac9460_adc_mute_info		snd_ctl_boolean_stereo_info

/* Read the mute bits of the left/right MIC volume registers. */
static int stac9460_adc_mute_get(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned char val;
	int i;

	for (i = 0; i < 2; ++i) {
		val = stac9460_get(ice, STAC946X_MIC_L_VOLUME + i);
		ucontrol->value.integer.value[i] = ~val>>7 & 0x1;
	}
	return 0;
}

/* Write the mute bits for both ADC channels; returns last change flag. */
static int stac9460_adc_mute_put(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned char new, old;
	int i, reg;
	int change;

	for (i = 0; i < 2; ++i) {
		reg = STAC946X_MIC_L_VOLUME + i;
		old = stac9460_get(ice, reg);
		new = (~ucontrol->value.integer.value[i]<<7&0x80) |
			(old&~0x80);
		change = (new != old);
		if (change)
			stac9460_put(ice, reg, new);
	}
	return change;
}

/*
 * ADC gain mixer control
 */
static int stac9460_adc_vol_info(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;		/* 0dB */
	uinfo->value.integer.max = 0x0f;	/* 22.5dB */
	return 0;
}

/* Report ADC gain for both channels (register stores inverted value). */
static int stac9460_adc_vol_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int i, reg;
	unsigned char vol;

	for (i = 0; i < 2; ++i) {
		reg = STAC946X_MIC_L_VOLUME + i;
		vol = stac9460_get(ice, reg) & 0x0f;
		ucontrol->value.integer.value[i] = 0x0f - vol;
	}
	return 0;
}

/* Set ADC gain, preserving the upper (non-gain) register bits. */
static int stac9460_adc_vol_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int i, reg;
	unsigned char ovol, nvol;
	int change;

	for (i = 0; i < 2; ++i) {
		reg = STAC946X_MIC_L_VOLUME + i;
		nvol = ucontrol->value.integer.value[i] & 0x0f;
		ovol = 0x0f - stac9460_get(ice, reg);
		change = ((ovol & 0x0f) != nvol);
		if (change)
			stac9460_put(ice, reg, (0x0f - nvol) |
				     (ovol & ~0x0f));
	}
	return change;
}

/* Analog capture source selector (bit 7 of GENERAL_PURPOSE register). */
static int stac9460_mic_sw_info(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[2] = { "Line In", "Mic" };

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = 2;

	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
		uinfo->value.enumerated.item = uinfo->value.enumerated.items
			- 1;
	strcpy(uinfo->value.enumerated.name,
	       texts[uinfo->value.enumerated.item]);
	return 0;
}

static int stac9460_mic_sw_get(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned char val;

	val = stac9460_get(ice, STAC946X_GENERAL_PURPOSE);
	ucontrol->value.enumerated.item[0] = (val >> 7) & 0x1;
	return 0;
}

static int stac9460_mic_sw_put(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned char new, old;
	int change;

	old = stac9460_get(ice, STAC946X_GENERAL_PURPOSE);
	new = (ucontrol->value.enumerated.item[0] << 7 & 0x80) | (old & ~0x80);
	change = (new != old);
	if (change)
		stac9460_put(ice, STAC946X_GENERAL_PURPOSE, new);
	return change;
}

/*
 * Handler for setting correct codec rate - called when rate change is
 * detected.  Mutes all seven DACs, reprograms the master clocking mode,
 * then unmutes only the DACs that were unmuted before the change.
 */
static void stac9460_set_rate_val(struct snd_ice1712 *ice, unsigned int rate)
{
	unsigned char old, new;
	int idx;
	unsigned char changed[7];	/* per-DAC "was unmuted" flags */
	struct prodigy192_spec *spec = ice->spec;

	if (rate == 0)	/* no hint - S/PDIF input is master, simply return */
		return;
	else if (rate <= 48000)
		new = 0x08;	/* 256x, base rate mode */
	else if (rate <= 96000)
		new = 0x11;	/* 256x, mid rate mode */
	else
		new = 0x12;	/* 128x, high rate mode */
	old = stac9460_get(ice, STAC946X_MASTER_CLOCKING);
	if (old == new)
		return;
	/* change detected, setting master clock, muting first */
	/* due to possible conflicts with mute controls - mutexing */
	mutex_lock(&spec->mute_mutex);
	/* we have to remember current mute status for each DAC */
	for (idx = 0; idx < 7 ; ++idx)
		changed[idx] = stac9460_dac_mute(ice,
				STAC946X_MASTER_VOLUME + idx, 0);
	/*printk(KERN_DEBUG "Rate change: %d, new MC: 0x%02x\n", rate, new);*/
	stac9460_put(ice, STAC946X_MASTER_CLOCKING, new);
	udelay(10);
	/* unmuting - only originally unmuted dacs -
	 * i.e. those changed when muting */
	for (idx = 0; idx < 7 ; ++idx) {
		if (changed[idx])
			stac9460_dac_mute(ice,
					  STAC946X_MASTER_VOLUME + idx, 1);
	}
	mutex_unlock(&spec->mute_mutex);
}

/* dB ranges: DAC -191.25dB..0dB in 0.75dB steps, ADC 0..22.5dB in 1.5dB */
static const DECLARE_TLV_DB_SCALE(db_scale_dac, -19125, 75, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_adc, 0, 150, 0);

/*
 * mixers
 */
static struct snd_kcontrol_new stac_controls[] = {
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Master Playback Switch",
		.info = stac9460_dac_mute_info,
		.get = stac9460_dac_mute_get,
		.put = stac9460_dac_mute_put,
		.private_value = 1,
		/* NOTE(review): TLV on a switch control without the
		 * TLV_READ access flag is inert - looks like a
		 * copy-paste leftover; confirm before removing. */
		.tlv = { .p = db_scale_dac }
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
			   SNDRV_CTL_ELEM_ACCESS_TLV_READ),
		.name = "Master Playback Volume",
		.info = stac9460_dac_vol_info,
		.get = stac9460_dac_vol_get,
		.put = stac9460_dac_vol_put,
		.private_value = 1,
		.tlv = { .p = db_scale_dac }
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "DAC Switch",
		.count = 6,
		.info = stac9460_dac_mute_info,
		.get = stac9460_dac_mute_get,
		.put = stac9460_dac_mute_put,
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
			   SNDRV_CTL_ELEM_ACCESS_TLV_READ),
		.name = "DAC Volume",
		.count = 6,
		.info = stac9460_dac_vol_info,
		.get = stac9460_dac_vol_get,
		.put = stac9460_dac_vol_put,
		.tlv = { .p = db_scale_dac }
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "ADC Capture Switch",
		.count = 1,
		.info = stac9460_adc_mute_info,
		.get = stac9460_adc_mute_get,
		.put = stac9460_adc_mute_put,
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
			   SNDRV_CTL_ELEM_ACCESS_TLV_READ),
		.name = "ADC Capture Volume",
		.count = 1,
		.info = stac9460_adc_vol_info,
		.get = stac9460_adc_vol_get,
		.put = stac9460_adc_vol_put,
		.tlv = { .p = db_scale_adc }
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Analog Capture Input",
		.info = stac9460_mic_sw_info,
		.get = stac9460_mic_sw_get,
		.put = stac9460_mic_sw_put,
	},
};

/* AK4114 - ICE1724 connections on Prodigy192 + MI/ODI/O */
/* CDTO (pin 32) -- GPIO11 pin 86
 * CDTI (pin 33) -- GPIO10 pin 77
 * CCLK (pin 34) -- GPIO9 pin 76
 * CSN  (pin 35) -- GPIO8 pin 75
 */
#define AK4114_ADDR	0x00	/* C1-C0: Chip Address
				 * (According to datasheet fixed to "00") */

/*
 * 4wire ak4114 protocol - writing data
 * Bit-bangs (idx+1) bits of "data", MSB first, clocking on the rising
 * edge of CCLK.
 */
static void write_data(struct snd_ice1712 *ice, unsigned int gpio,
		       unsigned int data, int idx)
{
	for (; idx >= 0; idx--) {
		/* drop clock */
		gpio &= ~VT1724_PRODIGY192_CCLK;
		snd_ice1712_gpio_write(ice, gpio);
		udelay(1);
		/* set data */
		if (data & (1 << idx))
			gpio |= VT1724_PRODIGY192_CDOUT;
		else
			gpio &= ~VT1724_PRODIGY192_CDOUT;
		snd_ice1712_gpio_write(ice, gpio);
		udelay(1);
		/* raise clock */
		gpio |= VT1724_PRODIGY192_CCLK;
		snd_ice1712_gpio_write(ice, gpio);
		udelay(1);
	}
}

/*
 * 4wire ak4114 protocol - reading data
 * Samples (idx+1) bits from CDIN, MSB first.
 */
static unsigned char read_data(struct snd_ice1712 *ice, unsigned int gpio,
			       int idx)
{
	unsigned char data = 0;

	for (; idx >= 0; idx--) {
		/* drop clock */
		gpio &= ~VT1724_PRODIGY192_CCLK;
		snd_ice1712_gpio_write(ice, gpio);
		udelay(1);
		/* read data */
		if (snd_ice1712_gpio_read(ice) & VT1724_PRODIGY192_CDIN)
			data |= (1 << idx);
		udelay(1);
		/* raise clock */
		gpio |= VT1724_PRODIGY192_CCLK;
		snd_ice1712_gpio_write(ice, gpio);
		udelay(1);
	}
	return data;
}

/*
 * 4wire ak4114 protocol - starting sequence
 * Saves GPIO state, raises CCLK and drops chip select; returns the GPIO
 * value to be passed to the subsequent read/write/finish calls.
 */
static unsigned int prodigy192_4wire_start(struct snd_ice1712 *ice)
{
	unsigned int tmp;

	snd_ice1712_save_gpio_status(ice);
	tmp = snd_ice1712_gpio_read(ice);
	tmp |= VT1724_PRODIGY192_CCLK;	/* high at init */
	tmp &= ~VT1724_PRODIGY192_CS;	/* drop chip select */
	snd_ice1712_gpio_write(ice, tmp);
	udelay(1);
	return tmp;
}

/*
 * 4wire ak4114 protocol - final sequence
 * Raises chip select and restores the saved GPIO state.
 */
static void prodigy192_4wire_finish(struct snd_ice1712 *ice, unsigned int tmp)
{
	tmp |= VT1724_PRODIGY192_CS;	/* raise chip select */
	snd_ice1712_gpio_write(ice, tmp);
	udelay(1);
	snd_ice1712_restore_gpio_status(ice);
}

/*
 * Write data to addr register of ak4114
 * Frame: chip address, R/W bit (0x20 = write), 5-bit register address,
 * then the 8-bit data - 16 bits total.
 */
static void prodigy192_ak4114_write(void *private_data, unsigned char addr,
				    unsigned char data)
{
	struct snd_ice1712 *ice = private_data;
	unsigned int tmp, addrdata;

	tmp = prodigy192_4wire_start(ice);
	addrdata = (AK4114_ADDR << 6) | 0x20 | (addr & 0x1f);
	addrdata = (addrdata << 8) | data;
	write_data(ice, tmp, addrdata, 15);
	prodigy192_4wire_finish(ice, tmp);
}

/*
 * Read data from addr register of ak4114
 * 8-bit command (read, R/W bit clear) followed by an 8-bit data read.
 */
static unsigned char prodigy192_ak4114_read(void *private_data,
					    unsigned char addr)
{
	struct snd_ice1712 *ice = private_data;
	unsigned int tmp;
	unsigned char data;

	tmp = prodigy192_4wire_start(ice);
	write_data(ice, tmp, (AK4114_ADDR << 6) | (addr & 0x1f), 7);
	data = read_data(ice, tmp, 7);
	prodigy192_4wire_finish(ice, tmp);
	return data;
}

/* IEC958 capture source selector (AK4114 RX0 toslink / RX1 coax). */
static int ak4114_input_sw_info(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[2] = { "Toslink", "Coax" };

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = 2;

	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
		uinfo->value.enumerated.item = uinfo->value.enumerated.items
			- 1;
	strcpy(uinfo->value.enumerated.name,
	       texts[uinfo->value.enumerated.item]);
	return 0;
}

static int ak4114_input_sw_get(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned char val;

	val = prodigy192_ak4114_read(ice, AK4114_REG_IO1);
	/* AK4114_IPS0 bit = 0 -> RX0 = Toslink
	 * AK4114_IPS0 bit = 1 -> RX1 = Coax
	 */
	ucontrol->value.enumerated.item[0] = (val & AK4114_IPS0) ? 1 : 0;
	return 0;
}

static int ak4114_input_sw_put(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned char new, old, itemvalue;
	int change;

	old = prodigy192_ak4114_read(ice, AK4114_REG_IO1);
	/* AK4114_IPS0 could be any bit */
	itemvalue = (ucontrol->value.enumerated.item[0]) ? 0xff : 0x00;

	new = (itemvalue & AK4114_IPS0) | (old & ~AK4114_IPS0);
	change = (new != old);
	if (change)
		prodigy192_ak4114_write(ice, AK4114_REG_IO1, new);
	return change;
}

static struct snd_kcontrol_new ak4114_controls[] = {
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "MIODIO IEC958 Capture Input",
		.info = ak4114_input_sw_info,
		.get = ak4114_input_sw_get,
		.put = ak4114_input_sw_put,
	}
};

/* Create the AK4114 receiver instance for the MI/ODI/O add-on card. */
static int prodigy192_ak4114_init(struct snd_ice1712 *ice)
{
	static const unsigned char ak4114_init_vals[] = {
		AK4114_RST | AK4114_PWN | AK4114_OCKS0 | AK4114_OCKS1,
		/* ice1724 expects I2S and provides clock,
		 * DEM0 disables the deemphasis filter
		 */
		AK4114_DIF_I24I2S | AK4114_DEM0 ,
		AK4114_TX1E,
		AK4114_EFH_1024 | AK4114_DIT, /* default input RX0 */
		0,
		0
	};
	static const unsigned char ak4114_init_txcsb[] = {
		0x41, 0x02, 0x2c, 0x00, 0x00
	};
	struct prodigy192_spec *spec = ice->spec;
	int err;

	err = snd_ak4114_create(ice->card,
				 prodigy192_ak4114_read,
				 prodigy192_ak4114_write,
				 ak4114_init_vals, ak4114_init_txcsb,
				 ice, &spec->ak4114);
	if (err < 0)
		return err;
	/* AK4114 in Prodigy192 cannot detect external rate correctly.
	 * No reason to stop capture stream due to incorrect checks */
	spec->ak4114->check_flags = AK4114_CHECK_NO_RATE;
	return 0;
}

/* Dump the STAC9460 register file to the proc buffer. */
static void stac9460_proc_regs_read(struct snd_info_entry *entry,
		struct snd_info_buffer *buffer)
{
	struct snd_ice1712 *ice = entry->private_data;
	int reg, val;
	/* registers 0x00 - 0x15 */
	for (reg = 0; reg <= 0x15; reg++) {
		val = stac9460_get(ice, reg);
		snd_iprintf(buffer, "0x%02x = 0x%02x\n", reg, val);
	}
}

/* Register the /proc/asound/.../stac9460_codec read-only entry. */
static void stac9460_proc_init(struct snd_ice1712 *ice)
{
	struct snd_info_entry *entry;
	if (!snd_card_proc_new(ice->card, "stac9460_codec", &entry))
		snd_info_set_text_ops(entry, ice, stac9460_proc_regs_read);
}

/* Build all mixer controls; AK4114 controls only when the card exists. */
static int prodigy192_add_controls(struct snd_ice1712 *ice)
{
	struct prodigy192_spec *spec = ice->spec;
	unsigned int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(stac_controls); i++) {
		err = snd_ctl_add(ice->card,
				  snd_ctl_new1(&stac_controls[i], ice));
		if (err < 0)
			return err;
	}
	if (spec->ak4114) {
		/* ak4114 is connected */
		for (i = 0; i < ARRAY_SIZE(ak4114_controls); i++) {
			err = snd_ctl_add(ice->card,
					  snd_ctl_new1(&ak4114_controls[i],
						       ice));
			if (err < 0)
				return err;
		}
		err = snd_ak4114_build(spec->ak4114,
				NULL, /* ak4114 in MIO/DI/O handles no IEC958 output */
				ice->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream);
		if (err < 0)
			return err;
	}
	stac9460_proc_init(ice);
	return 0;
}

/*
 * check for presence of MI/ODI/O add-on card with digital inputs
 * Probes by writing a test pattern to a safe AK4114 register and
 * reading it back; the original register value is restored on success.
 */
static int prodigy192_miodio_exists(struct snd_ice1712 *ice)
{
	unsigned char orig_value;
	const unsigned char test_data = 0xd1;	/* random value */
	unsigned char addr = AK4114_REG_INT0_MASK; /* random SAFE address */
	int exists = 0;

	orig_value = prodigy192_ak4114_read(ice, addr);
	prodigy192_ak4114_write(ice, addr, test_data);
	if (prodigy192_ak4114_read(ice, addr) == test_data) {
		/* ak4114 seems to communicate, apparently exists */
		/* writing back original value */
		prodigy192_ak4114_write(ice, addr, orig_value);
		exists = 1;
	}
	return exists;
}

/*
 * initialize the chip
 */
static int prodigy192_init(struct snd_ice1712 *ice)
{
	static const unsigned short stac_inits_prodigy[] = {
		STAC946X_RESET, 0,
		STAC946X_MASTER_CLOCKING, 0x11,
/*		STAC946X_MASTER_VOLUME, 0,
		STAC946X_LF_VOLUME, 0,
		STAC946X_RF_VOLUME, 0,
		STAC946X_LR_VOLUME, 0,
		STAC946X_RR_VOLUME, 0,
		STAC946X_CENTER_VOLUME, 0,
		STAC946X_LFE_VOLUME, 0,*/
		(unsigned short)-1
	};
	const unsigned short *p;
	int err = 0;
	struct prodigy192_spec *spec;

	/* prodigy 192 */
	ice->num_total_dacs = 6;
	ice->num_total_adcs = 2;
	ice->vt1720 = 0;  /* ice1724, e.g. 23 GPIOs */

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	ice->spec = spec;
	mutex_init(&spec->mute_mutex);

	/* initialize codec */
	p = stac_inits_prodigy;
	for (; *p != (unsigned short)-1; p += 2)
		stac9460_put(ice, p[0], p[1]);
	ice->gpio.set_pro_rate = stac9460_set_rate_val;

	/* MI/ODI/O add on card with AK4114 */
	if (prodigy192_miodio_exists(ice)) {
		err = prodigy192_ak4114_init(ice);
		/* from this moment if err = 0 then
		 * spec->ak4114 should not be null */
		snd_printdd("AK4114 initialized with status %d\n", err);
	} else
		snd_printdd("AK4114 not found\n");
	if (err < 0)
		return err;

	return 0;
}

/*
 * Aureon boards don't provide the EEPROM data except for the vendor IDs.
 * hence the driver needs to sets up it properly.
 * (NOTE(review): comment mentions Aureon but this table serves the
 * Prodigy 192 - likely inherited from aureon.c; verify.)
 */
static unsigned char prodigy71_eeprom[] = {
	[ICE_EEP2_SYSCONF]     = 0x6a,	/* 49MHz crystal, mpu401,
					 * spdif-in+ 1 stereo ADC,
					 * 3 stereo DACs
					 */
	[ICE_EEP2_ACLINK]      = 0x80,	/* I2S */
	[ICE_EEP2_I2S]         = 0xf8,	/* vol, 96k, 24bit, 192k */
	[ICE_EEP2_SPDIF]       = 0xc3,	/* out-en, out-int, spdif-in */
	[ICE_EEP2_GPIO_DIR]    = 0xff,
	[ICE_EEP2_GPIO_DIR1]   = ~(VT1724_PRODIGY192_CDIN >> 8) ,
	[ICE_EEP2_GPIO_DIR2]   = 0xbf,
	[ICE_EEP2_GPIO_MASK]   = 0x00,
	[ICE_EEP2_GPIO_MASK1]  = 0x00,
	[ICE_EEP2_GPIO_MASK2]  = 0x00,
	[ICE_EEP2_GPIO_STATE]  = 0x00,
	[ICE_EEP2_GPIO_STATE1] = 0x00,
	[ICE_EEP2_GPIO_STATE2] = 0x10,	/* GPIO20: 0 = CD drive dig.
					 * input passthrough,
					 * 1 = SPDIF-OUT from ice1724
					 */
};

/* entry point */
struct snd_ice1712_card_info snd_vt1724_prodigy192_cards[] = {
	{
		.subvendor = VT1724_SUBDEVICE_PRODIGY192VE,
		.name = "Audiotrak Prodigy 192",
		.model = "prodigy192",
		.chip_init = prodigy192_init,
		.build_controls = prodigy192_add_controls,
		.eeprom_size = sizeof(prodigy71_eeprom),
		.eeprom_data = prodigy71_eeprom,
	},
	{ } /* terminator */
};
gpl-2.0
djvoleur/test
drivers/scsi/dmx3191d.c
3236
4522
/* dmx3191d.c - driver for the Domex DMX3191D SCSI card.

   Copyright (C) 2000 by Massimo Piccioni <dafastidio@libero.it>
   Portions Copyright (C) 2004 by Christoph Hellwig <hch@lst.de>

   Based on the generic NCR5380 driver by Drew Eckhardt et al.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <asm/io.h>

#include <scsi/scsi_host.h>

/*
 * Definitions for the generic 5380 driver.
 */
#define AUTOSENSE

#define NCR5380_read(reg)		inb(port + reg)
#define NCR5380_write(reg, value)	outb(value, port + reg)

#define NCR5380_implementation_fields	unsigned int port
#define NCR5380_local_declare()		NCR5380_implementation_fields
#define NCR5380_setup(instance)		port = instance->io_port

/*
 * Includes needed for NCR5380.[ch] (XXX: Move them to NCR5380.h)
 */
#include <linux/delay.h>
#include "scsi.h"

#include "NCR5380.h"
#include "NCR5380.c"

#define DMX3191D_DRIVER_NAME	"dmx3191d"
#define DMX3191D_REGION_LEN	8

static struct scsi_host_template dmx3191d_driver_template = {
	.proc_name		= DMX3191D_DRIVER_NAME,
	.name			= "Domex DMX3191D",
	.queuecommand		= NCR5380_queue_command,
	.eh_abort_handler	= NCR5380_abort,
	.eh_bus_reset_handler	= NCR5380_bus_reset,
	.can_queue		= 32,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= 2,
	.use_clustering		= DISABLE_CLUSTERING,
};

/*
 * Probe one DMX3191D PCI device: reserve its I/O region, initialize the
 * NCR5380 core and register the SCSI host.  Falls back to polled mode
 * when the PCI interrupt line cannot be claimed.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int dmx3191d_probe_one(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	unsigned long io;
	int error = -ENODEV;

	if (pci_enable_device(pdev))
		goto out;

	io = pci_resource_start(pdev, 0);
	if (!request_region(io, DMX3191D_REGION_LEN, DMX3191D_DRIVER_NAME)) {
		printk(KERN_ERR "dmx3191: region 0x%lx-0x%lx already reserved\n",
				io, io + DMX3191D_REGION_LEN);
		goto out_disable_device;
	}

	shost = scsi_host_alloc(&dmx3191d_driver_template,
			sizeof(struct NCR5380_hostdata));
	if (!shost)
		goto out_release_region;
	shost->io_port = io;
	shost->irq = pdev->irq;

	NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E);

	if (request_irq(pdev->irq, NCR5380_intr, IRQF_SHARED,
				DMX3191D_DRIVER_NAME, shost)) {
		/*
		 * Steam powered scsi controllers run without an IRQ anyway
		 */
		printk(KERN_WARNING "dmx3191: IRQ %d not available - "
				    "switching to polled mode.\n", pdev->irq);
		shost->irq = SCSI_IRQ_NONE;
	}

	pci_set_drvdata(pdev, shost);

	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_free_host;

	scsi_scan_host(shost);
	return 0;

out_free_host:
	/*
	 * Fix: only free the IRQ when it was actually acquired above
	 * (request_irq() may have failed and set irq to SCSI_IRQ_NONE),
	 * tear down the NCR5380 core state, and drop the reference taken
	 * by scsi_host_alloc() -- the previous error path called
	 * free_irq() unconditionally and leaked the Scsi_Host.
	 */
	if (shost->irq != SCSI_IRQ_NONE)
		free_irq(shost->irq, shost);
	NCR5380_exit(shost);
	scsi_host_put(shost);
out_release_region:
	release_region(io, DMX3191D_REGION_LEN);
out_disable_device:
	pci_disable_device(pdev);
out:
	return error;
}

/* Undo everything dmx3191d_probe_one() did for this device. */
static void dmx3191d_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);

	scsi_remove_host(shost);

	NCR5380_exit(shost);

	if (shost->irq != SCSI_IRQ_NONE)
		free_irq(shost->irq, shost);
	release_region(shost->io_port, DMX3191D_REGION_LEN);
	pci_disable_device(pdev);

	scsi_host_put(shost);
}

static struct pci_device_id dmx3191d_pci_tbl[] = {
	{PCI_VENDOR_ID_DOMEX, PCI_DEVICE_ID_DOMEX_DMX3191D,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{ }
};
MODULE_DEVICE_TABLE(pci, dmx3191d_pci_tbl);

static struct pci_driver dmx3191d_pci_driver = {
	.name		= DMX3191D_DRIVER_NAME,
	.id_table	= dmx3191d_pci_tbl,
	.probe		= dmx3191d_probe_one,
	.remove		= dmx3191d_remove_one,
};

static int __init dmx3191d_init(void)
{
	return pci_register_driver(&dmx3191d_pci_driver);
}

static void __exit dmx3191d_exit(void)
{
	pci_unregister_driver(&dmx3191d_pci_driver);
}

module_init(dmx3191d_init);
module_exit(dmx3191d_exit);

MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
MODULE_DESCRIPTION("Domex DMX3191D SCSI driver");
MODULE_LICENSE("GPL");
gpl-2.0
darchstar/kernel-heroc-2.6.32
sound/drivers/mpu401/mpu401_uart.c
3748
16837
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Routines for control of MPU-401 in UART mode * * MPU-401 supports UART mode which is not capable generate transmit * interrupts thus output is done via polling. Also, if irq < 0, then * input is done also via polling. Do not expect good performance. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * 13-03-2003: * Added support for different kind of hardware I/O. Build in choices * are port and mmio. For other kind of I/O, set mpu->read and * mpu->write to your own I/O functions. 
* */ #include <asm/io.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <sound/core.h> #include <sound/mpu401.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Routines for control of MPU-401 in UART mode"); MODULE_LICENSE("GPL"); static void snd_mpu401_uart_input_read(struct snd_mpu401 * mpu); static void snd_mpu401_uart_output_write(struct snd_mpu401 * mpu); /* */ #define snd_mpu401_input_avail(mpu) \ (!(mpu->read(mpu, MPU401C(mpu)) & MPU401_RX_EMPTY)) #define snd_mpu401_output_ready(mpu) \ (!(mpu->read(mpu, MPU401C(mpu)) & MPU401_TX_FULL)) /* Build in lowlevel io */ static void mpu401_write_port(struct snd_mpu401 *mpu, unsigned char data, unsigned long addr) { outb(data, addr); } static unsigned char mpu401_read_port(struct snd_mpu401 *mpu, unsigned long addr) { return inb(addr); } static void mpu401_write_mmio(struct snd_mpu401 *mpu, unsigned char data, unsigned long addr) { writeb(data, (void __iomem *)addr); } static unsigned char mpu401_read_mmio(struct snd_mpu401 *mpu, unsigned long addr) { return readb((void __iomem *)addr); } /* */ static void snd_mpu401_uart_clear_rx(struct snd_mpu401 *mpu) { int timeout = 100000; for (; timeout > 0 && snd_mpu401_input_avail(mpu); timeout--) mpu->read(mpu, MPU401D(mpu)); #ifdef CONFIG_SND_DEBUG if (timeout <= 0) snd_printk(KERN_ERR "cmd: clear rx timeout (status = 0x%x)\n", mpu->read(mpu, MPU401C(mpu))); #endif } static void uart_interrupt_tx(struct snd_mpu401 *mpu) { unsigned long flags; if (test_bit(MPU401_MODE_BIT_OUTPUT, &mpu->mode) && test_bit(MPU401_MODE_BIT_OUTPUT_TRIGGER, &mpu->mode)) { spin_lock_irqsave(&mpu->output_lock, flags); snd_mpu401_uart_output_write(mpu); spin_unlock_irqrestore(&mpu->output_lock, flags); } } static void _snd_mpu401_uart_interrupt(struct snd_mpu401 *mpu) { unsigned long flags; if (mpu->info_flags & MPU401_INFO_INPUT) { 
spin_lock_irqsave(&mpu->input_lock, flags); if (test_bit(MPU401_MODE_BIT_INPUT, &mpu->mode)) snd_mpu401_uart_input_read(mpu); else snd_mpu401_uart_clear_rx(mpu); spin_unlock_irqrestore(&mpu->input_lock, flags); } if (! (mpu->info_flags & MPU401_INFO_TX_IRQ)) /* ok. for better Tx performance try do some output when input is done */ uart_interrupt_tx(mpu); } /** * snd_mpu401_uart_interrupt - generic MPU401-UART interrupt handler * @irq: the irq number * @dev_id: mpu401 instance * * Processes the interrupt for MPU401-UART i/o. */ irqreturn_t snd_mpu401_uart_interrupt(int irq, void *dev_id) { struct snd_mpu401 *mpu = dev_id; if (mpu == NULL) return IRQ_NONE; _snd_mpu401_uart_interrupt(mpu); return IRQ_HANDLED; } EXPORT_SYMBOL(snd_mpu401_uart_interrupt); /** * snd_mpu401_uart_interrupt_tx - generic MPU401-UART transmit irq handler * @irq: the irq number * @dev_id: mpu401 instance * * Processes the interrupt for MPU401-UART output. */ irqreturn_t snd_mpu401_uart_interrupt_tx(int irq, void *dev_id) { struct snd_mpu401 *mpu = dev_id; if (mpu == NULL) return IRQ_NONE; uart_interrupt_tx(mpu); return IRQ_HANDLED; } EXPORT_SYMBOL(snd_mpu401_uart_interrupt_tx); /* * timer callback * reprogram the timer and call the interrupt job */ static void snd_mpu401_uart_timer(unsigned long data) { struct snd_mpu401 *mpu = (struct snd_mpu401 *)data; unsigned long flags; spin_lock_irqsave(&mpu->timer_lock, flags); /*mpu->mode |= MPU401_MODE_TIMER;*/ mpu->timer.expires = 1 + jiffies; add_timer(&mpu->timer); spin_unlock_irqrestore(&mpu->timer_lock, flags); if (mpu->rmidi) _snd_mpu401_uart_interrupt(mpu); } /* * initialize the timer callback if not programmed yet */ static void snd_mpu401_uart_add_timer (struct snd_mpu401 *mpu, int input) { unsigned long flags; spin_lock_irqsave (&mpu->timer_lock, flags); if (mpu->timer_invoked == 0) { init_timer(&mpu->timer); mpu->timer.data = (unsigned long)mpu; mpu->timer.function = snd_mpu401_uart_timer; mpu->timer.expires = 1 + jiffies; 
add_timer(&mpu->timer); } mpu->timer_invoked |= input ? MPU401_MODE_INPUT_TIMER : MPU401_MODE_OUTPUT_TIMER; spin_unlock_irqrestore (&mpu->timer_lock, flags); } /* * remove the timer callback if still active */ static void snd_mpu401_uart_remove_timer (struct snd_mpu401 *mpu, int input) { unsigned long flags; spin_lock_irqsave (&mpu->timer_lock, flags); if (mpu->timer_invoked) { mpu->timer_invoked &= input ? ~MPU401_MODE_INPUT_TIMER : ~MPU401_MODE_OUTPUT_TIMER; if (! mpu->timer_invoked) del_timer(&mpu->timer); } spin_unlock_irqrestore (&mpu->timer_lock, flags); } /* * send a UART command * return zero if successful, non-zero for some errors */ static int snd_mpu401_uart_cmd(struct snd_mpu401 * mpu, unsigned char cmd, int ack) { unsigned long flags; int timeout, ok; spin_lock_irqsave(&mpu->input_lock, flags); if (mpu->hardware != MPU401_HW_TRID4DWAVE) { mpu->write(mpu, 0x00, MPU401D(mpu)); /*snd_mpu401_uart_clear_rx(mpu);*/ } /* ok. standard MPU-401 initialization */ if (mpu->hardware != MPU401_HW_SB) { for (timeout = 1000; timeout > 0 && !snd_mpu401_output_ready(mpu); timeout--) udelay(10); #ifdef CONFIG_SND_DEBUG if (!timeout) snd_printk(KERN_ERR "cmd: tx timeout (status = 0x%x)\n", mpu->read(mpu, MPU401C(mpu))); #endif } mpu->write(mpu, cmd, MPU401C(mpu)); if (ack && !(mpu->info_flags & MPU401_INFO_NO_ACK)) { ok = 0; timeout = 10000; while (!ok && timeout-- > 0) { if (snd_mpu401_input_avail(mpu)) { if (mpu->read(mpu, MPU401D(mpu)) == MPU401_ACK) ok = 1; } } if (!ok && mpu->read(mpu, MPU401D(mpu)) == MPU401_ACK) ok = 1; } else ok = 1; spin_unlock_irqrestore(&mpu->input_lock, flags); if (!ok) { snd_printk(KERN_ERR "cmd: 0x%x failed at 0x%lx " "(status = 0x%x, data = 0x%x)\n", cmd, mpu->port, mpu->read(mpu, MPU401C(mpu)), mpu->read(mpu, MPU401D(mpu))); return 1; } return 0; } static int snd_mpu401_do_reset(struct snd_mpu401 *mpu) { if (snd_mpu401_uart_cmd(mpu, MPU401_RESET, 1)) return -EIO; if (snd_mpu401_uart_cmd(mpu, MPU401_ENTER_UART, 0)) return -EIO; return 0; } 
/* * input/output open/close - protected by open_mutex in rawmidi.c */ static int snd_mpu401_uart_input_open(struct snd_rawmidi_substream *substream) { struct snd_mpu401 *mpu; int err; mpu = substream->rmidi->private_data; if (mpu->open_input && (err = mpu->open_input(mpu)) < 0) return err; if (! test_bit(MPU401_MODE_BIT_OUTPUT, &mpu->mode)) { if (snd_mpu401_do_reset(mpu) < 0) goto error_out; } mpu->substream_input = substream; set_bit(MPU401_MODE_BIT_INPUT, &mpu->mode); return 0; error_out: if (mpu->open_input && mpu->close_input) mpu->close_input(mpu); return -EIO; } static int snd_mpu401_uart_output_open(struct snd_rawmidi_substream *substream) { struct snd_mpu401 *mpu; int err; mpu = substream->rmidi->private_data; if (mpu->open_output && (err = mpu->open_output(mpu)) < 0) return err; if (! test_bit(MPU401_MODE_BIT_INPUT, &mpu->mode)) { if (snd_mpu401_do_reset(mpu) < 0) goto error_out; } mpu->substream_output = substream; set_bit(MPU401_MODE_BIT_OUTPUT, &mpu->mode); return 0; error_out: if (mpu->open_output && mpu->close_output) mpu->close_output(mpu); return -EIO; } static int snd_mpu401_uart_input_close(struct snd_rawmidi_substream *substream) { struct snd_mpu401 *mpu; int err = 0; mpu = substream->rmidi->private_data; clear_bit(MPU401_MODE_BIT_INPUT, &mpu->mode); mpu->substream_input = NULL; if (! test_bit(MPU401_MODE_BIT_OUTPUT, &mpu->mode)) err = snd_mpu401_uart_cmd(mpu, MPU401_RESET, 0); if (mpu->close_input) mpu->close_input(mpu); if (err) return -EIO; return 0; } static int snd_mpu401_uart_output_close(struct snd_rawmidi_substream *substream) { struct snd_mpu401 *mpu; int err = 0; mpu = substream->rmidi->private_data; clear_bit(MPU401_MODE_BIT_OUTPUT, &mpu->mode); mpu->substream_output = NULL; if (! 
test_bit(MPU401_MODE_BIT_INPUT, &mpu->mode)) err = snd_mpu401_uart_cmd(mpu, MPU401_RESET, 0); if (mpu->close_output) mpu->close_output(mpu); if (err) return -EIO; return 0; } /* * trigger input callback */ static void snd_mpu401_uart_input_trigger(struct snd_rawmidi_substream *substream, int up) { unsigned long flags; struct snd_mpu401 *mpu; int max = 64; mpu = substream->rmidi->private_data; if (up) { if (! test_and_set_bit(MPU401_MODE_BIT_INPUT_TRIGGER, &mpu->mode)) { /* first time - flush FIFO */ while (max-- > 0) mpu->read(mpu, MPU401D(mpu)); if (mpu->irq < 0) snd_mpu401_uart_add_timer(mpu, 1); } /* read data in advance */ spin_lock_irqsave(&mpu->input_lock, flags); snd_mpu401_uart_input_read(mpu); spin_unlock_irqrestore(&mpu->input_lock, flags); } else { if (mpu->irq < 0) snd_mpu401_uart_remove_timer(mpu, 1); clear_bit(MPU401_MODE_BIT_INPUT_TRIGGER, &mpu->mode); } } /* * transfer input pending data * call with input_lock spinlock held */ static void snd_mpu401_uart_input_read(struct snd_mpu401 * mpu) { int max = 128; unsigned char byte; while (max-- > 0) { if (! snd_mpu401_input_avail(mpu)) break; /* input not available */ byte = mpu->read(mpu, MPU401D(mpu)); if (test_bit(MPU401_MODE_BIT_INPUT_TRIGGER, &mpu->mode)) snd_rawmidi_receive(mpu->substream_input, &byte, 1); } } /* * Tx FIFO sizes: * CS4237B - 16 bytes * AudioDrive ES1688 - 12 bytes * S3 SonicVibes - 8 bytes * SoundBlaster AWE 64 - 2 bytes (ugly hardware) */ /* * write output pending bytes * call with output_lock spinlock held */ static void snd_mpu401_uart_output_write(struct snd_mpu401 * mpu) { unsigned char byte; int max = 256; do { if (snd_rawmidi_transmit_peek(mpu->substream_output, &byte, 1) == 1) { /* * Try twice because there is hardware that insists on * setting the output busy bit after each write. 
*/ if (!snd_mpu401_output_ready(mpu) && !snd_mpu401_output_ready(mpu)) break; /* Tx FIFO full - try again later */ mpu->write(mpu, byte, MPU401D(mpu)); snd_rawmidi_transmit_ack(mpu->substream_output, 1); } else { snd_mpu401_uart_remove_timer (mpu, 0); break; /* no other data - leave the tx loop */ } } while (--max > 0); } /* * output trigger callback */ static void snd_mpu401_uart_output_trigger(struct snd_rawmidi_substream *substream, int up) { unsigned long flags; struct snd_mpu401 *mpu; mpu = substream->rmidi->private_data; if (up) { set_bit(MPU401_MODE_BIT_OUTPUT_TRIGGER, &mpu->mode); /* try to add the timer at each output trigger, * since the output timer might have been removed in * snd_mpu401_uart_output_write(). */ if (! (mpu->info_flags & MPU401_INFO_TX_IRQ)) snd_mpu401_uart_add_timer(mpu, 0); /* output pending data */ spin_lock_irqsave(&mpu->output_lock, flags); snd_mpu401_uart_output_write(mpu); spin_unlock_irqrestore(&mpu->output_lock, flags); } else { if (! (mpu->info_flags & MPU401_INFO_TX_IRQ)) snd_mpu401_uart_remove_timer(mpu, 0); clear_bit(MPU401_MODE_BIT_OUTPUT_TRIGGER, &mpu->mode); } } /* */ static struct snd_rawmidi_ops snd_mpu401_uart_output = { .open = snd_mpu401_uart_output_open, .close = snd_mpu401_uart_output_close, .trigger = snd_mpu401_uart_output_trigger, }; static struct snd_rawmidi_ops snd_mpu401_uart_input = { .open = snd_mpu401_uart_input_open, .close = snd_mpu401_uart_input_close, .trigger = snd_mpu401_uart_input_trigger, }; static void snd_mpu401_uart_free(struct snd_rawmidi *rmidi) { struct snd_mpu401 *mpu = rmidi->private_data; if (mpu->irq_flags && mpu->irq >= 0) free_irq(mpu->irq, (void *) mpu); release_and_free_resource(mpu->res); kfree(mpu); } /** * snd_mpu401_uart_new - create an MPU401-UART instance * @card: the card instance * @device: the device index, zero-based * @hardware: the hardware type, MPU401_HW_XXXX * @port: the base address of MPU401 port * @info_flags: bitflags MPU401_INFO_XXX * @irq: the irq number, -1 if no 
interrupt for mpu * @irq_flags: the irq request flags (SA_XXX), 0 if irq was already reserved. * @rrawmidi: the pointer to store the new rawmidi instance * * Creates a new MPU-401 instance. * * Note that the rawmidi instance is returned on the rrawmidi argument, * not the mpu401 instance itself. To access to the mpu401 instance, * cast from rawmidi->private_data (with struct snd_mpu401 magic-cast). * * Returns zero if successful, or a negative error code. */ int snd_mpu401_uart_new(struct snd_card *card, int device, unsigned short hardware, unsigned long port, unsigned int info_flags, int irq, int irq_flags, struct snd_rawmidi ** rrawmidi) { struct snd_mpu401 *mpu; struct snd_rawmidi *rmidi; int in_enable, out_enable; int err; if (rrawmidi) *rrawmidi = NULL; if (! (info_flags & (MPU401_INFO_INPUT | MPU401_INFO_OUTPUT))) info_flags |= MPU401_INFO_INPUT | MPU401_INFO_OUTPUT; in_enable = (info_flags & MPU401_INFO_INPUT) ? 1 : 0; out_enable = (info_flags & MPU401_INFO_OUTPUT) ? 1 : 0; if ((err = snd_rawmidi_new(card, "MPU-401U", device, out_enable, in_enable, &rmidi)) < 0) return err; mpu = kzalloc(sizeof(*mpu), GFP_KERNEL); if (mpu == NULL) { snd_printk(KERN_ERR "mpu401_uart: cannot allocate\n"); snd_device_free(card, rmidi); return -ENOMEM; } rmidi->private_data = mpu; rmidi->private_free = snd_mpu401_uart_free; spin_lock_init(&mpu->input_lock); spin_lock_init(&mpu->output_lock); spin_lock_init(&mpu->timer_lock); mpu->hardware = hardware; if (! (info_flags & MPU401_INFO_INTEGRATED)) { int res_size = hardware == MPU401_HW_PC98II ? 
4 : 2; mpu->res = request_region(port, res_size, "MPU401 UART"); if (mpu->res == NULL) { snd_printk(KERN_ERR "mpu401_uart: " "unable to grab port 0x%lx size %d\n", port, res_size); snd_device_free(card, rmidi); return -EBUSY; } } if (info_flags & MPU401_INFO_MMIO) { mpu->write = mpu401_write_mmio; mpu->read = mpu401_read_mmio; } else { mpu->write = mpu401_write_port; mpu->read = mpu401_read_port; } mpu->port = port; if (hardware == MPU401_HW_PC98II) mpu->cport = port + 2; else mpu->cport = port + 1; if (irq >= 0 && irq_flags) { if (request_irq(irq, snd_mpu401_uart_interrupt, irq_flags, "MPU401 UART", (void *) mpu)) { snd_printk(KERN_ERR "mpu401_uart: " "unable to grab IRQ %d\n", irq); snd_device_free(card, rmidi); return -EBUSY; } } mpu->info_flags = info_flags; mpu->irq = irq; mpu->irq_flags = irq_flags; if (card->shortname[0]) snprintf(rmidi->name, sizeof(rmidi->name), "%s MIDI", card->shortname); else sprintf(rmidi->name, "MPU-401 MIDI %d-%d",card->number, device); if (out_enable) { snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_mpu401_uart_output); rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT; } if (in_enable) { snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_mpu401_uart_input); rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT; if (out_enable) rmidi->info_flags |= SNDRV_RAWMIDI_INFO_DUPLEX; } mpu->rmidi = rmidi; if (rrawmidi) *rrawmidi = rmidi; return 0; } EXPORT_SYMBOL(snd_mpu401_uart_new); /* * INIT part */ static int __init alsa_mpu401_uart_init(void) { return 0; } static void __exit alsa_mpu401_uart_exit(void) { } module_init(alsa_mpu401_uart_init) module_exit(alsa_mpu401_uart_exit)
gpl-2.0
rickardholmberg/linux-sunxi
drivers/ptp/ptp_pch.c
4772
16597
/* * PTP 1588 clock using the EG20T PCH * * Copyright (C) 2010 OMICRON electronics GmbH * Copyright (C) 2011-2012 LAPIS SEMICONDUCTOR Co., LTD. * * This code was derived from the IXP46X driver. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/device.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/ptp_clock_kernel.h> #include <linux/slab.h> #define STATION_ADDR_LEN 20 #define PCI_DEVICE_ID_PCH_1588 0x8819 #define IO_MEM_BAR 1 #define DEFAULT_ADDEND 0xA0000000 #define TICKS_NS_SHIFT 5 #define N_EXT_TS 2 enum pch_status { PCH_SUCCESS, PCH_INVALIDPARAM, PCH_NOTIMESTAMP, PCH_INTERRUPTMODEINUSE, PCH_FAILED, PCH_UNSUPPORTED, }; /** * struct pch_ts_regs - IEEE 1588 registers */ struct pch_ts_regs { u32 control; u32 event; u32 addend; u32 accum; u32 test; u32 ts_compare; u32 rsystime_lo; u32 rsystime_hi; u32 systime_lo; u32 systime_hi; u32 trgt_lo; u32 trgt_hi; u32 asms_lo; u32 asms_hi; u32 amms_lo; u32 amms_hi; u32 ch_control; u32 ch_event; u32 tx_snap_lo; u32 tx_snap_hi; u32 rx_snap_lo; u32 rx_snap_hi; u32 src_uuid_lo; u32 src_uuid_hi; u32 can_status; u32 can_snap_lo; u32 can_snap_hi; u32 ts_sel; u32 ts_st[6]; u32 reserve1[14]; u32 stl_max_set_en; u32 stl_max_set; u32 reserve2[13]; u32 srst; }; 
#define PCH_TSC_RESET (1 << 0) #define PCH_TSC_TTM_MASK (1 << 1) #define PCH_TSC_ASMS_MASK (1 << 2) #define PCH_TSC_AMMS_MASK (1 << 3) #define PCH_TSC_PPSM_MASK (1 << 4) #define PCH_TSE_TTIPEND (1 << 1) #define PCH_TSE_SNS (1 << 2) #define PCH_TSE_SNM (1 << 3) #define PCH_TSE_PPS (1 << 4) #define PCH_CC_MM (1 << 0) #define PCH_CC_TA (1 << 1) #define PCH_CC_MODE_SHIFT 16 #define PCH_CC_MODE_MASK 0x001F0000 #define PCH_CC_VERSION (1 << 31) #define PCH_CE_TXS (1 << 0) #define PCH_CE_RXS (1 << 1) #define PCH_CE_OVR (1 << 0) #define PCH_CE_VAL (1 << 1) #define PCH_ECS_ETH (1 << 0) #define PCH_ECS_CAN (1 << 1) #define PCH_STATION_BYTES 6 #define PCH_IEEE1588_ETH (1 << 0) #define PCH_IEEE1588_CAN (1 << 1) /** * struct pch_dev - Driver private data */ struct pch_dev { struct pch_ts_regs *regs; struct ptp_clock *ptp_clock; struct ptp_clock_info caps; int exts0_enabled; int exts1_enabled; u32 mem_base; u32 mem_size; u32 irq; struct pci_dev *pdev; spinlock_t register_lock; }; /** * struct pch_params - 1588 module parameter */ struct pch_params { u8 station[STATION_ADDR_LEN]; }; /* structure to hold the module parameters */ static struct pch_params pch_param = { "00:00:00:00:00:00" }; /* * Register access functions */ static inline void pch_eth_enable_set(struct pch_dev *chip) { u32 val; /* SET the eth_enable bit */ val = ioread32(&chip->regs->ts_sel) | (PCH_ECS_ETH); iowrite32(val, (&chip->regs->ts_sel)); } static u64 pch_systime_read(struct pch_ts_regs *regs) { u64 ns; u32 lo, hi; lo = ioread32(&regs->systime_lo); hi = ioread32(&regs->systime_hi); ns = ((u64) hi) << 32; ns |= lo; ns <<= TICKS_NS_SHIFT; return ns; } static void pch_systime_write(struct pch_ts_regs *regs, u64 ns) { u32 hi, lo; ns >>= TICKS_NS_SHIFT; hi = ns >> 32; lo = ns & 0xffffffff; iowrite32(lo, &regs->systime_lo); iowrite32(hi, &regs->systime_hi); } static inline void pch_block_reset(struct pch_dev *chip) { u32 val; /* Reset Hardware Assist block */ val = ioread32(&chip->regs->control) | PCH_TSC_RESET; 
iowrite32(val, (&chip->regs->control)); val = val & ~PCH_TSC_RESET; iowrite32(val, (&chip->regs->control)); } u32 pch_ch_control_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u32 val; val = ioread32(&chip->regs->ch_control); return val; } EXPORT_SYMBOL(pch_ch_control_read); void pch_ch_control_write(struct pci_dev *pdev, u32 val) { struct pch_dev *chip = pci_get_drvdata(pdev); iowrite32(val, (&chip->regs->ch_control)); } EXPORT_SYMBOL(pch_ch_control_write); u32 pch_ch_event_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u32 val; val = ioread32(&chip->regs->ch_event); return val; } EXPORT_SYMBOL(pch_ch_event_read); void pch_ch_event_write(struct pci_dev *pdev, u32 val) { struct pch_dev *chip = pci_get_drvdata(pdev); iowrite32(val, (&chip->regs->ch_event)); } EXPORT_SYMBOL(pch_ch_event_write); u32 pch_src_uuid_lo_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u32 val; val = ioread32(&chip->regs->src_uuid_lo); return val; } EXPORT_SYMBOL(pch_src_uuid_lo_read); u32 pch_src_uuid_hi_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u32 val; val = ioread32(&chip->regs->src_uuid_hi); return val; } EXPORT_SYMBOL(pch_src_uuid_hi_read); u64 pch_rx_snap_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u64 ns; u32 lo, hi; lo = ioread32(&chip->regs->rx_snap_lo); hi = ioread32(&chip->regs->rx_snap_hi); ns = ((u64) hi) << 32; ns |= lo; return ns; } EXPORT_SYMBOL(pch_rx_snap_read); u64 pch_tx_snap_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u64 ns; u32 lo, hi; lo = ioread32(&chip->regs->tx_snap_lo); hi = ioread32(&chip->regs->tx_snap_hi); ns = ((u64) hi) << 32; ns |= lo; return ns; } EXPORT_SYMBOL(pch_tx_snap_read); /* This function enables all 64 bits in system time registers [high & low]. 
This is a work-around for non continuous value in the SystemTime Register*/ static void pch_set_system_time_count(struct pch_dev *chip) { iowrite32(0x01, &chip->regs->stl_max_set_en); iowrite32(0xFFFFFFFF, &chip->regs->stl_max_set); iowrite32(0x00, &chip->regs->stl_max_set_en); } static void pch_reset(struct pch_dev *chip) { /* Reset Hardware Assist */ pch_block_reset(chip); /* enable all 32 bits in system time registers */ pch_set_system_time_count(chip); } /** * pch_set_station_address() - This API sets the station address used by * IEEE 1588 hardware when looking at PTP * traffic on the ethernet interface * @addr: dress which contain the column separated address to be used. */ static int pch_set_station_address(u8 *addr, struct pci_dev *pdev) { s32 i; struct pch_dev *chip = pci_get_drvdata(pdev); /* Verify the parameter */ if ((chip->regs == 0) || addr == (u8 *)NULL) { dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n"); return PCH_INVALIDPARAM; } /* For all station address bytes */ for (i = 0; i < PCH_STATION_BYTES; i++) { u32 val; s32 tmp; tmp = hex_to_bin(addr[i * 3]); if (tmp < 0) { dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n"); return PCH_INVALIDPARAM; } val = tmp * 16; tmp = hex_to_bin(addr[(i * 3) + 1]); if (tmp < 0) { dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n"); return PCH_INVALIDPARAM; } val += tmp; /* Expects ':' separated addresses */ if ((i < 5) && (addr[(i * 3) + 2] != ':')) { dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n"); return PCH_INVALIDPARAM; } /* Ideally we should set the address only after validating entire string */ dev_dbg(&pdev->dev, "invoking pch_station_set\n"); iowrite32(val, &chip->regs->ts_st[i]); } return 0; } /* * Interrupt service routine */ static irqreturn_t isr(int irq, void *priv) { struct pch_dev *pch_dev = priv; struct pch_ts_regs *regs = pch_dev->regs; struct ptp_clock_event event; u32 ack = 0, lo, hi, val; val = ioread32(&regs->event); if (val & 
PCH_TSE_SNS) { ack |= PCH_TSE_SNS; if (pch_dev->exts0_enabled) { hi = ioread32(&regs->asms_hi); lo = ioread32(&regs->asms_lo); event.type = PTP_CLOCK_EXTTS; event.index = 0; event.timestamp = ((u64) hi) << 32; event.timestamp |= lo; event.timestamp <<= TICKS_NS_SHIFT; ptp_clock_event(pch_dev->ptp_clock, &event); } } if (val & PCH_TSE_SNM) { ack |= PCH_TSE_SNM; if (pch_dev->exts1_enabled) { hi = ioread32(&regs->amms_hi); lo = ioread32(&regs->amms_lo); event.type = PTP_CLOCK_EXTTS; event.index = 1; event.timestamp = ((u64) hi) << 32; event.timestamp |= lo; event.timestamp <<= TICKS_NS_SHIFT; ptp_clock_event(pch_dev->ptp_clock, &event); } } if (val & PCH_TSE_TTIPEND) ack |= PCH_TSE_TTIPEND; /* this bit seems to be always set */ if (ack) { iowrite32(ack, &regs->event); return IRQ_HANDLED; } else return IRQ_NONE; } /* * PTP clock operations */ static int ptp_pch_adjfreq(struct ptp_clock_info *ptp, s32 ppb) { u64 adj; u32 diff, addend; int neg_adj = 0; struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs *regs = pch_dev->regs; if (ppb < 0) { neg_adj = 1; ppb = -ppb; } addend = DEFAULT_ADDEND; adj = addend; adj *= ppb; diff = div_u64(adj, 1000000000ULL); addend = neg_adj ? 
addend - diff : addend + diff; iowrite32(addend, &regs->addend); return 0; } static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta) { s64 now; unsigned long flags; struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs *regs = pch_dev->regs; spin_lock_irqsave(&pch_dev->register_lock, flags); now = pch_systime_read(regs); now += delta; pch_systime_write(regs, now); spin_unlock_irqrestore(&pch_dev->register_lock, flags); return 0; } static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec *ts) { u64 ns; u32 remainder; unsigned long flags; struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs *regs = pch_dev->regs; spin_lock_irqsave(&pch_dev->register_lock, flags); ns = pch_systime_read(regs); spin_unlock_irqrestore(&pch_dev->register_lock, flags); ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); ts->tv_nsec = remainder; return 0; } static int ptp_pch_settime(struct ptp_clock_info *ptp, const struct timespec *ts) { u64 ns; unsigned long flags; struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs *regs = pch_dev->regs; ns = ts->tv_sec * 1000000000ULL; ns += ts->tv_nsec; spin_lock_irqsave(&pch_dev->register_lock, flags); pch_systime_write(regs, ns); spin_unlock_irqrestore(&pch_dev->register_lock, flags); return 0; } static int ptp_pch_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); switch (rq->type) { case PTP_CLK_REQ_EXTTS: switch (rq->extts.index) { case 0: pch_dev->exts0_enabled = on ? 1 : 0; break; case 1: pch_dev->exts1_enabled = on ? 
1 : 0; break; default: return -EINVAL; } return 0; default: break; } return -EOPNOTSUPP; } static struct ptp_clock_info ptp_pch_caps = { .owner = THIS_MODULE, .name = "PCH timer", .max_adj = 50000000, .n_ext_ts = N_EXT_TS, .pps = 0, .adjfreq = ptp_pch_adjfreq, .adjtime = ptp_pch_adjtime, .gettime = ptp_pch_gettime, .settime = ptp_pch_settime, .enable = ptp_pch_enable, }; #ifdef CONFIG_PM static s32 pch_suspend(struct pci_dev *pdev, pm_message_t state) { pci_disable_device(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); if (pci_save_state(pdev) != 0) { dev_err(&pdev->dev, "could not save PCI config state\n"); return -ENOMEM; } pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static s32 pch_resume(struct pci_dev *pdev) { s32 ret; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "pci_enable_device failed\n"); return ret; } pci_enable_wake(pdev, PCI_D3hot, 0); return 0; } #else #define pch_suspend NULL #define pch_resume NULL #endif static void __devexit pch_remove(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); ptp_clock_unregister(chip->ptp_clock); /* free the interrupt */ if (pdev->irq != 0) free_irq(pdev->irq, chip); /* unmap the virtual IO memory space */ if (chip->regs != 0) { iounmap(chip->regs); chip->regs = 0; } /* release the reserved IO memory space */ if (chip->mem_base != 0) { release_mem_region(chip->mem_base, chip->mem_size); chip->mem_base = 0; } pci_disable_device(pdev); kfree(chip); dev_info(&pdev->dev, "complete\n"); } static s32 __devinit pch_probe(struct pci_dev *pdev, const struct pci_device_id *id) { s32 ret; unsigned long flags; struct pch_dev *chip; chip = kzalloc(sizeof(struct pch_dev), GFP_KERNEL); if (chip == NULL) return -ENOMEM; /* enable the 1588 pci device */ ret = pci_enable_device(pdev); if (ret != 0) { dev_err(&pdev->dev, "could not enable the pci device\n"); goto err_pci_en; } chip->mem_base = pci_resource_start(pdev, 
IO_MEM_BAR); if (!chip->mem_base) { dev_err(&pdev->dev, "could not locate IO memory address\n"); ret = -ENODEV; goto err_pci_start; } /* retrieve the available length of the IO memory space */ chip->mem_size = pci_resource_len(pdev, IO_MEM_BAR); /* allocate the memory for the device registers */ if (!request_mem_region(chip->mem_base, chip->mem_size, "1588_regs")) { dev_err(&pdev->dev, "could not allocate register memory space\n"); ret = -EBUSY; goto err_req_mem_region; } /* get the virtual address to the 1588 registers */ chip->regs = ioremap(chip->mem_base, chip->mem_size); if (!chip->regs) { dev_err(&pdev->dev, "Could not get virtual address\n"); ret = -ENOMEM; goto err_ioremap; } chip->caps = ptp_pch_caps; chip->ptp_clock = ptp_clock_register(&chip->caps); if (IS_ERR(chip->ptp_clock)) return PTR_ERR(chip->ptp_clock); spin_lock_init(&chip->register_lock); ret = request_irq(pdev->irq, &isr, IRQF_SHARED, KBUILD_MODNAME, chip); if (ret != 0) { dev_err(&pdev->dev, "failed to get irq %d\n", pdev->irq); goto err_req_irq; } /* indicate success */ chip->irq = pdev->irq; chip->pdev = pdev; pci_set_drvdata(pdev, chip); spin_lock_irqsave(&chip->register_lock, flags); /* reset the ieee1588 h/w */ pch_reset(chip); iowrite32(DEFAULT_ADDEND, &chip->regs->addend); iowrite32(1, &chip->regs->trgt_lo); iowrite32(0, &chip->regs->trgt_hi); iowrite32(PCH_TSE_TTIPEND, &chip->regs->event); /* Version: IEEE1588 v1 and IEEE1588-2008, Mode: All Evwnt, Locked */ iowrite32(0x80020000, &chip->regs->ch_control); pch_eth_enable_set(chip); if (strcmp(pch_param.station, "00:00:00:00:00:00") != 0) { if (pch_set_station_address(pch_param.station, pdev) != 0) { dev_err(&pdev->dev, "Invalid station address parameter\n" "Module loaded but station address not set correctly\n" ); } } spin_unlock_irqrestore(&chip->register_lock, flags); return 0; err_req_irq: ptp_clock_unregister(chip->ptp_clock); iounmap(chip->regs); chip->regs = 0; err_ioremap: release_mem_region(chip->mem_base, chip->mem_size); 
err_req_mem_region: chip->mem_base = 0; err_pci_start: pci_disable_device(pdev); err_pci_en: kfree(chip); dev_err(&pdev->dev, "probe failed(ret=0x%x)\n", ret); return ret; } static DEFINE_PCI_DEVICE_TABLE(pch_ieee1588_pcidev_id) = { { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_PCH_1588 }, {0} }; static struct pci_driver pch_driver = { .name = KBUILD_MODNAME, .id_table = pch_ieee1588_pcidev_id, .probe = pch_probe, .remove = pch_remove, .suspend = pch_suspend, .resume = pch_resume, }; static void __exit ptp_pch_exit(void) { pci_unregister_driver(&pch_driver); } static s32 __init ptp_pch_init(void) { s32 ret; /* register the driver with the pci core */ ret = pci_register_driver(&pch_driver); return ret; } module_init(ptp_pch_init); module_exit(ptp_pch_exit); module_param_string(station, pch_param.station, sizeof pch_param.station, 0444); MODULE_PARM_DESC(station, "IEEE 1588 station address to use - column separated hex values"); MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>"); MODULE_DESCRIPTION("PTP clock using the EG20T timer"); MODULE_LICENSE("GPL");
gpl-2.0
android-armv7a-belalang-tempur/android_kernel_samsung_royss
drivers/ptp/ptp_pch.c
4772
16597
/* * PTP 1588 clock using the EG20T PCH * * Copyright (C) 2010 OMICRON electronics GmbH * Copyright (C) 2011-2012 LAPIS SEMICONDUCTOR Co., LTD. * * This code was derived from the IXP46X driver. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/device.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/ptp_clock_kernel.h> #include <linux/slab.h> #define STATION_ADDR_LEN 20 #define PCI_DEVICE_ID_PCH_1588 0x8819 #define IO_MEM_BAR 1 #define DEFAULT_ADDEND 0xA0000000 #define TICKS_NS_SHIFT 5 #define N_EXT_TS 2 enum pch_status { PCH_SUCCESS, PCH_INVALIDPARAM, PCH_NOTIMESTAMP, PCH_INTERRUPTMODEINUSE, PCH_FAILED, PCH_UNSUPPORTED, }; /** * struct pch_ts_regs - IEEE 1588 registers */ struct pch_ts_regs { u32 control; u32 event; u32 addend; u32 accum; u32 test; u32 ts_compare; u32 rsystime_lo; u32 rsystime_hi; u32 systime_lo; u32 systime_hi; u32 trgt_lo; u32 trgt_hi; u32 asms_lo; u32 asms_hi; u32 amms_lo; u32 amms_hi; u32 ch_control; u32 ch_event; u32 tx_snap_lo; u32 tx_snap_hi; u32 rx_snap_lo; u32 rx_snap_hi; u32 src_uuid_lo; u32 src_uuid_hi; u32 can_status; u32 can_snap_lo; u32 can_snap_hi; u32 ts_sel; u32 ts_st[6]; u32 reserve1[14]; u32 stl_max_set_en; u32 stl_max_set; u32 reserve2[13]; u32 srst; }; 
#define PCH_TSC_RESET (1 << 0) #define PCH_TSC_TTM_MASK (1 << 1) #define PCH_TSC_ASMS_MASK (1 << 2) #define PCH_TSC_AMMS_MASK (1 << 3) #define PCH_TSC_PPSM_MASK (1 << 4) #define PCH_TSE_TTIPEND (1 << 1) #define PCH_TSE_SNS (1 << 2) #define PCH_TSE_SNM (1 << 3) #define PCH_TSE_PPS (1 << 4) #define PCH_CC_MM (1 << 0) #define PCH_CC_TA (1 << 1) #define PCH_CC_MODE_SHIFT 16 #define PCH_CC_MODE_MASK 0x001F0000 #define PCH_CC_VERSION (1 << 31) #define PCH_CE_TXS (1 << 0) #define PCH_CE_RXS (1 << 1) #define PCH_CE_OVR (1 << 0) #define PCH_CE_VAL (1 << 1) #define PCH_ECS_ETH (1 << 0) #define PCH_ECS_CAN (1 << 1) #define PCH_STATION_BYTES 6 #define PCH_IEEE1588_ETH (1 << 0) #define PCH_IEEE1588_CAN (1 << 1) /** * struct pch_dev - Driver private data */ struct pch_dev { struct pch_ts_regs *regs; struct ptp_clock *ptp_clock; struct ptp_clock_info caps; int exts0_enabled; int exts1_enabled; u32 mem_base; u32 mem_size; u32 irq; struct pci_dev *pdev; spinlock_t register_lock; }; /** * struct pch_params - 1588 module parameter */ struct pch_params { u8 station[STATION_ADDR_LEN]; }; /* structure to hold the module parameters */ static struct pch_params pch_param = { "00:00:00:00:00:00" }; /* * Register access functions */ static inline void pch_eth_enable_set(struct pch_dev *chip) { u32 val; /* SET the eth_enable bit */ val = ioread32(&chip->regs->ts_sel) | (PCH_ECS_ETH); iowrite32(val, (&chip->regs->ts_sel)); } static u64 pch_systime_read(struct pch_ts_regs *regs) { u64 ns; u32 lo, hi; lo = ioread32(&regs->systime_lo); hi = ioread32(&regs->systime_hi); ns = ((u64) hi) << 32; ns |= lo; ns <<= TICKS_NS_SHIFT; return ns; } static void pch_systime_write(struct pch_ts_regs *regs, u64 ns) { u32 hi, lo; ns >>= TICKS_NS_SHIFT; hi = ns >> 32; lo = ns & 0xffffffff; iowrite32(lo, &regs->systime_lo); iowrite32(hi, &regs->systime_hi); } static inline void pch_block_reset(struct pch_dev *chip) { u32 val; /* Reset Hardware Assist block */ val = ioread32(&chip->regs->control) | PCH_TSC_RESET; 
iowrite32(val, (&chip->regs->control)); val = val & ~PCH_TSC_RESET; iowrite32(val, (&chip->regs->control)); } u32 pch_ch_control_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u32 val; val = ioread32(&chip->regs->ch_control); return val; } EXPORT_SYMBOL(pch_ch_control_read); void pch_ch_control_write(struct pci_dev *pdev, u32 val) { struct pch_dev *chip = pci_get_drvdata(pdev); iowrite32(val, (&chip->regs->ch_control)); } EXPORT_SYMBOL(pch_ch_control_write); u32 pch_ch_event_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u32 val; val = ioread32(&chip->regs->ch_event); return val; } EXPORT_SYMBOL(pch_ch_event_read); void pch_ch_event_write(struct pci_dev *pdev, u32 val) { struct pch_dev *chip = pci_get_drvdata(pdev); iowrite32(val, (&chip->regs->ch_event)); } EXPORT_SYMBOL(pch_ch_event_write); u32 pch_src_uuid_lo_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u32 val; val = ioread32(&chip->regs->src_uuid_lo); return val; } EXPORT_SYMBOL(pch_src_uuid_lo_read); u32 pch_src_uuid_hi_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u32 val; val = ioread32(&chip->regs->src_uuid_hi); return val; } EXPORT_SYMBOL(pch_src_uuid_hi_read); u64 pch_rx_snap_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u64 ns; u32 lo, hi; lo = ioread32(&chip->regs->rx_snap_lo); hi = ioread32(&chip->regs->rx_snap_hi); ns = ((u64) hi) << 32; ns |= lo; return ns; } EXPORT_SYMBOL(pch_rx_snap_read); u64 pch_tx_snap_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u64 ns; u32 lo, hi; lo = ioread32(&chip->regs->tx_snap_lo); hi = ioread32(&chip->regs->tx_snap_hi); ns = ((u64) hi) << 32; ns |= lo; return ns; } EXPORT_SYMBOL(pch_tx_snap_read); /* This function enables all 64 bits in system time registers [high & low]. 
This is a work-around for non continuous value in the SystemTime Register*/ static void pch_set_system_time_count(struct pch_dev *chip) { iowrite32(0x01, &chip->regs->stl_max_set_en); iowrite32(0xFFFFFFFF, &chip->regs->stl_max_set); iowrite32(0x00, &chip->regs->stl_max_set_en); } static void pch_reset(struct pch_dev *chip) { /* Reset Hardware Assist */ pch_block_reset(chip); /* enable all 32 bits in system time registers */ pch_set_system_time_count(chip); } /** * pch_set_station_address() - This API sets the station address used by * IEEE 1588 hardware when looking at PTP * traffic on the ethernet interface * @addr: dress which contain the column separated address to be used. */ static int pch_set_station_address(u8 *addr, struct pci_dev *pdev) { s32 i; struct pch_dev *chip = pci_get_drvdata(pdev); /* Verify the parameter */ if ((chip->regs == 0) || addr == (u8 *)NULL) { dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n"); return PCH_INVALIDPARAM; } /* For all station address bytes */ for (i = 0; i < PCH_STATION_BYTES; i++) { u32 val; s32 tmp; tmp = hex_to_bin(addr[i * 3]); if (tmp < 0) { dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n"); return PCH_INVALIDPARAM; } val = tmp * 16; tmp = hex_to_bin(addr[(i * 3) + 1]); if (tmp < 0) { dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n"); return PCH_INVALIDPARAM; } val += tmp; /* Expects ':' separated addresses */ if ((i < 5) && (addr[(i * 3) + 2] != ':')) { dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n"); return PCH_INVALIDPARAM; } /* Ideally we should set the address only after validating entire string */ dev_dbg(&pdev->dev, "invoking pch_station_set\n"); iowrite32(val, &chip->regs->ts_st[i]); } return 0; } /* * Interrupt service routine */ static irqreturn_t isr(int irq, void *priv) { struct pch_dev *pch_dev = priv; struct pch_ts_regs *regs = pch_dev->regs; struct ptp_clock_event event; u32 ack = 0, lo, hi, val; val = ioread32(&regs->event); if (val & 
PCH_TSE_SNS) { ack |= PCH_TSE_SNS; if (pch_dev->exts0_enabled) { hi = ioread32(&regs->asms_hi); lo = ioread32(&regs->asms_lo); event.type = PTP_CLOCK_EXTTS; event.index = 0; event.timestamp = ((u64) hi) << 32; event.timestamp |= lo; event.timestamp <<= TICKS_NS_SHIFT; ptp_clock_event(pch_dev->ptp_clock, &event); } } if (val & PCH_TSE_SNM) { ack |= PCH_TSE_SNM; if (pch_dev->exts1_enabled) { hi = ioread32(&regs->amms_hi); lo = ioread32(&regs->amms_lo); event.type = PTP_CLOCK_EXTTS; event.index = 1; event.timestamp = ((u64) hi) << 32; event.timestamp |= lo; event.timestamp <<= TICKS_NS_SHIFT; ptp_clock_event(pch_dev->ptp_clock, &event); } } if (val & PCH_TSE_TTIPEND) ack |= PCH_TSE_TTIPEND; /* this bit seems to be always set */ if (ack) { iowrite32(ack, &regs->event); return IRQ_HANDLED; } else return IRQ_NONE; } /* * PTP clock operations */ static int ptp_pch_adjfreq(struct ptp_clock_info *ptp, s32 ppb) { u64 adj; u32 diff, addend; int neg_adj = 0; struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs *regs = pch_dev->regs; if (ppb < 0) { neg_adj = 1; ppb = -ppb; } addend = DEFAULT_ADDEND; adj = addend; adj *= ppb; diff = div_u64(adj, 1000000000ULL); addend = neg_adj ? 
addend - diff : addend + diff; iowrite32(addend, &regs->addend); return 0; } static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta) { s64 now; unsigned long flags; struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs *regs = pch_dev->regs; spin_lock_irqsave(&pch_dev->register_lock, flags); now = pch_systime_read(regs); now += delta; pch_systime_write(regs, now); spin_unlock_irqrestore(&pch_dev->register_lock, flags); return 0; } static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec *ts) { u64 ns; u32 remainder; unsigned long flags; struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs *regs = pch_dev->regs; spin_lock_irqsave(&pch_dev->register_lock, flags); ns = pch_systime_read(regs); spin_unlock_irqrestore(&pch_dev->register_lock, flags); ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); ts->tv_nsec = remainder; return 0; } static int ptp_pch_settime(struct ptp_clock_info *ptp, const struct timespec *ts) { u64 ns; unsigned long flags; struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs *regs = pch_dev->regs; ns = ts->tv_sec * 1000000000ULL; ns += ts->tv_nsec; spin_lock_irqsave(&pch_dev->register_lock, flags); pch_systime_write(regs, ns); spin_unlock_irqrestore(&pch_dev->register_lock, flags); return 0; } static int ptp_pch_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); switch (rq->type) { case PTP_CLK_REQ_EXTTS: switch (rq->extts.index) { case 0: pch_dev->exts0_enabled = on ? 1 : 0; break; case 1: pch_dev->exts1_enabled = on ? 
1 : 0; break; default: return -EINVAL; } return 0; default: break; } return -EOPNOTSUPP; } static struct ptp_clock_info ptp_pch_caps = { .owner = THIS_MODULE, .name = "PCH timer", .max_adj = 50000000, .n_ext_ts = N_EXT_TS, .pps = 0, .adjfreq = ptp_pch_adjfreq, .adjtime = ptp_pch_adjtime, .gettime = ptp_pch_gettime, .settime = ptp_pch_settime, .enable = ptp_pch_enable, }; #ifdef CONFIG_PM static s32 pch_suspend(struct pci_dev *pdev, pm_message_t state) { pci_disable_device(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); if (pci_save_state(pdev) != 0) { dev_err(&pdev->dev, "could not save PCI config state\n"); return -ENOMEM; } pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static s32 pch_resume(struct pci_dev *pdev) { s32 ret; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "pci_enable_device failed\n"); return ret; } pci_enable_wake(pdev, PCI_D3hot, 0); return 0; } #else #define pch_suspend NULL #define pch_resume NULL #endif static void __devexit pch_remove(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); ptp_clock_unregister(chip->ptp_clock); /* free the interrupt */ if (pdev->irq != 0) free_irq(pdev->irq, chip); /* unmap the virtual IO memory space */ if (chip->regs != 0) { iounmap(chip->regs); chip->regs = 0; } /* release the reserved IO memory space */ if (chip->mem_base != 0) { release_mem_region(chip->mem_base, chip->mem_size); chip->mem_base = 0; } pci_disable_device(pdev); kfree(chip); dev_info(&pdev->dev, "complete\n"); } static s32 __devinit pch_probe(struct pci_dev *pdev, const struct pci_device_id *id) { s32 ret; unsigned long flags; struct pch_dev *chip; chip = kzalloc(sizeof(struct pch_dev), GFP_KERNEL); if (chip == NULL) return -ENOMEM; /* enable the 1588 pci device */ ret = pci_enable_device(pdev); if (ret != 0) { dev_err(&pdev->dev, "could not enable the pci device\n"); goto err_pci_en; } chip->mem_base = pci_resource_start(pdev, 
IO_MEM_BAR); if (!chip->mem_base) { dev_err(&pdev->dev, "could not locate IO memory address\n"); ret = -ENODEV; goto err_pci_start; } /* retrieve the available length of the IO memory space */ chip->mem_size = pci_resource_len(pdev, IO_MEM_BAR); /* allocate the memory for the device registers */ if (!request_mem_region(chip->mem_base, chip->mem_size, "1588_regs")) { dev_err(&pdev->dev, "could not allocate register memory space\n"); ret = -EBUSY; goto err_req_mem_region; } /* get the virtual address to the 1588 registers */ chip->regs = ioremap(chip->mem_base, chip->mem_size); if (!chip->regs) { dev_err(&pdev->dev, "Could not get virtual address\n"); ret = -ENOMEM; goto err_ioremap; } chip->caps = ptp_pch_caps; chip->ptp_clock = ptp_clock_register(&chip->caps); if (IS_ERR(chip->ptp_clock)) return PTR_ERR(chip->ptp_clock); spin_lock_init(&chip->register_lock); ret = request_irq(pdev->irq, &isr, IRQF_SHARED, KBUILD_MODNAME, chip); if (ret != 0) { dev_err(&pdev->dev, "failed to get irq %d\n", pdev->irq); goto err_req_irq; } /* indicate success */ chip->irq = pdev->irq; chip->pdev = pdev; pci_set_drvdata(pdev, chip); spin_lock_irqsave(&chip->register_lock, flags); /* reset the ieee1588 h/w */ pch_reset(chip); iowrite32(DEFAULT_ADDEND, &chip->regs->addend); iowrite32(1, &chip->regs->trgt_lo); iowrite32(0, &chip->regs->trgt_hi); iowrite32(PCH_TSE_TTIPEND, &chip->regs->event); /* Version: IEEE1588 v1 and IEEE1588-2008, Mode: All Evwnt, Locked */ iowrite32(0x80020000, &chip->regs->ch_control); pch_eth_enable_set(chip); if (strcmp(pch_param.station, "00:00:00:00:00:00") != 0) { if (pch_set_station_address(pch_param.station, pdev) != 0) { dev_err(&pdev->dev, "Invalid station address parameter\n" "Module loaded but station address not set correctly\n" ); } } spin_unlock_irqrestore(&chip->register_lock, flags); return 0; err_req_irq: ptp_clock_unregister(chip->ptp_clock); iounmap(chip->regs); chip->regs = 0; err_ioremap: release_mem_region(chip->mem_base, chip->mem_size); 
err_req_mem_region: chip->mem_base = 0; err_pci_start: pci_disable_device(pdev); err_pci_en: kfree(chip); dev_err(&pdev->dev, "probe failed(ret=0x%x)\n", ret); return ret; } static DEFINE_PCI_DEVICE_TABLE(pch_ieee1588_pcidev_id) = { { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_PCH_1588 }, {0} }; static struct pci_driver pch_driver = { .name = KBUILD_MODNAME, .id_table = pch_ieee1588_pcidev_id, .probe = pch_probe, .remove = pch_remove, .suspend = pch_suspend, .resume = pch_resume, }; static void __exit ptp_pch_exit(void) { pci_unregister_driver(&pch_driver); } static s32 __init ptp_pch_init(void) { s32 ret; /* register the driver with the pci core */ ret = pci_register_driver(&pch_driver); return ret; } module_init(ptp_pch_init); module_exit(ptp_pch_exit); module_param_string(station, pch_param.station, sizeof pch_param.station, 0444); MODULE_PARM_DESC(station, "IEEE 1588 station address to use - column separated hex values"); MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>"); MODULE_DESCRIPTION("PTP clock using the EG20T timer"); MODULE_LICENSE("GPL");
gpl-2.0
TeamOrion-Devices/kernel_htc_msm8974
drivers/input/touchscreen/cyttsp_i2c.c
5028
3390
/* * Source for: * Cypress TrueTouch(TM) Standard Product (TTSP) I2C touchscreen driver. * For use with Cypress Txx3xx parts. * Supported parts include: * CY8CTST341 * CY8CTMA340 * * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2, and only version 2, as published by the * Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Contact Cypress Semiconductor at www.cypress.com <kev@cypress.com> * */ #include "cyttsp_core.h" #include <linux/i2c.h> #include <linux/input.h> #define CY_I2C_DATA_SIZE 128 static int cyttsp_i2c_read_block_data(struct cyttsp *ts, u8 addr, u8 length, void *values) { struct i2c_client *client = to_i2c_client(ts->dev); struct i2c_msg msgs[] = { { .addr = client->addr, .flags = 0, .len = 1, .buf = &addr, }, { .addr = client->addr, .flags = I2C_M_RD, .len = length, .buf = values, }, }; int retval; retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (retval < 0) return retval; return retval != ARRAY_SIZE(msgs) ? -EIO : 0; } static int cyttsp_i2c_write_block_data(struct cyttsp *ts, u8 addr, u8 length, const void *values) { struct i2c_client *client = to_i2c_client(ts->dev); int retval; ts->xfer_buf[0] = addr; memcpy(&ts->xfer_buf[1], values, length); retval = i2c_master_send(client, ts->xfer_buf, length + 1); return retval < 0 ? 
retval : 0; } static const struct cyttsp_bus_ops cyttsp_i2c_bus_ops = { .bustype = BUS_I2C, .write = cyttsp_i2c_write_block_data, .read = cyttsp_i2c_read_block_data, }; static int __devinit cyttsp_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct cyttsp *ts; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(&client->dev, "I2C functionality not Supported\n"); return -EIO; } ts = cyttsp_probe(&cyttsp_i2c_bus_ops, &client->dev, client->irq, CY_I2C_DATA_SIZE); if (IS_ERR(ts)) return PTR_ERR(ts); i2c_set_clientdata(client, ts); return 0; } static int __devexit cyttsp_i2c_remove(struct i2c_client *client) { struct cyttsp *ts = i2c_get_clientdata(client); cyttsp_remove(ts); return 0; } static const struct i2c_device_id cyttsp_i2c_id[] = { { CY_I2C_NAME, 0 }, { } }; MODULE_DEVICE_TABLE(i2c, cyttsp_i2c_id); static struct i2c_driver cyttsp_i2c_driver = { .driver = { .name = CY_I2C_NAME, .owner = THIS_MODULE, .pm = &cyttsp_pm_ops, }, .probe = cyttsp_i2c_probe, .remove = __devexit_p(cyttsp_i2c_remove), .id_table = cyttsp_i2c_id, }; module_i2c_driver(cyttsp_i2c_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) I2C driver"); MODULE_AUTHOR("Cypress"); MODULE_ALIAS("i2c:cyttsp");
gpl-2.0
173210/android_kernel_samsung_smdk4412
arch/powerpc/sysdev/of_rtc.c
10916
1597
/* * Instantiate mmio-mapped RTC chips based on device tree information * * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/of.h> #include <linux/init.h> #include <linux/of_platform.h> #include <linux/slab.h> static __initdata struct { const char *compatible; char *plat_name; } of_rtc_table[] = { { "ds1743-nvram", "rtc-ds1742" }, }; void __init of_instantiate_rtc(void) { struct device_node *node; int err; int i; for (i = 0; i < ARRAY_SIZE(of_rtc_table); i++) { char *plat_name = of_rtc_table[i].plat_name; for_each_compatible_node(node, NULL, of_rtc_table[i].compatible) { struct resource *res; res = kmalloc(sizeof(*res), GFP_KERNEL); if (!res) { printk(KERN_ERR "OF RTC: Out of memory " "allocating resource structure for %s\n", node->full_name); continue; } err = of_address_to_resource(node, 0, res); if (err) { printk(KERN_ERR "OF RTC: Error " "translating resources for %s\n", node->full_name); continue; } printk(KERN_INFO "OF_RTC: %s is a %s @ 0x%llx-0x%llx\n", node->full_name, plat_name, (unsigned long long)res->start, (unsigned long long)res->end); platform_device_register_simple(plat_name, -1, res, 1); } } }
gpl-2.0
vet-note/android_kernel_samsung_smdk4210
arch/powerpc/platforms/82xx/mpc8272_ads.c
11428
6066
/* * MPC8272 ADS board support * * Copyright 2007 Freescale Semiconductor, Inc. * Author: Scott Wood <scottwood@freescale.com> * * Based on code by Vitaly Bordug <vbordug@ru.mvista.com> * Copyright (c) 2006 MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/fsl_devices.h> #include <linux/of_platform.h> #include <linux/io.h> #include <asm/cpm2.h> #include <asm/udbg.h> #include <asm/machdep.h> #include <asm/time.h> #include <platforms/82xx/pq2.h> #include <sysdev/fsl_soc.h> #include <sysdev/cpm2_pic.h> #include "pq2.h" static void __init mpc8272_ads_pic_init(void) { struct device_node *np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic"); if (!np) { printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n"); return; } cpm2_pic_init(np); of_node_put(np); /* Initialize stuff for the 82xx CPLD IC and install demux */ pq2ads_pci_init_irq(); } struct cpm_pin { int port, pin, flags; }; static struct cpm_pin mpc8272_ads_pins[] = { /* SCC1 */ {3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* SCC4 */ {3, 21, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {3, 22, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* FCC1 */ {0, 14, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {0, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {0, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {0, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {0, 18, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {0, 19, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {0, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {0, 21, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {0, 26, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, {0, 27, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, {0, 28, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {0, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {0, 30, CPM_PIN_INPUT | 
CPM_PIN_SECONDARY}, {0, 31, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, {2, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {2, 22, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* FCC2 */ {1, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {1, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {1, 20, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {1, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {1, 22, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {1, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {1, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {1, 25, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {1, 26, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {1, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {1, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {1, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {1, 30, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {1, 31, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {2, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {2, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* I2C */ {3, 14, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, {3, 15, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, /* USB */ {2, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {2, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {2, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {2, 24, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {3, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {3, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {3, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, }; static void __init init_ioports(void) { int i; for (i = 0; i < ARRAY_SIZE(mpc8272_ads_pins); i++) { struct cpm_pin *pin = &mpc8272_ads_pins[i]; cpm2_set_pin(pin->port, pin->pin, pin->flags); } cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RX); cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_TX); cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK8, CPM_CLK_RX); cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK8, CPM_CLK_TX); cpm2_clk_setup(CPM_CLK_SCC4, CPM_BRG4, CPM_CLK_RX); cpm2_clk_setup(CPM_CLK_SCC4, CPM_BRG4, CPM_CLK_TX); cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK11, CPM_CLK_RX); cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_TX); cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK15, CPM_CLK_RX); cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK16, 
CPM_CLK_TX); } static void __init mpc8272_ads_setup_arch(void) { struct device_node *np; __be32 __iomem *bcsr; if (ppc_md.progress) ppc_md.progress("mpc8272_ads_setup_arch()", 0); cpm2_reset(); np = of_find_compatible_node(NULL, NULL, "fsl,mpc8272ads-bcsr"); if (!np) { printk(KERN_ERR "No bcsr in device tree\n"); return; } bcsr = of_iomap(np, 0); of_node_put(np); if (!bcsr) { printk(KERN_ERR "Cannot map BCSR registers\n"); return; } #define BCSR1_FETHIEN 0x08000000 #define BCSR1_FETH_RST 0x04000000 #define BCSR1_RS232_EN1 0x02000000 #define BCSR1_RS232_EN2 0x01000000 #define BCSR3_USB_nEN 0x80000000 #define BCSR3_FETHIEN2 0x10000000 #define BCSR3_FETH2_RST 0x08000000 clrbits32(&bcsr[1], BCSR1_RS232_EN1 | BCSR1_RS232_EN2 | BCSR1_FETHIEN); setbits32(&bcsr[1], BCSR1_FETH_RST); clrbits32(&bcsr[3], BCSR3_FETHIEN2); setbits32(&bcsr[3], BCSR3_FETH2_RST); clrbits32(&bcsr[3], BCSR3_USB_nEN); iounmap(bcsr); init_ioports(); pq2_init_pci(); if (ppc_md.progress) ppc_md.progress("mpc8272_ads_setup_arch(), finish", 0); } static struct of_device_id __initdata of_bus_ids[] = { { .name = "soc", }, { .name = "cpm", }, { .name = "localbus", }, {}, }; static int __init declare_of_platform_devices(void) { /* Publish the QE devices */ of_platform_bus_probe(NULL, of_bus_ids, NULL); return 0; } machine_device_initcall(mpc8272_ads, declare_of_platform_devices); /* * Called very early, device-tree isn't unflattened */ static int __init mpc8272_ads_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,mpc8272ads"); } define_machine(mpc8272_ads) { .name = "Freescale MPC8272 ADS", .probe = mpc8272_ads_probe, .setup_arch = mpc8272_ads_setup_arch, .init_IRQ = mpc8272_ads_pic_init, .get_irq = cpm2_get_irq, .calibrate_decr = generic_calibrate_decr, .restart = pq2_restart, .progress = udbg_progress, };
gpl-2.0
JustAkan/Oxygen_united_kernel-gproj-lollipop
arch/alpha/kernel/err_titan.c
11940
23439
/* * linux/arch/alpha/kernel/err_titan.c * * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) * * Error handling code supporting TITAN systems */ #include <linux/init.h> #include <linux/pci.h> #include <linux/sched.h> #include <asm/io.h> #include <asm/core_titan.h> #include <asm/hwrpb.h> #include <asm/smp.h> #include <asm/err_common.h> #include <asm/err_ev6.h> #include <asm/irq_regs.h> #include "err_impl.h" #include "proto.h" static int titan_parse_c_misc(u64 c_misc, int print) { #ifdef CONFIG_VERBOSE_MCHECK char *src; int nxs = 0; #endif int status = MCHK_DISPOSITION_REPORT; #define TITAN__CCHIP_MISC__NXM (1UL << 28) #define TITAN__CCHIP_MISC__NXS__S (29) #define TITAN__CCHIP_MISC__NXS__M (0x7) if (!(c_misc & TITAN__CCHIP_MISC__NXM)) return MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; nxs = EXTRACT(c_misc, TITAN__CCHIP_MISC__NXS); switch(nxs) { case 0: /* CPU 0 */ case 1: /* CPU 1 */ case 2: /* CPU 2 */ case 3: /* CPU 3 */ src = "CPU"; /* num is already the CPU number */ break; case 4: /* Pchip 0 */ case 5: /* Pchip 1 */ src = "Pchip"; nxs -= 4; break; default:/* reserved */ src = "Unknown, NXS ="; /* leave num untouched */ break; } printk("%s Non-existent memory access from: %s %d\n", err_print_prefix, src, nxs); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_serror(int which, u64 serror, int print) { int status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK static const char * const serror_src[] = { "GPCI", "APCI", "AGP HP", "AGP LP" }; static const char * const serror_cmd[] = { "DMA Read", "DMA RMW", "SGTE Read", "Reserved" }; #endif /* CONFIG_VERBOSE_MCHECK */ #define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0) #define TITAN__PCHIP_SERROR__UECC (1UL << 1) #define TITAN__PCHIP_SERROR__CRE (1UL << 2) #define TITAN__PCHIP_SERROR__NXIO (1UL << 3) #define TITAN__PCHIP_SERROR__LOST_CRE (1UL << 4) #define TITAN__PCHIP_SERROR__ECCMASK (TITAN__PCHIP_SERROR__UECC | \ 
TITAN__PCHIP_SERROR__CRE) #define TITAN__PCHIP_SERROR__ERRMASK (TITAN__PCHIP_SERROR__LOST_UECC | \ TITAN__PCHIP_SERROR__UECC | \ TITAN__PCHIP_SERROR__CRE | \ TITAN__PCHIP_SERROR__NXIO | \ TITAN__PCHIP_SERROR__LOST_CRE) #define TITAN__PCHIP_SERROR__SRC__S (52) #define TITAN__PCHIP_SERROR__SRC__M (0x3) #define TITAN__PCHIP_SERROR__CMD__S (54) #define TITAN__PCHIP_SERROR__CMD__M (0x3) #define TITAN__PCHIP_SERROR__SYN__S (56) #define TITAN__PCHIP_SERROR__SYN__M (0xff) #define TITAN__PCHIP_SERROR__ADDR__S (15) #define TITAN__PCHIP_SERROR__ADDR__M (0xffffffffUL) if (!(serror & TITAN__PCHIP_SERROR__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; printk("%s PChip %d SERROR: %016llx\n", err_print_prefix, which, serror); if (serror & TITAN__PCHIP_SERROR__ECCMASK) { printk("%s %sorrectable ECC Error:\n" " Source: %-6s Command: %-8s Syndrome: 0x%08x\n" " Address: 0x%llx\n", err_print_prefix, (serror & TITAN__PCHIP_SERROR__UECC) ? "Unc" : "C", serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)], serror_cmd[EXTRACT(serror, TITAN__PCHIP_SERROR__CMD)], (unsigned)EXTRACT(serror, TITAN__PCHIP_SERROR__SYN), EXTRACT(serror, TITAN__PCHIP_SERROR__ADDR)); } if (serror & TITAN__PCHIP_SERROR__NXIO) printk("%s Non Existent I/O Error\n", err_print_prefix); if (serror & TITAN__PCHIP_SERROR__LOST_UECC) printk("%s Lost Uncorrectable ECC Error\n", err_print_prefix); if (serror & TITAN__PCHIP_SERROR__LOST_CRE) printk("%s Lost Correctable ECC Error\n", err_print_prefix); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_perror(int which, int port, u64 perror, int print) { int cmd; unsigned long addr; int status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK static const char * const perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle", "I/O Read", "I/O Write", "Reserved", "Reserved", "Memory Read", "Memory Write", "Reserved", "Reserved", "Configuration Read", "Configuration Write", "Memory Read 
Multiple", "Dual Address Cycle", "Memory Read Line", "Memory Write and Invalidate" }; #endif /* CONFIG_VERBOSE_MCHECK */ #define TITAN__PCHIP_PERROR__LOST (1UL << 0) #define TITAN__PCHIP_PERROR__SERR (1UL << 1) #define TITAN__PCHIP_PERROR__PERR (1UL << 2) #define TITAN__PCHIP_PERROR__DCRTO (1UL << 3) #define TITAN__PCHIP_PERROR__SGE (1UL << 4) #define TITAN__PCHIP_PERROR__APE (1UL << 5) #define TITAN__PCHIP_PERROR__TA (1UL << 6) #define TITAN__PCHIP_PERROR__DPE (1UL << 7) #define TITAN__PCHIP_PERROR__NDS (1UL << 8) #define TITAN__PCHIP_PERROR__IPTPR (1UL << 9) #define TITAN__PCHIP_PERROR__IPTPW (1UL << 10) #define TITAN__PCHIP_PERROR__ERRMASK (TITAN__PCHIP_PERROR__LOST | \ TITAN__PCHIP_PERROR__SERR | \ TITAN__PCHIP_PERROR__PERR | \ TITAN__PCHIP_PERROR__DCRTO | \ TITAN__PCHIP_PERROR__SGE | \ TITAN__PCHIP_PERROR__APE | \ TITAN__PCHIP_PERROR__TA | \ TITAN__PCHIP_PERROR__DPE | \ TITAN__PCHIP_PERROR__NDS | \ TITAN__PCHIP_PERROR__IPTPR | \ TITAN__PCHIP_PERROR__IPTPW) #define TITAN__PCHIP_PERROR__DAC (1UL << 47) #define TITAN__PCHIP_PERROR__MWIN (1UL << 48) #define TITAN__PCHIP_PERROR__CMD__S (52) #define TITAN__PCHIP_PERROR__CMD__M (0x0f) #define TITAN__PCHIP_PERROR__ADDR__S (14) #define TITAN__PCHIP_PERROR__ADDR__M (0x1fffffffful) if (!(perror & TITAN__PCHIP_PERROR__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; cmd = EXTRACT(perror, TITAN__PCHIP_PERROR__CMD); addr = EXTRACT(perror, TITAN__PCHIP_PERROR__ADDR) << 2; /* * Initializing the BIOS on a video card on a bus without * a south bridge (subtractive decode agent) can result in * master aborts as the BIOS probes the capabilities of the * card. XFree86 does such initialization. If the error * is a master abort (No DevSel as PCI Master) and the command * is an I/O read or write below the address where we start * assigning PCI I/O spaces (SRM uses 0x1000), then mark the * error as dismissable so starting XFree86 doesn't result * in a series of uncorrectable errors being reported. 
Also * dismiss master aborts to VGA frame buffer space * (0xA0000 - 0xC0000) and legacy BIOS space (0xC0000 - 0x100000) * for the same reason. * * Also mark the error dismissible if it looks like the right * error but only the Lost bit is set. Since the BIOS initialization * can cause multiple master aborts and the error interrupt can * be handled on a different CPU than the BIOS code is run on, * it is possible for a second master abort to occur between the * time the PALcode reads PERROR and the time it writes PERROR * to acknowledge the error. If this timing happens, a second * error will be signalled after the first, and if no additional * errors occur, will look like a Lost error with no additional * errors on the same transaction as the previous error. */ if (((perror & TITAN__PCHIP_PERROR__NDS) || ((perror & TITAN__PCHIP_PERROR__ERRMASK) == TITAN__PCHIP_PERROR__LOST)) && ((((cmd & 0xE) == 2) && (addr < 0x1000)) || (((cmd & 0xE) == 6) && (addr >= 0xA0000) && (addr < 0x100000)))) { status = MCHK_DISPOSITION_DISMISS; } #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; printk("%s PChip %d %cPERROR: %016llx\n", err_print_prefix, which, port ? 
'A' : 'G', perror); if (perror & TITAN__PCHIP_PERROR__IPTPW) printk("%s Invalid Peer-to-Peer Write\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__IPTPR) printk("%s Invalid Peer-to-Peer Read\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__NDS) printk("%s No DEVSEL as PCI Master [Master Abort]\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__DPE) printk("%s Data Parity Error\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__TA) printk("%s Target Abort\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__APE) printk("%s Address Parity Error\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__SGE) printk("%s Scatter-Gather Error, Invalid PTE\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__DCRTO) printk("%s Delayed-Completion Retry Timeout\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__PERR) printk("%s PERR Asserted\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__SERR) printk("%s SERR Asserted\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__LOST) printk("%s Lost Error\n", err_print_prefix); printk("%s Command: 0x%x - %s\n" " Address: 0x%lx\n", err_print_prefix, cmd, perror_cmd[cmd], addr); if (perror & TITAN__PCHIP_PERROR__DAC) printk("%s Dual Address Cycle\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__MWIN) printk("%s Hit in Monster Window\n", err_print_prefix); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_agperror(int which, u64 agperror, int print) { int status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK int cmd, len; unsigned long addr; static const char * const agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)", "Write (low-priority)", "Write (high-priority)", "Reserved", "Reserved", "Flush", "Fence" }; #endif /* CONFIG_VERBOSE_MCHECK */ #define TITAN__PCHIP_AGPERROR__LOST (1UL << 0) #define TITAN__PCHIP_AGPERROR__LPQFULL (1UL << 1) #define TITAN__PCHIP_AGPERROR__HPQFULL (1UL << 2) #define TITAN__PCHIP_AGPERROR__RESCMD 
(1UL << 3) #define TITAN__PCHIP_AGPERROR__IPTE (1UL << 4) #define TITAN__PCHIP_AGPERROR__PTP (1UL << 5) #define TITAN__PCHIP_AGPERROR__NOWINDOW (1UL << 6) #define TITAN__PCHIP_AGPERROR__ERRMASK (TITAN__PCHIP_AGPERROR__LOST | \ TITAN__PCHIP_AGPERROR__LPQFULL | \ TITAN__PCHIP_AGPERROR__HPQFULL | \ TITAN__PCHIP_AGPERROR__RESCMD | \ TITAN__PCHIP_AGPERROR__IPTE | \ TITAN__PCHIP_AGPERROR__PTP | \ TITAN__PCHIP_AGPERROR__NOWINDOW) #define TITAN__PCHIP_AGPERROR__DAC (1UL << 48) #define TITAN__PCHIP_AGPERROR__MWIN (1UL << 49) #define TITAN__PCHIP_AGPERROR__FENCE (1UL << 59) #define TITAN__PCHIP_AGPERROR__CMD__S (50) #define TITAN__PCHIP_AGPERROR__CMD__M (0x07) #define TITAN__PCHIP_AGPERROR__ADDR__S (15) #define TITAN__PCHIP_AGPERROR__ADDR__M (0xffffffffUL) #define TITAN__PCHIP_AGPERROR__LEN__S (53) #define TITAN__PCHIP_AGPERROR__LEN__M (0x3f) if (!(agperror & TITAN__PCHIP_AGPERROR__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; cmd = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__CMD); addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3; len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN); printk("%s PChip %d AGPERROR: %016llx\n", err_print_prefix, which, agperror); if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW) printk("%s No Window\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__PTP) printk("%s Peer-to-Peer set\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__IPTE) printk("%s Invalid PTE\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__RESCMD) printk("%s Reserved Command\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__HPQFULL) printk("%s HP Transaction Received while Queue Full\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__LPQFULL) printk("%s LP Transaction Received while Queue Full\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__LOST) printk("%s Lost Error\n", err_print_prefix); printk("%s Command: 0x%x - %s, %d Quadwords%s\n" " Address: 
0x%lx\n", err_print_prefix, cmd, agperror_cmd[cmd], len, (agperror & TITAN__PCHIP_AGPERROR__FENCE) ? ", FENCE" : "", addr); if (agperror & TITAN__PCHIP_AGPERROR__DAC) printk("%s Dual Address Cycle\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__MWIN) printk("%s Hit in Monster Window\n", err_print_prefix); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_chip(int which, u64 serror, u64 gperror, u64 aperror, u64 agperror, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; status |= titan_parse_p_serror(which, serror, print); status |= titan_parse_p_perror(which, 0, gperror, print); status |= titan_parse_p_perror(which, 1, aperror, print); status |= titan_parse_p_agperror(which, agperror, print); return status; } int titan_process_logout_frame(struct el_common *mchk_header, int print) { struct el_TITAN_sysdata_mcheck *tmchk = (struct el_TITAN_sysdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); int status = MCHK_DISPOSITION_UNKNOWN_ERROR; status |= titan_parse_c_misc(tmchk->c_misc, print); status |= titan_parse_p_chip(0, tmchk->p0_serror, tmchk->p0_gperror, tmchk->p0_aperror, tmchk->p0_agperror, print); status |= titan_parse_p_chip(1, tmchk->p1_serror, tmchk->p1_gperror, tmchk->p1_aperror, tmchk->p1_agperror, print); return status; } void titan_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_common *mchk_header = (struct el_common *)la_ptr; struct el_TITAN_sysdata_mcheck *tmchk = (struct el_TITAN_sysdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); u64 irqmask; /* * Mask of Titan interrupt sources which are reported as machine checks * * 63 - CChip Error * 62 - PChip 0 H_Error * 61 - PChip 1 H_Error * 60 - PChip 0 C_Error * 59 - PChip 1 C_Error */ #define TITAN_MCHECK_INTERRUPT_MASK 0xF800000000000000UL /* * Sync the processor */ mb(); draina(); /* * Only handle system errors here */ if ((vector != SCB_Q_SYSMCHK) && (vector != SCB_Q_SYSERR)) { 
ev6_machine_check(vector, la_ptr); return; } /* * It's a system error, handle it here * * The PALcode has already cleared the error, so just parse it */ /* * Parse the logout frame without printing first. If the only error(s) * found are classified as "dismissable", then just dismiss them and * don't print any message */ if (titan_process_logout_frame(mchk_header, 0) != MCHK_DISPOSITION_DISMISS) { char *saved_err_prefix = err_print_prefix; err_print_prefix = KERN_CRIT; /* * Either a nondismissable error was detected or no * recognized error was detected in the logout frame * -- report the error in either case */ printk("%s" "*System %s Error (Vector 0x%x) reported on CPU %d:\n", err_print_prefix, (vector == SCB_Q_SYSERR)?"Correctable":"Uncorrectable", (unsigned int)vector, (int)smp_processor_id()); #ifdef CONFIG_VERBOSE_MCHECK titan_process_logout_frame(mchk_header, alpha_verbose_mcheck); if (alpha_verbose_mcheck) dik_show_regs(get_irq_regs(), NULL); #endif /* CONFIG_VERBOSE_MCHECK */ err_print_prefix = saved_err_prefix; /* * Convert any pending interrupts which report as system * machine checks to interrupts */ irqmask = tmchk->c_dirx & TITAN_MCHECK_INTERRUPT_MASK; titan_dispatch_irqs(irqmask); } /* * Release the logout frame */ wrmces(0x7); mb(); } /* * Subpacket Annotations */ static char *el_titan_pchip0_extended_annotation[] = { "Subpacket Header", "P0_SCTL", "P0_SERREN", "P0_APCTL", "P0_APERREN", "P0_AGPERREN", "P0_ASPRST", "P0_AWSBA0", "P0_AWSBA1", "P0_AWSBA2", "P0_AWSBA3", "P0_AWSM0", "P0_AWSM1", "P0_AWSM2", "P0_AWSM3", "P0_ATBA0", "P0_ATBA1", "P0_ATBA2", "P0_ATBA3", "P0_GPCTL", "P0_GPERREN", "P0_GSPRST", "P0_GWSBA0", "P0_GWSBA1", "P0_GWSBA2", "P0_GWSBA3", "P0_GWSM0", "P0_GWSM1", "P0_GWSM2", "P0_GWSM3", "P0_GTBA0", "P0_GTBA1", "P0_GTBA2", "P0_GTBA3", NULL }; static char *el_titan_pchip1_extended_annotation[] = { "Subpacket Header", "P1_SCTL", "P1_SERREN", "P1_APCTL", "P1_APERREN", "P1_AGPERREN", "P1_ASPRST", "P1_AWSBA0", "P1_AWSBA1", "P1_AWSBA2", 
"P1_AWSBA3", "P1_AWSM0", "P1_AWSM1", "P1_AWSM2", "P1_AWSM3", "P1_ATBA0", "P1_ATBA1", "P1_ATBA2", "P1_ATBA3", "P1_GPCTL", "P1_GPERREN", "P1_GSPRST", "P1_GWSBA0", "P1_GWSBA1", "P1_GWSBA2", "P1_GWSBA3", "P1_GWSM0", "P1_GWSM1", "P1_GWSM2", "P1_GWSM3", "P1_GTBA0", "P1_GTBA1", "P1_GTBA2", "P1_GTBA3", NULL }; static char *el_titan_memory_extended_annotation[] = { "Subpacket Header", "AAR0", "AAR1", "AAR2", "AAR3", "P0_SCTL", "P0_GPCTL", "P0_APCTL", "P1_SCTL", "P1_GPCTL", "P1_SCTL", NULL }; static struct el_subpacket_annotation el_titan_annotations[] = { SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED, 1, "Titan PChip 0 Extended Frame", el_titan_pchip0_extended_annotation), SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED, 1, "Titan PChip 1 Extended Frame", el_titan_pchip1_extended_annotation), SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED, 1, "Titan Memory Extended Frame", el_titan_memory_extended_annotation), SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__TERMINATION__TERMINATION, 1, "Termination Subpacket", NULL) }; static struct el_subpacket * el_process_regatta_subpacket(struct el_subpacket *header) { if (header->class != EL_CLASS__REGATTA_FAMILY) { printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", err_print_prefix, header->class, header->type); return NULL; } switch(header->type) { case EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME: case EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME: case EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME: case EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT: case EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT: printk("%s ** Occurred on CPU %d:\n", err_print_prefix, (int)header->by_type.regatta_frame.cpuid); privateer_process_logout_frame((struct el_common *) header->by_type.regatta_frame.data_start, 1); break; default: printk("%s ** REGATTA TYPE %d SUBPACKET\n", err_print_prefix, header->type); el_annotate_subpacket(header); break; } 
return (struct el_subpacket *)((unsigned long)header + header->length); } static struct el_subpacket_handler titan_subpacket_handler = SUBPACKET_HANDLER_INIT(EL_CLASS__REGATTA_FAMILY, el_process_regatta_subpacket); void __init titan_register_error_handlers(void) { size_t i; for (i = 0; i < ARRAY_SIZE (el_titan_annotations); i++) cdl_register_subpacket_annotation(&el_titan_annotations[i]); cdl_register_subpacket_handler(&titan_subpacket_handler); ev6_register_error_handlers(); } /* * Privateer */ static int privateer_process_680_frame(struct el_common *mchk_header, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK struct el_PRIVATEER_envdata_mcheck *emchk = (struct el_PRIVATEER_envdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); /* TODO - categorize errors, for now, no error */ if (!print) return status; /* TODO - decode instead of just dumping... */ printk("%s Summary Flags: %016llx\n" " CChip DIRx: %016llx\n" " System Management IR: %016llx\n" " CPU IR: %016llx\n" " Power Supply IR: %016llx\n" " LM78 Fault Status: %016llx\n" " System Doors: %016llx\n" " Temperature Warning: %016llx\n" " Fan Control: %016llx\n" " Fatal Power Down Code: %016llx\n", err_print_prefix, emchk->summary, emchk->c_dirx, emchk->smir, emchk->cpuir, emchk->psir, emchk->fault, emchk->sys_doors, emchk->temp_warn, emchk->fan_ctrl, emchk->code); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } int privateer_process_logout_frame(struct el_common *mchk_header, int print) { struct el_common_EV6_mcheck *ev6mchk = (struct el_common_EV6_mcheck *)mchk_header; int status = MCHK_DISPOSITION_UNKNOWN_ERROR; /* * Machine check codes */ #define PRIVATEER_MCHK__CORR_ECC 0x86 /* 630 */ #define PRIVATEER_MCHK__DC_TAG_PERR 0x9E /* 630 */ #define PRIVATEER_MCHK__PAL_BUGCHECK 0x8E /* 670 */ #define PRIVATEER_MCHK__OS_BUGCHECK 0x90 /* 670 */ #define PRIVATEER_MCHK__PROC_HRD_ERR 0x98 /* 670 */ #define PRIVATEER_MCHK__ISTREAM_CMOV_PRX 0xA0 /* 670 */ #define 
PRIVATEER_MCHK__ISTREAM_CMOV_FLT 0xA2 /* 670 */ #define PRIVATEER_MCHK__SYS_HRD_ERR 0x202 /* 660 */ #define PRIVATEER_MCHK__SYS_CORR_ERR 0x204 /* 620 */ #define PRIVATEER_MCHK__SYS_ENVIRON 0x206 /* 680 */ switch(ev6mchk->MCHK_Code) { /* * Vector 630 - Processor, Correctable */ case PRIVATEER_MCHK__CORR_ECC: case PRIVATEER_MCHK__DC_TAG_PERR: /* * Fall through to vector 670 for processing... */ /* * Vector 670 - Processor, Uncorrectable */ case PRIVATEER_MCHK__PAL_BUGCHECK: case PRIVATEER_MCHK__OS_BUGCHECK: case PRIVATEER_MCHK__PROC_HRD_ERR: case PRIVATEER_MCHK__ISTREAM_CMOV_PRX: case PRIVATEER_MCHK__ISTREAM_CMOV_FLT: status |= ev6_process_logout_frame(mchk_header, print); break; /* * Vector 620 - System, Correctable */ case PRIVATEER_MCHK__SYS_CORR_ERR: /* * Fall through to vector 660 for processing... */ /* * Vector 660 - System, Uncorrectable */ case PRIVATEER_MCHK__SYS_HRD_ERR: status |= titan_process_logout_frame(mchk_header, print); break; /* * Vector 680 - System, Environmental */ case PRIVATEER_MCHK__SYS_ENVIRON: /* System, Environmental */ status |= privateer_process_680_frame(mchk_header, print); break; /* * Unknown */ default: status |= MCHK_DISPOSITION_REPORT; if (print) { printk("%s** Unknown Error, frame follows\n", err_print_prefix); mchk_dump_logout_frame(mchk_header); } } return status; } void privateer_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_common *mchk_header = (struct el_common *)la_ptr; struct el_TITAN_sysdata_mcheck *tmchk = (struct el_TITAN_sysdata_mcheck *) (la_ptr + mchk_header->sys_offset); u64 irqmask; char *saved_err_prefix = err_print_prefix; #define PRIVATEER_680_INTERRUPT_MASK (0xE00UL) #define PRIVATEER_HOTPLUG_INTERRUPT_MASK (0xE00UL) /* * Sync the processor. */ mb(); draina(); /* * Only handle system events here. 
*/ if (vector != SCB_Q_SYSEVENT) return titan_machine_check(vector, la_ptr); /* * Report the event - System Events should be reported even if no * error is indicated since the event could indicate the return * to normal status. */ err_print_prefix = KERN_CRIT; printk("%s*System Event (Vector 0x%x) reported on CPU %d:\n", err_print_prefix, (unsigned int)vector, (int)smp_processor_id()); privateer_process_680_frame(mchk_header, 1); err_print_prefix = saved_err_prefix; /* * Convert any pending interrupts which report as 680 machine * checks to interrupts. */ irqmask = tmchk->c_dirx & PRIVATEER_680_INTERRUPT_MASK; /* * Dispatch the interrupt(s). */ titan_dispatch_irqs(irqmask); /* * Release the logout frame. */ wrmces(0x7); mb(); }
gpl-2.0
draekko/android_kernel_samsung_kylessopen
sound/pci/ctxfi/ctamixer.c
12708
9927
/** * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved. * * This source file is released under GPL v2 license (no other versions). * See the COPYING file included in the main directory of this source * distribution for the license terms and conditions. * * @File ctamixer.c * * @Brief * This file contains the implementation of the Audio Mixer * resource management object. * * @Author Liu Chun * @Date May 21 2008 * */ #include "ctamixer.h" #include "cthardware.h" #include <linux/slab.h> #define AMIXER_RESOURCE_NUM 256 #define SUM_RESOURCE_NUM 256 #define AMIXER_Y_IMMEDIATE 1 #define BLANK_SLOT 4094 static int amixer_master(struct rsc *rsc) { rsc->conj = 0; return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0]; } static int amixer_next_conj(struct rsc *rsc) { rsc->conj++; return container_of(rsc, struct amixer, rsc)->idx[rsc->conj]; } static int amixer_index(const struct rsc *rsc) { return container_of(rsc, struct amixer, rsc)->idx[rsc->conj]; } static int amixer_output_slot(const struct rsc *rsc) { return (amixer_index(rsc) << 4) + 0x4; } static struct rsc_ops amixer_basic_rsc_ops = { .master = amixer_master, .next_conj = amixer_next_conj, .index = amixer_index, .output_slot = amixer_output_slot, }; static int amixer_set_input(struct amixer *amixer, struct rsc *rsc) { struct hw *hw; hw = amixer->rsc.hw; hw->amixer_set_mode(amixer->rsc.ctrl_blk, AMIXER_Y_IMMEDIATE); amixer->input = rsc; if (!rsc) hw->amixer_set_x(amixer->rsc.ctrl_blk, BLANK_SLOT); else hw->amixer_set_x(amixer->rsc.ctrl_blk, rsc->ops->output_slot(rsc)); return 0; } /* y is a 14-bit immediate constant */ static int amixer_set_y(struct amixer *amixer, unsigned int y) { struct hw *hw; hw = amixer->rsc.hw; hw->amixer_set_y(amixer->rsc.ctrl_blk, y); return 0; } static int amixer_set_invalid_squash(struct amixer *amixer, unsigned int iv) { struct hw *hw; hw = amixer->rsc.hw; hw->amixer_set_iv(amixer->rsc.ctrl_blk, iv); return 0; } static int amixer_set_sum(struct amixer *amixer, 
struct sum *sum) { struct hw *hw; hw = amixer->rsc.hw; amixer->sum = sum; if (!sum) { hw->amixer_set_se(amixer->rsc.ctrl_blk, 0); } else { hw->amixer_set_se(amixer->rsc.ctrl_blk, 1); hw->amixer_set_sadr(amixer->rsc.ctrl_blk, sum->rsc.ops->index(&sum->rsc)); } return 0; } static int amixer_commit_write(struct amixer *amixer) { struct hw *hw; unsigned int index; int i; struct rsc *input; struct sum *sum; hw = amixer->rsc.hw; input = amixer->input; sum = amixer->sum; /* Program master and conjugate resources */ amixer->rsc.ops->master(&amixer->rsc); if (input) input->ops->master(input); if (sum) sum->rsc.ops->master(&sum->rsc); for (i = 0; i < amixer->rsc.msr; i++) { hw->amixer_set_dirty_all(amixer->rsc.ctrl_blk); if (input) { hw->amixer_set_x(amixer->rsc.ctrl_blk, input->ops->output_slot(input)); input->ops->next_conj(input); } if (sum) { hw->amixer_set_sadr(amixer->rsc.ctrl_blk, sum->rsc.ops->index(&sum->rsc)); sum->rsc.ops->next_conj(&sum->rsc); } index = amixer->rsc.ops->output_slot(&amixer->rsc); hw->amixer_commit_write(hw, index, amixer->rsc.ctrl_blk); amixer->rsc.ops->next_conj(&amixer->rsc); } amixer->rsc.ops->master(&amixer->rsc); if (input) input->ops->master(input); if (sum) sum->rsc.ops->master(&sum->rsc); return 0; } static int amixer_commit_raw_write(struct amixer *amixer) { struct hw *hw; unsigned int index; hw = amixer->rsc.hw; index = amixer->rsc.ops->output_slot(&amixer->rsc); hw->amixer_commit_write(hw, index, amixer->rsc.ctrl_blk); return 0; } static int amixer_get_y(struct amixer *amixer) { struct hw *hw; hw = amixer->rsc.hw; return hw->amixer_get_y(amixer->rsc.ctrl_blk); } static int amixer_setup(struct amixer *amixer, struct rsc *input, unsigned int scale, struct sum *sum) { amixer_set_input(amixer, input); amixer_set_y(amixer, scale); amixer_set_sum(amixer, sum); amixer_commit_write(amixer); return 0; } static struct amixer_rsc_ops amixer_ops = { .set_input = amixer_set_input, .set_invalid_squash = amixer_set_invalid_squash, .set_scale = 
amixer_set_y, .set_sum = amixer_set_sum, .commit_write = amixer_commit_write, .commit_raw_write = amixer_commit_raw_write, .setup = amixer_setup, .get_scale = amixer_get_y, }; static int amixer_rsc_init(struct amixer *amixer, const struct amixer_desc *desc, struct amixer_mgr *mgr) { int err; err = rsc_init(&amixer->rsc, amixer->idx[0], AMIXER, desc->msr, mgr->mgr.hw); if (err) return err; /* Set amixer specific operations */ amixer->rsc.ops = &amixer_basic_rsc_ops; amixer->ops = &amixer_ops; amixer->input = NULL; amixer->sum = NULL; amixer_setup(amixer, NULL, 0, NULL); return 0; } static int amixer_rsc_uninit(struct amixer *amixer) { amixer_setup(amixer, NULL, 0, NULL); rsc_uninit(&amixer->rsc); amixer->ops = NULL; amixer->input = NULL; amixer->sum = NULL; return 0; } static int get_amixer_rsc(struct amixer_mgr *mgr, const struct amixer_desc *desc, struct amixer **ramixer) { int err, i; unsigned int idx; struct amixer *amixer; unsigned long flags; *ramixer = NULL; /* Allocate mem for amixer resource */ amixer = kzalloc(sizeof(*amixer), GFP_KERNEL); if (!amixer) return -ENOMEM; /* Check whether there are sufficient * amixer resources to meet request. 
*/ err = 0; spin_lock_irqsave(&mgr->mgr_lock, flags); for (i = 0; i < desc->msr; i++) { err = mgr_get_resource(&mgr->mgr, 1, &idx); if (err) break; amixer->idx[i] = idx; } spin_unlock_irqrestore(&mgr->mgr_lock, flags); if (err) { printk(KERN_ERR "ctxfi: Can't meet AMIXER resource request!\n"); goto error; } err = amixer_rsc_init(amixer, desc, mgr); if (err) goto error; *ramixer = amixer; return 0; error: spin_lock_irqsave(&mgr->mgr_lock, flags); for (i--; i >= 0; i--) mgr_put_resource(&mgr->mgr, 1, amixer->idx[i]); spin_unlock_irqrestore(&mgr->mgr_lock, flags); kfree(amixer); return err; } static int put_amixer_rsc(struct amixer_mgr *mgr, struct amixer *amixer) { unsigned long flags; int i; spin_lock_irqsave(&mgr->mgr_lock, flags); for (i = 0; i < amixer->rsc.msr; i++) mgr_put_resource(&mgr->mgr, 1, amixer->idx[i]); spin_unlock_irqrestore(&mgr->mgr_lock, flags); amixer_rsc_uninit(amixer); kfree(amixer); return 0; } int amixer_mgr_create(void *hw, struct amixer_mgr **ramixer_mgr) { int err; struct amixer_mgr *amixer_mgr; *ramixer_mgr = NULL; amixer_mgr = kzalloc(sizeof(*amixer_mgr), GFP_KERNEL); if (!amixer_mgr) return -ENOMEM; err = rsc_mgr_init(&amixer_mgr->mgr, AMIXER, AMIXER_RESOURCE_NUM, hw); if (err) goto error; spin_lock_init(&amixer_mgr->mgr_lock); amixer_mgr->get_amixer = get_amixer_rsc; amixer_mgr->put_amixer = put_amixer_rsc; *ramixer_mgr = amixer_mgr; return 0; error: kfree(amixer_mgr); return err; } int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr) { rsc_mgr_uninit(&amixer_mgr->mgr); kfree(amixer_mgr); return 0; } /* SUM resource management */ static int sum_master(struct rsc *rsc) { rsc->conj = 0; return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0]; } static int sum_next_conj(struct rsc *rsc) { rsc->conj++; return container_of(rsc, struct sum, rsc)->idx[rsc->conj]; } static int sum_index(const struct rsc *rsc) { return container_of(rsc, struct sum, rsc)->idx[rsc->conj]; } static int sum_output_slot(const struct rsc *rsc) { return 
(sum_index(rsc) << 4) + 0xc; } static struct rsc_ops sum_basic_rsc_ops = { .master = sum_master, .next_conj = sum_next_conj, .index = sum_index, .output_slot = sum_output_slot, }; static int sum_rsc_init(struct sum *sum, const struct sum_desc *desc, struct sum_mgr *mgr) { int err; err = rsc_init(&sum->rsc, sum->idx[0], SUM, desc->msr, mgr->mgr.hw); if (err) return err; sum->rsc.ops = &sum_basic_rsc_ops; return 0; } static int sum_rsc_uninit(struct sum *sum) { rsc_uninit(&sum->rsc); return 0; } static int get_sum_rsc(struct sum_mgr *mgr, const struct sum_desc *desc, struct sum **rsum) { int err, i; unsigned int idx; struct sum *sum; unsigned long flags; *rsum = NULL; /* Allocate mem for sum resource */ sum = kzalloc(sizeof(*sum), GFP_KERNEL); if (!sum) return -ENOMEM; /* Check whether there are sufficient sum resources to meet request. */ err = 0; spin_lock_irqsave(&mgr->mgr_lock, flags); for (i = 0; i < desc->msr; i++) { err = mgr_get_resource(&mgr->mgr, 1, &idx); if (err) break; sum->idx[i] = idx; } spin_unlock_irqrestore(&mgr->mgr_lock, flags); if (err) { printk(KERN_ERR "ctxfi: Can't meet SUM resource request!\n"); goto error; } err = sum_rsc_init(sum, desc, mgr); if (err) goto error; *rsum = sum; return 0; error: spin_lock_irqsave(&mgr->mgr_lock, flags); for (i--; i >= 0; i--) mgr_put_resource(&mgr->mgr, 1, sum->idx[i]); spin_unlock_irqrestore(&mgr->mgr_lock, flags); kfree(sum); return err; } static int put_sum_rsc(struct sum_mgr *mgr, struct sum *sum) { unsigned long flags; int i; spin_lock_irqsave(&mgr->mgr_lock, flags); for (i = 0; i < sum->rsc.msr; i++) mgr_put_resource(&mgr->mgr, 1, sum->idx[i]); spin_unlock_irqrestore(&mgr->mgr_lock, flags); sum_rsc_uninit(sum); kfree(sum); return 0; } int sum_mgr_create(void *hw, struct sum_mgr **rsum_mgr) { int err; struct sum_mgr *sum_mgr; *rsum_mgr = NULL; sum_mgr = kzalloc(sizeof(*sum_mgr), GFP_KERNEL); if (!sum_mgr) return -ENOMEM; err = rsc_mgr_init(&sum_mgr->mgr, SUM, SUM_RESOURCE_NUM, hw); if (err) goto error; 
spin_lock_init(&sum_mgr->mgr_lock); sum_mgr->get_sum = get_sum_rsc; sum_mgr->put_sum = put_sum_rsc; *rsum_mgr = sum_mgr; return 0; error: kfree(sum_mgr); return err; } int sum_mgr_destroy(struct sum_mgr *sum_mgr) { rsc_mgr_uninit(&sum_mgr->mgr); kfree(sum_mgr); return 0; }
gpl-2.0
tommytarts/QuantumKernelM8-GPe
fs/isofs/util.c
13476
2441
/* * linux/fs/isofs/util.c */ #include "isofs.h" /* * We have to convert from a MM/DD/YY format to the Unix ctime format. * We have to take into account leap years and all of that good stuff. * Unfortunately, the kernel does not have the information on hand to * take into account daylight savings time, but it shouldn't matter. * The time stored should be localtime (with or without DST in effect), * and the timezone offset should hold the offset required to get back * to GMT. Thus we should always be correct. */ int iso_date(char * p, int flag) { int year, month, day, hour, minute, second, tz; int crtime, days, i; year = p[0] - 70; month = p[1]; day = p[2]; hour = p[3]; minute = p[4]; second = p[5]; if (flag == 0) tz = p[6]; /* High sierra has no time zone */ else tz = 0; if (year < 0) { crtime = 0; } else { int monlen[12] = {31,28,31,30,31,30,31,31,30,31,30,31}; days = year * 365; if (year > 2) days += (year+1) / 4; for (i = 1; i < month; i++) days += monlen[i-1]; if (((year+2) % 4) == 0 && month > 2) days++; days += day - 1; crtime = ((((days * 24) + hour) * 60 + minute) * 60) + second; /* sign extend */ if (tz & 0x80) tz |= (-1 << 8); /* * The timezone offset is unreliable on some disks, * so we make a sanity check. In no case is it ever * more than 13 hours from GMT, which is 52*15min. * The time is always stored in localtime with the * timezone offset being what get added to GMT to * get to localtime. Thus we need to subtract the offset * to get to true GMT, which is what we store the time * as internally. On the local system, the user may set * their timezone any way they wish, of course, so GMT * gets converted back to localtime on the receiving * system. * * NOTE: mkisofs in versions prior to mkisofs-1.10 had * the sign wrong on the timezone offset. This has now * been corrected there too, but if you are getting screwy * results this may be the explanation. 
If enough people * complain, a user configuration option could be added * to add the timezone offset in with the wrong sign * for 'compatibility' with older discs, but I cannot see how * it will matter that much. * * Thanks to kuhlmav@elec.canterbury.ac.nz (Volker Kuhlmann) * for pointing out the sign error. */ if (-52 <= tz && tz <= 52) crtime -= tz * 15 * 60; } return crtime; }
gpl-2.0
tanxjian/gec2440-linux
arch/s390/kernel/ptrace.c
165
24877
/* * arch/s390/kernel/ptrace.c * * S390 version * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), * Martin Schwidefsky (schwidefsky@de.ibm.com) * * Based on PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Derived from "arch/m68k/kernel/ptrace.c" * Copyright (C) 1994 by Hamish Macdonald * Taken from linux/kernel/ptrace.c and modified for M680x0. * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds * * Modified by Cort Dougan (cort@cs.nmt.edu) * * * This file is subject to the terms and conditions of the GNU General * Public License. See the file README.legal in the main directory of * this archive for more details. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/security.h> #include <linux/audit.h> #include <linux/signal.h> #include <linux/elf.h> #include <linux/regset.h> #include <linux/tracehook.h> #include <asm/segment.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include "entry.h" #ifdef CONFIG_COMPAT #include "compat_ptrace.h" #endif enum s390_regset { REGSET_GENERAL, REGSET_FP, }; static void FixPerRegisters(struct task_struct *task) { struct pt_regs *regs; per_struct *per_info; regs = task_pt_regs(task); per_info = (per_struct *) &task->thread.per_info; per_info->control_regs.bits.em_instruction_fetch = per_info->single_step | per_info->instruction_fetch; if (per_info->single_step) { per_info->control_regs.bits.starting_addr = 0; #ifdef CONFIG_COMPAT if (test_thread_flag(TIF_31BIT)) per_info->control_regs.bits.ending_addr = 0x7fffffffUL; else #endif per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN; } else { 
per_info->control_regs.bits.starting_addr = per_info->starting_addr; per_info->control_regs.bits.ending_addr = per_info->ending_addr; } /* * if any of the control reg tracing bits are on * we switch on per in the psw */ if (per_info->control_regs.words.cr[0] & PER_EM_MASK) regs->psw.mask |= PSW_MASK_PER; else regs->psw.mask &= ~PSW_MASK_PER; if (per_info->control_regs.bits.em_storage_alteration) per_info->control_regs.bits.storage_alt_space_ctl = 1; else per_info->control_regs.bits.storage_alt_space_ctl = 0; } void user_enable_single_step(struct task_struct *task) { task->thread.per_info.single_step = 1; FixPerRegisters(task); } void user_disable_single_step(struct task_struct *task) { task->thread.per_info.single_step = 0; FixPerRegisters(task); } /* * Called by kernel/ptrace.c when detaching.. * * Make sure single step bits etc are not set. */ void ptrace_disable(struct task_struct *child) { /* make sure the single step bit is not set. */ user_disable_single_step(child); } #ifndef CONFIG_64BIT # define __ADDR_MASK 3 #else # define __ADDR_MASK 7 #endif /* * Read the word at offset addr from the user area of a process. The * trouble here is that the information is littered over different * locations. The process registers are found on the kernel stack, * the floating point stuff and the trace settings are stored in * the task structure. In addition the different structures in * struct user contain pad bytes that should be read as zeroes. * Lovely... */ static unsigned long __peek_user(struct task_struct *child, addr_t addr) { struct user *dummy = NULL; addr_t offset, tmp; if (addr < (addr_t) &dummy->regs.acrs) { /* * psw and gprs are stored on the stack */ tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); if (addr == (addr_t) &dummy->regs.psw.mask) /* Remove per bit from user psw. 
*/ tmp &= ~PSW_MASK_PER; } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.acrs; #ifdef CONFIG_64BIT /* * Very special case: old & broken 64 bit gdb reading * from acrs[15]. Result is a 64 bit value. Read the * 32 bit acrs[15] value and shift it by 32. Sick... */ if (addr == (addr_t) &dummy->regs.acrs[15]) tmp = ((unsigned long) child->thread.acrs[15]) << 32; else #endif tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { /* * orig_gpr2 is stored on the kernel stack */ tmp = (addr_t) task_pt_regs(child)->orig_gpr2; } else if (addr < (addr_t) &dummy->regs.fp_regs) { /* * prevent reads of padding hole between * orig_gpr2 and fp_regs on s390. */ tmp = 0; } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.fp_regs; tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset); if (addr == (addr_t) &dummy->regs.fp_regs.fpc) tmp &= (unsigned long) FPC_VALID_MASK << (BITS_PER_LONG - 32); } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* * per_info is found in the thread structure */ offset = addr - (addr_t) &dummy->regs.per_info; tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset); } else tmp = 0; return tmp; } static int peek_user(struct task_struct *child, addr_t addr, addr_t data) { addr_t tmp, mask; /* * Stupid gdb peeks/pokes the access registers in 64 bit with * an alignment of 4. Programmers from hell... 
*/ mask = __ADDR_MASK; #ifdef CONFIG_64BIT if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) mask = 3; #endif if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) return -EIO; tmp = __peek_user(child, addr); return put_user(tmp, (addr_t __user *) data); } /* * Write a word to the user area of a process at location addr. This * operation does have an additional problem compared to peek_user. * Stores to the program status word and on the floating point * control register needs to get checked for validity. */ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) { struct user *dummy = NULL; addr_t offset; if (addr < (addr_t) &dummy->regs.acrs) { /* * psw and gprs are stored on the stack */ if (addr == (addr_t) &dummy->regs.psw.mask && #ifdef CONFIG_COMPAT data != PSW_MASK_MERGE(psw_user32_bits, data) && #endif data != PSW_MASK_MERGE(psw_user_bits, data)) /* Invalid psw mask. */ return -EINVAL; #ifndef CONFIG_64BIT if (addr == (addr_t) &dummy->regs.psw.addr) /* I'd like to reject addresses without the high order bit but older gdb's rely on it */ data |= PSW_ADDR_AMODE; #endif *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.acrs; #ifdef CONFIG_64BIT /* * Very special case: old & broken 64 bit gdb writing * to acrs[15] with a 64 bit value. Ignore the lower * half of the value and write the upper 32 bit to * acrs[15]. Sick... 
*/ if (addr == (addr_t) &dummy->regs.acrs[15]) child->thread.acrs[15] = (unsigned int) (data >> 32); else #endif *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { /* * orig_gpr2 is stored on the kernel stack */ task_pt_regs(child)->orig_gpr2 = data; } else if (addr < (addr_t) &dummy->regs.fp_regs) { /* * prevent writes of padding hole between * orig_gpr2 and fp_regs on s390. */ return 0; } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ if (addr == (addr_t) &dummy->regs.fp_regs.fpc && (data & ~((unsigned long) FPC_VALID_MASK << (BITS_PER_LONG - 32))) != 0) return -EINVAL; offset = addr - (addr_t) &dummy->regs.fp_regs; *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data; } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* * per_info is found in the thread structure */ offset = addr - (addr_t) &dummy->regs.per_info; *(addr_t *)((addr_t) &child->thread.per_info + offset) = data; } FixPerRegisters(child); return 0; } static int poke_user(struct task_struct *child, addr_t addr, addr_t data) { addr_t mask; /* * Stupid gdb peeks/pokes the access registers in 64 bit with * an alignment of 4. Programmers from hell indeed... */ mask = __ADDR_MASK; #ifdef CONFIG_64BIT if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) mask = 3; #endif if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) return -EIO; return __poke_user(child, addr, data); } long arch_ptrace(struct task_struct *child, long request, long addr, long data) { ptrace_area parea; int copied, ret; switch (request) { case PTRACE_PEEKTEXT: case PTRACE_PEEKDATA: /* Remove high order bit from address (only for 31 bit). */ addr &= PSW_ADDR_INSN; /* read word at location addr. */ return generic_ptrace_peekdata(child, addr, data); case PTRACE_PEEKUSR: /* read the word at location addr in the USER area. 
*/ return peek_user(child, addr, data); case PTRACE_POKETEXT: case PTRACE_POKEDATA: /* Remove high order bit from address (only for 31 bit). */ addr &= PSW_ADDR_INSN; /* write the word at location addr. */ return generic_ptrace_pokedata(child, addr, data); case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ return poke_user(child, addr, data); case PTRACE_PEEKUSR_AREA: case PTRACE_POKEUSR_AREA: if (copy_from_user(&parea, (void __force __user *) addr, sizeof(parea))) return -EFAULT; addr = parea.kernel_addr; data = parea.process_addr; copied = 0; while (copied < parea.len) { if (request == PTRACE_PEEKUSR_AREA) ret = peek_user(child, addr, data); else { addr_t utmp; if (get_user(utmp, (addr_t __force __user *) data)) return -EFAULT; ret = poke_user(child, addr, utmp); } if (ret) return ret; addr += sizeof(unsigned long); data += sizeof(unsigned long); copied += sizeof(unsigned long); } return 0; } return ptrace_request(child, request, addr, data); } #ifdef CONFIG_COMPAT /* * Now the fun part starts... a 31 bit program running in the * 31 bit emulation tracing another program. PTRACE_PEEKTEXT, * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy * to handle, the difference to the 64 bit versions of the requests * is that the access is done in multiples of 4 byte instead of * 8 bytes (sizeof(unsigned long) on 31/64 bit). * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA, * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program * is a 31 bit program too, the content of struct user can be * emulated. A 31 bit program peeking into the struct user of * a 64 bit program is a no-no. */ /* * Same as peek_user but for a 31 bit program. 
*/ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) { struct user32 *dummy32 = NULL; per_struct32 *dummy_per32 = NULL; addr_t offset; __u32 tmp; if (addr < (addr_t) &dummy32->regs.acrs) { /* * psw and gprs are stored on the stack */ if (addr == (addr_t) &dummy32->regs.psw.mask) { /* Fake a 31 bit psw mask. */ tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32); tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp); } else if (addr == (addr_t) &dummy32->regs.psw.addr) { /* Fake a 31 bit psw address. */ tmp = (__u32) task_pt_regs(child)->psw.addr | PSW32_ADDR_AMODE31; } else { /* gpr 0-15 */ tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw + addr*2 + 4); } } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy32->regs.acrs; tmp = *(__u32*)((addr_t) &child->thread.acrs + offset); } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { /* * orig_gpr2 is stored on the kernel stack */ tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4); } else if (addr < (addr_t) &dummy32->regs.fp_regs) { /* * prevent reads of padding hole between * orig_gpr2 and fp_regs on s390. */ tmp = 0; } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ offset = addr - (addr_t) &dummy32->regs.fp_regs; tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset); } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { /* * per_info is found in the thread structure */ offset = addr - (addr_t) &dummy32->regs.per_info; /* This is magic. See per_struct and per_struct32. 
*/ if ((offset >= (addr_t) &dummy_per32->control_regs && offset < (addr_t) (&dummy_per32->control_regs + 1)) || (offset >= (addr_t) &dummy_per32->starting_addr && offset <= (addr_t) &dummy_per32->ending_addr) || offset == (addr_t) &dummy_per32->lowcore.words.address) offset = offset*2 + 4; else offset = offset*2; tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset); } else tmp = 0; return tmp; } static int peek_user_compat(struct task_struct *child, addr_t addr, addr_t data) { __u32 tmp; if (!test_thread_flag(TIF_31BIT) || (addr & 3) || addr > sizeof(struct user) - 3) return -EIO; tmp = __peek_user_compat(child, addr); return put_user(tmp, (__u32 __user *) data); } /* * Same as poke_user but for a 31 bit program. */ static int __poke_user_compat(struct task_struct *child, addr_t addr, addr_t data) { struct user32 *dummy32 = NULL; per_struct32 *dummy_per32 = NULL; __u32 tmp = (__u32) data; addr_t offset; if (addr < (addr_t) &dummy32->regs.acrs) { /* * psw, gprs, acrs and orig_gpr2 are stored on the stack */ if (addr == (addr_t) &dummy32->regs.psw.mask) { /* Build a 64 bit psw mask from 31 bit mask. */ if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp)) /* Invalid psw mask. */ return -EINVAL; task_pt_regs(child)->psw.mask = PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32); } else if (addr == (addr_t) &dummy32->regs.psw.addr) { /* Build a 64 bit psw address from 31 bit address. 
*/ task_pt_regs(child)->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; } else { /* gpr 0-15 */ *(__u32*)((addr_t) &task_pt_regs(child)->psw + addr*2 + 4) = tmp; } } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy32->regs.acrs; *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { /* * orig_gpr2 is stored on the kernel stack */ *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; } else if (addr < (addr_t) &dummy32->regs.fp_regs) { /* * prevent writess of padding hole between * orig_gpr2 and fp_regs on s390. */ return 0; } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ if (addr == (addr_t) &dummy32->regs.fp_regs.fpc && (tmp & ~FPC_VALID_MASK) != 0) /* Invalid floating point control. */ return -EINVAL; offset = addr - (addr_t) &dummy32->regs.fp_regs; *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp; } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { /* * per_info is found in the thread structure. */ offset = addr - (addr_t) &dummy32->regs.per_info; /* * This is magic. See per_struct and per_struct32. * By incident the offsets in per_struct are exactly * twice the offsets in per_struct32 for all fields. * The 8 byte fields need special handling though, * because the second half (bytes 4-7) is needed and * not the first half. 
*/ if ((offset >= (addr_t) &dummy_per32->control_regs && offset < (addr_t) (&dummy_per32->control_regs + 1)) || (offset >= (addr_t) &dummy_per32->starting_addr && offset <= (addr_t) &dummy_per32->ending_addr) || offset == (addr_t) &dummy_per32->lowcore.words.address) offset = offset*2 + 4; else offset = offset*2; *(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp; } FixPerRegisters(child); return 0; } static int poke_user_compat(struct task_struct *child, addr_t addr, addr_t data) { if (!test_thread_flag(TIF_31BIT) || (addr & 3) || addr > sizeof(struct user32) - 3) return -EIO; return __poke_user_compat(child, addr, data); } long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { unsigned long addr = caddr; unsigned long data = cdata; ptrace_area_emu31 parea; int copied, ret; switch (request) { case PTRACE_PEEKUSR: /* read the word at location addr in the USER area. */ return peek_user_compat(child, addr, data); case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ return poke_user_compat(child, addr, data); case PTRACE_PEEKUSR_AREA: case PTRACE_POKEUSR_AREA: if (copy_from_user(&parea, (void __force __user *) addr, sizeof(parea))) return -EFAULT; addr = parea.kernel_addr; data = parea.process_addr; copied = 0; while (copied < parea.len) { if (request == PTRACE_PEEKUSR_AREA) ret = peek_user_compat(child, addr, data); else { __u32 utmp; if (get_user(utmp, (__u32 __force __user *) data)) return -EFAULT; ret = poke_user_compat(child, addr, utmp); } if (ret) return ret; addr += sizeof(unsigned int); data += sizeof(unsigned int); copied += sizeof(unsigned int); } return 0; } return compat_ptrace_request(child, request, addr, data); } #endif asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) { long ret; /* * The sysc_tracesys code in entry.S stored the system * call number to gprs[2]. 
*/ ret = regs->gprs[2]; if (test_thread_flag(TIF_SYSCALL_TRACE) && (tracehook_report_syscall_entry(regs) || regs->gprs[2] >= NR_syscalls)) { /* * Tracing decided this syscall should not happen or the * debugger stored an invalid system call number. Skip * the system call and the system call restart handling. */ regs->svcnr = 0; ret = -1; } if (unlikely(current->audit_context)) audit_syscall_entry(test_thread_flag(TIF_31BIT) ? AUDIT_ARCH_S390 : AUDIT_ARCH_S390X, regs->gprs[2], regs->orig_gpr2, regs->gprs[3], regs->gprs[4], regs->gprs[5]); return ret; } asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) { if (unlikely(current->audit_context)) audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), regs->gprs[2]); if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, 0); } /* * user_regset definitions. */ static int s390_regs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { if (target == current) save_access_regs(target->thread.acrs); if (kbuf) { unsigned long *k = kbuf; while (count > 0) { *k++ = __peek_user(target, pos); count -= sizeof(*k); pos += sizeof(*k); } } else { unsigned long __user *u = ubuf; while (count > 0) { if (__put_user(__peek_user(target, pos), u++)) return -EFAULT; count -= sizeof(*u); pos += sizeof(*u); } } return 0; } static int s390_regs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int rc = 0; if (target == current) save_access_regs(target->thread.acrs); if (kbuf) { const unsigned long *k = kbuf; while (count > 0 && !rc) { rc = __poke_user(target, pos, *k++); count -= sizeof(*k); pos += sizeof(*k); } } else { const unsigned long __user *u = ubuf; while (count > 0 && !rc) { unsigned long word; rc = __get_user(word, u++); if (rc) break; rc = __poke_user(target, pos, word); count -= sizeof(*u); pos += sizeof(*u); } } if (rc == 0 && 
target == current) restore_access_regs(target->thread.acrs); return rc; } static int s390_fpregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { if (target == current) save_fp_regs(&target->thread.fp_regs); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.fp_regs, 0, -1); } static int s390_fpregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int rc = 0; if (target == current) save_fp_regs(&target->thread.fp_regs); /* If setting FPC, must validate it first. */ if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) { u32 fpc[2] = { target->thread.fp_regs.fpc, 0 }; rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc, 0, offsetof(s390_fp_regs, fprs)); if (rc) return rc; if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0) return -EINVAL; target->thread.fp_regs.fpc = fpc[0]; } if (rc == 0 && count > 0) rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, target->thread.fp_regs.fprs, offsetof(s390_fp_regs, fprs), -1); if (rc == 0 && target == current) restore_fp_regs(&target->thread.fp_regs); return rc; } static const struct user_regset s390_regsets[] = { [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, .n = sizeof(s390_regs) / sizeof(long), .size = sizeof(long), .align = sizeof(long), .get = s390_regs_get, .set = s390_regs_set, }, [REGSET_FP] = { .core_note_type = NT_PRFPREG, .n = sizeof(s390_fp_regs) / sizeof(long), .size = sizeof(long), .align = sizeof(long), .get = s390_fpregs_get, .set = s390_fpregs_set, }, }; static const struct user_regset_view user_s390_view = { .name = UTS_MACHINE, .e_machine = EM_S390, .regsets = s390_regsets, .n = ARRAY_SIZE(s390_regsets) }; #ifdef CONFIG_COMPAT static int s390_compat_regs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { if 
(target == current) save_access_regs(target->thread.acrs); if (kbuf) { compat_ulong_t *k = kbuf; while (count > 0) { *k++ = __peek_user_compat(target, pos); count -= sizeof(*k); pos += sizeof(*k); } } else { compat_ulong_t __user *u = ubuf; while (count > 0) { if (__put_user(__peek_user_compat(target, pos), u++)) return -EFAULT; count -= sizeof(*u); pos += sizeof(*u); } } return 0; } static int s390_compat_regs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int rc = 0; if (target == current) save_access_regs(target->thread.acrs); if (kbuf) { const compat_ulong_t *k = kbuf; while (count > 0 && !rc) { rc = __poke_user_compat(target, pos, *k++); count -= sizeof(*k); pos += sizeof(*k); } } else { const compat_ulong_t __user *u = ubuf; while (count > 0 && !rc) { compat_ulong_t word; rc = __get_user(word, u++); if (rc) break; rc = __poke_user_compat(target, pos, word); count -= sizeof(*u); pos += sizeof(*u); } } if (rc == 0 && target == current) restore_access_regs(target->thread.acrs); return rc; } static const struct user_regset s390_compat_regsets[] = { [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, .n = sizeof(s390_compat_regs) / sizeof(compat_long_t), .size = sizeof(compat_long_t), .align = sizeof(compat_long_t), .get = s390_compat_regs_get, .set = s390_compat_regs_set, }, [REGSET_FP] = { .core_note_type = NT_PRFPREG, .n = sizeof(s390_fp_regs) / sizeof(compat_long_t), .size = sizeof(compat_long_t), .align = sizeof(compat_long_t), .get = s390_fpregs_get, .set = s390_fpregs_set, }, }; static const struct user_regset_view user_s390_compat_view = { .name = "s390", .e_machine = EM_S390, .regsets = s390_compat_regsets, .n = ARRAY_SIZE(s390_compat_regsets) }; #endif const struct user_regset_view *task_user_regset_view(struct task_struct *task) { #ifdef CONFIG_COMPAT if (test_tsk_thread_flag(task, TIF_31BIT)) return &user_s390_compat_view; #endif return 
&user_s390_view; }
gpl-2.0
pershoot/kernel-2638
drivers/net/eth16i.c
165
41228
/* eth16i.c An ICL EtherTeam 16i and 32 EISA ethernet driver for Linux Written 1994-1999 by Mika Kuoppala Copyright (C) 1994-1999 by Mika Kuoppala Based on skeleton.c and heavily on at1700.c by Donald Becker This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. The author may be reached as miku@iki.fi This driver supports following cards : - ICL EtherTeam 16i - ICL EtherTeam 32 EISA (Uses true 32 bit transfers rather than 16i compability mode) Example Module usage: insmod eth16i.o io=0x2a0 mediatype=bnc mediatype can be one of the following: bnc,tp,dix,auto,eprom 'auto' will try to autoprobe mediatype. 'eprom' will use whatever type defined in eprom. I have benchmarked driver with PII/300Mhz as a ftp client and 486/33Mhz as a ftp server. Top speed was 1128.37 kilobytes/sec. Sources: - skeleton.c a sample network driver core for linux, written by Donald Becker <becker@scyld.com> - at1700.c a driver for Allied Telesis AT1700, written by Donald Becker. - e16iSRV.asm a Netware 3.X Server Driver for ICL EtherTeam16i written by Markku Viima - The Fujitsu MB86965 databook. Author thanks following persons due to their valueble assistance: Markku Viima (ICL) Ari Valve (ICL) Donald Becker Kurt Huwig <kurt@huwig.de> Revision history: Version Date Description 0.01 15.12-94 Initial version (card detection) 0.02 23.01-95 Interrupt is now hooked correctly 0.03 01.02-95 Rewrote initialization part 0.04 07.02-95 Base skeleton done... Made a few changes to signature checking to make it a bit reliable. - fixed bug in tx_buf mapping - fixed bug in initialization (DLC_EN wasn't enabled when initialization was done.) 
0.05 08.02-95 If there were more than one packet to send, transmit was jammed due to invalid register write...now fixed 0.06 19.02-95 Rewrote interrupt handling 0.07 13.04-95 Wrote EEPROM read routines Card configuration now set according to data read from EEPROM 0.08 23.06-95 Wrote part that tries to probe used interface port if AUTO is selected 0.09 01.09-95 Added module support 0.10 04.09-95 Fixed receive packet allocation to work with kernels > 1.3.x 0.20 20.09-95 Added support for EtherTeam32 EISA 0.21 17.10-95 Removed the unnecessary extern init_etherdev() declaration. Some other cleanups. 0.22 22.02-96 Receive buffer was not flushed correctly when faulty packet was received. Now fixed. 0.23 26.02-96 Made resetting the adapter more reliable. 0.24 27.02-96 Rewrote faulty packet handling in eth16i_rx 0.25 22.05-96 kfree() was missing from cleanup_module. 0.26 11.06-96 Sometimes card was not found by check_signature(). Now made more reliable. 0.27 23.06-96 Oops. 16 consecutive collisions halted adapter. Now will try to retransmit MAX_COL_16 times before finally giving up. 0.28 28.10-97 Added dev_id parameter (NULL) for free_irq 0.29 29.10-97 Multiple card support for module users 0.30 30.10-97 Fixed irq allocation bug. (request_irq moved from probe to open) 0.30a 21.08-98 Card detection made more relaxed. Driver had problems with some TCP/IP-PROM boots to find the card. Suggested by Kurt Huwig <kurt@huwig.de> 0.31 28.08-98 Media interface port can now be selected with module parameters or kernel boot parameters. 0.32 31.08-98 IRQ was never freed if open/close pair wasn't called. Now fixed. 0.33 10.09-98 When eth16i_open() was called after eth16i_close() chip never recovered. Now more shallow reset is made on close. 0.34 29.06-99 Fixed one bad #ifdef. Changed ioaddr -> io for consistency 0.35 01.07-99 transmit,-receive bytes were never updated in stats. Bugs: In some cases the media interface autoprobing code doesn't find the correct interface type. 
In this case you can manually choose the interface type in DOS with E16IC.EXE which is configuration software for EtherTeam16i and EtherTeam32 cards. This is also true for IRQ setting. You cannot use module parameter to configure IRQ of the card (yet). To do: - Real multicast support - Rewrite the media interface autoprobing code. Its _horrible_ ! - Possibly merge all the MB86965 specific code to external module for use by eth16.c and Donald's at1700.c - IRQ configuration with module parameter. I will do this when i will get enough info about setting irq without configuration utility. */ static char *version = "eth16i.c: v0.35 01-Jul-1999 Mika Kuoppala (miku@iki.fi)\n"; #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/bitops.h> #include <linux/jiffies.h> #include <linux/io.h> #include <asm/system.h> #include <asm/dma.h> /* Few macros */ #define BITSET(ioaddr, bnum) ((outb(((inb(ioaddr)) | (bnum)), ioaddr))) #define BITCLR(ioaddr, bnum) ((outb(((inb(ioaddr)) & (~(bnum))), ioaddr))) /* This is the I/O address space for Etherteam 16i adapter. 
*/ #define ETH16I_IO_EXTENT 32 /* Ticks before deciding that transmit has timed out */ #define TX_TIMEOUT (400*HZ/1000) /* Maximum loop count when receiving packets */ #define MAX_RX_LOOP 20 /* Some interrupt masks */ #define ETH16I_INTR_ON 0xef8a /* Higher is receive mask */ #define ETH16I_INTR_OFF 0x0000 /* Buffers header status byte meanings */ #define PKT_GOOD BIT(5) #define PKT_GOOD_RMT BIT(4) #define PKT_SHORT BIT(3) #define PKT_ALIGN_ERR BIT(2) #define PKT_CRC_ERR BIT(1) #define PKT_RX_BUF_OVERFLOW BIT(0) /* Transmit status register (DLCR0) */ #define TX_STATUS_REG 0 #define TX_DONE BIT(7) #define NET_BUSY BIT(6) #define TX_PKT_RCD BIT(5) #define CR_LOST BIT(4) #define TX_JABBER_ERR BIT(3) #define COLLISION BIT(2) #define COLLISIONS_16 BIT(1) /* Receive status register (DLCR1) */ #define RX_STATUS_REG 1 #define RX_PKT BIT(7) /* Packet received */ #define BUS_RD_ERR BIT(6) #define SHORT_PKT_ERR BIT(3) #define ALIGN_ERR BIT(2) #define CRC_ERR BIT(1) #define RX_BUF_OVERFLOW BIT(0) /* Transmit Interrupt Enable Register (DLCR2) */ #define TX_INTR_REG 2 #define TX_INTR_DONE BIT(7) #define TX_INTR_COL BIT(2) #define TX_INTR_16_COL BIT(1) /* Receive Interrupt Enable Register (DLCR3) */ #define RX_INTR_REG 3 #define RX_INTR_RECEIVE BIT(7) #define RX_INTR_SHORT_PKT BIT(3) #define RX_INTR_CRC_ERR BIT(1) #define RX_INTR_BUF_OVERFLOW BIT(0) /* Transmit Mode Register (DLCR4) */ #define TRANSMIT_MODE_REG 4 #define LOOPBACK_CONTROL BIT(1) #define CONTROL_OUTPUT BIT(2) /* Receive Mode Register (DLCR5) */ #define RECEIVE_MODE_REG 5 #define RX_BUFFER_EMPTY BIT(6) #define ACCEPT_BAD_PACKETS BIT(5) #define RECEIVE_SHORT_ADDR BIT(4) #define ACCEPT_SHORT_PACKETS BIT(3) #define REMOTE_RESET BIT(2) #define ADDRESS_FILTER_MODE BIT(1) | BIT(0) #define REJECT_ALL 0 #define ACCEPT_ALL 3 #define MODE_1 1 /* NODE ID, BC, MC, 2-24th bit */ #define MODE_2 2 /* NODE ID, BC, MC, Hash Table */ /* Configuration Register 0 (DLCR6) */ #define CONFIG_REG_0 6 #define DLC_EN BIT(7) #define 
SRAM_CYCLE_TIME_100NS BIT(6) #define SYSTEM_BUS_WIDTH_8 BIT(5) /* 1 = 8bit, 0 = 16bit */ #define BUFFER_WIDTH_8 BIT(4) /* 1 = 8bit, 0 = 16bit */ #define TBS1 BIT(3) #define TBS0 BIT(2) #define SRAM_BS1 BIT(1) /* 00=8kb, 01=16kb */ #define SRAM_BS0 BIT(0) /* 10=32kb, 11=64kb */ #ifndef ETH16I_TX_BUF_SIZE /* 0 = 2kb, 1 = 4kb */ #define ETH16I_TX_BUF_SIZE 3 /* 2 = 8kb, 3 = 16kb */ #endif #define TX_BUF_1x2048 0 #define TX_BUF_2x2048 1 #define TX_BUF_2x4098 2 #define TX_BUF_2x8192 3 /* Configuration Register 1 (DLCR7) */ #define CONFIG_REG_1 7 #define POWERUP BIT(5) /* Transmit start register */ #define TRANSMIT_START_REG 10 #define TRANSMIT_START_RB 2 #define TX_START BIT(7) /* Rest of register bit indicate*/ /* number of packets in tx buffer*/ /* Node ID registers (DLCR8-13) */ #define NODE_ID_0 8 #define NODE_ID_RB 0 /* Hash Table registers (HT8-15) */ #define HASH_TABLE_0 8 #define HASH_TABLE_RB 1 /* Buffer memory ports */ #define BUFFER_MEM_PORT_LB 8 #define DATAPORT BUFFER_MEM_PORT_LB #define BUFFER_MEM_PORT_HB 9 /* 16 Collision control register (BMPR11) */ #define COL_16_REG 11 #define HALT_ON_16 0x00 #define RETRANS_AND_HALT_ON_16 0x02 /* Maximum number of attempts to send after 16 concecutive collisions */ #define MAX_COL_16 10 /* DMA Burst and Transceiver Mode Register (BMPR13) */ #define TRANSCEIVER_MODE_REG 13 #define TRANSCEIVER_MODE_RB 2 #define IO_BASE_UNLOCK BIT(7) #define LOWER_SQUELCH_TRESH BIT(6) #define LINK_TEST_DISABLE BIT(5) #define AUI_SELECT BIT(4) #define DIS_AUTO_PORT_SEL BIT(3) /* Filter Self Receive Register (BMPR14) */ #define FILTER_SELF_RX_REG 14 #define SKIP_RX_PACKET BIT(2) #define FILTER_SELF_RECEIVE BIT(0) /* EEPROM Control Register (BMPR 16) */ #define EEPROM_CTRL_REG 16 /* EEPROM Data Register (BMPR 17) */ #define EEPROM_DATA_REG 17 /* NMC93CSx6 EEPROM Control Bits */ #define CS_0 0x00 #define CS_1 0x20 #define SK_0 0x00 #define SK_1 0x40 #define DI_0 0x00 #define DI_1 0x80 /* NMC93CSx6 EEPROM Instructions */ #define EEPROM_READ 
0x80 /* NMC93CSx6 EEPROM Addresses */ #define E_NODEID_0 0x02 #define E_NODEID_1 0x03 #define E_NODEID_2 0x04 #define E_PORT_SELECT 0x14 #define E_PORT_BNC 0x00 #define E_PORT_DIX 0x01 #define E_PORT_TP 0x02 #define E_PORT_AUTO 0x03 #define E_PORT_FROM_EPROM 0x04 #define E_PRODUCT_CFG 0x30 /* Macro to slow down io between EEPROM clock transitions */ #define eeprom_slow_io() do { int _i = 40; while(--_i > 0) { inb(0x80); }}while(0) /* Jumperless Configuration Register (BMPR19) */ #define JUMPERLESS_CONFIG 19 /* ID ROM registers, writing to them also resets some parts of chip */ #define ID_ROM_0 24 #define ID_ROM_7 31 #define RESET ID_ROM_0 /* This is the I/O address list to be probed when seeking the card */ static unsigned int eth16i_portlist[] __initdata = { 0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300, 0 }; static unsigned int eth32i_portlist[] __initdata = { 0x1000, 0x2000, 0x3000, 0x4000, 0x5000, 0x6000, 0x7000, 0x8000, 0x9000, 0xA000, 0xB000, 0xC000, 0xD000, 0xE000, 0xF000, 0 }; /* This is the Interrupt lookup table for Eth16i card */ static unsigned int eth16i_irqmap[] __initdata = { 9, 10, 5, 15, 0 }; #define NUM_OF_ISA_IRQS 4 /* This is the Interrupt lookup table for Eth32i card */ static unsigned int eth32i_irqmap[] __initdata = { 3, 5, 7, 9, 10, 11, 12, 15, 0 }; #define EISA_IRQ_REG 0xc89 #define NUM_OF_EISA_IRQS 8 static unsigned int eth16i_tx_buf_map[] = { 2048, 2048, 4096, 8192 }; /* Use 0 for production, 1 for verification, >2 for debug */ #ifndef ETH16I_DEBUG #define ETH16I_DEBUG 0 #endif static unsigned int eth16i_debug = ETH16I_DEBUG; /* Information for each board */ struct eth16i_local { unsigned char tx_started; unsigned char tx_buf_busy; unsigned short tx_queue; /* Number of packets in transmit buffer */ unsigned short tx_queue_len; unsigned int tx_buf_size; unsigned long open_time; unsigned long tx_buffered_packets; unsigned long tx_buffered_bytes; unsigned long col_16; spinlock_t lock; }; /* Function prototypes */ static int 
eth16i_probe1(struct net_device *dev, int ioaddr); static int eth16i_check_signature(int ioaddr); static int eth16i_probe_port(int ioaddr); static void eth16i_set_port(int ioaddr, int porttype); static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l); static int eth16i_receive_probe_packet(int ioaddr); static int eth16i_get_irq(int ioaddr); static int eth16i_read_eeprom(int ioaddr, int offset); static int eth16i_read_eeprom_word(int ioaddr); static void eth16i_eeprom_cmd(int ioaddr, unsigned char command); static int eth16i_open(struct net_device *dev); static int eth16i_close(struct net_device *dev); static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev); static void eth16i_rx(struct net_device *dev); static void eth16i_timeout(struct net_device *dev); static irqreturn_t eth16i_interrupt(int irq, void *dev_id); static void eth16i_reset(struct net_device *dev); static void eth16i_timeout(struct net_device *dev); static void eth16i_skip_packet(struct net_device *dev); static void eth16i_multicast(struct net_device *dev); static void eth16i_select_regbank(unsigned char regbank, int ioaddr); static void eth16i_initialize(struct net_device *dev, int boot); #if 0 static int eth16i_set_irq(struct net_device *dev); #endif #ifdef MODULE static ushort eth16i_parse_mediatype(const char* s); #endif static char cardname[] __initdata = "ICL EtherTeam 16i/32"; static int __init do_eth16i_probe(struct net_device *dev) { int i; int ioaddr; int base_addr = dev->base_addr; if(eth16i_debug > 4) printk(KERN_DEBUG "Probing started for %s\n", cardname); if(base_addr > 0x1ff) /* Check only single location */ return eth16i_probe1(dev, base_addr); else if(base_addr != 0) /* Don't probe at all */ return -ENXIO; /* Seek card from the ISA io address space */ for(i = 0; (ioaddr = eth16i_portlist[i]) ; i++) if(eth16i_probe1(dev, ioaddr) == 0) return 0; /* Seek card from the EISA io address space */ for(i = 0; (ioaddr = eth32i_portlist[i]) ; i++) 
if(eth16i_probe1(dev, ioaddr) == 0) return 0; return -ENODEV; } #ifndef MODULE struct net_device * __init eth16i_probe(int unit) { struct net_device *dev = alloc_etherdev(sizeof(struct eth16i_local)); int err; if (!dev) return ERR_PTR(-ENOMEM); sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); err = do_eth16i_probe(dev); if (err) goto out; return dev; out: free_netdev(dev); return ERR_PTR(err); } #endif static const struct net_device_ops eth16i_netdev_ops = { .ndo_open = eth16i_open, .ndo_stop = eth16i_close, .ndo_start_xmit = eth16i_tx, .ndo_set_multicast_list = eth16i_multicast, .ndo_tx_timeout = eth16i_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int __init eth16i_probe1(struct net_device *dev, int ioaddr) { struct eth16i_local *lp = netdev_priv(dev); static unsigned version_printed; int retval; /* Let's grab the region */ if (!request_region(ioaddr, ETH16I_IO_EXTENT, cardname)) return -EBUSY; /* The MB86985 chip has on register which holds information in which io address the chip lies. First read this register and compare it to our current io address and if match then this could be our chip. */ if(ioaddr < 0x1000) { if(eth16i_portlist[(inb(ioaddr + JUMPERLESS_CONFIG) & 0x07)] != ioaddr) { retval = -ENODEV; goto out; } } /* Now we will go a bit deeper and try to find the chip's signature */ if(eth16i_check_signature(ioaddr) != 0) { retval = -ENODEV; goto out; } /* Now it seems that we have found a ethernet chip in this particular ioaddr. The MB86985 chip has this feature, that when you read a certain register it will increase it's io base address to next configurable slot. Now when we have found the chip, first thing is to make sure that the chip's ioaddr will hold still here. 
*/ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr); outb(0x00, ioaddr + TRANSCEIVER_MODE_REG); outb(0x00, ioaddr + RESET); /* Reset some parts of chip */ BITSET(ioaddr + CONFIG_REG_0, BIT(7)); /* Disable the data link */ if( (eth16i_debug & version_printed++) == 0) printk(KERN_INFO "%s", version); dev->base_addr = ioaddr; dev->irq = eth16i_get_irq(ioaddr); /* Try to obtain interrupt vector */ if ((retval = request_irq(dev->irq, (void *)&eth16i_interrupt, 0, cardname, dev))) { printk(KERN_WARNING "%s at %#3x, but is unusable due to conflicting IRQ %d.\n", cardname, ioaddr, dev->irq); goto out; } printk(KERN_INFO "%s: %s at %#3x, IRQ %d, ", dev->name, cardname, ioaddr, dev->irq); /* Now we will have to lock the chip's io address */ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr); outb(0x38, ioaddr + TRANSCEIVER_MODE_REG); eth16i_initialize(dev, 1); /* Initialize rest of the chip's registers */ /* Now let's same some energy by shutting down the chip ;) */ BITCLR(ioaddr + CONFIG_REG_1, POWERUP); /* Initialize the device structure */ dev->netdev_ops = &eth16i_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; spin_lock_init(&lp->lock); retval = register_netdev(dev); if (retval) goto out1; return 0; out1: free_irq(dev->irq, dev); out: release_region(ioaddr, ETH16I_IO_EXTENT); return retval; } static void eth16i_initialize(struct net_device *dev, int boot) { int ioaddr = dev->base_addr; int i, node_w = 0; unsigned char node_byte = 0; /* Setup station address */ eth16i_select_regbank(NODE_ID_RB, ioaddr); for(i = 0 ; i < 3 ; i++) { unsigned short node_val = eth16i_read_eeprom(ioaddr, E_NODEID_0 + i); ((unsigned short *)dev->dev_addr)[i] = ntohs(node_val); } for(i = 0; i < 6; i++) { outb( ((unsigned char *)dev->dev_addr)[i], ioaddr + NODE_ID_0 + i); if(boot) { printk("%02x", inb(ioaddr + NODE_ID_0 + i)); if(i != 5) printk(":"); } } /* Now we will set multicast addresses to accept none */ eth16i_select_regbank(HASH_TABLE_RB, ioaddr); for(i = 0; i < 8; i++) outb(0x00, ioaddr 
+ HASH_TABLE_0 + i); /* Now let's disable the transmitter and receiver, set the buffer ram cycle time, bus width and buffer data path width. Also we shall set transmit buffer size and total buffer size. */ eth16i_select_regbank(2, ioaddr); node_byte = 0; node_w = eth16i_read_eeprom(ioaddr, E_PRODUCT_CFG); if( (node_w & 0xFF00) == 0x0800) node_byte |= BUFFER_WIDTH_8; node_byte |= SRAM_BS1; if( (node_w & 0x00FF) == 64) node_byte |= SRAM_BS0; node_byte |= DLC_EN | SRAM_CYCLE_TIME_100NS | (ETH16I_TX_BUF_SIZE << 2); outb(node_byte, ioaddr + CONFIG_REG_0); /* We shall halt the transmitting, if 16 collisions are detected */ outb(HALT_ON_16, ioaddr + COL_16_REG); #ifdef MODULE /* if_port already set by init_module() */ #else dev->if_port = (dev->mem_start < E_PORT_FROM_EPROM) ? dev->mem_start : E_PORT_FROM_EPROM; #endif /* Set interface port type */ if(boot) { static const char * const porttype[] = { "BNC", "DIX", "TP", "AUTO", "FROM_EPROM" }; switch(dev->if_port) { case E_PORT_FROM_EPROM: dev->if_port = eth16i_read_eeprom(ioaddr, E_PORT_SELECT); break; case E_PORT_AUTO: dev->if_port = eth16i_probe_port(ioaddr); break; case E_PORT_BNC: case E_PORT_TP: case E_PORT_DIX: break; } printk(" %s interface.\n", porttype[dev->if_port]); eth16i_set_port(ioaddr, dev->if_port); } /* Set Receive Mode to normal operation */ outb(MODE_2, ioaddr + RECEIVE_MODE_REG); } static int eth16i_probe_port(int ioaddr) { int i; int retcode; unsigned char dummy_packet[64]; /* Powerup the chip */ outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1); BITSET(ioaddr + CONFIG_REG_0, DLC_EN); eth16i_select_regbank(NODE_ID_RB, ioaddr); for(i = 0; i < 6; i++) { dummy_packet[i] = inb(ioaddr + NODE_ID_0 + i); dummy_packet[i+6] = inb(ioaddr + NODE_ID_0 + i); } dummy_packet[12] = 0x00; dummy_packet[13] = 0x04; memset(dummy_packet + 14, 0, sizeof(dummy_packet) - 14); eth16i_select_regbank(2, ioaddr); for(i = 0; i < 3; i++) { BITSET(ioaddr + CONFIG_REG_0, DLC_EN); BITCLR(ioaddr + CONFIG_REG_0, DLC_EN); 
eth16i_set_port(ioaddr, i); if(eth16i_debug > 1) printk(KERN_DEBUG "Set port number %d\n", i); retcode = eth16i_send_probe_packet(ioaddr, dummy_packet, 64); if(retcode == 0) { retcode = eth16i_receive_probe_packet(ioaddr); if(retcode != -1) { if(eth16i_debug > 1) printk(KERN_DEBUG "Eth16i interface port found at %d\n", i); return i; } } else { if(eth16i_debug > 1) printk(KERN_DEBUG "TRANSMIT_DONE timeout when probing interface port\n"); } } if( eth16i_debug > 1) printk(KERN_DEBUG "Using default port\n"); return E_PORT_BNC; } static void eth16i_set_port(int ioaddr, int porttype) { unsigned short temp = 0; eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr); outb(LOOPBACK_CONTROL, ioaddr + TRANSMIT_MODE_REG); temp |= DIS_AUTO_PORT_SEL; switch(porttype) { case E_PORT_BNC : temp |= AUI_SELECT; break; case E_PORT_TP : break; case E_PORT_DIX : temp |= AUI_SELECT; BITSET(ioaddr + TRANSMIT_MODE_REG, CONTROL_OUTPUT); break; } outb(temp, ioaddr + TRANSCEIVER_MODE_REG); if(eth16i_debug > 1) { printk(KERN_DEBUG "TRANSMIT_MODE_REG = %x\n", inb(ioaddr + TRANSMIT_MODE_REG)); printk(KERN_DEBUG "TRANSCEIVER_MODE_REG = %x\n", inb(ioaddr+TRANSCEIVER_MODE_REG)); } } static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l) { unsigned long starttime; outb(0xff, ioaddr + TX_STATUS_REG); outw(l, ioaddr + DATAPORT); outsw(ioaddr + DATAPORT, (unsigned short *)b, (l + 1) >> 1); starttime = jiffies; outb(TX_START | 1, ioaddr + TRANSMIT_START_REG); while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) { if( time_after(jiffies, starttime + TX_TIMEOUT)) { return -1; } } return 0; } static int eth16i_receive_probe_packet(int ioaddr) { unsigned long starttime; starttime = jiffies; while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) { if( time_after(jiffies, starttime + TX_TIMEOUT)) { if(eth16i_debug > 1) printk(KERN_DEBUG "Timeout occurred waiting transmit packet received\n"); starttime = jiffies; while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) { if( time_after(jiffies, starttime + 
TX_TIMEOUT)) { if(eth16i_debug > 1) printk(KERN_DEBUG "Timeout occurred waiting receive packet\n"); return -1; } } if(eth16i_debug > 1) printk(KERN_DEBUG "RECEIVE_PACKET\n"); return 0; /* Found receive packet */ } } if(eth16i_debug > 1) { printk(KERN_DEBUG "TRANSMIT_PACKET_RECEIVED %x\n", inb(ioaddr + TX_STATUS_REG)); printk(KERN_DEBUG "RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG)); } return 0; /* Return success */ } #if 0 static int eth16i_set_irq(struct net_device* dev) { const int ioaddr = dev->base_addr; const int irq = dev->irq; int i = 0; if(ioaddr < 0x1000) { while(eth16i_irqmap[i] && eth16i_irqmap[i] != irq) i++; if(i < NUM_OF_ISA_IRQS) { u8 cbyte = inb(ioaddr + JUMPERLESS_CONFIG); cbyte = (cbyte & 0x3F) | (i << 6); outb(cbyte, ioaddr + JUMPERLESS_CONFIG); return 0; } } else { printk(KERN_NOTICE "%s: EISA Interrupt cannot be set. Use EISA Configuration utility.\n", dev->name); } return -1; } #endif static int __init eth16i_get_irq(int ioaddr) { unsigned char cbyte; if( ioaddr < 0x1000) { cbyte = inb(ioaddr + JUMPERLESS_CONFIG); return eth16i_irqmap[((cbyte & 0xC0) >> 6)]; } else { /* Oh..the card is EISA so method getting IRQ different */ unsigned short index = 0; cbyte = inb(ioaddr + EISA_IRQ_REG); while( (cbyte & 0x01) == 0) { cbyte = cbyte >> 1; index++; } return eth32i_irqmap[index]; } } static int __init eth16i_check_signature(int ioaddr) { int i; unsigned char creg[4] = { 0 }; for(i = 0; i < 4 ; i++) { creg[i] = inb(ioaddr + TRANSMIT_MODE_REG + i); if(eth16i_debug > 1) printk("eth16i: read signature byte %x at %x\n", creg[i], ioaddr + TRANSMIT_MODE_REG + i); } creg[0] &= 0x0F; /* Mask collision cnr */ creg[2] &= 0x7F; /* Mask DCLEN bit */ #if 0 /* This was removed because the card was sometimes left to state from which it couldn't be find anymore. If there is need to more strict check still this have to be fixed. */ if( ! 
((creg[0] == 0x06) && (creg[1] == 0x41)) ) { if(creg[1] != 0x42) return -1; } #endif if( !((creg[2] == 0x36) && (creg[3] == 0xE0)) ) { creg[2] &= 0x40; creg[3] &= 0x03; if( !((creg[2] == 0x40) && (creg[3] == 0x00)) ) return -1; } if(eth16i_read_eeprom(ioaddr, E_NODEID_0) != 0) return -1; if((eth16i_read_eeprom(ioaddr, E_NODEID_1) & 0xFF00) != 0x4B00) return -1; return 0; } static int eth16i_read_eeprom(int ioaddr, int offset) { int data = 0; eth16i_eeprom_cmd(ioaddr, EEPROM_READ | offset); outb(CS_1, ioaddr + EEPROM_CTRL_REG); data = eth16i_read_eeprom_word(ioaddr); outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG); return data; } static int eth16i_read_eeprom_word(int ioaddr) { int i; int data = 0; for(i = 16; i > 0; i--) { outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG); eeprom_slow_io(); outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG); eeprom_slow_io(); data = (data << 1) | ((inb(ioaddr + EEPROM_DATA_REG) & DI_1) ? 1 : 0); eeprom_slow_io(); } return data; } static void eth16i_eeprom_cmd(int ioaddr, unsigned char command) { int i; outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG); outb(DI_0, ioaddr + EEPROM_DATA_REG); outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG); outb(DI_1, ioaddr + EEPROM_DATA_REG); outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG); for(i = 7; i >= 0; i--) { short cmd = ( (command & (1 << i)) ? 
DI_1 : DI_0 ); outb(cmd, ioaddr + EEPROM_DATA_REG); outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG); eeprom_slow_io(); outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG); eeprom_slow_io(); } } static int eth16i_open(struct net_device *dev) { struct eth16i_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; /* Powerup the chip */ outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1); /* Initialize the chip */ eth16i_initialize(dev, 0); /* Set the transmit buffer size */ lp->tx_buf_size = eth16i_tx_buf_map[ETH16I_TX_BUF_SIZE & 0x03]; if(eth16i_debug > 0) printk(KERN_DEBUG "%s: transmit buffer size %d\n", dev->name, lp->tx_buf_size); /* Now enable Transmitter and Receiver sections */ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN); /* Now switch to register bank 2, for run time operation */ eth16i_select_regbank(2, ioaddr); lp->open_time = jiffies; lp->tx_started = 0; lp->tx_queue = 0; lp->tx_queue_len = 0; /* Turn on interrupts*/ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG); netif_start_queue(dev); return 0; } static int eth16i_close(struct net_device *dev) { struct eth16i_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; eth16i_reset(dev); /* Turn off interrupts*/ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG); netif_stop_queue(dev); lp->open_time = 0; /* Disable transmit and receive */ BITSET(ioaddr + CONFIG_REG_0, DLC_EN); /* Reset the chip */ /* outb(0xff, ioaddr + RESET); */ /* outw(0xffff, ioaddr + TX_STATUS_REG); */ outb(0x00, ioaddr + CONFIG_REG_1); return 0; } static void eth16i_timeout(struct net_device *dev) { struct eth16i_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; /* If we get here, some higher level has decided that we are broken. There should really be a "kick me" function call instead. */ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG); printk(KERN_WARNING "%s: transmit timed out with status %04x, %s ?\n", dev->name, inw(ioaddr + TX_STATUS_REG), (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ? 
"IRQ conflict" : "network cable problem"); dev->trans_start = jiffies; /* prevent tx timeout */ /* Let's dump all registers */ if(eth16i_debug > 0) { printk(KERN_DEBUG "%s: timeout: %02x %02x %02x %02x %02x %02x %02x %02x.\n", dev->name, inb(ioaddr + 0), inb(ioaddr + 1), inb(ioaddr + 2), inb(ioaddr + 3), inb(ioaddr + 4), inb(ioaddr + 5), inb(ioaddr + 6), inb(ioaddr + 7)); printk(KERN_DEBUG "%s: transmit start reg: %02x. collision reg %02x\n", dev->name, inb(ioaddr + TRANSMIT_START_REG), inb(ioaddr + COL_16_REG)); printk(KERN_DEBUG "lp->tx_queue = %d\n", lp->tx_queue); printk(KERN_DEBUG "lp->tx_queue_len = %d\n", lp->tx_queue_len); printk(KERN_DEBUG "lp->tx_started = %d\n", lp->tx_started); } dev->stats.tx_errors++; eth16i_reset(dev); dev->trans_start = jiffies; /* prevent tx timeout */ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG); netif_wake_queue(dev); } static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev) { struct eth16i_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; int status = 0; ushort length = skb->len; unsigned char *buf; unsigned long flags; if (length < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; length = ETH_ZLEN; } buf = skb->data; netif_stop_queue(dev); /* Turn off TX interrupts */ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG); /* We would be better doing the disable_irq tricks the 3c509 does, that would make this suck a lot less */ spin_lock_irqsave(&lp->lock, flags); if( (length + 2) > (lp->tx_buf_size - lp->tx_queue_len)) { if(eth16i_debug > 0) printk(KERN_WARNING "%s: Transmit buffer full.\n", dev->name); } else { outw(length, ioaddr + DATAPORT); if( ioaddr < 0x1000 ) outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1); else { unsigned char frag = length % 4; outsl(ioaddr + DATAPORT, buf, length >> 2); if( frag != 0 ) { outsw(ioaddr + DATAPORT, (buf + (length & 0xFFFC)), 1); if( frag == 3 ) outsw(ioaddr + DATAPORT, (buf + (length & 0xFFFC) + 2), 1); } } lp->tx_buffered_packets++; lp->tx_buffered_bytes = 
length; lp->tx_queue++; lp->tx_queue_len += length + 2; } lp->tx_buf_busy = 0; if(lp->tx_started == 0) { /* If the transmitter is idle..always trigger a transmit */ outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG); lp->tx_queue = 0; lp->tx_queue_len = 0; lp->tx_started = 1; netif_wake_queue(dev); } else if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) { /* There is still more room for one more packet in tx buffer */ netif_wake_queue(dev); } spin_unlock_irqrestore(&lp->lock, flags); outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG); /* Turn TX interrupts back on */ /* outb(TX_INTR_DONE | TX_INTR_16_COL, ioaddr + TX_INTR_REG); */ status = 0; dev_kfree_skb(skb); return NETDEV_TX_OK; } static void eth16i_rx(struct net_device *dev) { int ioaddr = dev->base_addr; int boguscount = MAX_RX_LOOP; /* Loop until all packets have been read */ while( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) { /* Read status byte from receive buffer */ ushort status = inw(ioaddr + DATAPORT); /* Get the size of the packet from receive buffer */ ushort pkt_len = inw(ioaddr + DATAPORT); if(eth16i_debug > 4) printk(KERN_DEBUG "%s: Receiving packet mode %02x status %04x.\n", dev->name, inb(ioaddr + RECEIVE_MODE_REG), status); if( !(status & PKT_GOOD) ) { dev->stats.rx_errors++; if( (pkt_len < ETH_ZLEN) || (pkt_len > ETH_FRAME_LEN) ) { dev->stats.rx_length_errors++; eth16i_reset(dev); return; } else { eth16i_skip_packet(dev); dev->stats.rx_dropped++; } } else { /* Ok so now we should have a good packet */ struct sk_buff *skb; skb = dev_alloc_skb(pkt_len + 3); if( skb == NULL ) { printk(KERN_WARNING "%s: Could'n allocate memory for packet (len %d)\n", dev->name, pkt_len); eth16i_skip_packet(dev); dev->stats.rx_dropped++; break; } skb_reserve(skb,2); /* Now let's get the packet out of buffer. size is (pkt_len + 1) >> 1, cause we are now reading words and it have to be even aligned. 
*/ if(ioaddr < 0x1000) insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), (pkt_len + 1) >> 1); else { unsigned char *buf = skb_put(skb, pkt_len); unsigned char frag = pkt_len % 4; insl(ioaddr + DATAPORT, buf, pkt_len >> 2); if(frag != 0) { unsigned short rest[2]; rest[0] = inw( ioaddr + DATAPORT ); if(frag == 3) rest[1] = inw( ioaddr + DATAPORT ); memcpy(buf + (pkt_len & 0xfffc), (char *)rest, frag); } } skb->protocol=eth_type_trans(skb, dev); if( eth16i_debug > 5 ) { int i; printk(KERN_DEBUG "%s: Received packet of length %d.\n", dev->name, pkt_len); for(i = 0; i < 14; i++) printk(KERN_DEBUG " %02x", skb->data[i]); printk(KERN_DEBUG ".\n"); } netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } /* else */ if(--boguscount <= 0) break; } /* while */ } static irqreturn_t eth16i_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct eth16i_local *lp; int ioaddr = 0, status; int handled = 0; ioaddr = dev->base_addr; lp = netdev_priv(dev); /* Turn off all interrupts from adapter */ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG); /* eth16i_tx won't be called */ spin_lock(&lp->lock); status = inw(ioaddr + TX_STATUS_REG); /* Get the status */ outw(status, ioaddr + TX_STATUS_REG); /* Clear status bits */ if (status) handled = 1; if(eth16i_debug > 3) printk(KERN_DEBUG "%s: Interrupt with status %04x.\n", dev->name, status); if( status & 0x7f00 ) { dev->stats.rx_errors++; if(status & (BUS_RD_ERR << 8) ) printk(KERN_WARNING "%s: Bus read error.\n",dev->name); if(status & (SHORT_PKT_ERR << 8) ) dev->stats.rx_length_errors++; if(status & (ALIGN_ERR << 8) ) dev->stats.rx_frame_errors++; if(status & (CRC_ERR << 8) ) dev->stats.rx_crc_errors++; if(status & (RX_BUF_OVERFLOW << 8) ) dev->stats.rx_over_errors++; } if( status & 0x001a) { dev->stats.tx_errors++; if(status & CR_LOST) dev->stats.tx_carrier_errors++; if(status & TX_JABBER_ERR) dev->stats.tx_window_errors++; #if 0 if(status & COLLISION) { dev->stats.collisions += 
((inb(ioaddr+TRANSMIT_MODE_REG) & 0xF0) >> 4); } #endif if(status & COLLISIONS_16) { if(lp->col_16 < MAX_COL_16) { lp->col_16++; dev->stats.collisions++; /* Resume transmitting, skip failed packet */ outb(0x02, ioaddr + COL_16_REG); } else { printk(KERN_WARNING "%s: bailing out due to many consecutive 16-in-a-row collisions. Network cable problem?\n", dev->name); } } } if( status & 0x00ff ) { /* Let's check the transmit status reg */ if(status & TX_DONE) { /* The transmit has been done */ dev->stats.tx_packets = lp->tx_buffered_packets; dev->stats.tx_bytes += lp->tx_buffered_bytes; lp->col_16 = 0; if(lp->tx_queue) { /* Is there still packets ? */ /* There was packet(s) so start transmitting and write also how many packets there is to be sended */ outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG); lp->tx_queue = 0; lp->tx_queue_len = 0; lp->tx_started = 1; } else { lp->tx_started = 0; } netif_wake_queue(dev); } } if( ( status & 0x8000 ) || ( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) ) { eth16i_rx(dev); /* We have packet in receive buffer */ } /* Turn interrupts back on */ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG); if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) { /* There is still more room for one more packet in tx buffer */ netif_wake_queue(dev); } spin_unlock(&lp->lock); return IRQ_RETVAL(handled); } static void eth16i_skip_packet(struct net_device *dev) { int ioaddr = dev->base_addr; inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT); outb(SKIP_RX_PACKET, ioaddr + FILTER_SELF_RX_REG); while( inb( ioaddr + FILTER_SELF_RX_REG ) != 0); } static void eth16i_reset(struct net_device *dev) { struct eth16i_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; if(eth16i_debug > 1) printk(KERN_DEBUG "%s: Resetting device.\n", dev->name); BITSET(ioaddr + CONFIG_REG_0, DLC_EN); outw(0xffff, ioaddr + TX_STATUS_REG); eth16i_select_regbank(2, ioaddr); lp->tx_started = 0; lp->tx_buf_busy = 0; lp->tx_queue = 0; 
lp->tx_queue_len = 0; BITCLR(ioaddr + CONFIG_REG_0, DLC_EN); } static void eth16i_multicast(struct net_device *dev) { int ioaddr = dev->base_addr; if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) { outb(3, ioaddr + RECEIVE_MODE_REG); } else { outb(2, ioaddr + RECEIVE_MODE_REG); } } static void eth16i_select_regbank(unsigned char banknbr, int ioaddr) { unsigned char data; data = inb(ioaddr + CONFIG_REG_1); outb( ((data & 0xF3) | ( (banknbr & 0x03) << 2)), ioaddr + CONFIG_REG_1); } #ifdef MODULE static ushort eth16i_parse_mediatype(const char* s) { if(!s) return E_PORT_FROM_EPROM; if (!strncmp(s, "bnc", 3)) return E_PORT_BNC; else if (!strncmp(s, "tp", 2)) return E_PORT_TP; else if (!strncmp(s, "dix", 3)) return E_PORT_DIX; else if (!strncmp(s, "auto", 4)) return E_PORT_AUTO; else return E_PORT_FROM_EPROM; } #define MAX_ETH16I_CARDS 4 /* Max number of Eth16i cards per module */ static struct net_device *dev_eth16i[MAX_ETH16I_CARDS]; static int io[MAX_ETH16I_CARDS]; #if 0 static int irq[MAX_ETH16I_CARDS]; #endif static char* mediatype[MAX_ETH16I_CARDS]; static int debug = -1; MODULE_AUTHOR("Mika Kuoppala <miku@iki.fi>"); MODULE_DESCRIPTION("ICL EtherTeam 16i/32 driver"); MODULE_LICENSE("GPL"); module_param_array(io, int, NULL, 0); MODULE_PARM_DESC(io, "eth16i I/O base address(es)"); #if 0 module_param_array(irq, int, NULL, 0); MODULE_PARM_DESC(irq, "eth16i interrupt request number"); #endif module_param_array(mediatype, charp, NULL, 0); MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto,eprom)"); module_param(debug, int, 0); MODULE_PARM_DESC(debug, "eth16i debug level (0-6)"); int __init init_module(void) { int this_dev, found = 0; struct net_device *dev; for (this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) { dev = alloc_etherdev(sizeof(struct eth16i_local)); if (!dev) break; dev->base_addr = io[this_dev]; if(debug != -1) eth16i_debug = debug; if(eth16i_debug > 1) printk(KERN_NOTICE "eth16i(%d): interface type 
%s\n", this_dev, mediatype[this_dev] ? mediatype[this_dev] : "none" ); dev->if_port = eth16i_parse_mediatype(mediatype[this_dev]); if(io[this_dev] == 0) { if (this_dev != 0) { /* Only autoprobe 1st one */ free_netdev(dev); break; } printk(KERN_NOTICE "eth16i.c: Presently autoprobing (not recommended) for a single card.\n"); } if (do_eth16i_probe(dev) == 0) { dev_eth16i[found++] = dev; continue; } printk(KERN_WARNING "eth16i.c No Eth16i card found (i/o = 0x%x).\n", io[this_dev]); free_netdev(dev); break; } if (found) return 0; return -ENXIO; } void __exit cleanup_module(void) { int this_dev; for(this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) { struct net_device *dev = dev_eth16i[this_dev]; if (netdev_priv(dev)) { unregister_netdev(dev); free_irq(dev->irq, dev); release_region(dev->base_addr, ETH16I_IO_EXTENT); free_netdev(dev); } } } #endif /* MODULE */
gpl-2.0
kangtastic/kgb
drivers/char/tpm/tpm.c
421
29960
/* * Copyright (C) 2004 IBM Corporation * * Authors: * Leendert van Doorn <leendert@watson.ibm.com> * Dave Safford <safford@watson.ibm.com> * Reiner Sailer <sailer@watson.ibm.com> * Kylene Hall <kjhall@us.ibm.com> * * Maintained by: <tpmdd-devel@lists.sourceforge.net> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. * * Note, the TPM chip is not interrupt driven (only polling) * and can have very long timeouts (minutes!). Hence the unusual * calls to msleep. * */ #include <linux/poll.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include "tpm.h" enum tpm_const { TPM_MINOR = 224, /* officially assigned */ TPM_BUFSIZE = 4096, TPM_NUM_DEVICES = 256, }; enum tpm_duration { TPM_SHORT = 0, TPM_MEDIUM = 1, TPM_LONG = 2, TPM_UNDEFINED, }; #define TPM_MAX_ORDINAL 243 #define TPM_MAX_PROTECTED_ORDINAL 12 #define TPM_PROTECTED_ORDINAL_MASK 0xFF static LIST_HEAD(tpm_chip_list); static DEFINE_SPINLOCK(driver_lock); static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES); /* * Array with one entry per ordinal defining the maximum amount * of time the chip could take to return the result. The ordinal * designation of short, medium or long is defined in a table in * TCG Specification TPM Main Part 2 TPM Structures Section 17. The * values of the SHORT, MEDIUM, and LONG durations are retrieved * from the chip during initialization with a call to tpm_get_timeouts. 
*/ static const u8 tpm_protected_ordinal_duration[TPM_MAX_PROTECTED_ORDINAL] = { TPM_UNDEFINED, /* 0 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 5 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 10 */ TPM_SHORT, }; static const u8 tpm_ordinal_duration[TPM_MAX_ORDINAL] = { TPM_UNDEFINED, /* 0 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 5 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 10 */ TPM_SHORT, TPM_MEDIUM, TPM_LONG, TPM_LONG, TPM_MEDIUM, /* 15 */ TPM_SHORT, TPM_SHORT, TPM_MEDIUM, TPM_LONG, TPM_SHORT, /* 20 */ TPM_SHORT, TPM_MEDIUM, TPM_MEDIUM, TPM_MEDIUM, TPM_SHORT, /* 25 */ TPM_SHORT, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_MEDIUM, /* 30 */ TPM_LONG, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_SHORT, /* 35 */ TPM_MEDIUM, TPM_MEDIUM, TPM_UNDEFINED, TPM_UNDEFINED, TPM_MEDIUM, /* 40 */ TPM_LONG, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_SHORT, /* 45 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_LONG, TPM_MEDIUM, /* 50 */ TPM_MEDIUM, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 55 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_MEDIUM, /* 60 */ TPM_MEDIUM, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_MEDIUM, /* 65 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 70 */ TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 75 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_LONG, /* 80 */ TPM_UNDEFINED, TPM_MEDIUM, TPM_LONG, TPM_SHORT, TPM_UNDEFINED, /* 85 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 90 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, /* 95 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_MEDIUM, /* 100 */ TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 105 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 110 */ TPM_SHORT, 
TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, /* 115 */ TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_LONG, /* 120 */ TPM_LONG, TPM_MEDIUM, TPM_UNDEFINED, TPM_SHORT, TPM_SHORT, /* 125 */ TPM_SHORT, TPM_LONG, TPM_SHORT, TPM_SHORT, TPM_SHORT, /* 130 */ TPM_MEDIUM, TPM_UNDEFINED, TPM_SHORT, TPM_MEDIUM, TPM_UNDEFINED, /* 135 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 140 */ TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 145 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 150 */ TPM_MEDIUM, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, /* 155 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 160 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, /* 165 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_LONG, /* 170 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 175 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_MEDIUM, /* 180 */ TPM_SHORT, TPM_MEDIUM, TPM_MEDIUM, TPM_MEDIUM, TPM_MEDIUM, /* 185 */ TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 190 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 195 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 200 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, TPM_SHORT, /* 205 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_MEDIUM, /* 210 */ TPM_UNDEFINED, TPM_MEDIUM, TPM_MEDIUM, TPM_MEDIUM, TPM_UNDEFINED, /* 215 */ TPM_MEDIUM, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, TPM_SHORT, /* 220 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, /* 225 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 230 */ TPM_LONG, TPM_MEDIUM, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 235 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 240 */ TPM_UNDEFINED, TPM_MEDIUM, }; 
static void user_reader_timeout(unsigned long ptr) { struct tpm_chip *chip = (struct tpm_chip *) ptr; schedule_work(&chip->work); } static void timeout_work(struct work_struct *work) { struct tpm_chip *chip = container_of(work, struct tpm_chip, work); mutex_lock(&chip->buffer_mutex); atomic_set(&chip->data_pending, 0); memset(chip->data_buffer, 0, TPM_BUFSIZE); mutex_unlock(&chip->buffer_mutex); } /* * Returns max number of jiffies to wait */ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) { int duration_idx = TPM_UNDEFINED; int duration = 0; if (ordinal < TPM_MAX_ORDINAL) duration_idx = tpm_ordinal_duration[ordinal]; else if ((ordinal & TPM_PROTECTED_ORDINAL_MASK) < TPM_MAX_PROTECTED_ORDINAL) duration_idx = tpm_protected_ordinal_duration[ordinal & TPM_PROTECTED_ORDINAL_MASK]; if (duration_idx != TPM_UNDEFINED) duration = chip->vendor.duration[duration_idx]; if (duration <= 0) return 2 * 60 * HZ; else return duration; } EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); /* * Internal kernel interface to transmit TPM commands */ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, size_t bufsiz) { ssize_t rc; u32 count, ordinal; unsigned long stop; count = be32_to_cpu(*((__be32 *) (buf + 2))); ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); if (count == 0) return -ENODATA; if (count > bufsiz) { dev_err(chip->dev, "invalid count value %x %zx \n", count, bufsiz); return -E2BIG; } mutex_lock(&chip->tpm_mutex); if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) { dev_err(chip->dev, "tpm_transmit: tpm_send: error %zd\n", rc); goto out; } if (chip->vendor.irq) goto out_recv; stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal); do { u8 status = chip->vendor.status(chip); if ((status & chip->vendor.req_complete_mask) == chip->vendor.req_complete_val) goto out_recv; if ((status == chip->vendor.req_canceled)) { dev_err(chip->dev, "Operation Canceled\n"); rc = -ECANCELED; goto out; } msleep(TPM_TIMEOUT); /* CHECK */ rmb(); } 
while (time_before(jiffies, stop)); chip->vendor.cancel(chip); dev_err(chip->dev, "Operation Timed out\n"); rc = -ETIME; goto out; out_recv: rc = chip->vendor.recv(chip, (u8 *) buf, bufsiz); if (rc < 0) dev_err(chip->dev, "tpm_transmit: tpm_recv: error %zd\n", rc); out: mutex_unlock(&chip->tpm_mutex); return rc; } #define TPM_DIGEST_SIZE 20 #define TPM_ERROR_SIZE 10 #define TPM_RET_CODE_IDX 6 enum tpm_capabilities { TPM_CAP_FLAG = cpu_to_be32(4), TPM_CAP_PROP = cpu_to_be32(5), CAP_VERSION_1_1 = cpu_to_be32(0x06), CAP_VERSION_1_2 = cpu_to_be32(0x1A) }; enum tpm_sub_capabilities { TPM_CAP_PROP_PCR = cpu_to_be32(0x101), TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103), TPM_CAP_FLAG_PERM = cpu_to_be32(0x108), TPM_CAP_FLAG_VOL = cpu_to_be32(0x109), TPM_CAP_PROP_OWNER = cpu_to_be32(0x111), TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115), TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120), }; static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd, int len, const char *desc) { int err; len = tpm_transmit(chip,(u8 *) cmd, len); if (len < 0) return len; if (len == TPM_ERROR_SIZE) { err = be32_to_cpu(cmd->header.out.return_code); dev_dbg(chip->dev, "A TPM error (%d) occurred %s\n", err, desc); return err; } return 0; } #define TPM_INTERNAL_RESULT_SIZE 200 #define TPM_TAG_RQU_COMMAND cpu_to_be16(193) #define TPM_ORD_GET_CAP cpu_to_be32(101) static const struct tpm_input_header tpm_getcap_header = { .tag = TPM_TAG_RQU_COMMAND, .length = cpu_to_be32(22), .ordinal = TPM_ORD_GET_CAP }; ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap, const char *desc) { struct tpm_cmd_t tpm_cmd; int rc; struct tpm_chip *chip = dev_get_drvdata(dev); tpm_cmd.header.in = tpm_getcap_header; if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) { tpm_cmd.params.getcap_in.cap = subcap_id; /*subcap field not necessary */ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0); tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32)); } else { if (subcap_id == 
TPM_CAP_FLAG_PERM || subcap_id == TPM_CAP_FLAG_VOL) tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG; else tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); tpm_cmd.params.getcap_in.subcap = subcap_id; } rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc); if (!rc) *cap = tpm_cmd.params.getcap_out.cap; return rc; } void tpm_gen_interrupt(struct tpm_chip *chip) { struct tpm_cmd_t tpm_cmd; ssize_t rc; tpm_cmd.header.in = tpm_getcap_header; tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT; rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, "attempting to determine the timeouts"); } EXPORT_SYMBOL_GPL(tpm_gen_interrupt); void tpm_get_timeouts(struct tpm_chip *chip) { struct tpm_cmd_t tpm_cmd; struct timeout_t *timeout_cap; struct duration_t *duration_cap; ssize_t rc; u32 timeout; tpm_cmd.header.in = tpm_getcap_header; tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT; rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, "attempting to determine the timeouts"); if (rc) goto duration; if (be32_to_cpu(tpm_cmd.header.out.length) != 4 * sizeof(u32)) goto duration; timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout; /* Don't overwrite default if value is 0 */ timeout = be32_to_cpu(timeout_cap->a); if (timeout) chip->vendor.timeout_a = usecs_to_jiffies(timeout); timeout = be32_to_cpu(timeout_cap->b); if (timeout) chip->vendor.timeout_b = usecs_to_jiffies(timeout); timeout = be32_to_cpu(timeout_cap->c); if (timeout) chip->vendor.timeout_c = usecs_to_jiffies(timeout); timeout = be32_to_cpu(timeout_cap->d); if (timeout) chip->vendor.timeout_d = usecs_to_jiffies(timeout); duration: tpm_cmd.header.in = tpm_getcap_header; tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; 
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION; rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, "attempting to determine the durations"); if (rc) return; if (be32_to_cpu(tpm_cmd.header.out.return_code) != 3 * sizeof(u32)) return; duration_cap = &tpm_cmd.params.getcap_out.cap.duration; chip->vendor.duration[TPM_SHORT] = usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above * value wrong and apparently reports msecs rather than usecs. So we * fix up the resulting too-small TPM_SHORT value to make things work. */ if (chip->vendor.duration[TPM_SHORT] < (HZ/100)) chip->vendor.duration[TPM_SHORT] = HZ; chip->vendor.duration[TPM_MEDIUM] = usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium)); chip->vendor.duration[TPM_LONG] = usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long)); } EXPORT_SYMBOL_GPL(tpm_get_timeouts); void tpm_continue_selftest(struct tpm_chip *chip) { u8 data[] = { 0, 193, /* TPM_TAG_RQU_COMMAND */ 0, 0, 0, 10, /* length */ 0, 0, 0, 83, /* TPM_ORD_GetCapability */ }; tpm_transmit(chip, data, sizeof(data)); } EXPORT_SYMBOL_GPL(tpm_continue_selftest); ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr, char *buf) { cap_t cap; ssize_t rc; rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap, "attempting to determine the permanent enabled state"); if (rc) return 0; rc = sprintf(buf, "%d\n", !cap.perm_flags.disable); return rc; } EXPORT_SYMBOL_GPL(tpm_show_enabled); ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr, char *buf) { cap_t cap; ssize_t rc; rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap, "attempting to determine the permanent active state"); if (rc) return 0; rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated); return rc; } EXPORT_SYMBOL_GPL(tpm_show_active); ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr, char *buf) { cap_t 
cap; ssize_t rc; rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap, "attempting to determine the owner state"); if (rc) return 0; rc = sprintf(buf, "%d\n", cap.owned); return rc; } EXPORT_SYMBOL_GPL(tpm_show_owned); ssize_t tpm_show_temp_deactivated(struct device * dev, struct device_attribute * attr, char *buf) { cap_t cap; ssize_t rc; rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap, "attempting to determine the temporary state"); if (rc) return 0; rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated); return rc; } EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated); /* * tpm_chip_find_get - return tpm_chip for given chip number */ static struct tpm_chip *tpm_chip_find_get(int chip_num) { struct tpm_chip *pos, *chip = NULL; rcu_read_lock(); list_for_each_entry_rcu(pos, &tpm_chip_list, list) { if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num) continue; if (try_module_get(pos->dev->driver->owner)) { chip = pos; break; } } rcu_read_unlock(); return chip; } #define TPM_ORDINAL_PCRREAD cpu_to_be32(21) #define READ_PCR_RESULT_SIZE 30 static struct tpm_input_header pcrread_header = { .tag = TPM_TAG_RQU_COMMAND, .length = cpu_to_be32(14), .ordinal = TPM_ORDINAL_PCRREAD }; int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) { int rc; struct tpm_cmd_t cmd; cmd.header.in = pcrread_header; cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx); rc = transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE, "attempting to read a pcr value"); if (rc == 0) memcpy(res_buf, cmd.params.pcrread_out.pcr_result, TPM_DIGEST_SIZE); return rc; } /** * tpm_pcr_read - read a pcr value * @chip_num: tpm idx # or ANY * @pcr_idx: pcr idx to retrieve * @res_buf: TPM_PCR value * size of res_buf is 20 bytes (or NULL if you don't care) * * The TPM driver should be built-in, but for whatever reason it * isn't, protect against the chip disappearing, by incrementing * the module usage count. 
*/ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { struct tpm_chip *chip; int rc; chip = tpm_chip_find_get(chip_num); if (chip == NULL) return -ENODEV; rc = __tpm_pcr_read(chip, pcr_idx, res_buf); module_put(chip->dev->driver->owner); return rc; } EXPORT_SYMBOL_GPL(tpm_pcr_read); /** * tpm_pcr_extend - extend pcr value with hash * @chip_num: tpm idx # or AN& * @pcr_idx: pcr idx to extend * @hash: hash value used to extend pcr value * * The TPM driver should be built-in, but for whatever reason it * isn't, protect against the chip disappearing, by incrementing * the module usage count. */ #define TPM_ORD_PCR_EXTEND cpu_to_be32(20) #define EXTEND_PCR_RESULT_SIZE 34 static struct tpm_input_header pcrextend_header = { .tag = TPM_TAG_RQU_COMMAND, .length = cpu_to_be32(34), .ordinal = TPM_ORD_PCR_EXTEND }; int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) { struct tpm_cmd_t cmd; int rc; struct tpm_chip *chip; chip = tpm_chip_find_get(chip_num); if (chip == NULL) return -ENODEV; cmd.header.in = pcrextend_header; cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx); memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE); rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, "attempting extend a PCR value"); module_put(chip->dev->driver->owner); return rc; } EXPORT_SYMBOL_GPL(tpm_pcr_extend); ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; u8 digest[TPM_DIGEST_SIZE]; ssize_t rc; int i, j, num_pcrs; char *str = buf; struct tpm_chip *chip = dev_get_drvdata(dev); rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap, "attempting to determine the number of PCRS"); if (rc) return 0; num_pcrs = be32_to_cpu(cap.num_pcrs); for (i = 0; i < num_pcrs; i++) { rc = __tpm_pcr_read(chip, i, digest); if (rc) break; str += sprintf(str, "PCR-%02d: ", i); for (j = 0; j < TPM_DIGEST_SIZE; j++) str += sprintf(str, "%02X ", digest[j]); str += sprintf(str, "\n"); } return str - buf; } EXPORT_SYMBOL_GPL(tpm_show_pcrs); #define 
READ_PUBEK_RESULT_SIZE 314 #define TPM_ORD_READPUBEK cpu_to_be32(124) struct tpm_input_header tpm_readpubek_header = { .tag = TPM_TAG_RQU_COMMAND, .length = cpu_to_be32(30), .ordinal = TPM_ORD_READPUBEK }; ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr, char *buf) { u8 *data; struct tpm_cmd_t tpm_cmd; ssize_t err; int i, rc; char *str = buf; struct tpm_chip *chip = dev_get_drvdata(dev); tpm_cmd.header.in = tpm_readpubek_header; err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, "attempting to read the PUBEK"); if (err) goto out; /* ignore header 10 bytes algorithm 32 bits (1 == RSA ) encscheme 16 bits sigscheme 16 bits parameters (RSA 12->bytes: keybit, #primes, expbit) keylenbytes 32 bits 256 byte modulus ignore checksum 20 bytes */ data = tpm_cmd.params.readpubek_out_buffer; str += sprintf(str, "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n" "Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X" " %02X %02X %02X %02X %02X %02X %02X %02X\n" "Modulus length: %d\nModulus: \n", data[10], data[11], data[12], data[13], data[14], data[15], data[16], data[17], data[22], data[23], data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31], data[32], data[33], be32_to_cpu(*((__be32 *) (data + 34)))); for (i = 0; i < 256; i++) { str += sprintf(str, "%02X ", data[i + 38]); if ((i + 1) % 16 == 0) str += sprintf(str, "\n"); } out: rc = str - buf; return rc; } EXPORT_SYMBOL_GPL(tpm_show_pubek); ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; ssize_t rc; char *str = buf; rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap, "attempting to determine the manufacturer"); if (rc) return 0; str += sprintf(str, "Manufacturer: 0x%x\n", be32_to_cpu(cap.manufacturer_id)); rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap, "attempting to determine the 1.1 version"); if (rc) return 0; str += sprintf(str, "TCG version: %d.%d\nFirmware version: %d.%d\n", cap.tpm_version.Major, 
cap.tpm_version.Minor, cap.tpm_version.revMajor, cap.tpm_version.revMinor); return str - buf; } EXPORT_SYMBOL_GPL(tpm_show_caps); ssize_t tpm_show_caps_1_2(struct device * dev, struct device_attribute * attr, char *buf) { cap_t cap; ssize_t rc; char *str = buf; rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap, "attempting to determine the manufacturer"); if (rc) return 0; str += sprintf(str, "Manufacturer: 0x%x\n", be32_to_cpu(cap.manufacturer_id)); rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap, "attempting to determine the 1.2 version"); if (rc) return 0; str += sprintf(str, "TCG version: %d.%d\nFirmware version: %d.%d\n", cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor, cap.tpm_version_1_2.revMajor, cap.tpm_version_1_2.revMinor); return str - buf; } EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct tpm_chip *chip = dev_get_drvdata(dev); if (chip == NULL) return 0; chip->vendor.cancel(chip); return count; } EXPORT_SYMBOL_GPL(tpm_store_cancel); /* * Device file system interface to the TPM * * It's assured that the chip will be opened just once, * by the check of is_open variable, which is protected * by driver_lock. 
*/ int tpm_open(struct inode *inode, struct file *file) { int minor = iminor(inode); struct tpm_chip *chip = NULL, *pos; rcu_read_lock(); list_for_each_entry_rcu(pos, &tpm_chip_list, list) { if (pos->vendor.miscdev.minor == minor) { chip = pos; get_device(chip->dev); break; } } rcu_read_unlock(); if (!chip) return -ENODEV; if (test_and_set_bit(0, &chip->is_open)) { dev_dbg(chip->dev, "Another process owns this TPM\n"); put_device(chip->dev); return -EBUSY; } chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL); if (chip->data_buffer == NULL) { clear_bit(0, &chip->is_open); put_device(chip->dev); return -ENOMEM; } atomic_set(&chip->data_pending, 0); file->private_data = chip; return 0; } EXPORT_SYMBOL_GPL(tpm_open); /* * Called on file close */ int tpm_release(struct inode *inode, struct file *file) { struct tpm_chip *chip = file->private_data; del_singleshot_timer_sync(&chip->user_read_timer); flush_scheduled_work(); file->private_data = NULL; atomic_set(&chip->data_pending, 0); kfree(chip->data_buffer); clear_bit(0, &chip->is_open); put_device(chip->dev); return 0; } EXPORT_SYMBOL_GPL(tpm_release); ssize_t tpm_write(struct file *file, const char __user *buf, size_t size, loff_t *off) { struct tpm_chip *chip = file->private_data; size_t in_size = size, out_size; /* cannot perform a write until the read has cleared either via tpm_read or a user_read_timer timeout */ while (atomic_read(&chip->data_pending) != 0) msleep(TPM_TIMEOUT); mutex_lock(&chip->buffer_mutex); if (in_size > TPM_BUFSIZE) in_size = TPM_BUFSIZE; if (copy_from_user (chip->data_buffer, (void __user *) buf, in_size)) { mutex_unlock(&chip->buffer_mutex); return -EFAULT; } /* atomic tpm command send and result receive */ out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE); atomic_set(&chip->data_pending, out_size); mutex_unlock(&chip->buffer_mutex); /* Set a timeout by which the reader must come claim the result */ mod_timer(&chip->user_read_timer, jiffies + (60 * HZ)); return 
in_size; } EXPORT_SYMBOL_GPL(tpm_write); ssize_t tpm_read(struct file *file, char __user *buf, size_t size, loff_t *off) { struct tpm_chip *chip = file->private_data; ssize_t ret_size; del_singleshot_timer_sync(&chip->user_read_timer); flush_scheduled_work(); ret_size = atomic_read(&chip->data_pending); atomic_set(&chip->data_pending, 0); if (ret_size > 0) { /* relay data */ if (size < ret_size) ret_size = size; mutex_lock(&chip->buffer_mutex); if (copy_to_user(buf, chip->data_buffer, ret_size)) ret_size = -EFAULT; mutex_unlock(&chip->buffer_mutex); } return ret_size; } EXPORT_SYMBOL_GPL(tpm_read); void tpm_remove_hardware(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); if (chip == NULL) { dev_err(dev, "No device data found\n"); return; } spin_lock(&driver_lock); list_del_rcu(&chip->list); spin_unlock(&driver_lock); synchronize_rcu(); misc_deregister(&chip->vendor.miscdev); sysfs_remove_group(&dev->kobj, chip->vendor.attr_group); tpm_bios_log_teardown(chip->bios_dir); /* write it this way to be explicit (chip->dev == dev) */ put_device(chip->dev); } EXPORT_SYMBOL_GPL(tpm_remove_hardware); #define TPM_ORD_SAVESTATE cpu_to_be32(152) #define SAVESTATE_RESULT_SIZE 10 static struct tpm_input_header savestate_header = { .tag = TPM_TAG_RQU_COMMAND, .length = cpu_to_be32(10), .ordinal = TPM_ORD_SAVESTATE }; /* Bug workaround - some TPM's don't flush the most * recently changed pcr on suspend, so force the flush * with an extend to the selected _unused_ non-volatile pcr. */ static int tpm_suspend_pcr; static int __init tpm_suspend_setup(char *str) { get_option(&str, &tpm_suspend_pcr); return 1; } __setup("tpm_suspend_pcr=", tpm_suspend_setup); /* * We are about to suspend. Save the TPM state * so that it can be restored. 
*/ int tpm_pm_suspend(struct device *dev, pm_message_t pm_state) { struct tpm_chip *chip = dev_get_drvdata(dev); struct tpm_cmd_t cmd; int rc; u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 }; if (chip == NULL) return -ENODEV; /* for buggy tpm, flush pcrs with extend to selected dummy */ if (tpm_suspend_pcr) { cmd.header.in = pcrextend_header; cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr); memcpy(cmd.params.pcrextend_in.hash, dummy_hash, TPM_DIGEST_SIZE); rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, "extending dummy pcr before suspend"); } /* now do the actual savestate */ cmd.header.in = savestate_header; rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, "sending savestate before suspend"); return rc; } EXPORT_SYMBOL_GPL(tpm_pm_suspend); /* * Resume from a power safe. The BIOS already restored * the TPM state. */ int tpm_pm_resume(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); if (chip == NULL) return -ENODEV; return 0; } EXPORT_SYMBOL_GPL(tpm_pm_resume); /* In case vendor provided release function, call it too.*/ void tpm_dev_vendor_release(struct tpm_chip *chip) { if (chip->vendor.release) chip->vendor.release(chip->dev); clear_bit(chip->dev_num, dev_mask); kfree(chip->vendor.miscdev.name); } EXPORT_SYMBOL_GPL(tpm_dev_vendor_release); /* * Once all references to platform device are down to 0, * release all allocated structures. */ void tpm_dev_release(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); tpm_dev_vendor_release(chip); chip->release(dev); kfree(chip); } EXPORT_SYMBOL_GPL(tpm_dev_release); /* * Called from tpm_<specific>.c probe function only for devices * the driver has determined it should claim. 
Prior to calling * this function the specific probe function has called pci_enable_device * upon errant exit from this function specific probe function should call * pci_disable_device */ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vendor_specific *entry) { #define DEVNAME_SIZE 7 char *devname; struct tpm_chip *chip; /* Driver specific per-device data */ chip = kzalloc(sizeof(*chip), GFP_KERNEL); devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL); if (chip == NULL || devname == NULL) goto out_free; mutex_init(&chip->buffer_mutex); mutex_init(&chip->tpm_mutex); INIT_LIST_HEAD(&chip->list); INIT_WORK(&chip->work, timeout_work); setup_timer(&chip->user_read_timer, user_reader_timeout, (unsigned long)chip); memcpy(&chip->vendor, entry, sizeof(struct tpm_vendor_specific)); chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES); if (chip->dev_num >= TPM_NUM_DEVICES) { dev_err(dev, "No available tpm device numbers\n"); goto out_free; } else if (chip->dev_num == 0) chip->vendor.miscdev.minor = TPM_MINOR; else chip->vendor.miscdev.minor = MISC_DYNAMIC_MINOR; set_bit(chip->dev_num, dev_mask); scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num); chip->vendor.miscdev.name = devname; chip->vendor.miscdev.parent = dev; chip->dev = get_device(dev); chip->release = dev->release; dev->release = tpm_dev_release; dev_set_drvdata(dev, chip); if (misc_register(&chip->vendor.miscdev)) { dev_err(chip->dev, "unable to misc_register %s, minor %d\n", chip->vendor.miscdev.name, chip->vendor.miscdev.minor); put_device(chip->dev); return NULL; } if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) { misc_deregister(&chip->vendor.miscdev); put_device(chip->dev); return NULL; } chip->bios_dir = tpm_bios_log_setup(devname); /* Make chip available */ spin_lock(&driver_lock); list_add_rcu(&chip->list, &tpm_chip_list); spin_unlock(&driver_lock); return chip; out_free: kfree(chip); kfree(devname); return NULL; } 
EXPORT_SYMBOL_GPL(tpm_register_hardware); MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
gpl-2.0
RWTH-OS/linux
fs/jffs2/security.c
677
1992
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2006 NEC Corporation * * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> * * For licensing information, see the file 'LICENCE' in this directory. * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/crc32.h> #include <linux/jffs2.h> #include <linux/xattr.h> #include <linux/mtd/mtd.h> #include <linux/security.h> #include "nodelist.h" /* ---- Initial Security Label(s) Attachment callback --- */ static int jffs2_initxattrs(struct inode *inode, const struct xattr *xattr_array, void *fs_info) { const struct xattr *xattr; int err = 0; for (xattr = xattr_array; xattr->name != NULL; xattr++) { err = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, xattr->name, xattr->value, xattr->value_len, 0); if (err < 0) break; } return err; } /* ---- Initial Security Label(s) Attachment ----------- */ int jffs2_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr) { return security_inode_init_security(inode, dir, qstr, &jffs2_initxattrs, NULL); } /* ---- XATTR Handler for "security.*" ----------------- */ static int jffs2_security_getxattr(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, void *buffer, size_t size) { return do_jffs2_getxattr(inode, JFFS2_XPREFIX_SECURITY, name, buffer, size); } static int jffs2_security_setxattr(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, const void *buffer, size_t size, int flags) { return do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, name, buffer, size, flags); } const struct xattr_handler jffs2_security_xattr_handler = { .prefix = XATTR_SECURITY_PREFIX, .set = jffs2_security_setxattr, .get = jffs2_security_getxattr };
gpl-2.0
ptmr3/GalaxyNote_Kernel
sound/pci/rme32.c
677
59215
/* * ALSA driver for RME Digi32, Digi32/8 and Digi32 PRO audio interfaces * * Copyright (c) 2002-2004 Martin Langer <martin-langer@gmx.de>, * Pilo Chambert <pilo.c@wanadoo.fr> * * Thanks to : Anders Torger <torger@ludd.luth.se>, * Henk Hesselink <henk@anda.nl> * for writing the digi96-driver * and RME for all informations. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * * **************************************************************************** * * Note #1 "Sek'd models" ................................... martin 2002-12-07 * * Identical soundcards by Sek'd were labeled: * RME Digi 32 = Sek'd Prodif 32 * RME Digi 32 Pro = Sek'd Prodif 96 * RME Digi 32/8 = Sek'd Prodif Gold * * **************************************************************************** * * Note #2 "full duplex mode" ............................... martin 2002-12-07 * * Full duplex doesn't work. All cards (32, 32/8, 32Pro) are working identical * in this mode. Rec data and play data are using the same buffer therefore. At * first you have got the playing bits in the buffer and then (after playing * them) they were overwitten by the captured sound of the CS8412/14. Both * modes (play/record) are running harmonically hand in hand in the same buffer * and you have only one start bit plus one interrupt bit to control this * paired action. 
* This is opposite to the latter rme96 where playing and capturing is totally * separated and so their full duplex mode is supported by alsa (using two * start bits and two interrupts for two different buffers). * But due to the wrong sequence of playing and capturing ALSA shows no solved * full duplex support for the rme32 at the moment. That's bad, but I'm not * able to solve it. Are you motivated enough to solve this problem now? Your * patch would be welcome! * * **************************************************************************** * * "The story after the long seeking" -- tiwai * * Ok, the situation regarding the full duplex is now improved a bit. * In the fullduplex mode (given by the module parameter), the hardware buffer * is split to halves for read and write directions at the DMA pointer. * That is, the half above the current DMA pointer is used for write, and * the half below is used for read. To mangle this strange behavior, an * software intermediate buffer is introduced. This is, of course, not good * from the viewpoint of the data transfer efficiency. However, this allows * you to use arbitrary buffer sizes, instead of the fixed I/O buffer size. * * **************************************************************************** */ #include <linux/delay.h> #include <linux/gfp.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/pcm-indirect.h> #include <sound/asoundef.h> #include <sound/initval.h> #include <asm/io.h> static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static int fullduplex[SNDRV_CARDS]; // = {[0 ... 
(SNDRV_CARDS - 1)] = 1}; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for RME Digi32 soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for RME Digi32 soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable RME Digi32 soundcard."); module_param_array(fullduplex, bool, NULL, 0444); MODULE_PARM_DESC(fullduplex, "Support full-duplex mode."); MODULE_AUTHOR("Martin Langer <martin-langer@gmx.de>, Pilo Chambert <pilo.c@wanadoo.fr>"); MODULE_DESCRIPTION("RME Digi32, Digi32/8, Digi32 PRO"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{RME,Digi32}," "{RME,Digi32/8}," "{RME,Digi32 PRO}}"); /* Defines for RME Digi32 series */ #define RME32_SPDIF_NCHANNELS 2 /* Playback and capture buffer size */ #define RME32_BUFFER_SIZE 0x20000 /* IO area size */ #define RME32_IO_SIZE 0x30000 /* IO area offsets */ #define RME32_IO_DATA_BUFFER 0x0 #define RME32_IO_CONTROL_REGISTER 0x20000 #define RME32_IO_GET_POS 0x20000 #define RME32_IO_CONFIRM_ACTION_IRQ 0x20004 #define RME32_IO_RESET_POS 0x20100 /* Write control register bits */ #define RME32_WCR_START (1 << 0) /* startbit */ #define RME32_WCR_MONO (1 << 1) /* 0=stereo, 1=mono Setting the whole card to mono doesn't seem to be very useful. A software-solution can handle full-duplex with one direction in stereo and the other way in mono. So, the hardware should work all the time in stereo! 
*/ #define RME32_WCR_MODE24 (1 << 2) /* 0=16bit, 1=32bit */ #define RME32_WCR_SEL (1 << 3) /* 0=input on output, 1=normal playback/capture */ #define RME32_WCR_FREQ_0 (1 << 4) /* frequency (play) */ #define RME32_WCR_FREQ_1 (1 << 5) #define RME32_WCR_INP_0 (1 << 6) /* input switch */ #define RME32_WCR_INP_1 (1 << 7) #define RME32_WCR_RESET (1 << 8) /* Reset address */ #define RME32_WCR_MUTE (1 << 9) /* digital mute for output */ #define RME32_WCR_PRO (1 << 10) /* 1=professional, 0=consumer */ #define RME32_WCR_DS_BM (1 << 11) /* 1=DoubleSpeed (only PRO-Version); 1=BlockMode (only Adat-Version) */ #define RME32_WCR_ADAT (1 << 12) /* Adat Mode (only Adat-Version) */ #define RME32_WCR_AUTOSYNC (1 << 13) /* AutoSync */ #define RME32_WCR_PD (1 << 14) /* DAC Reset (only PRO-Version) */ #define RME32_WCR_EMP (1 << 15) /* 1=Emphasis on (only PRO-Version) */ #define RME32_WCR_BITPOS_FREQ_0 4 #define RME32_WCR_BITPOS_FREQ_1 5 #define RME32_WCR_BITPOS_INP_0 6 #define RME32_WCR_BITPOS_INP_1 7 /* Read control register bits */ #define RME32_RCR_AUDIO_ADDR_MASK 0x1ffff #define RME32_RCR_LOCK (1 << 23) /* 1=locked, 0=not locked */ #define RME32_RCR_ERF (1 << 26) /* 1=Error, 0=no Error */ #define RME32_RCR_FREQ_0 (1 << 27) /* CS841x frequency (record) */ #define RME32_RCR_FREQ_1 (1 << 28) #define RME32_RCR_FREQ_2 (1 << 29) #define RME32_RCR_KMODE (1 << 30) /* card mode: 1=PLL, 0=quartz */ #define RME32_RCR_IRQ (1 << 31) /* interrupt */ #define RME32_RCR_BITPOS_F0 27 #define RME32_RCR_BITPOS_F1 28 #define RME32_RCR_BITPOS_F2 29 /* Input types */ #define RME32_INPUT_OPTICAL 0 #define RME32_INPUT_COAXIAL 1 #define RME32_INPUT_INTERNAL 2 #define RME32_INPUT_XLR 3 /* Clock modes */ #define RME32_CLOCKMODE_SLAVE 0 #define RME32_CLOCKMODE_MASTER_32 1 #define RME32_CLOCKMODE_MASTER_44 2 #define RME32_CLOCKMODE_MASTER_48 3 /* Block sizes in bytes */ #define RME32_BLOCK_SIZE 8192 /* Software intermediate buffer (max) size */ #define RME32_MID_BUFFER_SIZE (1024*1024) /* Hardware revisions */ 
#define RME32_32_REVISION 192 #define RME32_328_REVISION_OLD 100 #define RME32_328_REVISION_NEW 101 #define RME32_PRO_REVISION_WITH_8412 192 #define RME32_PRO_REVISION_WITH_8414 150 struct rme32 { spinlock_t lock; int irq; unsigned long port; void __iomem *iobase; u32 wcreg; /* cached write control register value */ u32 wcreg_spdif; /* S/PDIF setup */ u32 wcreg_spdif_stream; /* S/PDIF setup (temporary) */ u32 rcreg; /* cached read control register value */ u8 rev; /* card revision number */ struct snd_pcm_substream *playback_substream; struct snd_pcm_substream *capture_substream; int playback_frlog; /* log2 of framesize */ int capture_frlog; size_t playback_periodsize; /* in bytes, zero if not used */ size_t capture_periodsize; /* in bytes, zero if not used */ unsigned int fullduplex_mode; int running; struct snd_pcm_indirect playback_pcm; struct snd_pcm_indirect capture_pcm; struct snd_card *card; struct snd_pcm *spdif_pcm; struct snd_pcm *adat_pcm; struct pci_dev *pci; struct snd_kcontrol *spdif_ctl; }; static DEFINE_PCI_DEVICE_TABLE(snd_rme32_ids) = { {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32), 0,}, {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_8), 0,}, {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_PRO), 0,}, {0,} }; MODULE_DEVICE_TABLE(pci, snd_rme32_ids); #define RME32_ISWORKING(rme32) ((rme32)->wcreg & RME32_WCR_START) #define RME32_PRO_WITH_8414(rme32) ((rme32)->pci->device == PCI_DEVICE_ID_RME_DIGI32_PRO && (rme32)->rev == RME32_PRO_REVISION_WITH_8414) static int snd_rme32_playback_prepare(struct snd_pcm_substream *substream); static int snd_rme32_capture_prepare(struct snd_pcm_substream *substream); static int snd_rme32_pcm_trigger(struct snd_pcm_substream *substream, int cmd); static void snd_rme32_proc_init(struct rme32 * rme32); static int snd_rme32_create_switches(struct snd_card *card, struct rme32 * rme32); static inline unsigned int snd_rme32_pcm_byteptr(struct rme32 * rme32) { return (readl(rme32->iobase + RME32_IO_GET_POS) & 
RME32_RCR_AUDIO_ADDR_MASK); } /* silence callback for halfduplex mode */ static int snd_rme32_playback_silence(struct snd_pcm_substream *substream, int channel, /* not used (interleaved data) */ snd_pcm_uframes_t pos, snd_pcm_uframes_t count) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); count <<= rme32->playback_frlog; pos <<= rme32->playback_frlog; memset_io(rme32->iobase + RME32_IO_DATA_BUFFER + pos, 0, count); return 0; } /* copy callback for halfduplex mode */ static int snd_rme32_playback_copy(struct snd_pcm_substream *substream, int channel, /* not used (interleaved data) */ snd_pcm_uframes_t pos, void __user *src, snd_pcm_uframes_t count) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); count <<= rme32->playback_frlog; pos <<= rme32->playback_frlog; if (copy_from_user_toio(rme32->iobase + RME32_IO_DATA_BUFFER + pos, src, count)) return -EFAULT; return 0; } /* copy callback for halfduplex mode */ static int snd_rme32_capture_copy(struct snd_pcm_substream *substream, int channel, /* not used (interleaved data) */ snd_pcm_uframes_t pos, void __user *dst, snd_pcm_uframes_t count) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); count <<= rme32->capture_frlog; pos <<= rme32->capture_frlog; if (copy_to_user_fromio(dst, rme32->iobase + RME32_IO_DATA_BUFFER + pos, count)) return -EFAULT; return 0; } /* * SPDIF I/O capabilities (half-duplex mode) */ static struct snd_pcm_hardware snd_rme32_spdif_info = { .info = (SNDRV_PCM_INFO_MMAP_IOMEM | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE), .rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000), .rate_min = 32000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = RME32_BUFFER_SIZE, .period_bytes_min = RME32_BLOCK_SIZE, .period_bytes_max = RME32_BLOCK_SIZE, .periods_min = RME32_BUFFER_SIZE / RME32_BLOCK_SIZE, 
.periods_max = RME32_BUFFER_SIZE / RME32_BLOCK_SIZE, .fifo_size = 0, }; /* * ADAT I/O capabilities (half-duplex mode) */ static struct snd_pcm_hardware snd_rme32_adat_info = { .info = (SNDRV_PCM_INFO_MMAP_IOMEM | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START), .formats= SNDRV_PCM_FMTBIT_S16_LE, .rates = (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000), .rate_min = 44100, .rate_max = 48000, .channels_min = 8, .channels_max = 8, .buffer_bytes_max = RME32_BUFFER_SIZE, .period_bytes_min = RME32_BLOCK_SIZE, .period_bytes_max = RME32_BLOCK_SIZE, .periods_min = RME32_BUFFER_SIZE / RME32_BLOCK_SIZE, .periods_max = RME32_BUFFER_SIZE / RME32_BLOCK_SIZE, .fifo_size = 0, }; /* * SPDIF I/O capabilities (full-duplex mode) */ static struct snd_pcm_hardware snd_rme32_spdif_fd_info = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE), .rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000), .rate_min = 32000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = RME32_MID_BUFFER_SIZE, .period_bytes_min = RME32_BLOCK_SIZE, .period_bytes_max = RME32_BLOCK_SIZE, .periods_min = 2, .periods_max = RME32_MID_BUFFER_SIZE / RME32_BLOCK_SIZE, .fifo_size = 0, }; /* * ADAT I/O capabilities (full-duplex mode) */ static struct snd_pcm_hardware snd_rme32_adat_fd_info = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START), .formats= SNDRV_PCM_FMTBIT_S16_LE, .rates = (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000), .rate_min = 44100, .rate_max = 48000, .channels_min = 8, .channels_max = 8, .buffer_bytes_max = RME32_MID_BUFFER_SIZE, .period_bytes_min = RME32_BLOCK_SIZE, .period_bytes_max = RME32_BLOCK_SIZE, .periods_min = 2, .periods_max = RME32_MID_BUFFER_SIZE / 
RME32_BLOCK_SIZE, .fifo_size = 0, }; static void snd_rme32_reset_dac(struct rme32 *rme32) { writel(rme32->wcreg | RME32_WCR_PD, rme32->iobase + RME32_IO_CONTROL_REGISTER); writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); } static int snd_rme32_playback_getrate(struct rme32 * rme32) { int rate; rate = ((rme32->wcreg >> RME32_WCR_BITPOS_FREQ_0) & 1) + (((rme32->wcreg >> RME32_WCR_BITPOS_FREQ_1) & 1) << 1); switch (rate) { case 1: rate = 32000; break; case 2: rate = 44100; break; case 3: rate = 48000; break; default: return -1; } return (rme32->wcreg & RME32_WCR_DS_BM) ? rate << 1 : rate; } static int snd_rme32_capture_getrate(struct rme32 * rme32, int *is_adat) { int n; *is_adat = 0; if (rme32->rcreg & RME32_RCR_LOCK) { /* ADAT rate */ *is_adat = 1; } if (rme32->rcreg & RME32_RCR_ERF) { return -1; } /* S/PDIF rate */ n = ((rme32->rcreg >> RME32_RCR_BITPOS_F0) & 1) + (((rme32->rcreg >> RME32_RCR_BITPOS_F1) & 1) << 1) + (((rme32->rcreg >> RME32_RCR_BITPOS_F2) & 1) << 2); if (RME32_PRO_WITH_8414(rme32)) switch (n) { /* supporting the CS8414 */ case 0: case 1: case 2: return -1; case 3: return 96000; case 4: return 88200; case 5: return 48000; case 6: return 44100; case 7: return 32000; default: return -1; break; } else switch (n) { /* supporting the CS8412 */ case 0: return -1; case 1: return 48000; case 2: return 44100; case 3: return 32000; case 4: return 48000; case 5: return 44100; case 6: return 44056; case 7: return 32000; default: break; } return -1; } static int snd_rme32_playback_setrate(struct rme32 * rme32, int rate) { int ds; ds = rme32->wcreg & RME32_WCR_DS_BM; switch (rate) { case 32000: rme32->wcreg &= ~RME32_WCR_DS_BM; rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_0) & ~RME32_WCR_FREQ_1; break; case 44100: rme32->wcreg &= ~RME32_WCR_DS_BM; rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_1) & ~RME32_WCR_FREQ_0; break; case 48000: rme32->wcreg &= ~RME32_WCR_DS_BM; rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_0) | RME32_WCR_FREQ_1; break; 
case 64000: if (rme32->pci->device != PCI_DEVICE_ID_RME_DIGI32_PRO) return -EINVAL; rme32->wcreg |= RME32_WCR_DS_BM; rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_0) & ~RME32_WCR_FREQ_1; break; case 88200: if (rme32->pci->device != PCI_DEVICE_ID_RME_DIGI32_PRO) return -EINVAL; rme32->wcreg |= RME32_WCR_DS_BM; rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_1) & ~RME32_WCR_FREQ_0; break; case 96000: if (rme32->pci->device != PCI_DEVICE_ID_RME_DIGI32_PRO) return -EINVAL; rme32->wcreg |= RME32_WCR_DS_BM; rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_0) | RME32_WCR_FREQ_1; break; default: return -EINVAL; } if ((!ds && rme32->wcreg & RME32_WCR_DS_BM) || (ds && !(rme32->wcreg & RME32_WCR_DS_BM))) { /* change to/from double-speed: reset the DAC (if available) */ snd_rme32_reset_dac(rme32); } else { writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); } return 0; } static int snd_rme32_setclockmode(struct rme32 * rme32, int mode) { switch (mode) { case RME32_CLOCKMODE_SLAVE: /* AutoSync */ rme32->wcreg = (rme32->wcreg & ~RME32_WCR_FREQ_0) & ~RME32_WCR_FREQ_1; break; case RME32_CLOCKMODE_MASTER_32: /* Internal 32.0kHz */ rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_0) & ~RME32_WCR_FREQ_1; break; case RME32_CLOCKMODE_MASTER_44: /* Internal 44.1kHz */ rme32->wcreg = (rme32->wcreg & ~RME32_WCR_FREQ_0) | RME32_WCR_FREQ_1; break; case RME32_CLOCKMODE_MASTER_48: /* Internal 48.0kHz */ rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_0) | RME32_WCR_FREQ_1; break; default: return -EINVAL; } writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); return 0; } static int snd_rme32_getclockmode(struct rme32 * rme32) { return ((rme32->wcreg >> RME32_WCR_BITPOS_FREQ_0) & 1) + (((rme32->wcreg >> RME32_WCR_BITPOS_FREQ_1) & 1) << 1); } static int snd_rme32_setinputtype(struct rme32 * rme32, int type) { switch (type) { case RME32_INPUT_OPTICAL: rme32->wcreg = (rme32->wcreg & ~RME32_WCR_INP_0) & ~RME32_WCR_INP_1; break; case RME32_INPUT_COAXIAL: rme32->wcreg = (rme32->wcreg | 
RME32_WCR_INP_0) & ~RME32_WCR_INP_1; break; case RME32_INPUT_INTERNAL: rme32->wcreg = (rme32->wcreg & ~RME32_WCR_INP_0) | RME32_WCR_INP_1; break; case RME32_INPUT_XLR: rme32->wcreg = (rme32->wcreg | RME32_WCR_INP_0) | RME32_WCR_INP_1; break; default: return -EINVAL; } writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); return 0; } static int snd_rme32_getinputtype(struct rme32 * rme32) { return ((rme32->wcreg >> RME32_WCR_BITPOS_INP_0) & 1) + (((rme32->wcreg >> RME32_WCR_BITPOS_INP_1) & 1) << 1); } static void snd_rme32_setframelog(struct rme32 * rme32, int n_channels, int is_playback) { int frlog; if (n_channels == 2) { frlog = 1; } else { /* assume 8 channels */ frlog = 3; } if (is_playback) { frlog += (rme32->wcreg & RME32_WCR_MODE24) ? 2 : 1; rme32->playback_frlog = frlog; } else { frlog += (rme32->wcreg & RME32_WCR_MODE24) ? 2 : 1; rme32->capture_frlog = frlog; } } static int snd_rme32_setformat(struct rme32 * rme32, int format) { switch (format) { case SNDRV_PCM_FORMAT_S16_LE: rme32->wcreg &= ~RME32_WCR_MODE24; break; case SNDRV_PCM_FORMAT_S32_LE: rme32->wcreg |= RME32_WCR_MODE24; break; default: return -EINVAL; } writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); return 0; } static int snd_rme32_playback_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { int err, rate, dummy; struct rme32 *rme32 = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; if (rme32->fullduplex_mode) { err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); if (err < 0) return err; } else { runtime->dma_area = (void __force *)(rme32->iobase + RME32_IO_DATA_BUFFER); runtime->dma_addr = rme32->port + RME32_IO_DATA_BUFFER; runtime->dma_bytes = RME32_BUFFER_SIZE; } spin_lock_irq(&rme32->lock); if ((rme32->rcreg & RME32_RCR_KMODE) && (rate = snd_rme32_capture_getrate(rme32, &dummy)) > 0) { /* AutoSync */ if ((int)params_rate(params) != rate) { spin_unlock_irq(&rme32->lock); 
return -EIO; } } else if ((err = snd_rme32_playback_setrate(rme32, params_rate(params))) < 0) { spin_unlock_irq(&rme32->lock); return err; } if ((err = snd_rme32_setformat(rme32, params_format(params))) < 0) { spin_unlock_irq(&rme32->lock); return err; } snd_rme32_setframelog(rme32, params_channels(params), 1); if (rme32->capture_periodsize != 0) { if (params_period_size(params) << rme32->playback_frlog != rme32->capture_periodsize) { spin_unlock_irq(&rme32->lock); return -EBUSY; } } rme32->playback_periodsize = params_period_size(params) << rme32->playback_frlog; /* S/PDIF setup */ if ((rme32->wcreg & RME32_WCR_ADAT) == 0) { rme32->wcreg &= ~(RME32_WCR_PRO | RME32_WCR_EMP); rme32->wcreg |= rme32->wcreg_spdif_stream; writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); } spin_unlock_irq(&rme32->lock); return 0; } static int snd_rme32_capture_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { int err, isadat, rate; struct rme32 *rme32 = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; if (rme32->fullduplex_mode) { err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); if (err < 0) return err; } else { runtime->dma_area = (void __force *)rme32->iobase + RME32_IO_DATA_BUFFER; runtime->dma_addr = rme32->port + RME32_IO_DATA_BUFFER; runtime->dma_bytes = RME32_BUFFER_SIZE; } spin_lock_irq(&rme32->lock); /* enable AutoSync for record-preparing */ rme32->wcreg |= RME32_WCR_AUTOSYNC; writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); if ((err = snd_rme32_setformat(rme32, params_format(params))) < 0) { spin_unlock_irq(&rme32->lock); return err; } if ((err = snd_rme32_playback_setrate(rme32, params_rate(params))) < 0) { spin_unlock_irq(&rme32->lock); return err; } if ((rate = snd_rme32_capture_getrate(rme32, &isadat)) > 0) { if ((int)params_rate(params) != rate) { spin_unlock_irq(&rme32->lock); return -EIO; } if ((isadat && runtime->hw.channels_min == 2) || 
(!isadat && runtime->hw.channels_min == 8)) { spin_unlock_irq(&rme32->lock); return -EIO; } } /* AutoSync off for recording */ rme32->wcreg &= ~RME32_WCR_AUTOSYNC; writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); snd_rme32_setframelog(rme32, params_channels(params), 0); if (rme32->playback_periodsize != 0) { if (params_period_size(params) << rme32->capture_frlog != rme32->playback_periodsize) { spin_unlock_irq(&rme32->lock); return -EBUSY; } } rme32->capture_periodsize = params_period_size(params) << rme32->capture_frlog; spin_unlock_irq(&rme32->lock); return 0; } static int snd_rme32_pcm_hw_free(struct snd_pcm_substream *substream) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); if (! rme32->fullduplex_mode) return 0; return snd_pcm_lib_free_pages(substream); } static void snd_rme32_pcm_start(struct rme32 * rme32, int from_pause) { if (!from_pause) { writel(0, rme32->iobase + RME32_IO_RESET_POS); } rme32->wcreg |= RME32_WCR_START; writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); } static void snd_rme32_pcm_stop(struct rme32 * rme32, int to_pause) { /* * Check if there is an unconfirmed IRQ, if so confirm it, or else * the hardware will not stop generating interrupts */ rme32->rcreg = readl(rme32->iobase + RME32_IO_CONTROL_REGISTER); if (rme32->rcreg & RME32_RCR_IRQ) { writel(0, rme32->iobase + RME32_IO_CONFIRM_ACTION_IRQ); } rme32->wcreg &= ~RME32_WCR_START; if (rme32->wcreg & RME32_WCR_SEL) rme32->wcreg |= RME32_WCR_MUTE; writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); if (! 
to_pause) writel(0, rme32->iobase + RME32_IO_RESET_POS); } static irqreturn_t snd_rme32_interrupt(int irq, void *dev_id) { struct rme32 *rme32 = (struct rme32 *) dev_id; rme32->rcreg = readl(rme32->iobase + RME32_IO_CONTROL_REGISTER); if (!(rme32->rcreg & RME32_RCR_IRQ)) { return IRQ_NONE; } else { if (rme32->capture_substream) { snd_pcm_period_elapsed(rme32->capture_substream); } if (rme32->playback_substream) { snd_pcm_period_elapsed(rme32->playback_substream); } writel(0, rme32->iobase + RME32_IO_CONFIRM_ACTION_IRQ); } return IRQ_HANDLED; } static unsigned int period_bytes[] = { RME32_BLOCK_SIZE }; static struct snd_pcm_hw_constraint_list hw_constraints_period_bytes = { .count = ARRAY_SIZE(period_bytes), .list = period_bytes, .mask = 0 }; static void snd_rme32_set_buffer_constraint(struct rme32 *rme32, struct snd_pcm_runtime *runtime) { if (! rme32->fullduplex_mode) { snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, RME32_BUFFER_SIZE, RME32_BUFFER_SIZE); snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, &hw_constraints_period_bytes); } } static int snd_rme32_playback_spdif_open(struct snd_pcm_substream *substream) { int rate, dummy; struct rme32 *rme32 = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_set_sync(substream); spin_lock_irq(&rme32->lock); if (rme32->playback_substream != NULL) { spin_unlock_irq(&rme32->lock); return -EBUSY; } rme32->wcreg &= ~RME32_WCR_ADAT; writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); rme32->playback_substream = substream; spin_unlock_irq(&rme32->lock); if (rme32->fullduplex_mode) runtime->hw = snd_rme32_spdif_fd_info; else runtime->hw = snd_rme32_spdif_info; if (rme32->pci->device == PCI_DEVICE_ID_RME_DIGI32_PRO) { runtime->hw.rates |= SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000; runtime->hw.rate_max = 96000; } if ((rme32->rcreg & RME32_RCR_KMODE) && (rate = snd_rme32_capture_getrate(rme32, 
&dummy)) > 0) { /* AutoSync */ runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate); runtime->hw.rate_min = rate; runtime->hw.rate_max = rate; } snd_rme32_set_buffer_constraint(rme32, runtime); rme32->wcreg_spdif_stream = rme32->wcreg_spdif; rme32->spdif_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; snd_ctl_notify(rme32->card, SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO, &rme32->spdif_ctl->id); return 0; } static int snd_rme32_capture_spdif_open(struct snd_pcm_substream *substream) { int isadat, rate; struct rme32 *rme32 = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_set_sync(substream); spin_lock_irq(&rme32->lock); if (rme32->capture_substream != NULL) { spin_unlock_irq(&rme32->lock); return -EBUSY; } rme32->capture_substream = substream; spin_unlock_irq(&rme32->lock); if (rme32->fullduplex_mode) runtime->hw = snd_rme32_spdif_fd_info; else runtime->hw = snd_rme32_spdif_info; if (RME32_PRO_WITH_8414(rme32)) { runtime->hw.rates |= SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000; runtime->hw.rate_max = 96000; } if ((rate = snd_rme32_capture_getrate(rme32, &isadat)) > 0) { if (isadat) { return -EIO; } runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate); runtime->hw.rate_min = rate; runtime->hw.rate_max = rate; } snd_rme32_set_buffer_constraint(rme32, runtime); return 0; } static int snd_rme32_playback_adat_open(struct snd_pcm_substream *substream) { int rate, dummy; struct rme32 *rme32 = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_set_sync(substream); spin_lock_irq(&rme32->lock); if (rme32->playback_substream != NULL) { spin_unlock_irq(&rme32->lock); return -EBUSY; } rme32->wcreg |= RME32_WCR_ADAT; writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); rme32->playback_substream = substream; spin_unlock_irq(&rme32->lock); if (rme32->fullduplex_mode) runtime->hw = snd_rme32_adat_fd_info; else runtime->hw = snd_rme32_adat_info; if ((rme32->rcreg 
& RME32_RCR_KMODE) && (rate = snd_rme32_capture_getrate(rme32, &dummy)) > 0) { /* AutoSync */ runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate); runtime->hw.rate_min = rate; runtime->hw.rate_max = rate; } snd_rme32_set_buffer_constraint(rme32, runtime); return 0; } static int snd_rme32_capture_adat_open(struct snd_pcm_substream *substream) { int isadat, rate; struct rme32 *rme32 = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; if (rme32->fullduplex_mode) runtime->hw = snd_rme32_adat_fd_info; else runtime->hw = snd_rme32_adat_info; if ((rate = snd_rme32_capture_getrate(rme32, &isadat)) > 0) { if (!isadat) { return -EIO; } runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate); runtime->hw.rate_min = rate; runtime->hw.rate_max = rate; } snd_pcm_set_sync(substream); spin_lock_irq(&rme32->lock); if (rme32->capture_substream != NULL) { spin_unlock_irq(&rme32->lock); return -EBUSY; } rme32->capture_substream = substream; spin_unlock_irq(&rme32->lock); snd_rme32_set_buffer_constraint(rme32, runtime); return 0; } static int snd_rme32_playback_close(struct snd_pcm_substream *substream) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); int spdif = 0; spin_lock_irq(&rme32->lock); rme32->playback_substream = NULL; rme32->playback_periodsize = 0; spdif = (rme32->wcreg & RME32_WCR_ADAT) == 0; spin_unlock_irq(&rme32->lock); if (spdif) { rme32->spdif_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; snd_ctl_notify(rme32->card, SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO, &rme32->spdif_ctl->id); } return 0; } static int snd_rme32_capture_close(struct snd_pcm_substream *substream) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); spin_lock_irq(&rme32->lock); rme32->capture_substream = NULL; rme32->capture_periodsize = 0; spin_unlock_irq(&rme32->lock); return 0; } static int snd_rme32_playback_prepare(struct snd_pcm_substream *substream) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); 
spin_lock_irq(&rme32->lock); if (rme32->fullduplex_mode) { memset(&rme32->playback_pcm, 0, sizeof(rme32->playback_pcm)); rme32->playback_pcm.hw_buffer_size = RME32_BUFFER_SIZE; rme32->playback_pcm.sw_buffer_size = snd_pcm_lib_buffer_bytes(substream); } else { writel(0, rme32->iobase + RME32_IO_RESET_POS); } if (rme32->wcreg & RME32_WCR_SEL) rme32->wcreg &= ~RME32_WCR_MUTE; writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); spin_unlock_irq(&rme32->lock); return 0; } static int snd_rme32_capture_prepare(struct snd_pcm_substream *substream) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); spin_lock_irq(&rme32->lock); if (rme32->fullduplex_mode) { memset(&rme32->capture_pcm, 0, sizeof(rme32->capture_pcm)); rme32->capture_pcm.hw_buffer_size = RME32_BUFFER_SIZE; rme32->capture_pcm.hw_queue_size = RME32_BUFFER_SIZE / 2; rme32->capture_pcm.sw_buffer_size = snd_pcm_lib_buffer_bytes(substream); } else { writel(0, rme32->iobase + RME32_IO_RESET_POS); } spin_unlock_irq(&rme32->lock); return 0; } static int snd_rme32_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); struct snd_pcm_substream *s; spin_lock(&rme32->lock); snd_pcm_group_for_each_entry(s, substream) { if (s != rme32->playback_substream && s != rme32->capture_substream) continue; switch (cmd) { case SNDRV_PCM_TRIGGER_START: rme32->running |= (1 << s->stream); if (rme32->fullduplex_mode) { /* remember the current DMA position */ if (s == rme32->playback_substream) { rme32->playback_pcm.hw_io = rme32->playback_pcm.hw_data = snd_rme32_pcm_byteptr(rme32); } else { rme32->capture_pcm.hw_io = rme32->capture_pcm.hw_data = snd_rme32_pcm_byteptr(rme32); } } break; case SNDRV_PCM_TRIGGER_STOP: rme32->running &= ~(1 << s->stream); break; } snd_pcm_trigger_done(s, substream); } /* prefill playback buffer */ if (cmd == SNDRV_PCM_TRIGGER_START && rme32->fullduplex_mode) { snd_pcm_group_for_each_entry(s, substream) { if (s == 
rme32->playback_substream) { s->ops->ack(s); break; } } } switch (cmd) { case SNDRV_PCM_TRIGGER_START: if (rme32->running && ! RME32_ISWORKING(rme32)) snd_rme32_pcm_start(rme32, 0); break; case SNDRV_PCM_TRIGGER_STOP: if (! rme32->running && RME32_ISWORKING(rme32)) snd_rme32_pcm_stop(rme32, 0); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (rme32->running && RME32_ISWORKING(rme32)) snd_rme32_pcm_stop(rme32, 1); break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (rme32->running && ! RME32_ISWORKING(rme32)) snd_rme32_pcm_start(rme32, 1); break; } spin_unlock(&rme32->lock); return 0; } /* pointer callback for halfduplex mode */ static snd_pcm_uframes_t snd_rme32_playback_pointer(struct snd_pcm_substream *substream) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); return snd_rme32_pcm_byteptr(rme32) >> rme32->playback_frlog; } static snd_pcm_uframes_t snd_rme32_capture_pointer(struct snd_pcm_substream *substream) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); return snd_rme32_pcm_byteptr(rme32) >> rme32->capture_frlog; } /* ack and pointer callbacks for fullduplex mode */ static void snd_rme32_pb_trans_copy(struct snd_pcm_substream *substream, struct snd_pcm_indirect *rec, size_t bytes) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); memcpy_toio(rme32->iobase + RME32_IO_DATA_BUFFER + rec->hw_data, substream->runtime->dma_area + rec->sw_data, bytes); } static int snd_rme32_playback_fd_ack(struct snd_pcm_substream *substream) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); struct snd_pcm_indirect *rec, *cprec; rec = &rme32->playback_pcm; cprec = &rme32->capture_pcm; spin_lock(&rme32->lock); rec->hw_queue_size = RME32_BUFFER_SIZE; if (rme32->running & (1 << SNDRV_PCM_STREAM_CAPTURE)) rec->hw_queue_size -= cprec->hw_ready; spin_unlock(&rme32->lock); snd_pcm_indirect_playback_transfer(substream, rec, snd_rme32_pb_trans_copy); return 0; } static void snd_rme32_cp_trans_copy(struct snd_pcm_substream *substream, struct snd_pcm_indirect 
*rec, size_t bytes) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); memcpy_fromio(substream->runtime->dma_area + rec->sw_data, rme32->iobase + RME32_IO_DATA_BUFFER + rec->hw_data, bytes); } static int snd_rme32_capture_fd_ack(struct snd_pcm_substream *substream) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); snd_pcm_indirect_capture_transfer(substream, &rme32->capture_pcm, snd_rme32_cp_trans_copy); return 0; } static snd_pcm_uframes_t snd_rme32_playback_fd_pointer(struct snd_pcm_substream *substream) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); return snd_pcm_indirect_playback_pointer(substream, &rme32->playback_pcm, snd_rme32_pcm_byteptr(rme32)); } static snd_pcm_uframes_t snd_rme32_capture_fd_pointer(struct snd_pcm_substream *substream) { struct rme32 *rme32 = snd_pcm_substream_chip(substream); return snd_pcm_indirect_capture_pointer(substream, &rme32->capture_pcm, snd_rme32_pcm_byteptr(rme32)); } /* for halfduplex mode */ static struct snd_pcm_ops snd_rme32_playback_spdif_ops = { .open = snd_rme32_playback_spdif_open, .close = snd_rme32_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_rme32_playback_hw_params, .hw_free = snd_rme32_pcm_hw_free, .prepare = snd_rme32_playback_prepare, .trigger = snd_rme32_pcm_trigger, .pointer = snd_rme32_playback_pointer, .copy = snd_rme32_playback_copy, .silence = snd_rme32_playback_silence, .mmap = snd_pcm_lib_mmap_iomem, }; static struct snd_pcm_ops snd_rme32_capture_spdif_ops = { .open = snd_rme32_capture_spdif_open, .close = snd_rme32_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_rme32_capture_hw_params, .hw_free = snd_rme32_pcm_hw_free, .prepare = snd_rme32_capture_prepare, .trigger = snd_rme32_pcm_trigger, .pointer = snd_rme32_capture_pointer, .copy = snd_rme32_capture_copy, .mmap = snd_pcm_lib_mmap_iomem, }; static struct snd_pcm_ops snd_rme32_playback_adat_ops = { .open = snd_rme32_playback_adat_open, .close = snd_rme32_playback_close, .ioctl = 
snd_pcm_lib_ioctl, .hw_params = snd_rme32_playback_hw_params, .prepare = snd_rme32_playback_prepare, .trigger = snd_rme32_pcm_trigger, .pointer = snd_rme32_playback_pointer, .copy = snd_rme32_playback_copy, .silence = snd_rme32_playback_silence, .mmap = snd_pcm_lib_mmap_iomem, }; static struct snd_pcm_ops snd_rme32_capture_adat_ops = { .open = snd_rme32_capture_adat_open, .close = snd_rme32_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_rme32_capture_hw_params, .prepare = snd_rme32_capture_prepare, .trigger = snd_rme32_pcm_trigger, .pointer = snd_rme32_capture_pointer, .copy = snd_rme32_capture_copy, .mmap = snd_pcm_lib_mmap_iomem, }; /* for fullduplex mode */ static struct snd_pcm_ops snd_rme32_playback_spdif_fd_ops = { .open = snd_rme32_playback_spdif_open, .close = snd_rme32_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_rme32_playback_hw_params, .hw_free = snd_rme32_pcm_hw_free, .prepare = snd_rme32_playback_prepare, .trigger = snd_rme32_pcm_trigger, .pointer = snd_rme32_playback_fd_pointer, .ack = snd_rme32_playback_fd_ack, }; static struct snd_pcm_ops snd_rme32_capture_spdif_fd_ops = { .open = snd_rme32_capture_spdif_open, .close = snd_rme32_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_rme32_capture_hw_params, .hw_free = snd_rme32_pcm_hw_free, .prepare = snd_rme32_capture_prepare, .trigger = snd_rme32_pcm_trigger, .pointer = snd_rme32_capture_fd_pointer, .ack = snd_rme32_capture_fd_ack, }; static struct snd_pcm_ops snd_rme32_playback_adat_fd_ops = { .open = snd_rme32_playback_adat_open, .close = snd_rme32_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_rme32_playback_hw_params, .prepare = snd_rme32_playback_prepare, .trigger = snd_rme32_pcm_trigger, .pointer = snd_rme32_playback_fd_pointer, .ack = snd_rme32_playback_fd_ack, }; static struct snd_pcm_ops snd_rme32_capture_adat_fd_ops = { .open = snd_rme32_capture_adat_open, .close = snd_rme32_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = 
snd_rme32_capture_hw_params, .prepare = snd_rme32_capture_prepare, .trigger = snd_rme32_pcm_trigger, .pointer = snd_rme32_capture_fd_pointer, .ack = snd_rme32_capture_fd_ack, }; static void snd_rme32_free(void *private_data) { struct rme32 *rme32 = (struct rme32 *) private_data; if (rme32 == NULL) { return; } if (rme32->irq >= 0) { snd_rme32_pcm_stop(rme32, 0); free_irq(rme32->irq, (void *) rme32); rme32->irq = -1; } if (rme32->iobase) { iounmap(rme32->iobase); rme32->iobase = NULL; } if (rme32->port) { pci_release_regions(rme32->pci); rme32->port = 0; } pci_disable_device(rme32->pci); } static void snd_rme32_free_spdif_pcm(struct snd_pcm *pcm) { struct rme32 *rme32 = (struct rme32 *) pcm->private_data; rme32->spdif_pcm = NULL; } static void snd_rme32_free_adat_pcm(struct snd_pcm *pcm) { struct rme32 *rme32 = (struct rme32 *) pcm->private_data; rme32->adat_pcm = NULL; } static int __devinit snd_rme32_create(struct rme32 * rme32) { struct pci_dev *pci = rme32->pci; int err; rme32->irq = -1; spin_lock_init(&rme32->lock); if ((err = pci_enable_device(pci)) < 0) return err; if ((err = pci_request_regions(pci, "RME32")) < 0) return err; rme32->port = pci_resource_start(rme32->pci, 0); rme32->iobase = ioremap_nocache(rme32->port, RME32_IO_SIZE); if (!rme32->iobase) { snd_printk(KERN_ERR "unable to remap memory region 0x%lx-0x%lx\n", rme32->port, rme32->port + RME32_IO_SIZE - 1); return -ENOMEM; } if (request_irq(pci->irq, snd_rme32_interrupt, IRQF_SHARED, "RME32", rme32)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); return -EBUSY; } rme32->irq = pci->irq; /* read the card's revision number */ pci_read_config_byte(pci, 8, &rme32->rev); /* set up ALSA pcm device for S/PDIF */ if ((err = snd_pcm_new(rme32->card, "Digi32 IEC958", 0, 1, 1, &rme32->spdif_pcm)) < 0) { return err; } rme32->spdif_pcm->private_data = rme32; rme32->spdif_pcm->private_free = snd_rme32_free_spdif_pcm; strcpy(rme32->spdif_pcm->name, "Digi32 IEC958"); if (rme32->fullduplex_mode) { 
snd_pcm_set_ops(rme32->spdif_pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_rme32_playback_spdif_fd_ops); snd_pcm_set_ops(rme32->spdif_pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_rme32_capture_spdif_fd_ops); snd_pcm_lib_preallocate_pages_for_all(rme32->spdif_pcm, SNDRV_DMA_TYPE_CONTINUOUS, snd_dma_continuous_data(GFP_KERNEL), 0, RME32_MID_BUFFER_SIZE); rme32->spdif_pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX; } else { snd_pcm_set_ops(rme32->spdif_pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_rme32_playback_spdif_ops); snd_pcm_set_ops(rme32->spdif_pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_rme32_capture_spdif_ops); rme32->spdif_pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX; } /* set up ALSA pcm device for ADAT */ if ((pci->device == PCI_DEVICE_ID_RME_DIGI32) || (pci->device == PCI_DEVICE_ID_RME_DIGI32_PRO)) { /* ADAT is not available on DIGI32 and DIGI32 Pro */ rme32->adat_pcm = NULL; } else { if ((err = snd_pcm_new(rme32->card, "Digi32 ADAT", 1, 1, 1, &rme32->adat_pcm)) < 0) { return err; } rme32->adat_pcm->private_data = rme32; rme32->adat_pcm->private_free = snd_rme32_free_adat_pcm; strcpy(rme32->adat_pcm->name, "Digi32 ADAT"); if (rme32->fullduplex_mode) { snd_pcm_set_ops(rme32->adat_pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_rme32_playback_adat_fd_ops); snd_pcm_set_ops(rme32->adat_pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_rme32_capture_adat_fd_ops); snd_pcm_lib_preallocate_pages_for_all(rme32->adat_pcm, SNDRV_DMA_TYPE_CONTINUOUS, snd_dma_continuous_data(GFP_KERNEL), 0, RME32_MID_BUFFER_SIZE); rme32->adat_pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX; } else { snd_pcm_set_ops(rme32->adat_pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_rme32_playback_adat_ops); snd_pcm_set_ops(rme32->adat_pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_rme32_capture_adat_ops); rme32->adat_pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX; } } rme32->playback_periodsize = 0; rme32->capture_periodsize = 0; /* make sure playback/capture is stopped, if by some reason active */ snd_rme32_pcm_stop(rme32, 0); /* reset DAC */ snd_rme32_reset_dac(rme32); /* 
reset buffer pointer */ writel(0, rme32->iobase + RME32_IO_RESET_POS); /* set default values in registers */ rme32->wcreg = RME32_WCR_SEL | /* normal playback */ RME32_WCR_INP_0 | /* input select */ RME32_WCR_MUTE; /* muting on */ writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); /* init switch interface */ if ((err = snd_rme32_create_switches(rme32->card, rme32)) < 0) { return err; } /* init proc interface */ snd_rme32_proc_init(rme32); rme32->capture_substream = NULL; rme32->playback_substream = NULL; return 0; } /* * proc interface */ static void snd_rme32_proc_read(struct snd_info_entry * entry, struct snd_info_buffer *buffer) { int n; struct rme32 *rme32 = (struct rme32 *) entry->private_data; rme32->rcreg = readl(rme32->iobase + RME32_IO_CONTROL_REGISTER); snd_iprintf(buffer, rme32->card->longname); snd_iprintf(buffer, " (index #%d)\n", rme32->card->number + 1); snd_iprintf(buffer, "\nGeneral settings\n"); if (rme32->fullduplex_mode) snd_iprintf(buffer, " Full-duplex mode\n"); else snd_iprintf(buffer, " Half-duplex mode\n"); if (RME32_PRO_WITH_8414(rme32)) { snd_iprintf(buffer, " receiver: CS8414\n"); } else { snd_iprintf(buffer, " receiver: CS8412\n"); } if (rme32->wcreg & RME32_WCR_MODE24) { snd_iprintf(buffer, " format: 24 bit"); } else { snd_iprintf(buffer, " format: 16 bit"); } if (rme32->wcreg & RME32_WCR_MONO) { snd_iprintf(buffer, ", Mono\n"); } else { snd_iprintf(buffer, ", Stereo\n"); } snd_iprintf(buffer, "\nInput settings\n"); switch (snd_rme32_getinputtype(rme32)) { case RME32_INPUT_OPTICAL: snd_iprintf(buffer, " input: optical"); break; case RME32_INPUT_COAXIAL: snd_iprintf(buffer, " input: coaxial"); break; case RME32_INPUT_INTERNAL: snd_iprintf(buffer, " input: internal"); break; case RME32_INPUT_XLR: snd_iprintf(buffer, " input: XLR"); break; } if (snd_rme32_capture_getrate(rme32, &n) < 0) { snd_iprintf(buffer, "\n sample rate: no valid signal\n"); } else { if (n) { snd_iprintf(buffer, " (8 channels)\n"); } else { 
snd_iprintf(buffer, " (2 channels)\n"); } snd_iprintf(buffer, " sample rate: %d Hz\n", snd_rme32_capture_getrate(rme32, &n)); } snd_iprintf(buffer, "\nOutput settings\n"); if (rme32->wcreg & RME32_WCR_SEL) { snd_iprintf(buffer, " output signal: normal playback"); } else { snd_iprintf(buffer, " output signal: same as input"); } if (rme32->wcreg & RME32_WCR_MUTE) { snd_iprintf(buffer, " (muted)\n"); } else { snd_iprintf(buffer, "\n"); } /* master output frequency */ if (! ((!(rme32->wcreg & RME32_WCR_FREQ_0)) && (!(rme32->wcreg & RME32_WCR_FREQ_1)))) { snd_iprintf(buffer, " sample rate: %d Hz\n", snd_rme32_playback_getrate(rme32)); } if (rme32->rcreg & RME32_RCR_KMODE) { snd_iprintf(buffer, " sample clock source: AutoSync\n"); } else { snd_iprintf(buffer, " sample clock source: Internal\n"); } if (rme32->wcreg & RME32_WCR_PRO) { snd_iprintf(buffer, " format: AES/EBU (professional)\n"); } else { snd_iprintf(buffer, " format: IEC958 (consumer)\n"); } if (rme32->wcreg & RME32_WCR_EMP) { snd_iprintf(buffer, " emphasis: on\n"); } else { snd_iprintf(buffer, " emphasis: off\n"); } } static void __devinit snd_rme32_proc_init(struct rme32 * rme32) { struct snd_info_entry *entry; if (! snd_card_proc_new(rme32->card, "rme32", &entry)) snd_info_set_text_ops(entry, rme32, snd_rme32_proc_read); } /* * control interface */ #define snd_rme32_info_loopback_control snd_ctl_boolean_mono_info static int snd_rme32_get_loopback_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct rme32 *rme32 = snd_kcontrol_chip(kcontrol); spin_lock_irq(&rme32->lock); ucontrol->value.integer.value[0] = rme32->wcreg & RME32_WCR_SEL ? 0 : 1; spin_unlock_irq(&rme32->lock); return 0; } static int snd_rme32_put_loopback_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct rme32 *rme32 = snd_kcontrol_chip(kcontrol); unsigned int val; int change; val = ucontrol->value.integer.value[0] ? 
0 : RME32_WCR_SEL; spin_lock_irq(&rme32->lock); val = (rme32->wcreg & ~RME32_WCR_SEL) | val; change = val != rme32->wcreg; if (ucontrol->value.integer.value[0]) val &= ~RME32_WCR_MUTE; else val |= RME32_WCR_MUTE; rme32->wcreg = val; writel(val, rme32->iobase + RME32_IO_CONTROL_REGISTER); spin_unlock_irq(&rme32->lock); return change; } static int snd_rme32_info_inputtype_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct rme32 *rme32 = snd_kcontrol_chip(kcontrol); static char *texts[4] = { "Optical", "Coaxial", "Internal", "XLR" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; switch (rme32->pci->device) { case PCI_DEVICE_ID_RME_DIGI32: case PCI_DEVICE_ID_RME_DIGI32_8: uinfo->value.enumerated.items = 3; break; case PCI_DEVICE_ID_RME_DIGI32_PRO: uinfo->value.enumerated.items = 4; break; default: snd_BUG(); break; } if (uinfo->value.enumerated.item > uinfo->value.enumerated.items - 1) { uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; } strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int snd_rme32_get_inputtype_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct rme32 *rme32 = snd_kcontrol_chip(kcontrol); unsigned int items = 3; spin_lock_irq(&rme32->lock); ucontrol->value.enumerated.item[0] = snd_rme32_getinputtype(rme32); switch (rme32->pci->device) { case PCI_DEVICE_ID_RME_DIGI32: case PCI_DEVICE_ID_RME_DIGI32_8: items = 3; break; case PCI_DEVICE_ID_RME_DIGI32_PRO: items = 4; break; default: snd_BUG(); break; } if (ucontrol->value.enumerated.item[0] >= items) { ucontrol->value.enumerated.item[0] = items - 1; } spin_unlock_irq(&rme32->lock); return 0; } static int snd_rme32_put_inputtype_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct rme32 *rme32 = snd_kcontrol_chip(kcontrol); unsigned int val; int change, items = 3; switch (rme32->pci->device) { case PCI_DEVICE_ID_RME_DIGI32: case 
PCI_DEVICE_ID_RME_DIGI32_8: items = 3; break; case PCI_DEVICE_ID_RME_DIGI32_PRO: items = 4; break; default: snd_BUG(); break; } val = ucontrol->value.enumerated.item[0] % items; spin_lock_irq(&rme32->lock); change = val != (unsigned int)snd_rme32_getinputtype(rme32); snd_rme32_setinputtype(rme32, val); spin_unlock_irq(&rme32->lock); return change; } static int snd_rme32_info_clockmode_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[4] = { "AutoSync", "Internal 32.0kHz", "Internal 44.1kHz", "Internal 48.0kHz" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 4; if (uinfo->value.enumerated.item > 3) { uinfo->value.enumerated.item = 3; } strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int snd_rme32_get_clockmode_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct rme32 *rme32 = snd_kcontrol_chip(kcontrol); spin_lock_irq(&rme32->lock); ucontrol->value.enumerated.item[0] = snd_rme32_getclockmode(rme32); spin_unlock_irq(&rme32->lock); return 0; } static int snd_rme32_put_clockmode_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct rme32 *rme32 = snd_kcontrol_chip(kcontrol); unsigned int val; int change; val = ucontrol->value.enumerated.item[0] % 3; spin_lock_irq(&rme32->lock); change = val != (unsigned int)snd_rme32_getclockmode(rme32); snd_rme32_setclockmode(rme32, val); spin_unlock_irq(&rme32->lock); return change; } static u32 snd_rme32_convert_from_aes(struct snd_aes_iec958 * aes) { u32 val = 0; val |= (aes->status[0] & IEC958_AES0_PROFESSIONAL) ? RME32_WCR_PRO : 0; if (val & RME32_WCR_PRO) val |= (aes->status[0] & IEC958_AES0_PRO_EMPHASIS_5015) ? RME32_WCR_EMP : 0; else val |= (aes->status[0] & IEC958_AES0_CON_EMPHASIS_5015) ? 
RME32_WCR_EMP : 0; return val; } static void snd_rme32_convert_to_aes(struct snd_aes_iec958 * aes, u32 val) { aes->status[0] = ((val & RME32_WCR_PRO) ? IEC958_AES0_PROFESSIONAL : 0); if (val & RME32_WCR_PRO) aes->status[0] |= (val & RME32_WCR_EMP) ? IEC958_AES0_PRO_EMPHASIS_5015 : 0; else aes->status[0] |= (val & RME32_WCR_EMP) ? IEC958_AES0_CON_EMPHASIS_5015 : 0; } static int snd_rme32_control_spdif_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int snd_rme32_control_spdif_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct rme32 *rme32 = snd_kcontrol_chip(kcontrol); snd_rme32_convert_to_aes(&ucontrol->value.iec958, rme32->wcreg_spdif); return 0; } static int snd_rme32_control_spdif_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct rme32 *rme32 = snd_kcontrol_chip(kcontrol); int change; u32 val; val = snd_rme32_convert_from_aes(&ucontrol->value.iec958); spin_lock_irq(&rme32->lock); change = val != rme32->wcreg_spdif; rme32->wcreg_spdif = val; spin_unlock_irq(&rme32->lock); return change; } static int snd_rme32_control_spdif_stream_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int snd_rme32_control_spdif_stream_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value * ucontrol) { struct rme32 *rme32 = snd_kcontrol_chip(kcontrol); snd_rme32_convert_to_aes(&ucontrol->value.iec958, rme32->wcreg_spdif_stream); return 0; } static int snd_rme32_control_spdif_stream_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value * ucontrol) { struct rme32 *rme32 = snd_kcontrol_chip(kcontrol); int change; u32 val; val = snd_rme32_convert_from_aes(&ucontrol->value.iec958); spin_lock_irq(&rme32->lock); change = val != rme32->wcreg_spdif_stream; rme32->wcreg_spdif_stream = val; rme32->wcreg &= ~(RME32_WCR_PRO | 
RME32_WCR_EMP); rme32->wcreg |= val; writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER); spin_unlock_irq(&rme32->lock); return change; } static int snd_rme32_control_spdif_mask_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int snd_rme32_control_spdif_mask_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value * ucontrol) { ucontrol->value.iec958.status[0] = kcontrol->private_value; return 0; } static struct snd_kcontrol_new snd_rme32_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT), .info = snd_rme32_control_spdif_info, .get = snd_rme32_control_spdif_get, .put = snd_rme32_control_spdif_put }, { .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, PCM_STREAM), .info = snd_rme32_control_spdif_stream_info, .get = snd_rme32_control_spdif_stream_get, .put = snd_rme32_control_spdif_stream_put }, { .access = SNDRV_CTL_ELEM_ACCESS_READ, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK), .info = snd_rme32_control_spdif_mask_info, .get = snd_rme32_control_spdif_mask_get, .private_value = IEC958_AES0_PROFESSIONAL | IEC958_AES0_CON_EMPHASIS }, { .access = SNDRV_CTL_ELEM_ACCESS_READ, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, PRO_MASK), .info = snd_rme32_control_spdif_mask_info, .get = snd_rme32_control_spdif_mask_get, .private_value = IEC958_AES0_PROFESSIONAL | IEC958_AES0_PRO_EMPHASIS }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Input Connector", .info = snd_rme32_info_inputtype_control, .get = snd_rme32_get_inputtype_control, .put = snd_rme32_put_inputtype_control }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Loopback Input", .info = snd_rme32_info_loopback_control, .get = snd_rme32_get_loopback_control, .put = 
snd_rme32_put_loopback_control }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Sample Clock Source", .info = snd_rme32_info_clockmode_control, .get = snd_rme32_get_clockmode_control, .put = snd_rme32_put_clockmode_control } }; static int snd_rme32_create_switches(struct snd_card *card, struct rme32 * rme32) { int idx, err; struct snd_kcontrol *kctl; for (idx = 0; idx < (int)ARRAY_SIZE(snd_rme32_controls); idx++) { if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_rme32_controls[idx], rme32))) < 0) return err; if (idx == 1) /* IEC958 (S/PDIF) Stream */ rme32->spdif_ctl = kctl; } return 0; } /* * Card initialisation */ static void snd_rme32_card_free(struct snd_card *card) { snd_rme32_free(card->private_data); } static int __devinit snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct rme32 *rme32; struct snd_card *card; int err; if (dev >= SNDRV_CARDS) { return -ENODEV; } if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct rme32), &card); if (err < 0) return err; card->private_free = snd_rme32_card_free; rme32 = (struct rme32 *) card->private_data; rme32->card = card; rme32->pci = pci; snd_card_set_dev(card, &pci->dev); if (fullduplex[dev]) rme32->fullduplex_mode = 1; if ((err = snd_rme32_create(rme32)) < 0) { snd_card_free(card); return err; } strcpy(card->driver, "Digi32"); switch (rme32->pci->device) { case PCI_DEVICE_ID_RME_DIGI32: strcpy(card->shortname, "RME Digi32"); break; case PCI_DEVICE_ID_RME_DIGI32_8: strcpy(card->shortname, "RME Digi32/8"); break; case PCI_DEVICE_ID_RME_DIGI32_PRO: strcpy(card->shortname, "RME Digi32 PRO"); break; } sprintf(card->longname, "%s (Rev. 
%d) at 0x%lx, irq %d", card->shortname, rme32->rev, rme32->port, rme32->irq); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_rme32_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = "RME Digi32", .id_table = snd_rme32_ids, .probe = snd_rme32_probe, .remove = __devexit_p(snd_rme32_remove), }; static int __init alsa_card_rme32_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_rme32_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_rme32_init) module_exit(alsa_card_rme32_exit)
gpl-2.0
sktjdgns1189/android_kernel_samsung_SHW-M240S
drivers/xen/evtchn.c
933
12140
/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/poll.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <asm/xen/hypervisor.h>

/*
 * Per-open-file state.  Each open of /dev/xen/evtchn gets one of these;
 * every event channel a process binds via ioctl is associated with its
 * per_user_data, and fired ports are queued into @ring for read() to
 * consume.
 */
struct per_user_data {
	struct mutex bind_mutex; /* serialize bind/unbind operations */

	/* Notification ring, accessed via /dev/xen/evtchn. */
#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t))
#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
	evtchn_port_t *ring;	/* one page of port numbers, power-of-two sized */
	/* ring_cons/ring_prod are free-running; consumers mask with
	 * EVTCHN_RING_MASK.  ring_overflow latches until IOCTL_EVTCHN_RESET. */
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct mutex ring_cons_mutex; /* protect against concurrent readers */

	/* Processes wait on this queue when ring is empty. */
	wait_queue_head_t evtchn_wait;
	struct fasync_struct *evtchn_async_queue;
	const char *name;	/* "evtchn:<comm>", used as the irq name */
};

/* Who's bound to each port? */
static struct per_user_data *port_user[NR_EVENT_CHANNELS];
static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */

/*
 * Hard-irq handler for a bound event channel.  @data carries the port
 * number (stuffed into the cookie by evtchn_bind_to_user).  Queues the
 * port into the owner's notification ring and masks the irq until
 * userspace re-enables it by writing the port back (see evtchn_write).
 *
 * Runs in interrupt context, hence plain spin_lock() here while all
 * process-context users take spin_lock_irq() on the same lock.
 *
 * NOTE(review): port_user[port] is dereferenced without a NULL check;
 * this relies on unbind_from_irqhandler() having quiesced the irq
 * before evtchn_unbind_from_user() clears the slot — verify that
 * ordering holds on all paths.
 */
irqreturn_t evtchn_interrupt(int irq, void *data)
{
	unsigned int port = (unsigned long)data;
	struct per_user_data *u;

	spin_lock(&port_user_lock);

	u = port_user[port];

	/* Mask further interrupts until userspace acks via write(). */
	disable_irq_nosync(irq);

	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
		wmb(); /* Ensure ring contents visible */
		/* Wake readers only on empty->non-empty transition. */
		if (u->ring_cons == u->ring_prod++) {
			wake_up_interruptible(&u->evtchn_wait);
			kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
		}
	} else {
		/* Ring full: latch the error; read() will return -EFBIG. */
		u->ring_overflow = 1;
	}

	spin_unlock(&port_user_lock);

	return IRQ_HANDLED;
}

/*
 * read(): deliver queued port numbers to userspace as an array of
 * evtchn_port_t.  Blocks (unless O_NONBLOCK) while the ring is empty.
 * Returns the byte count copied, 0 for a sub-port-sized request,
 * -EFBIG once the ring has overflowed, or the usual -EAGAIN/-EFAULT/
 * -ERESTARTSYS errors.
 */
static ssize_t evtchn_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rc;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u = file->private_data;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return 0;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	/* Wait for data; on normal exit the loop holds ring_cons_mutex. */
	for (;;) {
		mutex_lock(&u->ring_cons_mutex);

		rc = -EFBIG;
		if (u->ring_overflow)
			goto unlock_out;

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;

		mutex_unlock(&u->ring_cons_mutex);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(u->evtchn_wait,
					      u->ring_cons != u->ring_prod);
		if (rc)
			return rc;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
			sizeof(evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	rc = -EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */
	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
	    ((bytes2 != 0) &&
	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
		goto unlock_out;

	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	rc = bytes1 + bytes2;

 unlock_out:
	mutex_unlock(&u->ring_cons_mutex);
	return rc;
}

/*
 * write(): userspace hands back an array of port numbers it has
 * finished servicing; each port it owns has its (masked-by-
 * evtchn_interrupt) irq re-enabled.  Ports it does not own, or that
 * are out of range, are silently skipped.  Returns the (port-aligned)
 * byte count consumed.
 */
static ssize_t evtchn_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	int rc, i;
	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	struct per_user_data *u = file->private_data;

	if (kbuf == NULL)
		return -ENOMEM;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	rc = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	rc = -EFAULT;
	if (copy_from_user(kbuf, buf, count) != 0)
		goto out;

	/* Lock so ownership can't change while we unmask each port. */
	spin_lock_irq(&port_user_lock);
	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
		if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
			enable_irq(irq_from_evtchn(kbuf[i]));
	spin_unlock_irq(&port_user_lock);

	rc = count;

 out:
	free_page((unsigned long)kbuf);
	return rc;
}

/*
 * Claim @port for @u and hook up the interrupt handler.  The port
 * number doubles as the irq cookie (cast through unsigned long).
 * Returns 0 on success or a negative error from
 * bind_evtchn_to_irqhandler().
 */
static int evtchn_bind_to_user(struct per_user_data *u, int port)
{
	int rc = 0;

	/*
	 * Ports are never reused, so every caller should pass in a
	 * unique port.
	 *
	 * (Locking not necessary because we haven't registered the
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */
	BUG_ON(port_user[port] != NULL);
	port_user[port] = u;

	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
				       u->name, (void *)(unsigned long)port);
	if (rc >= 0)
		rc = 0;	/* bind_evtchn_to_irqhandler returns the irq number */

	return rc;
}

/*
 * Tear down the irq for @port and release ownership.
 *
 * NOTE(review): callers invoke this while holding port_user_lock with
 * irqs disabled, yet unbind_from_irqhandler() may sleep — confirm this
 * is safe on this kernel or restructure the callers.
 */
static void evtchn_unbind_from_user(struct per_user_data *u, int port)
{
	int irq = irq_from_evtchn(port);

	unbind_from_irqhandler(irq, (void *)(unsigned long)port);

	/* make sure we unbind the irq handler before clearing the port */
	barrier();

	port_user[port] = NULL;
}

/*
 * ioctl(): bind (VIRQ / interdomain / fresh unbound port), unbind,
 * notify a bound port, or reset the notification ring.  Bind variants
 * return the new port number on success.  All commands are serialized
 * against each other by u->bind_mutex.
 */
static long evtchn_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int rc;
	struct per_user_data *u = file->private_data;
	void __user *uarg = (void __user *) arg;

	/* Prevent bind from racing with unbind */
	mutex_lock(&u->bind_mutex);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq bind;
		struct evtchn_bind_virq bind_virq;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_virq.virq = bind.virq;
		bind_virq.vcpu = 0;	/* always bind VIRQs to vcpu 0 */
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						 &bind_virq);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_virq.port);
		if (rc == 0)
			rc = bind_virq.port;
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain bind;
		struct evtchn_bind_interdomain bind_interdomain;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_interdomain.remote_dom = bind.remote_domain;
		bind_interdomain.remote_port = bind.remote_port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
						 &bind_interdomain);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
		if (rc == 0)
			rc = bind_interdomain.local_port;
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port bind;
		struct evtchn_alloc_unbound alloc_unbound;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		alloc_unbound.dom = DOMID_SELF;
		alloc_unbound.remote_dom = bind.remote_domain;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						 &alloc_unbound);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, alloc_unbound.port);
		if (rc == 0)
			rc = alloc_unbound.port;
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;

		rc = -EFAULT;
		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
			break;

		rc = -EINVAL;
		if (unbind.port >= NR_EVENT_CHANNELS)
			break;

		spin_lock_irq(&port_user_lock);

		rc = -ENOTCONN;
		if (port_user[unbind.port] != u) {
			spin_unlock_irq(&port_user_lock);
			break;
		}

		evtchn_unbind_from_user(u, unbind.port);

		spin_unlock_irq(&port_user_lock);

		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify notify;

		rc = -EFAULT;
		if (copy_from_user(&notify, uarg, sizeof(notify)))
			break;

		if (notify.port >= NR_EVENT_CHANNELS) {
			rc = -EINVAL;
		} else if (port_user[notify.port] != u) {
			rc = -ENOTCONN;
		} else {
			notify_remote_via_evtchn(notify.port);
			rc = 0;
		}
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		/* Take both locks: readers hold the mutex, the irq
		 * handler bumps ring_prod under the spinlock. */
		mutex_lock(&u->ring_cons_mutex);
		spin_lock_irq(&port_user_lock);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		spin_unlock_irq(&port_user_lock);
		mutex_unlock(&u->ring_cons_mutex);
		rc = 0;
		break;
	}

	default:
		rc = -ENOSYS;
		break;
	}

	mutex_unlock(&u->bind_mutex);

	return rc;
}

/*
 * poll(): always writable; readable when the ring is non-empty;
 * POLLERR once the ring has overflowed.
 */
static unsigned int evtchn_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = POLLOUT | POLLWRNORM;
	struct per_user_data *u = file->private_data;

	poll_wait(file, &u->evtchn_wait, wait);
	if (u->ring_cons != u->ring_prod)
		mask |= POLLIN | POLLRDNORM;
	if (u->ring_overflow)
		mask = POLLERR;
	return mask;
}

/* fasync(): standard SIGIO plumbing for the notification queue. */
static int evtchn_fasync(int fd, struct file *filp, int on)
{
	struct per_user_data *u = filp->private_data;
	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
}

/*
 * open(): allocate the per-user state and a one-page notification
 * ring.  Frees everything already allocated on any failure path.
 */
static int evtchn_open(struct inode *inode, struct file *filp)
{
	struct per_user_data *u;

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
	if (u->name == NULL) {
		kfree(u);
		return -ENOMEM;
	}

	init_waitqueue_head(&u->evtchn_wait);

	u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	if (u->ring == NULL) {
		kfree(u->name);
		kfree(u);
		return -ENOMEM;
	}

	mutex_init(&u->bind_mutex);
	mutex_init(&u->ring_cons_mutex);

	filp->private_data = u;

	return 0;
}

/*
 * release(): unbind every port still owned by this file and free the
 * per-user state.  Scans the whole port_user[] table since there is
 * no per-user list of bound ports.
 */
static int evtchn_release(struct inode *inode, struct file *filp)
{
	int i;
	struct per_user_data *u = filp->private_data;

	spin_lock_irq(&port_user_lock);

	free_page((unsigned long)u->ring);

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (port_user[i] != u)
			continue;

		evtchn_unbind_from_user(port_user[i], i);
	}

	spin_unlock_irq(&port_user_lock);

	kfree(u->name);
	kfree(u);

	return 0;
}

static const struct file_operations evtchn_fops = {
	.owner = THIS_MODULE,
	.read = evtchn_read,
	.write = evtchn_write,
	.unlocked_ioctl = evtchn_ioctl,
	.poll = evtchn_poll,
	.fasync = evtchn_fasync,
	.open = evtchn_open,
	.release = evtchn_release,
};

static struct miscdevice evtchn_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "evtchn",
	.fops = &evtchn_fops,
};

/* Module init: register the misc device when running under Xen. */
static int __init evtchn_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	spin_lock_init(&port_user_lock);
	memset(port_user, 0, sizeof(port_user));

	/* Create '/dev/misc/evtchn'. */
	err = misc_register(&evtchn_miscdev);
	if (err != 0) {
		printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
		return err;
	}

	printk(KERN_INFO "Event-channel device installed.\n");

	return 0;
}

static void __exit evtchn_cleanup(void)
{
	misc_deregister(&evtchn_miscdev);
}

module_init(evtchn_init);
module_exit(evtchn_cleanup);

MODULE_LICENSE("GPL");
gpl-2.0
venue3x40-dev/android_kernel_dell_venue3x40
arch/arm/mach-omap2/opp.c
1189
2765
/* * OMAP SoC specific OPP wrapper function * * Copyright (C) 2009-2010 Texas Instruments Incorporated - http://www.ti.com/ * Nishanth Menon * Kevin Hilman * Copyright (C) 2010 Nokia Corporation. * Eduardo Valentin * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/opp.h> #include <linux/cpu.h> #include "omap_device.h" #include "omap_opp_data.h" /* Temp variable to allow multiple calls */ static u8 __initdata omap_table_init; /** * omap_init_opp_table() - Initialize opp table as per the CPU type * @opp_def: opp default list for this silicon * @opp_def_size: number of opp entries for this silicon * * Register the initial OPP table with the OPP library based on the CPU * type. This is meant to be used only by SoC specific registration. */ int __init omap_init_opp_table(struct omap_opp_def *opp_def, u32 opp_def_size) { int i, r; if (!opp_def || !opp_def_size) { pr_err("%s: invalid params!\n", __func__); return -EINVAL; } /* * Initialize only if not already initialized even if the previous * call failed, because, no reason we'd succeed again. */ if (omap_table_init) return -EEXIST; omap_table_init = 1; /* Lets now register with OPP library */ for (i = 0; i < opp_def_size; i++, opp_def++) { struct omap_hwmod *oh; struct device *dev; if (!opp_def->hwmod_name) { pr_err("%s: NULL name of omap_hwmod, failing [%d].\n", __func__, i); return -EINVAL; } if (!strncmp(opp_def->hwmod_name, "mpu", 3)) { /* * All current OMAPs share voltage rail and * clock source, so CPU0 is used to represent * the MPU-SS. 
*/ dev = get_cpu_device(0); } else { oh = omap_hwmod_lookup(opp_def->hwmod_name); if (!oh || !oh->od) { pr_debug("%s: no hwmod or odev for %s, [%d] cannot add OPPs.\n", __func__, opp_def->hwmod_name, i); continue; } dev = &oh->od->pdev->dev; } r = opp_add(dev, opp_def->freq, opp_def->u_volt); if (r) { dev_err(dev, "%s: add OPP %ld failed for %s [%d] result=%d\n", __func__, opp_def->freq, opp_def->hwmod_name, i, r); } else { if (!opp_def->default_available) r = opp_disable(dev, opp_def->freq); if (r) dev_err(dev, "%s: disable %ld failed for %s [%d] result=%d\n", __func__, opp_def->freq, opp_def->hwmod_name, i, r); } } return 0; }
gpl-2.0
rictec/huawei_s7_kernel
drivers/ieee1394/init_ohci1394_dma.c
1701
9389
/* * init_ohci1394_dma.c - Initializes physical DMA on all OHCI 1394 controllers * * Copyright (C) 2006-2007 Bernhard Kaindl <bk@suse.de> * * Derived from drivers/ieee1394/ohci1394.c and arch/x86/kernel/early-quirks.c * this file has functions to: * - scan the PCI very early on boot for all OHCI 1394-compliant controllers * - reset and initialize them and make them join the IEEE1394 bus and * - enable physical DMA on them to allow remote debugging * * All code and data is marked as __init and __initdata, respective as * during boot, all OHCI1394 controllers may be claimed by the firewire * stack and at this point, this code should not touch them anymore. * * To use physical DMA after the initialization of the firewire stack, * be sure that the stack enables it and (re-)attach after the bus reset * which may be caused by the firewire stack initialization. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
 */

#include <linux/interrupt.h>	/* for ohci1394.h */
#include <linux/delay.h>
#include <linux/pci.h>		/* for PCI defines */
#include <linux/init_ohci1394_dma.h>
#include <asm/pci-direct.h>	/* for direct PCI config space access */
#include <asm/fixmap.h>

#include "ieee1394_types.h"
#include "ohci1394.h"

/*
 * Set by the "ohci1394_dma=early" boot parameter (see setup_ohci1394_dma
 * below); tells early boot code to run the physical-DMA initialization.
 */
int __initdata init_ohci1394_dma_early;

/*
 * Reads a PHY register of an OHCI-1394 controller.
 *
 * Starts a read by writing the PHY register address together with the
 * read-request bit to PhyControl, then polls (at most OHCI_LOOP_COUNT
 * 1ms steps) until the controller sets the read-done bit, and extracts
 * the 8-bit register value from the result.
 *
 * NOTE(review): the result is read and returned even if the poll loop
 * exhausted OHCI_LOOP_COUNT without seeing the done bit — acceptable
 * for best-effort early-boot code, but worth confirming.
 */
static inline u8 __init get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	quadlet_t r;

	/* PHY address in bits 8-14; bit 15 requests the read */
	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		/* bit 31 signals that the read has completed */
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;
		mdelay(1);
	}
	r = reg_read(ohci, OHCI1394_PhyControl);

	return (r & 0x00ff0000) >> 16;	/* read data lives in bits 16-23 */
}

/*
 * Writes to a PHY register of an OHCI-1394 controller.
 *
 * Writes address, data and the write-request bit to PhyControl, then
 * polls (at most OHCI_LOOP_COUNT 1ms steps) until the controller clears
 * the write-request bit, i.e. the write has been taken.
 */
static inline void __init set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
	int i;

	/* PHY address in bits 8-14, data in bits 0-7; bit 14 requests the write */
	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		u32 r = reg_read(ohci, OHCI1394_PhyControl);
		/* write-request bit cleared -> write accepted */
		if (!(r & 0x00004000))
			break;
		mdelay(1);
	}
}

/*
 * Resets an OHCI-1394 controller (for sane state before initialization).
 *
 * Sets the softReset bit in HCControl and polls (at most OHCI_LOOP_COUNT
 * 1ms steps) until the controller clears it again, indicating the reset
 * has finished.
 */
static inline void __init init_ohci1394_soft_reset(struct ti_ohci *ohci) {
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_HCControlSet)
				& OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}
}

/*
 * Basic OHCI-1394 register and port initialization.
 *
 * Programs bus options, node ID, link control, interrupt masks and
 * retry counts, enables the link, and finally enables any PHY port
 * that has something connected to it.  The individual register writes
 * are commented below; their order follows the reference driver
 * (drivers/ieee1394/ohci1394.c) and should not be changed casually.
 */
static inline void __init init_ohci1394_initialize(struct ti_ohci *ohci)
{
	quadlet_t bus_options;
	int num_ports, i;

	/* Put some defaults to these undefined bus options */
	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	bus_options |= 0x60000000;	/* Enable CMC and ISC */
	bus_options &= ~0x00ff0000;	/* XXX: Set cyc_clk_acc to zero for now */
	bus_options &= ~0x18000000;	/* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, bus_options);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable reception of PHY packets */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvPhyPkt);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the isochronous interrupt masks */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Accept asynchronous transfer requests from all nodes for now */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Specify asynchronous transfer retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	/* If anything is connected to a port, make sure it is enabled */
	num_ports = get_phy_reg(ohci, 2) & 0xf;	/* PHY reg 2: port count in low nibble */
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		/* PHY reg 7 selects the port, reg 8 reports/controls it */
		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		/* port connected (bit 5) but disabled (bit 0) -> enable it */
		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}
}

/**
 * init_ohci1394_wait_for_busresets - wait until bus resets are completed
 * @ohci: the controller whose interrupt-event register is polled
 *
 * OHCI1394 initialization itself and any device going on- or offline
 * and any cable issue cause an IEEE1394 bus reset. The OHCI1394 spec
 * specifies that physical DMA is disabled on each bus reset and it
 * has to be enabled after each bus reset when needed. We resort
 * to polling here because on early boot, we have no interrupts.
 * (9 iterations of 200ms = up to ~1.8s of waiting.)
 */
static inline void __init init_ohci1394_wait_for_busresets(struct ti_ohci *ohci)
{
	int i, events;

	for (i = 0; i < 9; i++) {
		mdelay(200);
		events = reg_read(ohci, OHCI1394_IntEventSet);
		if (events & OHCI1394_busReset)
			reg_write(ohci, OHCI1394_IntEventClear,
					OHCI1394_busReset);
	}
}

/**
 * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
 * @hci: the controller on which to open up the physical request filters
 *
 * This enables remote DMA access over IEEE1394 from every host for the low
 * 4GB of address space. DMA accesses above 4GB are not available currently
 * (PhyUpperBound is set to 0xffff0000).
 */
static inline void __init init_ohci1394_enable_physical_dma(struct ti_ohci *hci)
{
	reg_write(hci, OHCI1394_PhyReqFilterHiSet, 0xffffffff);
	reg_write(hci, OHCI1394_PhyReqFilterLoSet, 0xffffffff);
	reg_write(hci, OHCI1394_PhyUpperBound, 0xffff0000);
}

/**
 * init_ohci1394_reset_and_init_dma - init controller and enable DMA
 * @ohci: the controller to reset and initialize
 *
 * This initializes the given controller and enables physical DMA engine in it.
 */
static inline void __init init_ohci1394_reset_and_init_dma(struct ti_ohci *ohci)
{
	/* Start off with a soft reset, clears everything to a sane state. */
	init_ohci1394_soft_reset(ohci);

	/* Accessing some registers without LPS enabled may cause lock up */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	mdelay(50); /* Wait 50msec to make sure we have full link enabled */
	init_ohci1394_initialize(ohci);
	/*
	 * The initialization causes at least one IEEE1394 bus reset. Enabling
	 * physical DMA only works *after* *all* bus resets have calmed down:
	 */
	init_ohci1394_wait_for_busresets(ohci);

	/* We had to wait and do this now if we want to debug early problems */
	init_ohci1394_enable_physical_dma(ohci);
}

/**
 * init_ohci1394_controller - Map the registers of the controller and init DMA
 * @num:  PCI bus number of the controller
 * @slot: PCI device (slot) number of the controller
 * @func: PCI function number of the controller
 *
 * This maps the registers of the specified controller (BAR0 only, via the
 * FIX_OHCI1394_BASE fixmap slot) and initializes it.
 */
static inline void __init init_ohci1394_controller(int num, int slot, int func)
{
	unsigned long ohci_base;
	struct ti_ohci ohci;

	printk(KERN_INFO "init_ohci1394_dma: initializing OHCI-1394"
			 " at %02x:%02x.%x\n", num, slot, func);

	ohci_base = read_pci_config(num, slot, func, PCI_BASE_ADDRESS_0+(0<<2))
						& PCI_BASE_ADDRESS_MEM_MASK;

	set_fixmap_nocache(FIX_OHCI1394_BASE, ohci_base);

	ohci.registers = (void *)fix_to_virt(FIX_OHCI1394_BASE);

	init_ohci1394_reset_and_init_dma(&ohci);
}

/**
 * init_ohci1394_dma_on_all_controllers - scan for OHCI1394 controllers and
 *                                        init DMA on them
 *
 * Scans the whole PCI space for OHCI1394 controllers and inits DMA on them.
 */
void __init init_ohci1394_dma_on_all_controllers(void)
{
	int num, slot, func;

	if (!early_pci_allowed())
		return;

	/* Poor man's PCI discovery, the only thing we can do at early boot */
	for (num = 0; num < 32; num++) {
		for (slot = 0; slot < 32; slot++) {
			for (func = 0; func < 8; func++) {
				u32 class = read_pci_config(num, slot, func,
							PCI_CLASS_REVISION);
				if ((class == 0xffffffff))
					continue; /* No device at this func */

				if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
					continue; /* Not an OHCI-1394 device */

				init_ohci1394_controller(num, slot, func);
				break; /* Assume one controller per device */
			}
		}
	}
	printk(KERN_INFO "init_ohci1394_dma: finished initializing OHCI DMA\n");
}

/**
 * setup_ohci1394_dma - enables early OHCI1394 DMA initialization
 * @opt: value of the "ohci1394_dma" boot parameter
 *
 * Sets init_ohci1394_dma_early when the parameter value is "early".
 * Always returns 0 (parameter consumed).
 */
static int __init setup_ohci1394_dma(char *opt)
{
	if (!strcmp(opt, "early"))
		init_ohci1394_dma_early = 1;
	return 0;
}

/* passing ohci1394_dma=early on boot causes early OHCI1394 DMA initialization */
early_param("ohci1394_dma", setup_ohci1394_dma);
gpl-2.0
embeddedarm/linux-3.0.35
drivers/gpu/drm/nouveau/nvc0_grctx.c
1957
111777
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_mm.h" #include "nvc0_graph.h" static void nv_icmd(struct drm_device *dev, u32 icmd, u32 data) { nv_wr32(dev, 0x400204, data); nv_wr32(dev, 0x400200, icmd); while (nv_rd32(dev, 0x400700) & 2) {} } static void nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data) { nv_wr32(dev, 0x40448c, data); nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class); } static void nvc0_grctx_generate_9097(struct drm_device *dev) { nv_mthd(dev, 0x9097, 0x0800, 0x00000000); nv_mthd(dev, 0x9097, 0x0840, 0x00000000); nv_mthd(dev, 0x9097, 0x0880, 0x00000000); nv_mthd(dev, 0x9097, 0x08c0, 0x00000000); nv_mthd(dev, 0x9097, 0x0900, 0x00000000); nv_mthd(dev, 0x9097, 0x0940, 0x00000000); nv_mthd(dev, 0x9097, 0x0980, 0x00000000); nv_mthd(dev, 0x9097, 0x09c0, 0x00000000); nv_mthd(dev, 0x9097, 0x0804, 0x00000000); nv_mthd(dev, 0x9097, 0x0844, 0x00000000); nv_mthd(dev, 0x9097, 0x0884, 0x00000000); nv_mthd(dev, 0x9097, 0x08c4, 0x00000000); nv_mthd(dev, 0x9097, 0x0904, 0x00000000); nv_mthd(dev, 0x9097, 0x0944, 0x00000000); nv_mthd(dev, 0x9097, 0x0984, 0x00000000); nv_mthd(dev, 0x9097, 0x09c4, 0x00000000); nv_mthd(dev, 0x9097, 0x0808, 0x00000400); nv_mthd(dev, 0x9097, 0x0848, 0x00000400); nv_mthd(dev, 0x9097, 0x0888, 0x00000400); nv_mthd(dev, 0x9097, 0x08c8, 0x00000400); nv_mthd(dev, 0x9097, 0x0908, 0x00000400); nv_mthd(dev, 0x9097, 0x0948, 0x00000400); nv_mthd(dev, 0x9097, 0x0988, 0x00000400); nv_mthd(dev, 0x9097, 0x09c8, 0x00000400); nv_mthd(dev, 0x9097, 0x080c, 0x00000300); nv_mthd(dev, 0x9097, 0x084c, 0x00000300); nv_mthd(dev, 0x9097, 0x088c, 0x00000300); nv_mthd(dev, 0x9097, 0x08cc, 0x00000300); nv_mthd(dev, 0x9097, 0x090c, 0x00000300); nv_mthd(dev, 0x9097, 0x094c, 0x00000300); nv_mthd(dev, 0x9097, 0x098c, 0x00000300); nv_mthd(dev, 0x9097, 0x09cc, 0x00000300); nv_mthd(dev, 0x9097, 0x0810, 0x000000cf); nv_mthd(dev, 0x9097, 0x0850, 0x00000000); nv_mthd(dev, 0x9097, 0x0890, 0x00000000); nv_mthd(dev, 
0x9097, 0x08d0, 0x00000000); nv_mthd(dev, 0x9097, 0x0910, 0x00000000); nv_mthd(dev, 0x9097, 0x0950, 0x00000000); nv_mthd(dev, 0x9097, 0x0990, 0x00000000); nv_mthd(dev, 0x9097, 0x09d0, 0x00000000); nv_mthd(dev, 0x9097, 0x0814, 0x00000040); nv_mthd(dev, 0x9097, 0x0854, 0x00000040); nv_mthd(dev, 0x9097, 0x0894, 0x00000040); nv_mthd(dev, 0x9097, 0x08d4, 0x00000040); nv_mthd(dev, 0x9097, 0x0914, 0x00000040); nv_mthd(dev, 0x9097, 0x0954, 0x00000040); nv_mthd(dev, 0x9097, 0x0994, 0x00000040); nv_mthd(dev, 0x9097, 0x09d4, 0x00000040); nv_mthd(dev, 0x9097, 0x0818, 0x00000001); nv_mthd(dev, 0x9097, 0x0858, 0x00000001); nv_mthd(dev, 0x9097, 0x0898, 0x00000001); nv_mthd(dev, 0x9097, 0x08d8, 0x00000001); nv_mthd(dev, 0x9097, 0x0918, 0x00000001); nv_mthd(dev, 0x9097, 0x0958, 0x00000001); nv_mthd(dev, 0x9097, 0x0998, 0x00000001); nv_mthd(dev, 0x9097, 0x09d8, 0x00000001); nv_mthd(dev, 0x9097, 0x081c, 0x00000000); nv_mthd(dev, 0x9097, 0x085c, 0x00000000); nv_mthd(dev, 0x9097, 0x089c, 0x00000000); nv_mthd(dev, 0x9097, 0x08dc, 0x00000000); nv_mthd(dev, 0x9097, 0x091c, 0x00000000); nv_mthd(dev, 0x9097, 0x095c, 0x00000000); nv_mthd(dev, 0x9097, 0x099c, 0x00000000); nv_mthd(dev, 0x9097, 0x09dc, 0x00000000); nv_mthd(dev, 0x9097, 0x0820, 0x00000000); nv_mthd(dev, 0x9097, 0x0860, 0x00000000); nv_mthd(dev, 0x9097, 0x08a0, 0x00000000); nv_mthd(dev, 0x9097, 0x08e0, 0x00000000); nv_mthd(dev, 0x9097, 0x0920, 0x00000000); nv_mthd(dev, 0x9097, 0x0960, 0x00000000); nv_mthd(dev, 0x9097, 0x09a0, 0x00000000); nv_mthd(dev, 0x9097, 0x09e0, 0x00000000); nv_mthd(dev, 0x9097, 0x2700, 0x00000000); nv_mthd(dev, 0x9097, 0x2720, 0x00000000); nv_mthd(dev, 0x9097, 0x2740, 0x00000000); nv_mthd(dev, 0x9097, 0x2760, 0x00000000); nv_mthd(dev, 0x9097, 0x2780, 0x00000000); nv_mthd(dev, 0x9097, 0x27a0, 0x00000000); nv_mthd(dev, 0x9097, 0x27c0, 0x00000000); nv_mthd(dev, 0x9097, 0x27e0, 0x00000000); nv_mthd(dev, 0x9097, 0x2704, 0x00000000); nv_mthd(dev, 0x9097, 0x2724, 0x00000000); nv_mthd(dev, 0x9097, 0x2744, 
0x00000000); nv_mthd(dev, 0x9097, 0x2764, 0x00000000); nv_mthd(dev, 0x9097, 0x2784, 0x00000000); nv_mthd(dev, 0x9097, 0x27a4, 0x00000000); nv_mthd(dev, 0x9097, 0x27c4, 0x00000000); nv_mthd(dev, 0x9097, 0x27e4, 0x00000000); nv_mthd(dev, 0x9097, 0x2708, 0x00000000); nv_mthd(dev, 0x9097, 0x2728, 0x00000000); nv_mthd(dev, 0x9097, 0x2748, 0x00000000); nv_mthd(dev, 0x9097, 0x2768, 0x00000000); nv_mthd(dev, 0x9097, 0x2788, 0x00000000); nv_mthd(dev, 0x9097, 0x27a8, 0x00000000); nv_mthd(dev, 0x9097, 0x27c8, 0x00000000); nv_mthd(dev, 0x9097, 0x27e8, 0x00000000); nv_mthd(dev, 0x9097, 0x270c, 0x00000000); nv_mthd(dev, 0x9097, 0x272c, 0x00000000); nv_mthd(dev, 0x9097, 0x274c, 0x00000000); nv_mthd(dev, 0x9097, 0x276c, 0x00000000); nv_mthd(dev, 0x9097, 0x278c, 0x00000000); nv_mthd(dev, 0x9097, 0x27ac, 0x00000000); nv_mthd(dev, 0x9097, 0x27cc, 0x00000000); nv_mthd(dev, 0x9097, 0x27ec, 0x00000000); nv_mthd(dev, 0x9097, 0x2710, 0x00014000); nv_mthd(dev, 0x9097, 0x2730, 0x00014000); nv_mthd(dev, 0x9097, 0x2750, 0x00014000); nv_mthd(dev, 0x9097, 0x2770, 0x00014000); nv_mthd(dev, 0x9097, 0x2790, 0x00014000); nv_mthd(dev, 0x9097, 0x27b0, 0x00014000); nv_mthd(dev, 0x9097, 0x27d0, 0x00014000); nv_mthd(dev, 0x9097, 0x27f0, 0x00014000); nv_mthd(dev, 0x9097, 0x2714, 0x00000040); nv_mthd(dev, 0x9097, 0x2734, 0x00000040); nv_mthd(dev, 0x9097, 0x2754, 0x00000040); nv_mthd(dev, 0x9097, 0x2774, 0x00000040); nv_mthd(dev, 0x9097, 0x2794, 0x00000040); nv_mthd(dev, 0x9097, 0x27b4, 0x00000040); nv_mthd(dev, 0x9097, 0x27d4, 0x00000040); nv_mthd(dev, 0x9097, 0x27f4, 0x00000040); nv_mthd(dev, 0x9097, 0x1c00, 0x00000000); nv_mthd(dev, 0x9097, 0x1c10, 0x00000000); nv_mthd(dev, 0x9097, 0x1c20, 0x00000000); nv_mthd(dev, 0x9097, 0x1c30, 0x00000000); nv_mthd(dev, 0x9097, 0x1c40, 0x00000000); nv_mthd(dev, 0x9097, 0x1c50, 0x00000000); nv_mthd(dev, 0x9097, 0x1c60, 0x00000000); nv_mthd(dev, 0x9097, 0x1c70, 0x00000000); nv_mthd(dev, 0x9097, 0x1c80, 0x00000000); nv_mthd(dev, 0x9097, 0x1c90, 0x00000000); nv_mthd(dev, 
0x9097, 0x1ca0, 0x00000000); nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000); nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000); nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000); nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000); nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000); nv_mthd(dev, 0x9097, 0x1c04, 0x00000000); nv_mthd(dev, 0x9097, 0x1c14, 0x00000000); nv_mthd(dev, 0x9097, 0x1c24, 0x00000000); nv_mthd(dev, 0x9097, 0x1c34, 0x00000000); nv_mthd(dev, 0x9097, 0x1c44, 0x00000000); nv_mthd(dev, 0x9097, 0x1c54, 0x00000000); nv_mthd(dev, 0x9097, 0x1c64, 0x00000000); nv_mthd(dev, 0x9097, 0x1c74, 0x00000000); nv_mthd(dev, 0x9097, 0x1c84, 0x00000000); nv_mthd(dev, 0x9097, 0x1c94, 0x00000000); nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000); nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000); nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000); nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000); nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000); nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000); nv_mthd(dev, 0x9097, 0x1c08, 0x00000000); nv_mthd(dev, 0x9097, 0x1c18, 0x00000000); nv_mthd(dev, 0x9097, 0x1c28, 0x00000000); nv_mthd(dev, 0x9097, 0x1c38, 0x00000000); nv_mthd(dev, 0x9097, 0x1c48, 0x00000000); nv_mthd(dev, 0x9097, 0x1c58, 0x00000000); nv_mthd(dev, 0x9097, 0x1c68, 0x00000000); nv_mthd(dev, 0x9097, 0x1c78, 0x00000000); nv_mthd(dev, 0x9097, 0x1c88, 0x00000000); nv_mthd(dev, 0x9097, 0x1c98, 0x00000000); nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000); nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000); nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000); nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000); nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000); nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000); nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000); nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000); nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000); nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000); nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000); nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000); nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000); nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000); nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000); nv_mthd(dev, 0x9097, 0x1c9c, 
0x00000000); nv_mthd(dev, 0x9097, 0x1cac, 0x00000000); nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000); nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000); nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000); nv_mthd(dev, 0x9097, 0x1cec, 0x00000000); nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000); nv_mthd(dev, 0x9097, 0x1d00, 0x00000000); nv_mthd(dev, 0x9097, 0x1d10, 0x00000000); nv_mthd(dev, 0x9097, 0x1d20, 0x00000000); nv_mthd(dev, 0x9097, 0x1d30, 0x00000000); nv_mthd(dev, 0x9097, 0x1d40, 0x00000000); nv_mthd(dev, 0x9097, 0x1d50, 0x00000000); nv_mthd(dev, 0x9097, 0x1d60, 0x00000000); nv_mthd(dev, 0x9097, 0x1d70, 0x00000000); nv_mthd(dev, 0x9097, 0x1d80, 0x00000000); nv_mthd(dev, 0x9097, 0x1d90, 0x00000000); nv_mthd(dev, 0x9097, 0x1da0, 0x00000000); nv_mthd(dev, 0x9097, 0x1db0, 0x00000000); nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000); nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000); nv_mthd(dev, 0x9097, 0x1de0, 0x00000000); nv_mthd(dev, 0x9097, 0x1df0, 0x00000000); nv_mthd(dev, 0x9097, 0x1d04, 0x00000000); nv_mthd(dev, 0x9097, 0x1d14, 0x00000000); nv_mthd(dev, 0x9097, 0x1d24, 0x00000000); nv_mthd(dev, 0x9097, 0x1d34, 0x00000000); nv_mthd(dev, 0x9097, 0x1d44, 0x00000000); nv_mthd(dev, 0x9097, 0x1d54, 0x00000000); nv_mthd(dev, 0x9097, 0x1d64, 0x00000000); nv_mthd(dev, 0x9097, 0x1d74, 0x00000000); nv_mthd(dev, 0x9097, 0x1d84, 0x00000000); nv_mthd(dev, 0x9097, 0x1d94, 0x00000000); nv_mthd(dev, 0x9097, 0x1da4, 0x00000000); nv_mthd(dev, 0x9097, 0x1db4, 0x00000000); nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000); nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000); nv_mthd(dev, 0x9097, 0x1de4, 0x00000000); nv_mthd(dev, 0x9097, 0x1df4, 0x00000000); nv_mthd(dev, 0x9097, 0x1d08, 0x00000000); nv_mthd(dev, 0x9097, 0x1d18, 0x00000000); nv_mthd(dev, 0x9097, 0x1d28, 0x00000000); nv_mthd(dev, 0x9097, 0x1d38, 0x00000000); nv_mthd(dev, 0x9097, 0x1d48, 0x00000000); nv_mthd(dev, 0x9097, 0x1d58, 0x00000000); nv_mthd(dev, 0x9097, 0x1d68, 0x00000000); nv_mthd(dev, 0x9097, 0x1d78, 0x00000000); nv_mthd(dev, 0x9097, 0x1d88, 0x00000000); nv_mthd(dev, 
0x9097, 0x1d98, 0x00000000); nv_mthd(dev, 0x9097, 0x1da8, 0x00000000); nv_mthd(dev, 0x9097, 0x1db8, 0x00000000); nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000); nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000); nv_mthd(dev, 0x9097, 0x1de8, 0x00000000); nv_mthd(dev, 0x9097, 0x1df8, 0x00000000); nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000); nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000); nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000); nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000); nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000); nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000); nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000); nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000); nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000); nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000); nv_mthd(dev, 0x9097, 0x1dac, 0x00000000); nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000); nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000); nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000); nv_mthd(dev, 0x9097, 0x1dec, 0x00000000); nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000); nv_mthd(dev, 0x9097, 0x1f00, 0x00000000); nv_mthd(dev, 0x9097, 0x1f08, 0x00000000); nv_mthd(dev, 0x9097, 0x1f10, 0x00000000); nv_mthd(dev, 0x9097, 0x1f18, 0x00000000); nv_mthd(dev, 0x9097, 0x1f20, 0x00000000); nv_mthd(dev, 0x9097, 0x1f28, 0x00000000); nv_mthd(dev, 0x9097, 0x1f30, 0x00000000); nv_mthd(dev, 0x9097, 0x1f38, 0x00000000); nv_mthd(dev, 0x9097, 0x1f40, 0x00000000); nv_mthd(dev, 0x9097, 0x1f48, 0x00000000); nv_mthd(dev, 0x9097, 0x1f50, 0x00000000); nv_mthd(dev, 0x9097, 0x1f58, 0x00000000); nv_mthd(dev, 0x9097, 0x1f60, 0x00000000); nv_mthd(dev, 0x9097, 0x1f68, 0x00000000); nv_mthd(dev, 0x9097, 0x1f70, 0x00000000); nv_mthd(dev, 0x9097, 0x1f78, 0x00000000); nv_mthd(dev, 0x9097, 0x1f04, 0x00000000); nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000); nv_mthd(dev, 0x9097, 0x1f14, 0x00000000); nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000); nv_mthd(dev, 0x9097, 0x1f24, 0x00000000); nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000); nv_mthd(dev, 0x9097, 0x1f34, 0x00000000); nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000); nv_mthd(dev, 0x9097, 0x1f44, 
0x00000000); nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000); nv_mthd(dev, 0x9097, 0x1f54, 0x00000000); nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000); nv_mthd(dev, 0x9097, 0x1f64, 0x00000000); nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000); nv_mthd(dev, 0x9097, 0x1f74, 0x00000000); nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000); nv_mthd(dev, 0x9097, 0x1f80, 0x00000000); nv_mthd(dev, 0x9097, 0x1f88, 0x00000000); nv_mthd(dev, 0x9097, 0x1f90, 0x00000000); nv_mthd(dev, 0x9097, 0x1f98, 0x00000000); nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000); nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000); nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000); nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000); nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000); nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000); nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000); nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000); nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000); nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000); nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000); nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000); nv_mthd(dev, 0x9097, 0x1f84, 0x00000000); nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000); nv_mthd(dev, 0x9097, 0x1f94, 0x00000000); nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000); nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000); nv_mthd(dev, 0x9097, 0x1fac, 0x00000000); nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000); nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000); nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000); nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000); nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000); nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000); nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000); nv_mthd(dev, 0x9097, 0x1fec, 0x00000000); nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000); nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000); nv_mthd(dev, 0x9097, 0x2200, 0x00000022); nv_mthd(dev, 0x9097, 0x2210, 0x00000022); nv_mthd(dev, 0x9097, 0x2220, 0x00000022); nv_mthd(dev, 0x9097, 0x2230, 0x00000022); nv_mthd(dev, 0x9097, 0x2240, 0x00000022); nv_mthd(dev, 0x9097, 0x2000, 0x00000000); nv_mthd(dev, 0x9097, 0x2040, 0x00000011); nv_mthd(dev, 0x9097, 0x2080, 0x00000020); nv_mthd(dev, 
0x9097, 0x20c0, 0x00000030); nv_mthd(dev, 0x9097, 0x2100, 0x00000040); nv_mthd(dev, 0x9097, 0x2140, 0x00000051); nv_mthd(dev, 0x9097, 0x200c, 0x00000001); nv_mthd(dev, 0x9097, 0x204c, 0x00000001); nv_mthd(dev, 0x9097, 0x208c, 0x00000001); nv_mthd(dev, 0x9097, 0x20cc, 0x00000001); nv_mthd(dev, 0x9097, 0x210c, 0x00000001); nv_mthd(dev, 0x9097, 0x214c, 0x00000001); nv_mthd(dev, 0x9097, 0x2010, 0x00000000); nv_mthd(dev, 0x9097, 0x2050, 0x00000000); nv_mthd(dev, 0x9097, 0x2090, 0x00000001); nv_mthd(dev, 0x9097, 0x20d0, 0x00000002); nv_mthd(dev, 0x9097, 0x2110, 0x00000003); nv_mthd(dev, 0x9097, 0x2150, 0x00000004); nv_mthd(dev, 0x9097, 0x0380, 0x00000000); nv_mthd(dev, 0x9097, 0x03a0, 0x00000000); nv_mthd(dev, 0x9097, 0x03c0, 0x00000000); nv_mthd(dev, 0x9097, 0x03e0, 0x00000000); nv_mthd(dev, 0x9097, 0x0384, 0x00000000); nv_mthd(dev, 0x9097, 0x03a4, 0x00000000); nv_mthd(dev, 0x9097, 0x03c4, 0x00000000); nv_mthd(dev, 0x9097, 0x03e4, 0x00000000); nv_mthd(dev, 0x9097, 0x0388, 0x00000000); nv_mthd(dev, 0x9097, 0x03a8, 0x00000000); nv_mthd(dev, 0x9097, 0x03c8, 0x00000000); nv_mthd(dev, 0x9097, 0x03e8, 0x00000000); nv_mthd(dev, 0x9097, 0x038c, 0x00000000); nv_mthd(dev, 0x9097, 0x03ac, 0x00000000); nv_mthd(dev, 0x9097, 0x03cc, 0x00000000); nv_mthd(dev, 0x9097, 0x03ec, 0x00000000); nv_mthd(dev, 0x9097, 0x0700, 0x00000000); nv_mthd(dev, 0x9097, 0x0710, 0x00000000); nv_mthd(dev, 0x9097, 0x0720, 0x00000000); nv_mthd(dev, 0x9097, 0x0730, 0x00000000); nv_mthd(dev, 0x9097, 0x0704, 0x00000000); nv_mthd(dev, 0x9097, 0x0714, 0x00000000); nv_mthd(dev, 0x9097, 0x0724, 0x00000000); nv_mthd(dev, 0x9097, 0x0734, 0x00000000); nv_mthd(dev, 0x9097, 0x0708, 0x00000000); nv_mthd(dev, 0x9097, 0x0718, 0x00000000); nv_mthd(dev, 0x9097, 0x0728, 0x00000000); nv_mthd(dev, 0x9097, 0x0738, 0x00000000); nv_mthd(dev, 0x9097, 0x2800, 0x00000000); nv_mthd(dev, 0x9097, 0x2804, 0x00000000); nv_mthd(dev, 0x9097, 0x2808, 0x00000000); nv_mthd(dev, 0x9097, 0x280c, 0x00000000); nv_mthd(dev, 0x9097, 0x2810, 
0x00000000); nv_mthd(dev, 0x9097, 0x2814, 0x00000000); nv_mthd(dev, 0x9097, 0x2818, 0x00000000); nv_mthd(dev, 0x9097, 0x281c, 0x00000000); nv_mthd(dev, 0x9097, 0x2820, 0x00000000); nv_mthd(dev, 0x9097, 0x2824, 0x00000000); nv_mthd(dev, 0x9097, 0x2828, 0x00000000); nv_mthd(dev, 0x9097, 0x282c, 0x00000000); nv_mthd(dev, 0x9097, 0x2830, 0x00000000); nv_mthd(dev, 0x9097, 0x2834, 0x00000000); nv_mthd(dev, 0x9097, 0x2838, 0x00000000); nv_mthd(dev, 0x9097, 0x283c, 0x00000000); nv_mthd(dev, 0x9097, 0x2840, 0x00000000); nv_mthd(dev, 0x9097, 0x2844, 0x00000000); nv_mthd(dev, 0x9097, 0x2848, 0x00000000); nv_mthd(dev, 0x9097, 0x284c, 0x00000000); nv_mthd(dev, 0x9097, 0x2850, 0x00000000); nv_mthd(dev, 0x9097, 0x2854, 0x00000000); nv_mthd(dev, 0x9097, 0x2858, 0x00000000); nv_mthd(dev, 0x9097, 0x285c, 0x00000000); nv_mthd(dev, 0x9097, 0x2860, 0x00000000); nv_mthd(dev, 0x9097, 0x2864, 0x00000000); nv_mthd(dev, 0x9097, 0x2868, 0x00000000); nv_mthd(dev, 0x9097, 0x286c, 0x00000000); nv_mthd(dev, 0x9097, 0x2870, 0x00000000); nv_mthd(dev, 0x9097, 0x2874, 0x00000000); nv_mthd(dev, 0x9097, 0x2878, 0x00000000); nv_mthd(dev, 0x9097, 0x287c, 0x00000000); nv_mthd(dev, 0x9097, 0x2880, 0x00000000); nv_mthd(dev, 0x9097, 0x2884, 0x00000000); nv_mthd(dev, 0x9097, 0x2888, 0x00000000); nv_mthd(dev, 0x9097, 0x288c, 0x00000000); nv_mthd(dev, 0x9097, 0x2890, 0x00000000); nv_mthd(dev, 0x9097, 0x2894, 0x00000000); nv_mthd(dev, 0x9097, 0x2898, 0x00000000); nv_mthd(dev, 0x9097, 0x289c, 0x00000000); nv_mthd(dev, 0x9097, 0x28a0, 0x00000000); nv_mthd(dev, 0x9097, 0x28a4, 0x00000000); nv_mthd(dev, 0x9097, 0x28a8, 0x00000000); nv_mthd(dev, 0x9097, 0x28ac, 0x00000000); nv_mthd(dev, 0x9097, 0x28b0, 0x00000000); nv_mthd(dev, 0x9097, 0x28b4, 0x00000000); nv_mthd(dev, 0x9097, 0x28b8, 0x00000000); nv_mthd(dev, 0x9097, 0x28bc, 0x00000000); nv_mthd(dev, 0x9097, 0x28c0, 0x00000000); nv_mthd(dev, 0x9097, 0x28c4, 0x00000000); nv_mthd(dev, 0x9097, 0x28c8, 0x00000000); nv_mthd(dev, 0x9097, 0x28cc, 0x00000000); nv_mthd(dev, 
0x9097, 0x28d0, 0x00000000); nv_mthd(dev, 0x9097, 0x28d4, 0x00000000); nv_mthd(dev, 0x9097, 0x28d8, 0x00000000); nv_mthd(dev, 0x9097, 0x28dc, 0x00000000); nv_mthd(dev, 0x9097, 0x28e0, 0x00000000); nv_mthd(dev, 0x9097, 0x28e4, 0x00000000); nv_mthd(dev, 0x9097, 0x28e8, 0x00000000); nv_mthd(dev, 0x9097, 0x28ec, 0x00000000); nv_mthd(dev, 0x9097, 0x28f0, 0x00000000); nv_mthd(dev, 0x9097, 0x28f4, 0x00000000); nv_mthd(dev, 0x9097, 0x28f8, 0x00000000); nv_mthd(dev, 0x9097, 0x28fc, 0x00000000); nv_mthd(dev, 0x9097, 0x2900, 0x00000000); nv_mthd(dev, 0x9097, 0x2904, 0x00000000); nv_mthd(dev, 0x9097, 0x2908, 0x00000000); nv_mthd(dev, 0x9097, 0x290c, 0x00000000); nv_mthd(dev, 0x9097, 0x2910, 0x00000000); nv_mthd(dev, 0x9097, 0x2914, 0x00000000); nv_mthd(dev, 0x9097, 0x2918, 0x00000000); nv_mthd(dev, 0x9097, 0x291c, 0x00000000); nv_mthd(dev, 0x9097, 0x2920, 0x00000000); nv_mthd(dev, 0x9097, 0x2924, 0x00000000); nv_mthd(dev, 0x9097, 0x2928, 0x00000000); nv_mthd(dev, 0x9097, 0x292c, 0x00000000); nv_mthd(dev, 0x9097, 0x2930, 0x00000000); nv_mthd(dev, 0x9097, 0x2934, 0x00000000); nv_mthd(dev, 0x9097, 0x2938, 0x00000000); nv_mthd(dev, 0x9097, 0x293c, 0x00000000); nv_mthd(dev, 0x9097, 0x2940, 0x00000000); nv_mthd(dev, 0x9097, 0x2944, 0x00000000); nv_mthd(dev, 0x9097, 0x2948, 0x00000000); nv_mthd(dev, 0x9097, 0x294c, 0x00000000); nv_mthd(dev, 0x9097, 0x2950, 0x00000000); nv_mthd(dev, 0x9097, 0x2954, 0x00000000); nv_mthd(dev, 0x9097, 0x2958, 0x00000000); nv_mthd(dev, 0x9097, 0x295c, 0x00000000); nv_mthd(dev, 0x9097, 0x2960, 0x00000000); nv_mthd(dev, 0x9097, 0x2964, 0x00000000); nv_mthd(dev, 0x9097, 0x2968, 0x00000000); nv_mthd(dev, 0x9097, 0x296c, 0x00000000); nv_mthd(dev, 0x9097, 0x2970, 0x00000000); nv_mthd(dev, 0x9097, 0x2974, 0x00000000); nv_mthd(dev, 0x9097, 0x2978, 0x00000000); nv_mthd(dev, 0x9097, 0x297c, 0x00000000); nv_mthd(dev, 0x9097, 0x2980, 0x00000000); nv_mthd(dev, 0x9097, 0x2984, 0x00000000); nv_mthd(dev, 0x9097, 0x2988, 0x00000000); nv_mthd(dev, 0x9097, 0x298c, 
0x00000000); nv_mthd(dev, 0x9097, 0x2990, 0x00000000); nv_mthd(dev, 0x9097, 0x2994, 0x00000000); nv_mthd(dev, 0x9097, 0x2998, 0x00000000); nv_mthd(dev, 0x9097, 0x299c, 0x00000000); nv_mthd(dev, 0x9097, 0x29a0, 0x00000000); nv_mthd(dev, 0x9097, 0x29a4, 0x00000000); nv_mthd(dev, 0x9097, 0x29a8, 0x00000000); nv_mthd(dev, 0x9097, 0x29ac, 0x00000000); nv_mthd(dev, 0x9097, 0x29b0, 0x00000000); nv_mthd(dev, 0x9097, 0x29b4, 0x00000000); nv_mthd(dev, 0x9097, 0x29b8, 0x00000000); nv_mthd(dev, 0x9097, 0x29bc, 0x00000000); nv_mthd(dev, 0x9097, 0x29c0, 0x00000000); nv_mthd(dev, 0x9097, 0x29c4, 0x00000000); nv_mthd(dev, 0x9097, 0x29c8, 0x00000000); nv_mthd(dev, 0x9097, 0x29cc, 0x00000000); nv_mthd(dev, 0x9097, 0x29d0, 0x00000000); nv_mthd(dev, 0x9097, 0x29d4, 0x00000000); nv_mthd(dev, 0x9097, 0x29d8, 0x00000000); nv_mthd(dev, 0x9097, 0x29dc, 0x00000000); nv_mthd(dev, 0x9097, 0x29e0, 0x00000000); nv_mthd(dev, 0x9097, 0x29e4, 0x00000000); nv_mthd(dev, 0x9097, 0x29e8, 0x00000000); nv_mthd(dev, 0x9097, 0x29ec, 0x00000000); nv_mthd(dev, 0x9097, 0x29f0, 0x00000000); nv_mthd(dev, 0x9097, 0x29f4, 0x00000000); nv_mthd(dev, 0x9097, 0x29f8, 0x00000000); nv_mthd(dev, 0x9097, 0x29fc, 0x00000000); nv_mthd(dev, 0x9097, 0x0a00, 0x00000000); nv_mthd(dev, 0x9097, 0x0a20, 0x00000000); nv_mthd(dev, 0x9097, 0x0a40, 0x00000000); nv_mthd(dev, 0x9097, 0x0a60, 0x00000000); nv_mthd(dev, 0x9097, 0x0a80, 0x00000000); nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000); nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000); nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000); nv_mthd(dev, 0x9097, 0x0b00, 0x00000000); nv_mthd(dev, 0x9097, 0x0b20, 0x00000000); nv_mthd(dev, 0x9097, 0x0b40, 0x00000000); nv_mthd(dev, 0x9097, 0x0b60, 0x00000000); nv_mthd(dev, 0x9097, 0x0b80, 0x00000000); nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000); nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000); nv_mthd(dev, 0x9097, 0x0be0, 0x00000000); nv_mthd(dev, 0x9097, 0x0a04, 0x00000000); nv_mthd(dev, 0x9097, 0x0a24, 0x00000000); nv_mthd(dev, 0x9097, 0x0a44, 0x00000000); nv_mthd(dev, 
0x9097, 0x0a64, 0x00000000); nv_mthd(dev, 0x9097, 0x0a84, 0x00000000); nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000); nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000); nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000); nv_mthd(dev, 0x9097, 0x0b04, 0x00000000); nv_mthd(dev, 0x9097, 0x0b24, 0x00000000); nv_mthd(dev, 0x9097, 0x0b44, 0x00000000); nv_mthd(dev, 0x9097, 0x0b64, 0x00000000); nv_mthd(dev, 0x9097, 0x0b84, 0x00000000); nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000); nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000); nv_mthd(dev, 0x9097, 0x0be4, 0x00000000); nv_mthd(dev, 0x9097, 0x0a08, 0x00000000); nv_mthd(dev, 0x9097, 0x0a28, 0x00000000); nv_mthd(dev, 0x9097, 0x0a48, 0x00000000); nv_mthd(dev, 0x9097, 0x0a68, 0x00000000); nv_mthd(dev, 0x9097, 0x0a88, 0x00000000); nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000); nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000); nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000); nv_mthd(dev, 0x9097, 0x0b08, 0x00000000); nv_mthd(dev, 0x9097, 0x0b28, 0x00000000); nv_mthd(dev, 0x9097, 0x0b48, 0x00000000); nv_mthd(dev, 0x9097, 0x0b68, 0x00000000); nv_mthd(dev, 0x9097, 0x0b88, 0x00000000); nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000); nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000); nv_mthd(dev, 0x9097, 0x0be8, 0x00000000); nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000); nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000); nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000); nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000); nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000); nv_mthd(dev, 0x9097, 0x0aac, 0x00000000); nv_mthd(dev, 0x9097, 0x0acc, 0x00000000); nv_mthd(dev, 0x9097, 0x0aec, 0x00000000); nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000); nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000); nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000); nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000); nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000); nv_mthd(dev, 0x9097, 0x0bac, 0x00000000); nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000); nv_mthd(dev, 0x9097, 0x0bec, 0x00000000); nv_mthd(dev, 0x9097, 0x0a10, 0x00000000); nv_mthd(dev, 0x9097, 0x0a30, 0x00000000); nv_mthd(dev, 0x9097, 0x0a50, 
0x00000000); nv_mthd(dev, 0x9097, 0x0a70, 0x00000000); nv_mthd(dev, 0x9097, 0x0a90, 0x00000000); nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000); nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000); nv_mthd(dev, 0x9097, 0x0af0, 0x00000000); nv_mthd(dev, 0x9097, 0x0b10, 0x00000000); nv_mthd(dev, 0x9097, 0x0b30, 0x00000000); nv_mthd(dev, 0x9097, 0x0b50, 0x00000000); nv_mthd(dev, 0x9097, 0x0b70, 0x00000000); nv_mthd(dev, 0x9097, 0x0b90, 0x00000000); nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000); nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000); nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000); nv_mthd(dev, 0x9097, 0x0a14, 0x00000000); nv_mthd(dev, 0x9097, 0x0a34, 0x00000000); nv_mthd(dev, 0x9097, 0x0a54, 0x00000000); nv_mthd(dev, 0x9097, 0x0a74, 0x00000000); nv_mthd(dev, 0x9097, 0x0a94, 0x00000000); nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000); nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000); nv_mthd(dev, 0x9097, 0x0af4, 0x00000000); nv_mthd(dev, 0x9097, 0x0b14, 0x00000000); nv_mthd(dev, 0x9097, 0x0b34, 0x00000000); nv_mthd(dev, 0x9097, 0x0b54, 0x00000000); nv_mthd(dev, 0x9097, 0x0b74, 0x00000000); nv_mthd(dev, 0x9097, 0x0b94, 0x00000000); nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000); nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000); nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000); nv_mthd(dev, 0x9097, 0x0c00, 0x00000000); nv_mthd(dev, 0x9097, 0x0c10, 0x00000000); nv_mthd(dev, 0x9097, 0x0c20, 0x00000000); nv_mthd(dev, 0x9097, 0x0c30, 0x00000000); nv_mthd(dev, 0x9097, 0x0c40, 0x00000000); nv_mthd(dev, 0x9097, 0x0c50, 0x00000000); nv_mthd(dev, 0x9097, 0x0c60, 0x00000000); nv_mthd(dev, 0x9097, 0x0c70, 0x00000000); nv_mthd(dev, 0x9097, 0x0c80, 0x00000000); nv_mthd(dev, 0x9097, 0x0c90, 0x00000000); nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000); nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000); nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000); nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000); nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000); nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000); nv_mthd(dev, 0x9097, 0x0c04, 0x00000000); nv_mthd(dev, 0x9097, 0x0c14, 0x00000000); nv_mthd(dev, 
0x9097, 0x0c24, 0x00000000); nv_mthd(dev, 0x9097, 0x0c34, 0x00000000); nv_mthd(dev, 0x9097, 0x0c44, 0x00000000); nv_mthd(dev, 0x9097, 0x0c54, 0x00000000); nv_mthd(dev, 0x9097, 0x0c64, 0x00000000); nv_mthd(dev, 0x9097, 0x0c74, 0x00000000); nv_mthd(dev, 0x9097, 0x0c84, 0x00000000); nv_mthd(dev, 0x9097, 0x0c94, 0x00000000); nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000); nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000); nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000); nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000); nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000); nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000); nv_mthd(dev, 0x9097, 0x0c08, 0x00000000); nv_mthd(dev, 0x9097, 0x0c18, 0x00000000); nv_mthd(dev, 0x9097, 0x0c28, 0x00000000); nv_mthd(dev, 0x9097, 0x0c38, 0x00000000); nv_mthd(dev, 0x9097, 0x0c48, 0x00000000); nv_mthd(dev, 0x9097, 0x0c58, 0x00000000); nv_mthd(dev, 0x9097, 0x0c68, 0x00000000); nv_mthd(dev, 0x9097, 0x0c78, 0x00000000); nv_mthd(dev, 0x9097, 0x0c88, 0x00000000); nv_mthd(dev, 0x9097, 0x0c98, 0x00000000); nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000); nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000); nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000); nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000); nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000); nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000); nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000); nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000); nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000); nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000); nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000); nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000); nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000); nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000); nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000); nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000); nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000); nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000); nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000); nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000); nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000); nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000); nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d08, 
0xffff0000); nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e00, 0x00000000); nv_mthd(dev, 0x9097, 0x0e10, 0x00000000); nv_mthd(dev, 0x9097, 0x0e20, 0x00000000); nv_mthd(dev, 0x9097, 0x0e30, 0x00000000); nv_mthd(dev, 0x9097, 0x0e40, 0x00000000); nv_mthd(dev, 0x9097, 0x0e50, 0x00000000); nv_mthd(dev, 0x9097, 0x0e60, 0x00000000); nv_mthd(dev, 0x9097, 0x0e70, 0x00000000); nv_mthd(dev, 0x9097, 0x0e80, 0x00000000); nv_mthd(dev, 0x9097, 0x0e90, 0x00000000); nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000); nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000); nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000); nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000); nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000); nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000); nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e54, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000); nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000); nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000); nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000); nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000); nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000); nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000); nv_mthd(dev, 
0x9097, 0x0e18, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000); nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000); nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000); nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000); nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000); nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000); nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000); nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d40, 0x00000000); nv_mthd(dev, 0x9097, 0x0d48, 0x00000000); nv_mthd(dev, 0x9097, 0x0d50, 0x00000000); nv_mthd(dev, 0x9097, 0x0d58, 0x00000000); nv_mthd(dev, 0x9097, 0x0d44, 0x00000000); nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000); nv_mthd(dev, 0x9097, 0x0d54, 0x00000000); nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000); nv_mthd(dev, 0x9097, 0x1e00, 0x00000001); nv_mthd(dev, 0x9097, 0x1e20, 0x00000001); nv_mthd(dev, 0x9097, 0x1e40, 0x00000001); nv_mthd(dev, 0x9097, 0x1e60, 0x00000001); nv_mthd(dev, 0x9097, 0x1e80, 0x00000001); nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001); nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001); nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001); nv_mthd(dev, 0x9097, 0x1e04, 0x00000001); nv_mthd(dev, 0x9097, 0x1e24, 0x00000001); nv_mthd(dev, 0x9097, 0x1e44, 0x00000001); nv_mthd(dev, 0x9097, 0x1e64, 0x00000001); nv_mthd(dev, 0x9097, 0x1e84, 0x00000001); nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001); nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001); nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001); nv_mthd(dev, 0x9097, 0x1e08, 0x00000002); nv_mthd(dev, 0x9097, 0x1e28, 0x00000002); nv_mthd(dev, 0x9097, 0x1e48, 0x00000002); nv_mthd(dev, 0x9097, 0x1e68, 0x00000002); nv_mthd(dev, 0x9097, 0x1e88, 0x00000002); nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002); nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002); nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002); nv_mthd(dev, 0x9097, 0x1e0c, 
0x00000001); nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001); nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001); nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001); nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001); nv_mthd(dev, 0x9097, 0x1eac, 0x00000001); nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001); nv_mthd(dev, 0x9097, 0x1eec, 0x00000001); nv_mthd(dev, 0x9097, 0x1e10, 0x00000001); nv_mthd(dev, 0x9097, 0x1e30, 0x00000001); nv_mthd(dev, 0x9097, 0x1e50, 0x00000001); nv_mthd(dev, 0x9097, 0x1e70, 0x00000001); nv_mthd(dev, 0x9097, 0x1e90, 0x00000001); nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001); nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001); nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001); nv_mthd(dev, 0x9097, 0x1e14, 0x00000002); nv_mthd(dev, 0x9097, 0x1e34, 0x00000002); nv_mthd(dev, 0x9097, 0x1e54, 0x00000002); nv_mthd(dev, 0x9097, 0x1e74, 0x00000002); nv_mthd(dev, 0x9097, 0x1e94, 0x00000002); nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002); nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002); nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002); nv_mthd(dev, 0x9097, 0x1e18, 0x00000001); nv_mthd(dev, 0x9097, 0x1e38, 0x00000001); nv_mthd(dev, 0x9097, 0x1e58, 0x00000001); nv_mthd(dev, 0x9097, 0x1e78, 0x00000001); nv_mthd(dev, 0x9097, 0x1e98, 0x00000001); nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001); nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001); nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001); nv_mthd(dev, 0x9097, 0x3400, 0x00000000); nv_mthd(dev, 0x9097, 0x3404, 0x00000000); nv_mthd(dev, 0x9097, 0x3408, 0x00000000); nv_mthd(dev, 0x9097, 0x340c, 0x00000000); nv_mthd(dev, 0x9097, 0x3410, 0x00000000); nv_mthd(dev, 0x9097, 0x3414, 0x00000000); nv_mthd(dev, 0x9097, 0x3418, 0x00000000); nv_mthd(dev, 0x9097, 0x341c, 0x00000000); nv_mthd(dev, 0x9097, 0x3420, 0x00000000); nv_mthd(dev, 0x9097, 0x3424, 0x00000000); nv_mthd(dev, 0x9097, 0x3428, 0x00000000); nv_mthd(dev, 0x9097, 0x342c, 0x00000000); nv_mthd(dev, 0x9097, 0x3430, 0x00000000); nv_mthd(dev, 0x9097, 0x3434, 0x00000000); nv_mthd(dev, 0x9097, 0x3438, 0x00000000); nv_mthd(dev, 0x9097, 0x343c, 0x00000000); nv_mthd(dev, 
0x9097, 0x3440, 0x00000000); nv_mthd(dev, 0x9097, 0x3444, 0x00000000); nv_mthd(dev, 0x9097, 0x3448, 0x00000000); nv_mthd(dev, 0x9097, 0x344c, 0x00000000); nv_mthd(dev, 0x9097, 0x3450, 0x00000000); nv_mthd(dev, 0x9097, 0x3454, 0x00000000); nv_mthd(dev, 0x9097, 0x3458, 0x00000000); nv_mthd(dev, 0x9097, 0x345c, 0x00000000); nv_mthd(dev, 0x9097, 0x3460, 0x00000000); nv_mthd(dev, 0x9097, 0x3464, 0x00000000); nv_mthd(dev, 0x9097, 0x3468, 0x00000000); nv_mthd(dev, 0x9097, 0x346c, 0x00000000); nv_mthd(dev, 0x9097, 0x3470, 0x00000000); nv_mthd(dev, 0x9097, 0x3474, 0x00000000); nv_mthd(dev, 0x9097, 0x3478, 0x00000000); nv_mthd(dev, 0x9097, 0x347c, 0x00000000); nv_mthd(dev, 0x9097, 0x3480, 0x00000000); nv_mthd(dev, 0x9097, 0x3484, 0x00000000); nv_mthd(dev, 0x9097, 0x3488, 0x00000000); nv_mthd(dev, 0x9097, 0x348c, 0x00000000); nv_mthd(dev, 0x9097, 0x3490, 0x00000000); nv_mthd(dev, 0x9097, 0x3494, 0x00000000); nv_mthd(dev, 0x9097, 0x3498, 0x00000000); nv_mthd(dev, 0x9097, 0x349c, 0x00000000); nv_mthd(dev, 0x9097, 0x34a0, 0x00000000); nv_mthd(dev, 0x9097, 0x34a4, 0x00000000); nv_mthd(dev, 0x9097, 0x34a8, 0x00000000); nv_mthd(dev, 0x9097, 0x34ac, 0x00000000); nv_mthd(dev, 0x9097, 0x34b0, 0x00000000); nv_mthd(dev, 0x9097, 0x34b4, 0x00000000); nv_mthd(dev, 0x9097, 0x34b8, 0x00000000); nv_mthd(dev, 0x9097, 0x34bc, 0x00000000); nv_mthd(dev, 0x9097, 0x34c0, 0x00000000); nv_mthd(dev, 0x9097, 0x34c4, 0x00000000); nv_mthd(dev, 0x9097, 0x34c8, 0x00000000); nv_mthd(dev, 0x9097, 0x34cc, 0x00000000); nv_mthd(dev, 0x9097, 0x34d0, 0x00000000); nv_mthd(dev, 0x9097, 0x34d4, 0x00000000); nv_mthd(dev, 0x9097, 0x34d8, 0x00000000); nv_mthd(dev, 0x9097, 0x34dc, 0x00000000); nv_mthd(dev, 0x9097, 0x34e0, 0x00000000); nv_mthd(dev, 0x9097, 0x34e4, 0x00000000); nv_mthd(dev, 0x9097, 0x34e8, 0x00000000); nv_mthd(dev, 0x9097, 0x34ec, 0x00000000); nv_mthd(dev, 0x9097, 0x34f0, 0x00000000); nv_mthd(dev, 0x9097, 0x34f4, 0x00000000); nv_mthd(dev, 0x9097, 0x34f8, 0x00000000); nv_mthd(dev, 0x9097, 0x34fc, 
0x00000000); nv_mthd(dev, 0x9097, 0x3500, 0x00000000); nv_mthd(dev, 0x9097, 0x3504, 0x00000000); nv_mthd(dev, 0x9097, 0x3508, 0x00000000); nv_mthd(dev, 0x9097, 0x350c, 0x00000000); nv_mthd(dev, 0x9097, 0x3510, 0x00000000); nv_mthd(dev, 0x9097, 0x3514, 0x00000000); nv_mthd(dev, 0x9097, 0x3518, 0x00000000); nv_mthd(dev, 0x9097, 0x351c, 0x00000000); nv_mthd(dev, 0x9097, 0x3520, 0x00000000); nv_mthd(dev, 0x9097, 0x3524, 0x00000000); nv_mthd(dev, 0x9097, 0x3528, 0x00000000); nv_mthd(dev, 0x9097, 0x352c, 0x00000000); nv_mthd(dev, 0x9097, 0x3530, 0x00000000); nv_mthd(dev, 0x9097, 0x3534, 0x00000000); nv_mthd(dev, 0x9097, 0x3538, 0x00000000); nv_mthd(dev, 0x9097, 0x353c, 0x00000000); nv_mthd(dev, 0x9097, 0x3540, 0x00000000); nv_mthd(dev, 0x9097, 0x3544, 0x00000000); nv_mthd(dev, 0x9097, 0x3548, 0x00000000); nv_mthd(dev, 0x9097, 0x354c, 0x00000000); nv_mthd(dev, 0x9097, 0x3550, 0x00000000); nv_mthd(dev, 0x9097, 0x3554, 0x00000000); nv_mthd(dev, 0x9097, 0x3558, 0x00000000); nv_mthd(dev, 0x9097, 0x355c, 0x00000000); nv_mthd(dev, 0x9097, 0x3560, 0x00000000); nv_mthd(dev, 0x9097, 0x3564, 0x00000000); nv_mthd(dev, 0x9097, 0x3568, 0x00000000); nv_mthd(dev, 0x9097, 0x356c, 0x00000000); nv_mthd(dev, 0x9097, 0x3570, 0x00000000); nv_mthd(dev, 0x9097, 0x3574, 0x00000000); nv_mthd(dev, 0x9097, 0x3578, 0x00000000); nv_mthd(dev, 0x9097, 0x357c, 0x00000000); nv_mthd(dev, 0x9097, 0x3580, 0x00000000); nv_mthd(dev, 0x9097, 0x3584, 0x00000000); nv_mthd(dev, 0x9097, 0x3588, 0x00000000); nv_mthd(dev, 0x9097, 0x358c, 0x00000000); nv_mthd(dev, 0x9097, 0x3590, 0x00000000); nv_mthd(dev, 0x9097, 0x3594, 0x00000000); nv_mthd(dev, 0x9097, 0x3598, 0x00000000); nv_mthd(dev, 0x9097, 0x359c, 0x00000000); nv_mthd(dev, 0x9097, 0x35a0, 0x00000000); nv_mthd(dev, 0x9097, 0x35a4, 0x00000000); nv_mthd(dev, 0x9097, 0x35a8, 0x00000000); nv_mthd(dev, 0x9097, 0x35ac, 0x00000000); nv_mthd(dev, 0x9097, 0x35b0, 0x00000000); nv_mthd(dev, 0x9097, 0x35b4, 0x00000000); nv_mthd(dev, 0x9097, 0x35b8, 0x00000000); nv_mthd(dev, 
0x9097, 0x35bc, 0x00000000); nv_mthd(dev, 0x9097, 0x35c0, 0x00000000); nv_mthd(dev, 0x9097, 0x35c4, 0x00000000); nv_mthd(dev, 0x9097, 0x35c8, 0x00000000); nv_mthd(dev, 0x9097, 0x35cc, 0x00000000); nv_mthd(dev, 0x9097, 0x35d0, 0x00000000); nv_mthd(dev, 0x9097, 0x35d4, 0x00000000); nv_mthd(dev, 0x9097, 0x35d8, 0x00000000); nv_mthd(dev, 0x9097, 0x35dc, 0x00000000); nv_mthd(dev, 0x9097, 0x35e0, 0x00000000); nv_mthd(dev, 0x9097, 0x35e4, 0x00000000); nv_mthd(dev, 0x9097, 0x35e8, 0x00000000); nv_mthd(dev, 0x9097, 0x35ec, 0x00000000); nv_mthd(dev, 0x9097, 0x35f0, 0x00000000); nv_mthd(dev, 0x9097, 0x35f4, 0x00000000); nv_mthd(dev, 0x9097, 0x35f8, 0x00000000); nv_mthd(dev, 0x9097, 0x35fc, 0x00000000); nv_mthd(dev, 0x9097, 0x030c, 0x00000001); nv_mthd(dev, 0x9097, 0x1944, 0x00000000); nv_mthd(dev, 0x9097, 0x1514, 0x00000000); nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff); nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881); nv_mthd(dev, 0x9097, 0x0fac, 0x00000001); nv_mthd(dev, 0x9097, 0x1538, 0x00000001); nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000); nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000); nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014); nv_mthd(dev, 0x9097, 0x0fec, 0x00000040); nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000); nv_mthd(dev, 0x9097, 0x179c, 0x00000000); nv_mthd(dev, 0x9097, 0x1228, 0x00000400); nv_mthd(dev, 0x9097, 0x122c, 0x00000300); nv_mthd(dev, 0x9097, 0x1230, 0x00010001); nv_mthd(dev, 0x9097, 0x07f8, 0x00000000); nv_mthd(dev, 0x9097, 0x15b4, 0x00000001); nv_mthd(dev, 0x9097, 0x15cc, 0x00000000); nv_mthd(dev, 0x9097, 0x1534, 0x00000000); nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000); nv_mthd(dev, 0x9097, 0x15d0, 0x00000000); nv_mthd(dev, 0x9097, 0x153c, 0x00000000); nv_mthd(dev, 0x9097, 0x16b4, 0x00000003); nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff); nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff); nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff); nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff); nv_mthd(dev, 0x9097, 0x0df8, 0x00000000); nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000); nv_mthd(dev, 0x9097, 0x1948, 
0x00000000); nv_mthd(dev, 0x9097, 0x1970, 0x00000001); nv_mthd(dev, 0x9097, 0x161c, 0x000009f0); nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010); nv_mthd(dev, 0x9097, 0x163c, 0x00000000); nv_mthd(dev, 0x9097, 0x15e4, 0x00000000); nv_mthd(dev, 0x9097, 0x1160, 0x25e00040); nv_mthd(dev, 0x9097, 0x1164, 0x25e00040); nv_mthd(dev, 0x9097, 0x1168, 0x25e00040); nv_mthd(dev, 0x9097, 0x116c, 0x25e00040); nv_mthd(dev, 0x9097, 0x1170, 0x25e00040); nv_mthd(dev, 0x9097, 0x1174, 0x25e00040); nv_mthd(dev, 0x9097, 0x1178, 0x25e00040); nv_mthd(dev, 0x9097, 0x117c, 0x25e00040); nv_mthd(dev, 0x9097, 0x1180, 0x25e00040); nv_mthd(dev, 0x9097, 0x1184, 0x25e00040); nv_mthd(dev, 0x9097, 0x1188, 0x25e00040); nv_mthd(dev, 0x9097, 0x118c, 0x25e00040); nv_mthd(dev, 0x9097, 0x1190, 0x25e00040); nv_mthd(dev, 0x9097, 0x1194, 0x25e00040); nv_mthd(dev, 0x9097, 0x1198, 0x25e00040); nv_mthd(dev, 0x9097, 0x119c, 0x25e00040); nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040); nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040); nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040); nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040); nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040); nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040); nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040); nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040); nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040); nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040); nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040); nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040); nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040); nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040); nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040); nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040); nv_mthd(dev, 0x9097, 0x1880, 0x00000000); nv_mthd(dev, 0x9097, 0x1884, 0x00000000); nv_mthd(dev, 0x9097, 0x1888, 0x00000000); nv_mthd(dev, 0x9097, 0x188c, 0x00000000); nv_mthd(dev, 0x9097, 0x1890, 0x00000000); nv_mthd(dev, 0x9097, 0x1894, 0x00000000); nv_mthd(dev, 0x9097, 0x1898, 0x00000000); nv_mthd(dev, 0x9097, 0x189c, 0x00000000); nv_mthd(dev, 0x9097, 0x18a0, 0x00000000); nv_mthd(dev, 0x9097, 0x18a4, 0x00000000); nv_mthd(dev, 
0x9097, 0x18a8, 0x00000000); nv_mthd(dev, 0x9097, 0x18ac, 0x00000000); nv_mthd(dev, 0x9097, 0x18b0, 0x00000000); nv_mthd(dev, 0x9097, 0x18b4, 0x00000000); nv_mthd(dev, 0x9097, 0x18b8, 0x00000000); nv_mthd(dev, 0x9097, 0x18bc, 0x00000000); nv_mthd(dev, 0x9097, 0x18c0, 0x00000000); nv_mthd(dev, 0x9097, 0x18c4, 0x00000000); nv_mthd(dev, 0x9097, 0x18c8, 0x00000000); nv_mthd(dev, 0x9097, 0x18cc, 0x00000000); nv_mthd(dev, 0x9097, 0x18d0, 0x00000000); nv_mthd(dev, 0x9097, 0x18d4, 0x00000000); nv_mthd(dev, 0x9097, 0x18d8, 0x00000000); nv_mthd(dev, 0x9097, 0x18dc, 0x00000000); nv_mthd(dev, 0x9097, 0x18e0, 0x00000000); nv_mthd(dev, 0x9097, 0x18e4, 0x00000000); nv_mthd(dev, 0x9097, 0x18e8, 0x00000000); nv_mthd(dev, 0x9097, 0x18ec, 0x00000000); nv_mthd(dev, 0x9097, 0x18f0, 0x00000000); nv_mthd(dev, 0x9097, 0x18f4, 0x00000000); nv_mthd(dev, 0x9097, 0x18f8, 0x00000000); nv_mthd(dev, 0x9097, 0x18fc, 0x00000000); nv_mthd(dev, 0x9097, 0x0f84, 0x00000000); nv_mthd(dev, 0x9097, 0x0f88, 0x00000000); nv_mthd(dev, 0x9097, 0x17c8, 0x00000000); nv_mthd(dev, 0x9097, 0x17cc, 0x00000000); nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff); nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff); nv_mthd(dev, 0x9097, 0x17d8, 0x00000002); nv_mthd(dev, 0x9097, 0x17dc, 0x00000000); nv_mthd(dev, 0x9097, 0x15f4, 0x00000000); nv_mthd(dev, 0x9097, 0x15f8, 0x00000000); nv_mthd(dev, 0x9097, 0x1434, 0x00000000); nv_mthd(dev, 0x9097, 0x1438, 0x00000000); nv_mthd(dev, 0x9097, 0x0d74, 0x00000000); nv_mthd(dev, 0x9097, 0x0dec, 0x00000001); nv_mthd(dev, 0x9097, 0x13a4, 0x00000000); nv_mthd(dev, 0x9097, 0x1318, 0x00000001); nv_mthd(dev, 0x9097, 0x1644, 0x00000000); nv_mthd(dev, 0x9097, 0x0748, 0x00000000); nv_mthd(dev, 0x9097, 0x0de8, 0x00000000); nv_mthd(dev, 0x9097, 0x1648, 0x00000000); nv_mthd(dev, 0x9097, 0x12a4, 0x00000000); nv_mthd(dev, 0x9097, 0x1120, 0x00000000); nv_mthd(dev, 0x9097, 0x1124, 0x00000000); nv_mthd(dev, 0x9097, 0x1128, 0x00000000); nv_mthd(dev, 0x9097, 0x112c, 0x00000000); nv_mthd(dev, 0x9097, 0x1118, 
0x00000000); nv_mthd(dev, 0x9097, 0x164c, 0x00000000); nv_mthd(dev, 0x9097, 0x1658, 0x00000000); nv_mthd(dev, 0x9097, 0x1910, 0x00000290); nv_mthd(dev, 0x9097, 0x1518, 0x00000000); nv_mthd(dev, 0x9097, 0x165c, 0x00000001); nv_mthd(dev, 0x9097, 0x1520, 0x00000000); nv_mthd(dev, 0x9097, 0x1604, 0x00000000); nv_mthd(dev, 0x9097, 0x1570, 0x00000000); nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000); nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000); nv_mthd(dev, 0x9097, 0x020c, 0x00000000); nv_mthd(dev, 0x9097, 0x1670, 0x30201000); nv_mthd(dev, 0x9097, 0x1674, 0x70605040); nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888); nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8); nv_mthd(dev, 0x9097, 0x166c, 0x00000000); nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00); nv_mthd(dev, 0x9097, 0x12d0, 0x00000003); nv_mthd(dev, 0x9097, 0x12d4, 0x00000002); nv_mthd(dev, 0x9097, 0x1684, 0x00000000); nv_mthd(dev, 0x9097, 0x1688, 0x00000000); nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02); nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02); nv_mthd(dev, 0x9097, 0x0db4, 0x00000000); nv_mthd(dev, 0x9097, 0x168c, 0x00000000); nv_mthd(dev, 0x9097, 0x15bc, 0x00000000); nv_mthd(dev, 0x9097, 0x156c, 0x00000000); nv_mthd(dev, 0x9097, 0x187c, 0x00000000); nv_mthd(dev, 0x9097, 0x1110, 0x00000001); nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000); nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000); nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000); nv_mthd(dev, 0x9097, 0x1234, 0x00000000); nv_mthd(dev, 0x9097, 0x1690, 0x00000000); nv_mthd(dev, 0x9097, 0x12ac, 0x00000001); nv_mthd(dev, 0x9097, 0x02c4, 0x00000000); nv_mthd(dev, 0x9097, 0x0790, 0x00000000); nv_mthd(dev, 0x9097, 0x0794, 0x00000000); nv_mthd(dev, 0x9097, 0x0798, 0x00000000); nv_mthd(dev, 0x9097, 0x079c, 0x00000000); nv_mthd(dev, 0x9097, 0x07a0, 0x00000000); nv_mthd(dev, 0x9097, 0x077c, 0x00000000); nv_mthd(dev, 0x9097, 0x1000, 0x00000010); nv_mthd(dev, 0x9097, 0x10fc, 0x00000000); nv_mthd(dev, 0x9097, 0x1290, 0x00000000); nv_mthd(dev, 0x9097, 0x0218, 0x00000010); nv_mthd(dev, 0x9097, 0x12d8, 0x00000000); nv_mthd(dev, 
0x9097, 0x12dc, 0x00000010); nv_mthd(dev, 0x9097, 0x0d94, 0x00000001); nv_mthd(dev, 0x9097, 0x155c, 0x00000000); nv_mthd(dev, 0x9097, 0x1560, 0x00000000); nv_mthd(dev, 0x9097, 0x1564, 0x00001fff); nv_mthd(dev, 0x9097, 0x1574, 0x00000000); nv_mthd(dev, 0x9097, 0x1578, 0x00000000); nv_mthd(dev, 0x9097, 0x157c, 0x003fffff); nv_mthd(dev, 0x9097, 0x1354, 0x00000000); nv_mthd(dev, 0x9097, 0x1664, 0x00000000); nv_mthd(dev, 0x9097, 0x1610, 0x00000012); nv_mthd(dev, 0x9097, 0x1608, 0x00000000); nv_mthd(dev, 0x9097, 0x160c, 0x00000000); nv_mthd(dev, 0x9097, 0x162c, 0x00000003); nv_mthd(dev, 0x9097, 0x0210, 0x00000000); nv_mthd(dev, 0x9097, 0x0320, 0x00000000); nv_mthd(dev, 0x9097, 0x0324, 0x3f800000); nv_mthd(dev, 0x9097, 0x0328, 0x3f800000); nv_mthd(dev, 0x9097, 0x032c, 0x3f800000); nv_mthd(dev, 0x9097, 0x0330, 0x3f800000); nv_mthd(dev, 0x9097, 0x0334, 0x3f800000); nv_mthd(dev, 0x9097, 0x0338, 0x3f800000); nv_mthd(dev, 0x9097, 0x0750, 0x00000000); nv_mthd(dev, 0x9097, 0x0760, 0x39291909); nv_mthd(dev, 0x9097, 0x0764, 0x79695949); nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989); nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9); nv_mthd(dev, 0x9097, 0x0770, 0x30201000); nv_mthd(dev, 0x9097, 0x0774, 0x70605040); nv_mthd(dev, 0x9097, 0x0778, 0x00009080); nv_mthd(dev, 0x9097, 0x0780, 0x39291909); nv_mthd(dev, 0x9097, 0x0784, 0x79695949); nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989); nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9); nv_mthd(dev, 0x9097, 0x07d0, 0x30201000); nv_mthd(dev, 0x9097, 0x07d4, 0x70605040); nv_mthd(dev, 0x9097, 0x07d8, 0x00009080); nv_mthd(dev, 0x9097, 0x037c, 0x00000001); nv_mthd(dev, 0x9097, 0x0740, 0x00000000); nv_mthd(dev, 0x9097, 0x0744, 0x00000000); nv_mthd(dev, 0x9097, 0x2600, 0x00000000); nv_mthd(dev, 0x9097, 0x1918, 0x00000000); nv_mthd(dev, 0x9097, 0x191c, 0x00000900); nv_mthd(dev, 0x9097, 0x1920, 0x00000405); nv_mthd(dev, 0x9097, 0x1308, 0x00000001); nv_mthd(dev, 0x9097, 0x1924, 0x00000000); nv_mthd(dev, 0x9097, 0x13ac, 0x00000000); nv_mthd(dev, 0x9097, 0x192c, 
0x00000001); nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c); nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000); nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000); nv_mthd(dev, 0x9097, 0x02c0, 0x00000001); nv_mthd(dev, 0x9097, 0x1510, 0x00000000); nv_mthd(dev, 0x9097, 0x1940, 0x00000000); nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000); nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000); nv_mthd(dev, 0x9097, 0x194c, 0x00000000); nv_mthd(dev, 0x9097, 0x1950, 0x00000000); nv_mthd(dev, 0x9097, 0x1968, 0x00000000); nv_mthd(dev, 0x9097, 0x1590, 0x0000003f); nv_mthd(dev, 0x9097, 0x07e8, 0x00000000); nv_mthd(dev, 0x9097, 0x07ec, 0x00000000); nv_mthd(dev, 0x9097, 0x07f0, 0x00000000); nv_mthd(dev, 0x9097, 0x07f4, 0x00000000); nv_mthd(dev, 0x9097, 0x196c, 0x00000011); nv_mthd(dev, 0x9097, 0x197c, 0x00000000); nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000); nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000); nv_mthd(dev, 0x9097, 0x02d8, 0x00000040); nv_mthd(dev, 0x9097, 0x1980, 0x00000080); nv_mthd(dev, 0x9097, 0x1504, 0x00000080); nv_mthd(dev, 0x9097, 0x1984, 0x00000000); nv_mthd(dev, 0x9097, 0x0300, 0x00000001); nv_mthd(dev, 0x9097, 0x13a8, 0x00000000); nv_mthd(dev, 0x9097, 0x12ec, 0x00000000); nv_mthd(dev, 0x9097, 0x1310, 0x00000000); nv_mthd(dev, 0x9097, 0x1314, 0x00000001); nv_mthd(dev, 0x9097, 0x1380, 0x00000000); nv_mthd(dev, 0x9097, 0x1384, 0x00000001); nv_mthd(dev, 0x9097, 0x1388, 0x00000001); nv_mthd(dev, 0x9097, 0x138c, 0x00000001); nv_mthd(dev, 0x9097, 0x1390, 0x00000001); nv_mthd(dev, 0x9097, 0x1394, 0x00000000); nv_mthd(dev, 0x9097, 0x139c, 0x00000000); nv_mthd(dev, 0x9097, 0x1398, 0x00000000); nv_mthd(dev, 0x9097, 0x1594, 0x00000000); nv_mthd(dev, 0x9097, 0x1598, 0x00000001); nv_mthd(dev, 0x9097, 0x159c, 0x00000001); nv_mthd(dev, 0x9097, 0x15a0, 0x00000001); nv_mthd(dev, 0x9097, 0x15a4, 0x00000001); nv_mthd(dev, 0x9097, 0x0f54, 0x00000000); nv_mthd(dev, 0x9097, 0x0f58, 0x00000000); nv_mthd(dev, 0x9097, 0x0f5c, 0x00000000); nv_mthd(dev, 0x9097, 0x19bc, 0x00000000); nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000); nv_mthd(dev, 
0x9097, 0x0fa0, 0x00000000); nv_mthd(dev, 0x9097, 0x12cc, 0x00000000); nv_mthd(dev, 0x9097, 0x12e8, 0x00000000); nv_mthd(dev, 0x9097, 0x130c, 0x00000001); nv_mthd(dev, 0x9097, 0x1360, 0x00000000); nv_mthd(dev, 0x9097, 0x1364, 0x00000000); nv_mthd(dev, 0x9097, 0x1368, 0x00000000); nv_mthd(dev, 0x9097, 0x136c, 0x00000000); nv_mthd(dev, 0x9097, 0x1370, 0x00000000); nv_mthd(dev, 0x9097, 0x1374, 0x00000000); nv_mthd(dev, 0x9097, 0x1378, 0x00000000); nv_mthd(dev, 0x9097, 0x137c, 0x00000000); nv_mthd(dev, 0x9097, 0x133c, 0x00000001); nv_mthd(dev, 0x9097, 0x1340, 0x00000001); nv_mthd(dev, 0x9097, 0x1344, 0x00000002); nv_mthd(dev, 0x9097, 0x1348, 0x00000001); nv_mthd(dev, 0x9097, 0x134c, 0x00000001); nv_mthd(dev, 0x9097, 0x1350, 0x00000002); nv_mthd(dev, 0x9097, 0x1358, 0x00000001); nv_mthd(dev, 0x9097, 0x12e4, 0x00000000); nv_mthd(dev, 0x9097, 0x131c, 0x00000000); nv_mthd(dev, 0x9097, 0x1320, 0x00000000); nv_mthd(dev, 0x9097, 0x1324, 0x00000000); nv_mthd(dev, 0x9097, 0x1328, 0x00000000); nv_mthd(dev, 0x9097, 0x19c0, 0x00000000); nv_mthd(dev, 0x9097, 0x1140, 0x00000000); nv_mthd(dev, 0x9097, 0x19c4, 0x00000000); nv_mthd(dev, 0x9097, 0x19c8, 0x00001500); nv_mthd(dev, 0x9097, 0x135c, 0x00000000); nv_mthd(dev, 0x9097, 0x0f90, 0x00000000); nv_mthd(dev, 0x9097, 0x19e0, 0x00000001); nv_mthd(dev, 0x9097, 0x19e4, 0x00000001); nv_mthd(dev, 0x9097, 0x19e8, 0x00000001); nv_mthd(dev, 0x9097, 0x19ec, 0x00000001); nv_mthd(dev, 0x9097, 0x19f0, 0x00000001); nv_mthd(dev, 0x9097, 0x19f4, 0x00000001); nv_mthd(dev, 0x9097, 0x19f8, 0x00000001); nv_mthd(dev, 0x9097, 0x19fc, 0x00000001); nv_mthd(dev, 0x9097, 0x19cc, 0x00000001); nv_mthd(dev, 0x9097, 0x15b8, 0x00000000); nv_mthd(dev, 0x9097, 0x1a00, 0x00001111); nv_mthd(dev, 0x9097, 0x1a04, 0x00000000); nv_mthd(dev, 0x9097, 0x1a08, 0x00000000); nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000); nv_mthd(dev, 0x9097, 0x1a10, 0x00000000); nv_mthd(dev, 0x9097, 0x1a14, 0x00000000); nv_mthd(dev, 0x9097, 0x1a18, 0x00000000); nv_mthd(dev, 0x9097, 0x1a1c, 
0x00000000); nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000); nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000); nv_mthd(dev, 0x9097, 0x10f8, 0x00001010); nv_mthd(dev, 0x9097, 0x0d80, 0x00000000); nv_mthd(dev, 0x9097, 0x0d84, 0x00000000); nv_mthd(dev, 0x9097, 0x0d88, 0x00000000); nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000); nv_mthd(dev, 0x9097, 0x0d90, 0x00000000); nv_mthd(dev, 0x9097, 0x0da0, 0x00000000); nv_mthd(dev, 0x9097, 0x1508, 0x80000000); nv_mthd(dev, 0x9097, 0x150c, 0x40000000); nv_mthd(dev, 0x9097, 0x1668, 0x00000000); nv_mthd(dev, 0x9097, 0x0318, 0x00000008); nv_mthd(dev, 0x9097, 0x031c, 0x00000008); nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001); nv_mthd(dev, 0x9097, 0x07dc, 0x00000000); nv_mthd(dev, 0x9097, 0x074c, 0x00000055); nv_mthd(dev, 0x9097, 0x1420, 0x00000003); nv_mthd(dev, 0x9097, 0x17bc, 0x00000000); nv_mthd(dev, 0x9097, 0x17c0, 0x00000000); nv_mthd(dev, 0x9097, 0x17c4, 0x00000001); nv_mthd(dev, 0x9097, 0x1008, 0x00000008); nv_mthd(dev, 0x9097, 0x100c, 0x00000040); nv_mthd(dev, 0x9097, 0x1010, 0x0000012c); nv_mthd(dev, 0x9097, 0x0d60, 0x00000040); nv_mthd(dev, 0x9097, 0x075c, 0x00000003); nv_mthd(dev, 0x9097, 0x1018, 0x00000020); nv_mthd(dev, 0x9097, 0x101c, 0x00000001); nv_mthd(dev, 0x9097, 0x1020, 0x00000020); nv_mthd(dev, 0x9097, 0x1024, 0x00000001); nv_mthd(dev, 0x9097, 0x1444, 0x00000000); nv_mthd(dev, 0x9097, 0x1448, 0x00000000); nv_mthd(dev, 0x9097, 0x144c, 0x00000000); nv_mthd(dev, 0x9097, 0x0360, 0x20164010); nv_mthd(dev, 0x9097, 0x0364, 0x00000020); nv_mthd(dev, 0x9097, 0x0368, 0x00000000); nv_mthd(dev, 0x9097, 0x0de4, 0x00000000); nv_mthd(dev, 0x9097, 0x0204, 0x00000006); nv_mthd(dev, 0x9097, 0x0208, 0x00000000); nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff); nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48); nv_mthd(dev, 0x9097, 0x1220, 0x00000005); nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000); nv_mthd(dev, 0x9097, 0x0f98, 0x00300008); nv_mthd(dev, 0x9097, 0x1284, 0x04000080); nv_mthd(dev, 0x9097, 0x1450, 0x00300008); nv_mthd(dev, 0x9097, 0x1454, 0x04000080); nv_mthd(dev, 
0x9097, 0x0214, 0x00000000);
	/* in trace, right after 0x90c0, not here */
	nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
}

/*
 * Default method state for graphics class 0x902d (presumably the Fermi
 * 2D class -- confirm against the nouveau class id headers).  The
 * address/value pairs appear to be captured from a trace of the binary
 * driver (see the "in trace" note above); do not reorder or alter them.
 */
static void
nvc0_grctx_generate_902d(struct drm_device *dev)
{
	nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
	nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
	nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
	nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
	nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
	nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
	nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
	nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
	nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
	nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
}

/*
 * Default method state for graphics class 0x9039 (presumably the Fermi
 * M2MF memory-to-memory-format class -- confirm).  All defaults are zero.
 */
static void
nvc0_grctx_generate_9039(struct drm_device *dev)
{
	nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
	nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
	nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
}

/*
 * Default method state for graphics class 0x90c0 (presumably the Fermi
 * compute class -- confirm).  Mostly zeroes, with a handful of non-zero
 * defaults (0x030c, 0x0758, 0x0d94) taken verbatim from the trace.
 */
static void
nvc0_grctx_generate_90c0(struct drm_device *dev)
{
	nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
	nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
	nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
	nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
}

/*
 * Initial register state for the PGRAPH dispatch unit (MMIO range
 * 0x4040xx).  Register meanings are undocumented here; values come from
 * the same trace as the class tables above, so keep order and values
 * exactly as-is.
 */
static void
nvc0_grctx_generate_dispatch(struct drm_device *dev)
{
	int i;

	nv_wr32(dev, 0x404004, 0x00000000);
	nv_wr32(dev, 0x404008, 0x00000000);
	nv_wr32(dev, 0x40400c, 0x00000000);
	nv_wr32(dev, 0x404010, 0x00000000);
	nv_wr32(dev, 0x404014, 0x00000000);
	nv_wr32(dev, 0x404018, 0x00000000);
	nv_wr32(dev, 0x40401c, 0x00000000);
	nv_wr32(dev, 0x404020, 0x00000000);
	nv_wr32(dev, 0x404024, 0x00000000);
	nv_wr32(dev, 0x404028, 0x00000000);
	nv_wr32(dev, 0x40402c, 0x00000000);
	nv_wr32(dev, 0x404044, 0x00000000);
	nv_wr32(dev, 0x404094, 0x00000000);
	nv_wr32(dev, 0x404098, 0x00000000);
	nv_wr32(dev, 0x40409c, 0x00000000);
	nv_wr32(dev, 0x4040a0, 0x00000000);
	nv_wr32(dev, 0x4040a4, 0x00000000);
	nv_wr32(dev, 0x4040a8, 0x00000000);
	nv_wr32(dev, 0x4040ac, 0x00000000);
	nv_wr32(dev, 0x4040b0, 0x00000000);
	nv_wr32(dev, 0x4040b4, 0x00000000);
	nv_wr32(dev, 0x4040b8, 0x00000000);
	nv_wr32(dev, 0x4040bc, 0x00000000);
	nv_wr32(dev, 0x4040c0, 0x00000000);
	nv_wr32(dev, 0x4040c4, 0x00000000);
	nv_wr32(dev, 0x4040c8, 0xf0000087);
	nv_wr32(dev, 0x4040d4, 0x00000000);
	nv_wr32(dev, 0x4040d8, 0x00000000);
	nv_wr32(dev, 0x4040dc, 0x00000000);
	nv_wr32(dev, 0x4040e0, 0x00000000);
	nv_wr32(dev, 0x4040e4, 0x00000000);
	nv_wr32(dev, 0x4040e8, 0x00001000);
	nv_wr32(dev, 0x4040f8, 0x00000000);
	nv_wr32(dev, 0x404130, 0x00000000);
	nv_wr32(dev, 0x404134, 0x00000000);
	nv_wr32(dev, 0x404138, 0x20000040);
	nv_wr32(dev, 0x404150, 0x0000002e);
	nv_wr32(dev, 0x404154, 0x00000400);
	nv_wr32(dev, 0x404158, 0x00000200);
	nv_wr32(dev,
0x404164, 0x00000055); nv_wr32(dev, 0x404168, 0x00000000); nv_wr32(dev, 0x404174, 0x00000000); nv_wr32(dev, 0x404178, 0x00000000); nv_wr32(dev, 0x40417c, 0x00000000); for (i = 0; i < 8; i++) nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */ } static void nvc0_grctx_generate_macro(struct drm_device *dev) { nv_wr32(dev, 0x404404, 0x00000000); nv_wr32(dev, 0x404408, 0x00000000); nv_wr32(dev, 0x40440c, 0x00000000); nv_wr32(dev, 0x404410, 0x00000000); nv_wr32(dev, 0x404414, 0x00000000); nv_wr32(dev, 0x404418, 0x00000000); nv_wr32(dev, 0x40441c, 0x00000000); nv_wr32(dev, 0x404420, 0x00000000); nv_wr32(dev, 0x404424, 0x00000000); nv_wr32(dev, 0x404428, 0x00000000); nv_wr32(dev, 0x40442c, 0x00000000); nv_wr32(dev, 0x404430, 0x00000000); nv_wr32(dev, 0x404434, 0x00000000); nv_wr32(dev, 0x404438, 0x00000000); nv_wr32(dev, 0x404460, 0x00000000); nv_wr32(dev, 0x404464, 0x00000000); nv_wr32(dev, 0x404468, 0x00ffffff); nv_wr32(dev, 0x40446c, 0x00000000); nv_wr32(dev, 0x404480, 0x00000001); nv_wr32(dev, 0x404498, 0x00000001); } static void nvc0_grctx_generate_m2mf(struct drm_device *dev) { nv_wr32(dev, 0x404604, 0x00000015); nv_wr32(dev, 0x404608, 0x00000000); nv_wr32(dev, 0x40460c, 0x00002e00); nv_wr32(dev, 0x404610, 0x00000100); nv_wr32(dev, 0x404618, 0x00000000); nv_wr32(dev, 0x40461c, 0x00000000); nv_wr32(dev, 0x404620, 0x00000000); nv_wr32(dev, 0x404624, 0x00000000); nv_wr32(dev, 0x404628, 0x00000000); nv_wr32(dev, 0x40462c, 0x00000000); nv_wr32(dev, 0x404630, 0x00000000); nv_wr32(dev, 0x404634, 0x00000000); nv_wr32(dev, 0x404638, 0x00000004); nv_wr32(dev, 0x40463c, 0x00000000); nv_wr32(dev, 0x404640, 0x00000000); nv_wr32(dev, 0x404644, 0x00000000); nv_wr32(dev, 0x404648, 0x00000000); nv_wr32(dev, 0x40464c, 0x00000000); nv_wr32(dev, 0x404650, 0x00000000); nv_wr32(dev, 0x404654, 0x00000000); nv_wr32(dev, 0x404658, 0x00000000); nv_wr32(dev, 0x40465c, 0x007f0100); nv_wr32(dev, 0x404660, 0x00000000); nv_wr32(dev, 0x404664, 0x00000000); nv_wr32(dev, 0x404668, 0x00000000); 
/* ...continuation of nvc0_grctx_generate_m2mf(): remaining 0x4046xx
 * defaults (mostly zero; 0x40467c and 0x4046a0 are the exceptions). */
nv_wr32(dev, 0x40466c, 0x00000000); nv_wr32(dev, 0x404670, 0x00000000); nv_wr32(dev, 0x404674, 0x00000000); nv_wr32(dev, 0x404678, 0x00000000); nv_wr32(dev, 0x40467c, 0x00000002); nv_wr32(dev, 0x404680, 0x00000000); nv_wr32(dev, 0x404684, 0x00000000); nv_wr32(dev, 0x404688, 0x00000000); nv_wr32(dev, 0x40468c, 0x00000000); nv_wr32(dev, 0x404690, 0x00000000); nv_wr32(dev, 0x404694, 0x00000000); nv_wr32(dev, 0x404698, 0x00000000); nv_wr32(dev, 0x40469c, 0x00000000); nv_wr32(dev, 0x4046a0, 0x007f0080); nv_wr32(dev, 0x4046a4, 0x00000000); nv_wr32(dev, 0x4046a8, 0x00000000); nv_wr32(dev, 0x4046ac, 0x00000000); nv_wr32(dev, 0x4046b0, 0x00000000); nv_wr32(dev, 0x4046b4, 0x00000000); nv_wr32(dev, 0x4046b8, 0x00000000); nv_wr32(dev, 0x4046bc, 0x00000000); nv_wr32(dev, 0x4046c0, 0x00000000); nv_wr32(dev, 0x4046c4, 0x00000000); nv_wr32(dev, 0x4046c8, 0x00000000); nv_wr32(dev, 0x4046cc, 0x00000000); nv_wr32(dev, 0x4046d0, 0x00000000); nv_wr32(dev, 0x4046d4, 0x00000000); nv_wr32(dev, 0x4046d8, 0x00000000); nv_wr32(dev, 0x4046dc, 0x00000000); nv_wr32(dev, 0x4046e0, 0x00000000); nv_wr32(dev, 0x4046e4, 0x00000000); nv_wr32(dev, 0x4046e8, 0x00000000); nv_wr32(dev, 0x4046f0, 0x00000000); nv_wr32(dev, 0x4046f4, 0x00000000); }

/* Load default context values for the 0x4047xx range; the unit's purpose
 * is unknown (hence the function name).  The final write is split across
 * the next source line. */
static void nvc0_grctx_generate_unk47xx(struct drm_device *dev) { nv_wr32(dev, 0x404700, 0x00000000); nv_wr32(dev, 0x404704, 0x00000000); nv_wr32(dev, 0x404708, 0x00000000); nv_wr32(dev, 0x40470c, 0x00000000); nv_wr32(dev, 0x404710, 0x00000000); nv_wr32(dev, 0x404714, 0x00000000); nv_wr32(dev, 0x404718, 0x00000000); nv_wr32(dev, 0x40471c, 0x00000000); nv_wr32(dev, 0x404720, 0x00000000); nv_wr32(dev, 0x404724, 0x00000000); nv_wr32(dev, 0x404728, 0x00000000); nv_wr32(dev, 0x40472c, 0x00000000); nv_wr32(dev, 0x404730, 0x00000000); nv_wr32(dev, 0x404734, 0x00000100); nv_wr32(dev, 0x404738, 0x00000000); nv_wr32(dev, 0x40473c, 0x00000000); nv_wr32(dev, 0x404740, 0x00000000); nv_wr32(dev, 0x404744, 0x00000000); nv_wr32(dev, 0x404748, 0x00000000); nv_wr32(dev, 0x40474c,
/* ...end of nvc0_grctx_generate_unk47xx() (write begun on previous line). */
0x00000000); nv_wr32(dev, 0x404750, 0x00000000); nv_wr32(dev, 0x404754, 0x00000000); }

/* Load default context values for the shader-related 0x4058xx/0x405axx
 * ranges.  0x405870..0x40587c are later overwritten per-TP-count by
 * nvc0_grctx_generate(). */
static void nvc0_grctx_generate_shaders(struct drm_device *dev) { nv_wr32(dev, 0x405800, 0x078000bf); nv_wr32(dev, 0x405830, 0x02180000); nv_wr32(dev, 0x405834, 0x00000000); nv_wr32(dev, 0x405838, 0x00000000); nv_wr32(dev, 0x405854, 0x00000000); nv_wr32(dev, 0x405870, 0x00000001); nv_wr32(dev, 0x405874, 0x00000001); nv_wr32(dev, 0x405878, 0x00000001); nv_wr32(dev, 0x40587c, 0x00000001); nv_wr32(dev, 0x405a00, 0x00000000); nv_wr32(dev, 0x405a04, 0x00000000); nv_wr32(dev, 0x405a18, 0x00000000); }

/* Defaults for the 0x4060xx range (unit unidentified).  0x406028 is later
 * rewritten with the per-GPC TP counts by nvc0_grctx_generate(). */
static void nvc0_grctx_generate_unk60xx(struct drm_device *dev) { nv_wr32(dev, 0x406020, 0x000103c1); nv_wr32(dev, 0x406028, 0x00000001); nv_wr32(dev, 0x40602c, 0x00000001); nv_wr32(dev, 0x406030, 0x00000001); nv_wr32(dev, 0x406034, 0x00000001); }

/* Defaults for the 0x4064xx range (unit unidentified). */
static void nvc0_grctx_generate_unk64xx(struct drm_device *dev) { nv_wr32(dev, 0x4064a8, 0x00000000); nv_wr32(dev, 0x4064ac, 0x00003fff); nv_wr32(dev, 0x4064b4, 0x00000000); nv_wr32(dev, 0x4064b8, 0x00000000); }

/* Defaults for the 0x4078xx ("tpbus") range.  Note that 0x40780c..0x407820
 * and 0x4078bc mirror the per-GPC table at 0x418b08/0x418bb8 and are
 * recomputed from the TP layout later in nvc0_grctx_generate(). */
static void nvc0_grctx_generate_tpbus(struct drm_device *dev) { nv_wr32(dev, 0x407804, 0x00000023); nv_wr32(dev, 0x40780c, 0x0a418820); nv_wr32(dev, 0x407810, 0x062080e6); nv_wr32(dev, 0x407814, 0x020398a4); nv_wr32(dev, 0x407818, 0x0e629062); nv_wr32(dev, 0x40781c, 0x0a418820); nv_wr32(dev, 0x407820, 0x000000e6); nv_wr32(dev, 0x4078bc, 0x00000103); }

/* Defaults for the 0x4080xx ("ccache") range. */
static void nvc0_grctx_generate_ccache(struct drm_device *dev) { nv_wr32(dev, 0x408000, 0x00000000); nv_wr32(dev, 0x408004, 0x00000000); nv_wr32(dev, 0x408008, 0x00000018); nv_wr32(dev, 0x40800c, 0x00000000); nv_wr32(dev, 0x408010, 0x00000000); nv_wr32(dev, 0x408014, 0x00000069); nv_wr32(dev, 0x408018, 0xe100e100); nv_wr32(dev, 0x408064, 0x00000000); }

/* Defaults for the ROP broadcast range (0x4088xx/0x4089xx); one value is
 * chipset-dependent, selected on the next source line. */
static void nvc0_grctx_generate_rop(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; /* ROPC_BROADCAST */ nv_wr32(dev, 0x408800, 0x02802a3c); nv_wr32(dev, 0x408804, 0x00000040); nv_wr32(dev, 0x408808,
/* ...continuation of nvc0_grctx_generate_rop(): 0x408900 differs between
 * chipset 0xc0 (0x0080b801) and 0xc3/0xc4 (0x3080b801); other chipsets
 * deliberately leave the register untouched. */
0x0003e00d); switch (dev_priv->chipset) { case 0xc0: nv_wr32(dev, 0x408900, 0x0080b801); break; case 0xc3: case 0xc4: nv_wr32(dev, 0x408900, 0x3080b801); break; } nv_wr32(dev, 0x408904, 0x02000001); nv_wr32(dev, 0x408908, 0x00c80929); nv_wr32(dev, 0x40890c, 0x00000000); nv_wr32(dev, 0x408980, 0x0000011d); }

/* Load default context values for the GPC broadcast range (0x418xxx).
 * Ends with a loop over eight 0x20-byte register strides at 0x418a00
 * that straddles into the next source line. */
static void nvc0_grctx_generate_gpc(struct drm_device *dev) { int i; /* GPC_BROADCAST */ nv_wr32(dev, 0x418380, 0x00000016); nv_wr32(dev, 0x418400, 0x38004e00); nv_wr32(dev, 0x418404, 0x71e0ffff); nv_wr32(dev, 0x418408, 0x00000000); nv_wr32(dev, 0x41840c, 0x00001008); nv_wr32(dev, 0x418410, 0x0fff0fff); nv_wr32(dev, 0x418414, 0x00200fff); nv_wr32(dev, 0x418450, 0x00000000); nv_wr32(dev, 0x418454, 0x00000000); nv_wr32(dev, 0x418458, 0x00000000); nv_wr32(dev, 0x41845c, 0x00000000); nv_wr32(dev, 0x418460, 0x00000000); nv_wr32(dev, 0x418464, 0x00000000); nv_wr32(dev, 0x418468, 0x00000001); nv_wr32(dev, 0x41846c, 0x00000000); nv_wr32(dev, 0x418470, 0x00000000); nv_wr32(dev, 0x418600, 0x0000001f); nv_wr32(dev, 0x418684, 0x0000000f); nv_wr32(dev, 0x418700, 0x00000002); nv_wr32(dev, 0x418704, 0x00000080); nv_wr32(dev, 0x418708, 0x00000000); nv_wr32(dev, 0x41870c, 0x07c80000); nv_wr32(dev, 0x418710, 0x00000000); nv_wr32(dev, 0x418800, 0x0006860a); nv_wr32(dev, 0x418808, 0x00000000); nv_wr32(dev, 0x41880c, 0x00000000); nv_wr32(dev, 0x418810, 0x00000000); nv_wr32(dev, 0x418828, 0x00008442); nv_wr32(dev, 0x418830, 0x00000001); nv_wr32(dev, 0x4188d8, 0x00000008); nv_wr32(dev, 0x4188e0, 0x01000000); nv_wr32(dev, 0x4188e8, 0x00000000); nv_wr32(dev, 0x4188ec, 0x00000000); nv_wr32(dev, 0x4188f0, 0x00000000); nv_wr32(dev, 0x4188f4, 0x00000000); nv_wr32(dev, 0x4188f8, 0x00000000); nv_wr32(dev, 0x4188fc, 0x00100000); nv_wr32(dev, 0x41891c, 0x00ff00ff); nv_wr32(dev, 0x418924, 0x00000000); nv_wr32(dev, 0x418928, 0x00ffff00); nv_wr32(dev, 0x41892c, 0x0000ff00); for (i = 0; i < 8; i++) { nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000); nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
/* ...continuation of nvc0_grctx_generate_gpc(): finish the 0x418a00-stride
 * loop, then the remaining GPC broadcast defaults.  0x418b08..0x418b1c and
 * 0x418bb8 are recomputed from the actual TP layout later in
 * nvc0_grctx_generate(). */
nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000); nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000); nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000); nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000); nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000); } nv_wr32(dev, 0x418b00, 0x00000000); nv_wr32(dev, 0x418b08, 0x0a418820); nv_wr32(dev, 0x418b0c, 0x062080e6); nv_wr32(dev, 0x418b10, 0x020398a4); nv_wr32(dev, 0x418b14, 0x0e629062); nv_wr32(dev, 0x418b18, 0x0a418820); nv_wr32(dev, 0x418b1c, 0x000000e6); nv_wr32(dev, 0x418bb8, 0x00000103); nv_wr32(dev, 0x418c08, 0x00000001); nv_wr32(dev, 0x418c10, 0x00000000); nv_wr32(dev, 0x418c14, 0x00000000); nv_wr32(dev, 0x418c18, 0x00000000); nv_wr32(dev, 0x418c1c, 0x00000000); nv_wr32(dev, 0x418c20, 0x00000000); nv_wr32(dev, 0x418c24, 0x00000000); nv_wr32(dev, 0x418c28, 0x00000000); nv_wr32(dev, 0x418c2c, 0x00000000); nv_wr32(dev, 0x418c80, 0x20200004); nv_wr32(dev, 0x418c8c, 0x00000001); nv_wr32(dev, 0x419000, 0x00000780); nv_wr32(dev, 0x419004, 0x00000000); nv_wr32(dev, 0x419008, 0x00000000); nv_wr32(dev, 0x419014, 0x00000004); }

/* Load default context values for the TP broadcast range (0x419xxx).
 * Several writes are skipped/added for non-0xc0 chipsets; continues on
 * the next source line. */
static void nvc0_grctx_generate_tp(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; /* GPC_BROADCAST.TP_BROADCAST */ nv_wr32(dev, 0x419848, 0x00000000); nv_wr32(dev, 0x419864, 0x0000012a); nv_wr32(dev, 0x419888, 0x00000000); nv_wr32(dev, 0x419a00, 0x000001f0); nv_wr32(dev, 0x419a04, 0x00000001); nv_wr32(dev, 0x419a08, 0x00000023); nv_wr32(dev, 0x419a0c, 0x00020000); nv_wr32(dev, 0x419a10, 0x00000000); nv_wr32(dev, 0x419a14, 0x00000200); nv_wr32(dev, 0x419a1c, 0x00000000); nv_wr32(dev, 0x419a20, 0x00000800); if (dev_priv->chipset != 0xc0) nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */ nv_wr32(dev, 0x419b00, 0x0a418820); nv_wr32(dev, 0x419b04, 0x062080e6); nv_wr32(dev, 0x419b08, 0x020398a4); nv_wr32(dev, 0x419b0c, 0x0e629062); nv_wr32(dev, 0x419b10, 0x0a418820); nv_wr32(dev, 0x419b14, 0x000000e6); nv_wr32(dev, 0x419bd0, 0x00900103); nv_wr32(dev, 0x419be0, 0x00000001);
/* ...continuation of nvc0_grctx_generate_tp(): remaining 0x419cxx-0x419fxx
 * defaults.  0x419ee0 and 0x419f58 are only written on chipsets other
 * than 0xc0. */
nv_wr32(dev, 0x419be4, 0x00000000); nv_wr32(dev, 0x419c00, 0x00000002); nv_wr32(dev, 0x419c04, 0x00000006); nv_wr32(dev, 0x419c08, 0x00000002); nv_wr32(dev, 0x419c20, 0x00000000); nv_wr32(dev, 0x419cbc, 0x28137606); nv_wr32(dev, 0x419ce8, 0x00000000); nv_wr32(dev, 0x419cf4, 0x00000183); nv_wr32(dev, 0x419d20, 0x02180000); nv_wr32(dev, 0x419d24, 0x00001fff); nv_wr32(dev, 0x419e04, 0x00000000); nv_wr32(dev, 0x419e08, 0x00000000); nv_wr32(dev, 0x419e0c, 0x00000000); nv_wr32(dev, 0x419e10, 0x00000002); nv_wr32(dev, 0x419e44, 0x001beff2); nv_wr32(dev, 0x419e48, 0x00000000); nv_wr32(dev, 0x419e4c, 0x0000000f); nv_wr32(dev, 0x419e50, 0x00000000); nv_wr32(dev, 0x419e54, 0x00000000); nv_wr32(dev, 0x419e58, 0x00000000); nv_wr32(dev, 0x419e5c, 0x00000000); nv_wr32(dev, 0x419e60, 0x00000000); nv_wr32(dev, 0x419e64, 0x00000000); nv_wr32(dev, 0x419e68, 0x00000000); nv_wr32(dev, 0x419e6c, 0x00000000); nv_wr32(dev, 0x419e70, 0x00000000); nv_wr32(dev, 0x419e74, 0x00000000); nv_wr32(dev, 0x419e78, 0x00000000); nv_wr32(dev, 0x419e7c, 0x00000000); nv_wr32(dev, 0x419e80, 0x00000000); nv_wr32(dev, 0x419e84, 0x00000000); nv_wr32(dev, 0x419e88, 0x00000000); nv_wr32(dev, 0x419e8c, 0x00000000); nv_wr32(dev, 0x419e90, 0x00000000); nv_wr32(dev, 0x419e98, 0x00000000); if (dev_priv->chipset != 0xc0) nv_wr32(dev, 0x419ee0, 0x00011110); nv_wr32(dev, 0x419f50, 0x00000000); nv_wr32(dev, 0x419f54, 0x00000000); if (dev_priv->chipset != 0xc0) nv_wr32(dev, 0x419f58, 0x00000000); }

/* Main entry point: generate the initial ("golden") PGRAPH context image
 * for a channel.  Saves 0x000260 and clears its bit 0 around the
 * generation sequence (presumably a clock/power gate — restored later;
 * TODO confirm against envytools docs), clears 0x400208, then runs the
 * per-unit default loaders above.  The body continues well past this
 * chunk (mmio-list replay, TP layout programming, and a long nv_icmd
 * table). */
int nvc0_grctx_generate(struct nouveau_channel *chan) { struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; struct drm_device *dev = chan->dev; int i, gpc, tp, id; u32 r000260, tmp; r000260 = nv_rd32(dev, 0x000260); nv_wr32(dev, 0x000260, r000260 & ~1); nv_wr32(dev, 0x400208, 0x00000000); nvc0_grctx_generate_dispatch(dev); nvc0_grctx_generate_macro(dev); nvc0_grctx_generate_m2mf(dev);
nvc0_grctx_generate_unk47xx(dev); nvc0_grctx_generate_shaders(dev); nvc0_grctx_generate_unk60xx(dev); nvc0_grctx_generate_unk64xx(dev); nvc0_grctx_generate_tpbus(dev); nvc0_grctx_generate_ccache(dev); nvc0_grctx_generate_rop(dev); nvc0_grctx_generate_gpc(dev); nvc0_grctx_generate_tp(dev); nv_wr32(dev, 0x404154, 0x00000000); /* fuc "mmio list" writes */ for (i = 0; i < grch->mmio_nr * 8; i += 8) { u32 reg = nv_ro32(grch->mmio, i + 0); nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4)); } for (tp = 0, id = 0; tp < 4; tp++) { for (gpc = 0; gpc < priv->gpc_nr; gpc++) { if (tp < priv->tp_nr[gpc]) { nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id); nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id); nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id); nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id); id++; } nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]); nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]); } } tmp = 0; for (i = 0; i < priv->gpc_nr; i++) tmp |= priv->tp_nr[i] << (i * 4); nv_wr32(dev, 0x406028, tmp); nv_wr32(dev, 0x405870, tmp); nv_wr32(dev, 0x40602c, 0x00000000); nv_wr32(dev, 0x405874, 0x00000000); nv_wr32(dev, 0x406030, 0x00000000); nv_wr32(dev, 0x405878, 0x00000000); nv_wr32(dev, 0x406034, 0x00000000); nv_wr32(dev, 0x40587c, 0x00000000); if (1) { const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 }; u8 max = chipset_tp_max[dev_priv->chipset & 0x0f]; u8 tpnr[GPC_MAX]; u8 data[32]; memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); memset(data, 0x1f, sizeof(data)); gpc = -1; for (tp = 0; tp < priv->tp_total; tp++) { do { gpc = (gpc + 1) % priv->gpc_nr; } while (!tpnr[gpc]); tpnr[gpc]--; data[tp] = gpc; } for (i = 0; i < max / 4; i++) nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]); } if (1) { u32 data[6] = {}, data2[2] = {}; u8 tpnr[GPC_MAX]; u8 shift, ntpcv; /* calculate first set of magics */ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); for (tp = 0; tp < priv->tp_total; tp++) { do { gpc = (gpc + 1) % priv->gpc_nr; } while (!tpnr[gpc]); tpnr[gpc]--; data[tp / 6] |= gpc << ((tp 
% 6) * 5); } for (; tp < 32; tp++) data[tp / 6] |= 7 << ((tp % 6) * 5); /* and the second... */ shift = 0; ntpcv = priv->tp_total; while (!(ntpcv & (1 << 4))) { ntpcv <<= 1; shift++; } data2[0] = (ntpcv << 16); data2[0] |= (shift << 21); data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24); for (i = 1; i < 7; i++) data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5); /* GPC_BROADCAST */ nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) | priv->magic_not_rop_nr); for (i = 0; i < 6; i++) nv_wr32(dev, 0x418b08 + (i * 4), data[i]); /* GPC_BROADCAST.TP_BROADCAST */ nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) | priv->magic_not_rop_nr | data2[0]); nv_wr32(dev, 0x419be4, data2[1]); for (i = 0; i < 6; i++) nv_wr32(dev, 0x419b00 + (i * 4), data[i]); /* UNK78xx */ nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) | priv->magic_not_rop_nr); for (i = 0; i < 6; i++) nv_wr32(dev, 0x40780c + (i * 4), data[i]); } if (1) { u32 tp_mask = 0, tp_set = 0; u8 tpnr[GPC_MAX]; memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); for (gpc = 0; gpc < priv->gpc_nr; gpc++) tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8); gpc = -1; for (i = 0, gpc = -1; i < 32; i++) { int ltp = i * (priv->tp_total - 1) / 32; do { gpc = (gpc + 1) % priv->gpc_nr; } while (!tpnr[gpc]); tp = priv->tp_nr[gpc] - tpnr[gpc]--; tp_set |= 1 << ((gpc * 8) + tp); do { nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); tp_set ^= tp_mask; nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set); tp_set ^= tp_mask; } while (ltp == (++i * (priv->tp_total - 1) / 32)); i--; } } nv_wr32(dev, 0x400208, 0x80000000); nv_icmd(dev, 0x00001000, 0x00000004); nv_icmd(dev, 0x000000a9, 0x0000ffff); nv_icmd(dev, 0x00000038, 0x0fac6881); nv_icmd(dev, 0x0000003d, 0x00000001); nv_icmd(dev, 0x000000e8, 0x00000400); nv_icmd(dev, 0x000000e9, 0x00000400); nv_icmd(dev, 0x000000ea, 0x00000400); nv_icmd(dev, 0x000000eb, 0x00000400); nv_icmd(dev, 0x000000ec, 0x00000400); nv_icmd(dev, 0x000000ed, 0x00000400); nv_icmd(dev, 0x000000ee, 0x00000400); nv_icmd(dev, 0x000000ef, 0x00000400); 
nv_icmd(dev, 0x00000078, 0x00000300); nv_icmd(dev, 0x00000079, 0x00000300); nv_icmd(dev, 0x0000007a, 0x00000300); nv_icmd(dev, 0x0000007b, 0x00000300); nv_icmd(dev, 0x0000007c, 0x00000300); nv_icmd(dev, 0x0000007d, 0x00000300); nv_icmd(dev, 0x0000007e, 0x00000300); nv_icmd(dev, 0x0000007f, 0x00000300); nv_icmd(dev, 0x00000050, 0x00000011); nv_icmd(dev, 0x00000058, 0x00000008); nv_icmd(dev, 0x00000059, 0x00000008); nv_icmd(dev, 0x0000005a, 0x00000008); nv_icmd(dev, 0x0000005b, 0x00000008); nv_icmd(dev, 0x0000005c, 0x00000008); nv_icmd(dev, 0x0000005d, 0x00000008); nv_icmd(dev, 0x0000005e, 0x00000008); nv_icmd(dev, 0x0000005f, 0x00000008); nv_icmd(dev, 0x00000208, 0x00000001); nv_icmd(dev, 0x00000209, 0x00000001); nv_icmd(dev, 0x0000020a, 0x00000001); nv_icmd(dev, 0x0000020b, 0x00000001); nv_icmd(dev, 0x0000020c, 0x00000001); nv_icmd(dev, 0x0000020d, 0x00000001); nv_icmd(dev, 0x0000020e, 0x00000001); nv_icmd(dev, 0x0000020f, 0x00000001); nv_icmd(dev, 0x00000081, 0x00000001); nv_icmd(dev, 0x00000085, 0x00000004); nv_icmd(dev, 0x00000088, 0x00000400); nv_icmd(dev, 0x00000090, 0x00000300); nv_icmd(dev, 0x00000098, 0x00001001); nv_icmd(dev, 0x000000e3, 0x00000001); nv_icmd(dev, 0x000000da, 0x00000001); nv_icmd(dev, 0x000000f8, 0x00000003); nv_icmd(dev, 0x000000fa, 0x00000001); nv_icmd(dev, 0x0000009f, 0x0000ffff); nv_icmd(dev, 0x000000a0, 0x0000ffff); nv_icmd(dev, 0x000000a1, 0x0000ffff); nv_icmd(dev, 0x000000a2, 0x0000ffff); nv_icmd(dev, 0x000000b1, 0x00000001); nv_icmd(dev, 0x000000b2, 0x00000000); nv_icmd(dev, 0x000000b3, 0x00000000); nv_icmd(dev, 0x000000b4, 0x00000000); nv_icmd(dev, 0x000000b5, 0x00000000); nv_icmd(dev, 0x000000b6, 0x00000000); nv_icmd(dev, 0x000000b7, 0x00000000); nv_icmd(dev, 0x000000b8, 0x00000000); nv_icmd(dev, 0x000000b9, 0x00000000); nv_icmd(dev, 0x000000ba, 0x00000000); nv_icmd(dev, 0x000000bb, 0x00000000); nv_icmd(dev, 0x000000bc, 0x00000000); nv_icmd(dev, 0x000000bd, 0x00000000); nv_icmd(dev, 0x000000be, 0x00000000); nv_icmd(dev, 
0x000000bf, 0x00000000); nv_icmd(dev, 0x000000c0, 0x00000000); nv_icmd(dev, 0x000000c1, 0x00000000); nv_icmd(dev, 0x000000c2, 0x00000000); nv_icmd(dev, 0x000000c3, 0x00000000); nv_icmd(dev, 0x000000c4, 0x00000000); nv_icmd(dev, 0x000000c5, 0x00000000); nv_icmd(dev, 0x000000c6, 0x00000000); nv_icmd(dev, 0x000000c7, 0x00000000); nv_icmd(dev, 0x000000c8, 0x00000000); nv_icmd(dev, 0x000000c9, 0x00000000); nv_icmd(dev, 0x000000ca, 0x00000000); nv_icmd(dev, 0x000000cb, 0x00000000); nv_icmd(dev, 0x000000cc, 0x00000000); nv_icmd(dev, 0x000000cd, 0x00000000); nv_icmd(dev, 0x000000ce, 0x00000000); nv_icmd(dev, 0x000000cf, 0x00000000); nv_icmd(dev, 0x000000d0, 0x00000000); nv_icmd(dev, 0x000000d1, 0x00000000); nv_icmd(dev, 0x000000d2, 0x00000000); nv_icmd(dev, 0x000000d3, 0x00000000); nv_icmd(dev, 0x000000d4, 0x00000000); nv_icmd(dev, 0x000000d5, 0x00000000); nv_icmd(dev, 0x000000d6, 0x00000000); nv_icmd(dev, 0x000000d7, 0x00000000); nv_icmd(dev, 0x000000d8, 0x00000000); nv_icmd(dev, 0x000000d9, 0x00000000); nv_icmd(dev, 0x00000210, 0x00000040); nv_icmd(dev, 0x00000211, 0x00000040); nv_icmd(dev, 0x00000212, 0x00000040); nv_icmd(dev, 0x00000213, 0x00000040); nv_icmd(dev, 0x00000214, 0x00000040); nv_icmd(dev, 0x00000215, 0x00000040); nv_icmd(dev, 0x00000216, 0x00000040); nv_icmd(dev, 0x00000217, 0x00000040); nv_icmd(dev, 0x00000218, 0x0000c080); nv_icmd(dev, 0x00000219, 0x0000c080); nv_icmd(dev, 0x0000021a, 0x0000c080); nv_icmd(dev, 0x0000021b, 0x0000c080); nv_icmd(dev, 0x0000021c, 0x0000c080); nv_icmd(dev, 0x0000021d, 0x0000c080); nv_icmd(dev, 0x0000021e, 0x0000c080); nv_icmd(dev, 0x0000021f, 0x0000c080); nv_icmd(dev, 0x000000ad, 0x0000013e); nv_icmd(dev, 0x000000e1, 0x00000010); nv_icmd(dev, 0x00000290, 0x00000000); nv_icmd(dev, 0x00000291, 0x00000000); nv_icmd(dev, 0x00000292, 0x00000000); nv_icmd(dev, 0x00000293, 0x00000000); nv_icmd(dev, 0x00000294, 0x00000000); nv_icmd(dev, 0x00000295, 0x00000000); nv_icmd(dev, 0x00000296, 0x00000000); nv_icmd(dev, 0x00000297, 
0x00000000); nv_icmd(dev, 0x00000298, 0x00000000); nv_icmd(dev, 0x00000299, 0x00000000); nv_icmd(dev, 0x0000029a, 0x00000000); nv_icmd(dev, 0x0000029b, 0x00000000); nv_icmd(dev, 0x0000029c, 0x00000000); nv_icmd(dev, 0x0000029d, 0x00000000); nv_icmd(dev, 0x0000029e, 0x00000000); nv_icmd(dev, 0x0000029f, 0x00000000); nv_icmd(dev, 0x000003b0, 0x00000000); nv_icmd(dev, 0x000003b1, 0x00000000); nv_icmd(dev, 0x000003b2, 0x00000000); nv_icmd(dev, 0x000003b3, 0x00000000); nv_icmd(dev, 0x000003b4, 0x00000000); nv_icmd(dev, 0x000003b5, 0x00000000); nv_icmd(dev, 0x000003b6, 0x00000000); nv_icmd(dev, 0x000003b7, 0x00000000); nv_icmd(dev, 0x000003b8, 0x00000000); nv_icmd(dev, 0x000003b9, 0x00000000); nv_icmd(dev, 0x000003ba, 0x00000000); nv_icmd(dev, 0x000003bb, 0x00000000); nv_icmd(dev, 0x000003bc, 0x00000000); nv_icmd(dev, 0x000003bd, 0x00000000); nv_icmd(dev, 0x000003be, 0x00000000); nv_icmd(dev, 0x000003bf, 0x00000000); nv_icmd(dev, 0x000002a0, 0x00000000); nv_icmd(dev, 0x000002a1, 0x00000000); nv_icmd(dev, 0x000002a2, 0x00000000); nv_icmd(dev, 0x000002a3, 0x00000000); nv_icmd(dev, 0x000002a4, 0x00000000); nv_icmd(dev, 0x000002a5, 0x00000000); nv_icmd(dev, 0x000002a6, 0x00000000); nv_icmd(dev, 0x000002a7, 0x00000000); nv_icmd(dev, 0x000002a8, 0x00000000); nv_icmd(dev, 0x000002a9, 0x00000000); nv_icmd(dev, 0x000002aa, 0x00000000); nv_icmd(dev, 0x000002ab, 0x00000000); nv_icmd(dev, 0x000002ac, 0x00000000); nv_icmd(dev, 0x000002ad, 0x00000000); nv_icmd(dev, 0x000002ae, 0x00000000); nv_icmd(dev, 0x000002af, 0x00000000); nv_icmd(dev, 0x00000420, 0x00000000); nv_icmd(dev, 0x00000421, 0x00000000); nv_icmd(dev, 0x00000422, 0x00000000); nv_icmd(dev, 0x00000423, 0x00000000); nv_icmd(dev, 0x00000424, 0x00000000); nv_icmd(dev, 0x00000425, 0x00000000); nv_icmd(dev, 0x00000426, 0x00000000); nv_icmd(dev, 0x00000427, 0x00000000); nv_icmd(dev, 0x00000428, 0x00000000); nv_icmd(dev, 0x00000429, 0x00000000); nv_icmd(dev, 0x0000042a, 0x00000000); nv_icmd(dev, 0x0000042b, 0x00000000); 
nv_icmd(dev, 0x0000042c, 0x00000000); nv_icmd(dev, 0x0000042d, 0x00000000); nv_icmd(dev, 0x0000042e, 0x00000000); nv_icmd(dev, 0x0000042f, 0x00000000); nv_icmd(dev, 0x000002b0, 0x00000000); nv_icmd(dev, 0x000002b1, 0x00000000); nv_icmd(dev, 0x000002b2, 0x00000000); nv_icmd(dev, 0x000002b3, 0x00000000); nv_icmd(dev, 0x000002b4, 0x00000000); nv_icmd(dev, 0x000002b5, 0x00000000); nv_icmd(dev, 0x000002b6, 0x00000000); nv_icmd(dev, 0x000002b7, 0x00000000); nv_icmd(dev, 0x000002b8, 0x00000000); nv_icmd(dev, 0x000002b9, 0x00000000); nv_icmd(dev, 0x000002ba, 0x00000000); nv_icmd(dev, 0x000002bb, 0x00000000); nv_icmd(dev, 0x000002bc, 0x00000000); nv_icmd(dev, 0x000002bd, 0x00000000); nv_icmd(dev, 0x000002be, 0x00000000); nv_icmd(dev, 0x000002bf, 0x00000000); nv_icmd(dev, 0x00000430, 0x00000000); nv_icmd(dev, 0x00000431, 0x00000000); nv_icmd(dev, 0x00000432, 0x00000000); nv_icmd(dev, 0x00000433, 0x00000000); nv_icmd(dev, 0x00000434, 0x00000000); nv_icmd(dev, 0x00000435, 0x00000000); nv_icmd(dev, 0x00000436, 0x00000000); nv_icmd(dev, 0x00000437, 0x00000000); nv_icmd(dev, 0x00000438, 0x00000000); nv_icmd(dev, 0x00000439, 0x00000000); nv_icmd(dev, 0x0000043a, 0x00000000); nv_icmd(dev, 0x0000043b, 0x00000000); nv_icmd(dev, 0x0000043c, 0x00000000); nv_icmd(dev, 0x0000043d, 0x00000000); nv_icmd(dev, 0x0000043e, 0x00000000); nv_icmd(dev, 0x0000043f, 0x00000000); nv_icmd(dev, 0x000002c0, 0x00000000); nv_icmd(dev, 0x000002c1, 0x00000000); nv_icmd(dev, 0x000002c2, 0x00000000); nv_icmd(dev, 0x000002c3, 0x00000000); nv_icmd(dev, 0x000002c4, 0x00000000); nv_icmd(dev, 0x000002c5, 0x00000000); nv_icmd(dev, 0x000002c6, 0x00000000); nv_icmd(dev, 0x000002c7, 0x00000000); nv_icmd(dev, 0x000002c8, 0x00000000); nv_icmd(dev, 0x000002c9, 0x00000000); nv_icmd(dev, 0x000002ca, 0x00000000); nv_icmd(dev, 0x000002cb, 0x00000000); nv_icmd(dev, 0x000002cc, 0x00000000); nv_icmd(dev, 0x000002cd, 0x00000000); nv_icmd(dev, 0x000002ce, 0x00000000); nv_icmd(dev, 0x000002cf, 0x00000000); nv_icmd(dev, 
0x000004d0, 0x00000000); nv_icmd(dev, 0x000004d1, 0x00000000); nv_icmd(dev, 0x000004d2, 0x00000000); nv_icmd(dev, 0x000004d3, 0x00000000); nv_icmd(dev, 0x000004d4, 0x00000000); nv_icmd(dev, 0x000004d5, 0x00000000); nv_icmd(dev, 0x000004d6, 0x00000000); nv_icmd(dev, 0x000004d7, 0x00000000); nv_icmd(dev, 0x000004d8, 0x00000000); nv_icmd(dev, 0x000004d9, 0x00000000); nv_icmd(dev, 0x000004da, 0x00000000); nv_icmd(dev, 0x000004db, 0x00000000); nv_icmd(dev, 0x000004dc, 0x00000000); nv_icmd(dev, 0x000004dd, 0x00000000); nv_icmd(dev, 0x000004de, 0x00000000); nv_icmd(dev, 0x000004df, 0x00000000); nv_icmd(dev, 0x00000720, 0x00000000); nv_icmd(dev, 0x00000721, 0x00000000); nv_icmd(dev, 0x00000722, 0x00000000); nv_icmd(dev, 0x00000723, 0x00000000); nv_icmd(dev, 0x00000724, 0x00000000); nv_icmd(dev, 0x00000725, 0x00000000); nv_icmd(dev, 0x00000726, 0x00000000); nv_icmd(dev, 0x00000727, 0x00000000); nv_icmd(dev, 0x00000728, 0x00000000); nv_icmd(dev, 0x00000729, 0x00000000); nv_icmd(dev, 0x0000072a, 0x00000000); nv_icmd(dev, 0x0000072b, 0x00000000); nv_icmd(dev, 0x0000072c, 0x00000000); nv_icmd(dev, 0x0000072d, 0x00000000); nv_icmd(dev, 0x0000072e, 0x00000000); nv_icmd(dev, 0x0000072f, 0x00000000); nv_icmd(dev, 0x000008c0, 0x00000000); nv_icmd(dev, 0x000008c1, 0x00000000); nv_icmd(dev, 0x000008c2, 0x00000000); nv_icmd(dev, 0x000008c3, 0x00000000); nv_icmd(dev, 0x000008c4, 0x00000000); nv_icmd(dev, 0x000008c5, 0x00000000); nv_icmd(dev, 0x000008c6, 0x00000000); nv_icmd(dev, 0x000008c7, 0x00000000); nv_icmd(dev, 0x000008c8, 0x00000000); nv_icmd(dev, 0x000008c9, 0x00000000); nv_icmd(dev, 0x000008ca, 0x00000000); nv_icmd(dev, 0x000008cb, 0x00000000); nv_icmd(dev, 0x000008cc, 0x00000000); nv_icmd(dev, 0x000008cd, 0x00000000); nv_icmd(dev, 0x000008ce, 0x00000000); nv_icmd(dev, 0x000008cf, 0x00000000); nv_icmd(dev, 0x00000890, 0x00000000); nv_icmd(dev, 0x00000891, 0x00000000); nv_icmd(dev, 0x00000892, 0x00000000); nv_icmd(dev, 0x00000893, 0x00000000); nv_icmd(dev, 0x00000894, 
0x00000000); nv_icmd(dev, 0x00000895, 0x00000000); nv_icmd(dev, 0x00000896, 0x00000000); nv_icmd(dev, 0x00000897, 0x00000000); nv_icmd(dev, 0x00000898, 0x00000000); nv_icmd(dev, 0x00000899, 0x00000000); nv_icmd(dev, 0x0000089a, 0x00000000); nv_icmd(dev, 0x0000089b, 0x00000000); nv_icmd(dev, 0x0000089c, 0x00000000); nv_icmd(dev, 0x0000089d, 0x00000000); nv_icmd(dev, 0x0000089e, 0x00000000); nv_icmd(dev, 0x0000089f, 0x00000000); nv_icmd(dev, 0x000008e0, 0x00000000); nv_icmd(dev, 0x000008e1, 0x00000000); nv_icmd(dev, 0x000008e2, 0x00000000); nv_icmd(dev, 0x000008e3, 0x00000000); nv_icmd(dev, 0x000008e4, 0x00000000); nv_icmd(dev, 0x000008e5, 0x00000000); nv_icmd(dev, 0x000008e6, 0x00000000); nv_icmd(dev, 0x000008e7, 0x00000000); nv_icmd(dev, 0x000008e8, 0x00000000); nv_icmd(dev, 0x000008e9, 0x00000000); nv_icmd(dev, 0x000008ea, 0x00000000); nv_icmd(dev, 0x000008eb, 0x00000000); nv_icmd(dev, 0x000008ec, 0x00000000); nv_icmd(dev, 0x000008ed, 0x00000000); nv_icmd(dev, 0x000008ee, 0x00000000); nv_icmd(dev, 0x000008ef, 0x00000000); nv_icmd(dev, 0x000008a0, 0x00000000); nv_icmd(dev, 0x000008a1, 0x00000000); nv_icmd(dev, 0x000008a2, 0x00000000); nv_icmd(dev, 0x000008a3, 0x00000000); nv_icmd(dev, 0x000008a4, 0x00000000); nv_icmd(dev, 0x000008a5, 0x00000000); nv_icmd(dev, 0x000008a6, 0x00000000); nv_icmd(dev, 0x000008a7, 0x00000000); nv_icmd(dev, 0x000008a8, 0x00000000); nv_icmd(dev, 0x000008a9, 0x00000000); nv_icmd(dev, 0x000008aa, 0x00000000); nv_icmd(dev, 0x000008ab, 0x00000000); nv_icmd(dev, 0x000008ac, 0x00000000); nv_icmd(dev, 0x000008ad, 0x00000000); nv_icmd(dev, 0x000008ae, 0x00000000); nv_icmd(dev, 0x000008af, 0x00000000); nv_icmd(dev, 0x000008f0, 0x00000000); nv_icmd(dev, 0x000008f1, 0x00000000); nv_icmd(dev, 0x000008f2, 0x00000000); nv_icmd(dev, 0x000008f3, 0x00000000); nv_icmd(dev, 0x000008f4, 0x00000000); nv_icmd(dev, 0x000008f5, 0x00000000); nv_icmd(dev, 0x000008f6, 0x00000000); nv_icmd(dev, 0x000008f7, 0x00000000); nv_icmd(dev, 0x000008f8, 0x00000000); 
nv_icmd(dev, 0x000008f9, 0x00000000); nv_icmd(dev, 0x000008fa, 0x00000000); nv_icmd(dev, 0x000008fb, 0x00000000); nv_icmd(dev, 0x000008fc, 0x00000000); nv_icmd(dev, 0x000008fd, 0x00000000); nv_icmd(dev, 0x000008fe, 0x00000000); nv_icmd(dev, 0x000008ff, 0x00000000); nv_icmd(dev, 0x0000094c, 0x000000ff); nv_icmd(dev, 0x0000094d, 0xffffffff); nv_icmd(dev, 0x0000094e, 0x00000002); nv_icmd(dev, 0x000002ec, 0x00000001); nv_icmd(dev, 0x00000303, 0x00000001); nv_icmd(dev, 0x000002e6, 0x00000001); nv_icmd(dev, 0x00000466, 0x00000052); nv_icmd(dev, 0x00000301, 0x3f800000); nv_icmd(dev, 0x00000304, 0x30201000); nv_icmd(dev, 0x00000305, 0x70605040); nv_icmd(dev, 0x00000306, 0xb8a89888); nv_icmd(dev, 0x00000307, 0xf8e8d8c8); nv_icmd(dev, 0x0000030a, 0x00ffff00); nv_icmd(dev, 0x0000030b, 0x0000001a); nv_icmd(dev, 0x0000030c, 0x00000001); nv_icmd(dev, 0x00000318, 0x00000001); nv_icmd(dev, 0x00000340, 0x00000000); nv_icmd(dev, 0x00000375, 0x00000001); nv_icmd(dev, 0x00000351, 0x00000100); nv_icmd(dev, 0x0000037d, 0x00000006); nv_icmd(dev, 0x000003a0, 0x00000002); nv_icmd(dev, 0x000003aa, 0x00000001); nv_icmd(dev, 0x000003a9, 0x00000001); nv_icmd(dev, 0x00000380, 0x00000001); nv_icmd(dev, 0x00000360, 0x00000040); nv_icmd(dev, 0x00000366, 0x00000000); nv_icmd(dev, 0x00000367, 0x00000000); nv_icmd(dev, 0x00000368, 0x00001fff); nv_icmd(dev, 0x00000370, 0x00000000); nv_icmd(dev, 0x00000371, 0x00000000); nv_icmd(dev, 0x00000372, 0x003fffff); nv_icmd(dev, 0x0000037a, 0x00000012); nv_icmd(dev, 0x000005e0, 0x00000022); nv_icmd(dev, 0x000005e1, 0x00000022); nv_icmd(dev, 0x000005e2, 0x00000022); nv_icmd(dev, 0x000005e3, 0x00000022); nv_icmd(dev, 0x000005e4, 0x00000022); nv_icmd(dev, 0x00000619, 0x00000003); nv_icmd(dev, 0x00000811, 0x00000003); nv_icmd(dev, 0x00000812, 0x00000004); nv_icmd(dev, 0x00000813, 0x00000006); nv_icmd(dev, 0x00000814, 0x00000008); nv_icmd(dev, 0x00000815, 0x0000000b); nv_icmd(dev, 0x00000800, 0x00000001); nv_icmd(dev, 0x00000801, 0x00000001); nv_icmd(dev, 
0x00000802, 0x00000001); nv_icmd(dev, 0x00000803, 0x00000001); nv_icmd(dev, 0x00000804, 0x00000001); nv_icmd(dev, 0x00000805, 0x00000001); nv_icmd(dev, 0x00000632, 0x00000001); nv_icmd(dev, 0x00000633, 0x00000002); nv_icmd(dev, 0x00000634, 0x00000003); nv_icmd(dev, 0x00000635, 0x00000004); nv_icmd(dev, 0x00000654, 0x3f800000); nv_icmd(dev, 0x00000657, 0x3f800000); nv_icmd(dev, 0x00000655, 0x3f800000); nv_icmd(dev, 0x00000656, 0x3f800000); nv_icmd(dev, 0x000006cd, 0x3f800000); nv_icmd(dev, 0x000007f5, 0x3f800000); nv_icmd(dev, 0x000007dc, 0x39291909); nv_icmd(dev, 0x000007dd, 0x79695949); nv_icmd(dev, 0x000007de, 0xb9a99989); nv_icmd(dev, 0x000007df, 0xf9e9d9c9); nv_icmd(dev, 0x000007e8, 0x00003210); nv_icmd(dev, 0x000007e9, 0x00007654); nv_icmd(dev, 0x000007ea, 0x00000098); nv_icmd(dev, 0x000007ec, 0x39291909); nv_icmd(dev, 0x000007ed, 0x79695949); nv_icmd(dev, 0x000007ee, 0xb9a99989); nv_icmd(dev, 0x000007ef, 0xf9e9d9c9); nv_icmd(dev, 0x000007f0, 0x00003210); nv_icmd(dev, 0x000007f1, 0x00007654); nv_icmd(dev, 0x000007f2, 0x00000098); nv_icmd(dev, 0x000005a5, 0x00000001); nv_icmd(dev, 0x00000980, 0x00000000); nv_icmd(dev, 0x00000981, 0x00000000); nv_icmd(dev, 0x00000982, 0x00000000); nv_icmd(dev, 0x00000983, 0x00000000); nv_icmd(dev, 0x00000984, 0x00000000); nv_icmd(dev, 0x00000985, 0x00000000); nv_icmd(dev, 0x00000986, 0x00000000); nv_icmd(dev, 0x00000987, 0x00000000); nv_icmd(dev, 0x00000988, 0x00000000); nv_icmd(dev, 0x00000989, 0x00000000); nv_icmd(dev, 0x0000098a, 0x00000000); nv_icmd(dev, 0x0000098b, 0x00000000); nv_icmd(dev, 0x0000098c, 0x00000000); nv_icmd(dev, 0x0000098d, 0x00000000); nv_icmd(dev, 0x0000098e, 0x00000000); nv_icmd(dev, 0x0000098f, 0x00000000); nv_icmd(dev, 0x00000990, 0x00000000); nv_icmd(dev, 0x00000991, 0x00000000); nv_icmd(dev, 0x00000992, 0x00000000); nv_icmd(dev, 0x00000993, 0x00000000); nv_icmd(dev, 0x00000994, 0x00000000); nv_icmd(dev, 0x00000995, 0x00000000); nv_icmd(dev, 0x00000996, 0x00000000); nv_icmd(dev, 0x00000997, 
0x00000000); nv_icmd(dev, 0x00000998, 0x00000000); nv_icmd(dev, 0x00000999, 0x00000000); nv_icmd(dev, 0x0000099a, 0x00000000); nv_icmd(dev, 0x0000099b, 0x00000000); nv_icmd(dev, 0x0000099c, 0x00000000); nv_icmd(dev, 0x0000099d, 0x00000000); nv_icmd(dev, 0x0000099e, 0x00000000); nv_icmd(dev, 0x0000099f, 0x00000000); nv_icmd(dev, 0x000009a0, 0x00000000); nv_icmd(dev, 0x000009a1, 0x00000000); nv_icmd(dev, 0x000009a2, 0x00000000); nv_icmd(dev, 0x000009a3, 0x00000000); nv_icmd(dev, 0x000009a4, 0x00000000); nv_icmd(dev, 0x000009a5, 0x00000000); nv_icmd(dev, 0x000009a6, 0x00000000); nv_icmd(dev, 0x000009a7, 0x00000000); nv_icmd(dev, 0x000009a8, 0x00000000); nv_icmd(dev, 0x000009a9, 0x00000000); nv_icmd(dev, 0x000009aa, 0x00000000); nv_icmd(dev, 0x000009ab, 0x00000000); nv_icmd(dev, 0x000009ac, 0x00000000); nv_icmd(dev, 0x000009ad, 0x00000000); nv_icmd(dev, 0x000009ae, 0x00000000); nv_icmd(dev, 0x000009af, 0x00000000); nv_icmd(dev, 0x000009b0, 0x00000000); nv_icmd(dev, 0x000009b1, 0x00000000); nv_icmd(dev, 0x000009b2, 0x00000000); nv_icmd(dev, 0x000009b3, 0x00000000); nv_icmd(dev, 0x000009b4, 0x00000000); nv_icmd(dev, 0x000009b5, 0x00000000); nv_icmd(dev, 0x000009b6, 0x00000000); nv_icmd(dev, 0x000009b7, 0x00000000); nv_icmd(dev, 0x000009b8, 0x00000000); nv_icmd(dev, 0x000009b9, 0x00000000); nv_icmd(dev, 0x000009ba, 0x00000000); nv_icmd(dev, 0x000009bb, 0x00000000); nv_icmd(dev, 0x000009bc, 0x00000000); nv_icmd(dev, 0x000009bd, 0x00000000); nv_icmd(dev, 0x000009be, 0x00000000); nv_icmd(dev, 0x000009bf, 0x00000000); nv_icmd(dev, 0x000009c0, 0x00000000); nv_icmd(dev, 0x000009c1, 0x00000000); nv_icmd(dev, 0x000009c2, 0x00000000); nv_icmd(dev, 0x000009c3, 0x00000000); nv_icmd(dev, 0x000009c4, 0x00000000); nv_icmd(dev, 0x000009c5, 0x00000000); nv_icmd(dev, 0x000009c6, 0x00000000); nv_icmd(dev, 0x000009c7, 0x00000000); nv_icmd(dev, 0x000009c8, 0x00000000); nv_icmd(dev, 0x000009c9, 0x00000000); nv_icmd(dev, 0x000009ca, 0x00000000); nv_icmd(dev, 0x000009cb, 0x00000000); 
nv_icmd(dev, 0x000009cc, 0x00000000); nv_icmd(dev, 0x000009cd, 0x00000000); nv_icmd(dev, 0x000009ce, 0x00000000); nv_icmd(dev, 0x000009cf, 0x00000000); nv_icmd(dev, 0x000009d0, 0x00000000); nv_icmd(dev, 0x000009d1, 0x00000000); nv_icmd(dev, 0x000009d2, 0x00000000); nv_icmd(dev, 0x000009d3, 0x00000000); nv_icmd(dev, 0x000009d4, 0x00000000); nv_icmd(dev, 0x000009d5, 0x00000000); nv_icmd(dev, 0x000009d6, 0x00000000); nv_icmd(dev, 0x000009d7, 0x00000000); nv_icmd(dev, 0x000009d8, 0x00000000); nv_icmd(dev, 0x000009d9, 0x00000000); nv_icmd(dev, 0x000009da, 0x00000000); nv_icmd(dev, 0x000009db, 0x00000000); nv_icmd(dev, 0x000009dc, 0x00000000); nv_icmd(dev, 0x000009dd, 0x00000000); nv_icmd(dev, 0x000009de, 0x00000000); nv_icmd(dev, 0x000009df, 0x00000000); nv_icmd(dev, 0x000009e0, 0x00000000); nv_icmd(dev, 0x000009e1, 0x00000000); nv_icmd(dev, 0x000009e2, 0x00000000); nv_icmd(dev, 0x000009e3, 0x00000000); nv_icmd(dev, 0x000009e4, 0x00000000); nv_icmd(dev, 0x000009e5, 0x00000000); nv_icmd(dev, 0x000009e6, 0x00000000); nv_icmd(dev, 0x000009e7, 0x00000000); nv_icmd(dev, 0x000009e8, 0x00000000); nv_icmd(dev, 0x000009e9, 0x00000000); nv_icmd(dev, 0x000009ea, 0x00000000); nv_icmd(dev, 0x000009eb, 0x00000000); nv_icmd(dev, 0x000009ec, 0x00000000); nv_icmd(dev, 0x000009ed, 0x00000000); nv_icmd(dev, 0x000009ee, 0x00000000); nv_icmd(dev, 0x000009ef, 0x00000000); nv_icmd(dev, 0x000009f0, 0x00000000); nv_icmd(dev, 0x000009f1, 0x00000000); nv_icmd(dev, 0x000009f2, 0x00000000); nv_icmd(dev, 0x000009f3, 0x00000000); nv_icmd(dev, 0x000009f4, 0x00000000); nv_icmd(dev, 0x000009f5, 0x00000000); nv_icmd(dev, 0x000009f6, 0x00000000); nv_icmd(dev, 0x000009f7, 0x00000000); nv_icmd(dev, 0x000009f8, 0x00000000); nv_icmd(dev, 0x000009f9, 0x00000000); nv_icmd(dev, 0x000009fa, 0x00000000); nv_icmd(dev, 0x000009fb, 0x00000000); nv_icmd(dev, 0x000009fc, 0x00000000); nv_icmd(dev, 0x000009fd, 0x00000000); nv_icmd(dev, 0x000009fe, 0x00000000); nv_icmd(dev, 0x000009ff, 0x00000000); nv_icmd(dev, 
0x00000468, 0x00000004); nv_icmd(dev, 0x0000046c, 0x00000001); nv_icmd(dev, 0x00000470, 0x00000000); nv_icmd(dev, 0x00000471, 0x00000000); nv_icmd(dev, 0x00000472, 0x00000000); nv_icmd(dev, 0x00000473, 0x00000000); nv_icmd(dev, 0x00000474, 0x00000000); nv_icmd(dev, 0x00000475, 0x00000000); nv_icmd(dev, 0x00000476, 0x00000000); nv_icmd(dev, 0x00000477, 0x00000000); nv_icmd(dev, 0x00000478, 0x00000000); nv_icmd(dev, 0x00000479, 0x00000000); nv_icmd(dev, 0x0000047a, 0x00000000); nv_icmd(dev, 0x0000047b, 0x00000000); nv_icmd(dev, 0x0000047c, 0x00000000); nv_icmd(dev, 0x0000047d, 0x00000000); nv_icmd(dev, 0x0000047e, 0x00000000); nv_icmd(dev, 0x0000047f, 0x00000000); nv_icmd(dev, 0x00000480, 0x00000000); nv_icmd(dev, 0x00000481, 0x00000000); nv_icmd(dev, 0x00000482, 0x00000000); nv_icmd(dev, 0x00000483, 0x00000000); nv_icmd(dev, 0x00000484, 0x00000000); nv_icmd(dev, 0x00000485, 0x00000000); nv_icmd(dev, 0x00000486, 0x00000000); nv_icmd(dev, 0x00000487, 0x00000000); nv_icmd(dev, 0x00000488, 0x00000000); nv_icmd(dev, 0x00000489, 0x00000000); nv_icmd(dev, 0x0000048a, 0x00000000); nv_icmd(dev, 0x0000048b, 0x00000000); nv_icmd(dev, 0x0000048c, 0x00000000); nv_icmd(dev, 0x0000048d, 0x00000000); nv_icmd(dev, 0x0000048e, 0x00000000); nv_icmd(dev, 0x0000048f, 0x00000000); nv_icmd(dev, 0x00000490, 0x00000000); nv_icmd(dev, 0x00000491, 0x00000000); nv_icmd(dev, 0x00000492, 0x00000000); nv_icmd(dev, 0x00000493, 0x00000000); nv_icmd(dev, 0x00000494, 0x00000000); nv_icmd(dev, 0x00000495, 0x00000000); nv_icmd(dev, 0x00000496, 0x00000000); nv_icmd(dev, 0x00000497, 0x00000000); nv_icmd(dev, 0x00000498, 0x00000000); nv_icmd(dev, 0x00000499, 0x00000000); nv_icmd(dev, 0x0000049a, 0x00000000); nv_icmd(dev, 0x0000049b, 0x00000000); nv_icmd(dev, 0x0000049c, 0x00000000); nv_icmd(dev, 0x0000049d, 0x00000000); nv_icmd(dev, 0x0000049e, 0x00000000); nv_icmd(dev, 0x0000049f, 0x00000000); nv_icmd(dev, 0x000004a0, 0x00000000); nv_icmd(dev, 0x000004a1, 0x00000000); nv_icmd(dev, 0x000004a2, 
0x00000000); nv_icmd(dev, 0x000004a3, 0x00000000); nv_icmd(dev, 0x000004a4, 0x00000000); nv_icmd(dev, 0x000004a5, 0x00000000); nv_icmd(dev, 0x000004a6, 0x00000000); nv_icmd(dev, 0x000004a7, 0x00000000); nv_icmd(dev, 0x000004a8, 0x00000000); nv_icmd(dev, 0x000004a9, 0x00000000); nv_icmd(dev, 0x000004aa, 0x00000000); nv_icmd(dev, 0x000004ab, 0x00000000); nv_icmd(dev, 0x000004ac, 0x00000000); nv_icmd(dev, 0x000004ad, 0x00000000); nv_icmd(dev, 0x000004ae, 0x00000000); nv_icmd(dev, 0x000004af, 0x00000000); nv_icmd(dev, 0x000004b0, 0x00000000); nv_icmd(dev, 0x000004b1, 0x00000000); nv_icmd(dev, 0x000004b2, 0x00000000); nv_icmd(dev, 0x000004b3, 0x00000000); nv_icmd(dev, 0x000004b4, 0x00000000); nv_icmd(dev, 0x000004b5, 0x00000000); nv_icmd(dev, 0x000004b6, 0x00000000); nv_icmd(dev, 0x000004b7, 0x00000000); nv_icmd(dev, 0x000004b8, 0x00000000); nv_icmd(dev, 0x000004b9, 0x00000000); nv_icmd(dev, 0x000004ba, 0x00000000); nv_icmd(dev, 0x000004bb, 0x00000000); nv_icmd(dev, 0x000004bc, 0x00000000); nv_icmd(dev, 0x000004bd, 0x00000000); nv_icmd(dev, 0x000004be, 0x00000000); nv_icmd(dev, 0x000004bf, 0x00000000); nv_icmd(dev, 0x000004c0, 0x00000000); nv_icmd(dev, 0x000004c1, 0x00000000); nv_icmd(dev, 0x000004c2, 0x00000000); nv_icmd(dev, 0x000004c3, 0x00000000); nv_icmd(dev, 0x000004c4, 0x00000000); nv_icmd(dev, 0x000004c5, 0x00000000); nv_icmd(dev, 0x000004c6, 0x00000000); nv_icmd(dev, 0x000004c7, 0x00000000); nv_icmd(dev, 0x000004c8, 0x00000000); nv_icmd(dev, 0x000004c9, 0x00000000); nv_icmd(dev, 0x000004ca, 0x00000000); nv_icmd(dev, 0x000004cb, 0x00000000); nv_icmd(dev, 0x000004cc, 0x00000000); nv_icmd(dev, 0x000004cd, 0x00000000); nv_icmd(dev, 0x000004ce, 0x00000000); nv_icmd(dev, 0x000004cf, 0x00000000); nv_icmd(dev, 0x00000510, 0x3f800000); nv_icmd(dev, 0x00000511, 0x3f800000); nv_icmd(dev, 0x00000512, 0x3f800000); nv_icmd(dev, 0x00000513, 0x3f800000); nv_icmd(dev, 0x00000514, 0x3f800000); nv_icmd(dev, 0x00000515, 0x3f800000); nv_icmd(dev, 0x00000516, 0x3f800000); 
nv_icmd(dev, 0x00000517, 0x3f800000); nv_icmd(dev, 0x00000518, 0x3f800000); nv_icmd(dev, 0x00000519, 0x3f800000); nv_icmd(dev, 0x0000051a, 0x3f800000); nv_icmd(dev, 0x0000051b, 0x3f800000); nv_icmd(dev, 0x0000051c, 0x3f800000); nv_icmd(dev, 0x0000051d, 0x3f800000); nv_icmd(dev, 0x0000051e, 0x3f800000); nv_icmd(dev, 0x0000051f, 0x3f800000); nv_icmd(dev, 0x00000520, 0x000002b6); nv_icmd(dev, 0x00000529, 0x00000001); nv_icmd(dev, 0x00000530, 0xffff0000); nv_icmd(dev, 0x00000531, 0xffff0000); nv_icmd(dev, 0x00000532, 0xffff0000); nv_icmd(dev, 0x00000533, 0xffff0000); nv_icmd(dev, 0x00000534, 0xffff0000); nv_icmd(dev, 0x00000535, 0xffff0000); nv_icmd(dev, 0x00000536, 0xffff0000); nv_icmd(dev, 0x00000537, 0xffff0000); nv_icmd(dev, 0x00000538, 0xffff0000); nv_icmd(dev, 0x00000539, 0xffff0000); nv_icmd(dev, 0x0000053a, 0xffff0000); nv_icmd(dev, 0x0000053b, 0xffff0000); nv_icmd(dev, 0x0000053c, 0xffff0000); nv_icmd(dev, 0x0000053d, 0xffff0000); nv_icmd(dev, 0x0000053e, 0xffff0000); nv_icmd(dev, 0x0000053f, 0xffff0000); nv_icmd(dev, 0x00000585, 0x0000003f); nv_icmd(dev, 0x00000576, 0x00000003); nv_icmd(dev, 0x00000586, 0x00000040); nv_icmd(dev, 0x00000582, 0x00000080); nv_icmd(dev, 0x00000583, 0x00000080); nv_icmd(dev, 0x000005c2, 0x00000001); nv_icmd(dev, 0x00000638, 0x00000001); nv_icmd(dev, 0x00000639, 0x00000001); nv_icmd(dev, 0x0000063a, 0x00000002); nv_icmd(dev, 0x0000063b, 0x00000001); nv_icmd(dev, 0x0000063c, 0x00000001); nv_icmd(dev, 0x0000063d, 0x00000002); nv_icmd(dev, 0x0000063e, 0x00000001); nv_icmd(dev, 0x000008b8, 0x00000001); nv_icmd(dev, 0x000008b9, 0x00000001); nv_icmd(dev, 0x000008ba, 0x00000001); nv_icmd(dev, 0x000008bb, 0x00000001); nv_icmd(dev, 0x000008bc, 0x00000001); nv_icmd(dev, 0x000008bd, 0x00000001); nv_icmd(dev, 0x000008be, 0x00000001); nv_icmd(dev, 0x000008bf, 0x00000001); nv_icmd(dev, 0x00000900, 0x00000001); nv_icmd(dev, 0x00000901, 0x00000001); nv_icmd(dev, 0x00000902, 0x00000001); nv_icmd(dev, 0x00000903, 0x00000001); nv_icmd(dev, 
0x00000904, 0x00000001); nv_icmd(dev, 0x00000905, 0x00000001); nv_icmd(dev, 0x00000906, 0x00000001); nv_icmd(dev, 0x00000907, 0x00000001); nv_icmd(dev, 0x00000908, 0x00000002); nv_icmd(dev, 0x00000909, 0x00000002); nv_icmd(dev, 0x0000090a, 0x00000002); nv_icmd(dev, 0x0000090b, 0x00000002); nv_icmd(dev, 0x0000090c, 0x00000002); nv_icmd(dev, 0x0000090d, 0x00000002); nv_icmd(dev, 0x0000090e, 0x00000002); nv_icmd(dev, 0x0000090f, 0x00000002); nv_icmd(dev, 0x00000910, 0x00000001); nv_icmd(dev, 0x00000911, 0x00000001); nv_icmd(dev, 0x00000912, 0x00000001); nv_icmd(dev, 0x00000913, 0x00000001); nv_icmd(dev, 0x00000914, 0x00000001); nv_icmd(dev, 0x00000915, 0x00000001); nv_icmd(dev, 0x00000916, 0x00000001); nv_icmd(dev, 0x00000917, 0x00000001); nv_icmd(dev, 0x00000918, 0x00000001); nv_icmd(dev, 0x00000919, 0x00000001); nv_icmd(dev, 0x0000091a, 0x00000001); nv_icmd(dev, 0x0000091b, 0x00000001); nv_icmd(dev, 0x0000091c, 0x00000001); nv_icmd(dev, 0x0000091d, 0x00000001); nv_icmd(dev, 0x0000091e, 0x00000001); nv_icmd(dev, 0x0000091f, 0x00000001); nv_icmd(dev, 0x00000920, 0x00000002); nv_icmd(dev, 0x00000921, 0x00000002); nv_icmd(dev, 0x00000922, 0x00000002); nv_icmd(dev, 0x00000923, 0x00000002); nv_icmd(dev, 0x00000924, 0x00000002); nv_icmd(dev, 0x00000925, 0x00000002); nv_icmd(dev, 0x00000926, 0x00000002); nv_icmd(dev, 0x00000927, 0x00000002); nv_icmd(dev, 0x00000928, 0x00000001); nv_icmd(dev, 0x00000929, 0x00000001); nv_icmd(dev, 0x0000092a, 0x00000001); nv_icmd(dev, 0x0000092b, 0x00000001); nv_icmd(dev, 0x0000092c, 0x00000001); nv_icmd(dev, 0x0000092d, 0x00000001); nv_icmd(dev, 0x0000092e, 0x00000001); nv_icmd(dev, 0x0000092f, 0x00000001); nv_icmd(dev, 0x00000648, 0x00000001); nv_icmd(dev, 0x00000649, 0x00000001); nv_icmd(dev, 0x0000064a, 0x00000001); nv_icmd(dev, 0x0000064b, 0x00000001); nv_icmd(dev, 0x0000064c, 0x00000001); nv_icmd(dev, 0x0000064d, 0x00000001); nv_icmd(dev, 0x0000064e, 0x00000001); nv_icmd(dev, 0x0000064f, 0x00000001); nv_icmd(dev, 0x00000650, 
0x00000001); nv_icmd(dev, 0x00000658, 0x0000000f); nv_icmd(dev, 0x000007ff, 0x0000000a); nv_icmd(dev, 0x0000066a, 0x40000000); nv_icmd(dev, 0x0000066b, 0x10000000); nv_icmd(dev, 0x0000066c, 0xffff0000); nv_icmd(dev, 0x0000066d, 0xffff0000); nv_icmd(dev, 0x000007af, 0x00000008); nv_icmd(dev, 0x000007b0, 0x00000008); nv_icmd(dev, 0x000007f6, 0x00000001); nv_icmd(dev, 0x000006b2, 0x00000055); nv_icmd(dev, 0x000007ad, 0x00000003); nv_icmd(dev, 0x00000937, 0x00000001); nv_icmd(dev, 0x00000971, 0x00000008); nv_icmd(dev, 0x00000972, 0x00000040); nv_icmd(dev, 0x00000973, 0x0000012c); nv_icmd(dev, 0x0000097c, 0x00000040); nv_icmd(dev, 0x00000979, 0x00000003); nv_icmd(dev, 0x00000975, 0x00000020); nv_icmd(dev, 0x00000976, 0x00000001); nv_icmd(dev, 0x00000977, 0x00000020); nv_icmd(dev, 0x00000978, 0x00000001); nv_icmd(dev, 0x00000957, 0x00000003); nv_icmd(dev, 0x0000095e, 0x20164010); nv_icmd(dev, 0x0000095f, 0x00000020); nv_icmd(dev, 0x00000683, 0x00000006); nv_icmd(dev, 0x00000685, 0x003fffff); nv_icmd(dev, 0x00000687, 0x00000c48); nv_icmd(dev, 0x000006a0, 0x00000005); nv_icmd(dev, 0x00000840, 0x00300008); nv_icmd(dev, 0x00000841, 0x04000080); nv_icmd(dev, 0x00000842, 0x00300008); nv_icmd(dev, 0x00000843, 0x04000080); nv_icmd(dev, 0x00000818, 0x00000000); nv_icmd(dev, 0x00000819, 0x00000000); nv_icmd(dev, 0x0000081a, 0x00000000); nv_icmd(dev, 0x0000081b, 0x00000000); nv_icmd(dev, 0x0000081c, 0x00000000); nv_icmd(dev, 0x0000081d, 0x00000000); nv_icmd(dev, 0x0000081e, 0x00000000); nv_icmd(dev, 0x0000081f, 0x00000000); nv_icmd(dev, 0x00000848, 0x00000000); nv_icmd(dev, 0x00000849, 0x00000000); nv_icmd(dev, 0x0000084a, 0x00000000); nv_icmd(dev, 0x0000084b, 0x00000000); nv_icmd(dev, 0x0000084c, 0x00000000); nv_icmd(dev, 0x0000084d, 0x00000000); nv_icmd(dev, 0x0000084e, 0x00000000); nv_icmd(dev, 0x0000084f, 0x00000000); nv_icmd(dev, 0x00000850, 0x00000000); nv_icmd(dev, 0x00000851, 0x00000000); nv_icmd(dev, 0x00000852, 0x00000000); nv_icmd(dev, 0x00000853, 0x00000000); 
nv_icmd(dev, 0x00000854, 0x00000000); nv_icmd(dev, 0x00000855, 0x00000000); nv_icmd(dev, 0x00000856, 0x00000000); nv_icmd(dev, 0x00000857, 0x00000000); nv_icmd(dev, 0x00000738, 0x00000000); nv_icmd(dev, 0x000006aa, 0x00000001); nv_icmd(dev, 0x000006ab, 0x00000002); nv_icmd(dev, 0x000006ac, 0x00000080); nv_icmd(dev, 0x000006ad, 0x00000100); nv_icmd(dev, 0x000006ae, 0x00000100); nv_icmd(dev, 0x000006b1, 0x00000011); nv_icmd(dev, 0x000006bb, 0x000000cf); nv_icmd(dev, 0x000006ce, 0x2a712488); nv_icmd(dev, 0x00000739, 0x4085c000); nv_icmd(dev, 0x0000073a, 0x00000080); nv_icmd(dev, 0x00000786, 0x80000100); nv_icmd(dev, 0x0000073c, 0x00010100); nv_icmd(dev, 0x0000073d, 0x02800000); nv_icmd(dev, 0x00000787, 0x000000cf); nv_icmd(dev, 0x0000078c, 0x00000008); nv_icmd(dev, 0x00000792, 0x00000001); nv_icmd(dev, 0x00000794, 0x00000001); nv_icmd(dev, 0x00000795, 0x00000001); nv_icmd(dev, 0x00000796, 0x00000001); nv_icmd(dev, 0x00000797, 0x000000cf); nv_icmd(dev, 0x00000836, 0x00000001); nv_icmd(dev, 0x0000079a, 0x00000002); nv_icmd(dev, 0x00000833, 0x04444480); nv_icmd(dev, 0x000007a1, 0x00000001); nv_icmd(dev, 0x000007a3, 0x00000001); nv_icmd(dev, 0x000007a4, 0x00000001); nv_icmd(dev, 0x000007a5, 0x00000001); nv_icmd(dev, 0x00000831, 0x00000004); nv_icmd(dev, 0x0000080c, 0x00000002); nv_icmd(dev, 0x0000080d, 0x00000100); nv_icmd(dev, 0x0000080e, 0x00000100); nv_icmd(dev, 0x0000080f, 0x00000001); nv_icmd(dev, 0x00000823, 0x00000002); nv_icmd(dev, 0x00000824, 0x00000100); nv_icmd(dev, 0x00000825, 0x00000100); nv_icmd(dev, 0x00000826, 0x00000001); nv_icmd(dev, 0x0000095d, 0x00000001); nv_icmd(dev, 0x0000082b, 0x00000004); nv_icmd(dev, 0x00000942, 0x00010001); nv_icmd(dev, 0x00000943, 0x00000001); nv_icmd(dev, 0x00000944, 0x00000022); nv_icmd(dev, 0x000007c5, 0x00010001); nv_icmd(dev, 0x00000834, 0x00000001); nv_icmd(dev, 0x000007c7, 0x00000001); nv_icmd(dev, 0x0000c1b0, 0x0000000f); nv_icmd(dev, 0x0000c1b1, 0x0000000f); nv_icmd(dev, 0x0000c1b2, 0x0000000f); nv_icmd(dev, 
0x0000c1b3, 0x0000000f); nv_icmd(dev, 0x0000c1b4, 0x0000000f); nv_icmd(dev, 0x0000c1b5, 0x0000000f); nv_icmd(dev, 0x0000c1b6, 0x0000000f); nv_icmd(dev, 0x0000c1b7, 0x0000000f); nv_icmd(dev, 0x0000c1b8, 0x0fac6881); nv_icmd(dev, 0x0000c1b9, 0x00fac688); nv_icmd(dev, 0x0001e100, 0x00000001); nv_icmd(dev, 0x00001000, 0x00000002); nv_icmd(dev, 0x000006aa, 0x00000001); nv_icmd(dev, 0x000006ad, 0x00000100); nv_icmd(dev, 0x000006ae, 0x00000100); nv_icmd(dev, 0x000006b1, 0x00000011); nv_icmd(dev, 0x0000078c, 0x00000008); nv_icmd(dev, 0x00000792, 0x00000001); nv_icmd(dev, 0x00000794, 0x00000001); nv_icmd(dev, 0x00000795, 0x00000001); nv_icmd(dev, 0x00000796, 0x00000001); nv_icmd(dev, 0x00000797, 0x000000cf); nv_icmd(dev, 0x0000079a, 0x00000002); nv_icmd(dev, 0x00000833, 0x04444480); nv_icmd(dev, 0x000007a1, 0x00000001); nv_icmd(dev, 0x000007a3, 0x00000001); nv_icmd(dev, 0x000007a4, 0x00000001); nv_icmd(dev, 0x000007a5, 0x00000001); nv_icmd(dev, 0x00000831, 0x00000004); nv_icmd(dev, 0x0001e100, 0x00000001); nv_icmd(dev, 0x00001000, 0x00000014); nv_icmd(dev, 0x00000351, 0x00000100); nv_icmd(dev, 0x00000957, 0x00000003); nv_icmd(dev, 0x0000095d, 0x00000001); nv_icmd(dev, 0x0000082b, 0x00000004); nv_icmd(dev, 0x00000942, 0x00010001); nv_icmd(dev, 0x00000943, 0x00000001); nv_icmd(dev, 0x000007c5, 0x00010001); nv_icmd(dev, 0x00000834, 0x00000001); nv_icmd(dev, 0x000007c7, 0x00000001); nv_icmd(dev, 0x0001e100, 0x00000001); nv_icmd(dev, 0x00001000, 0x00000001); nv_icmd(dev, 0x0000080c, 0x00000002); nv_icmd(dev, 0x0000080d, 0x00000100); nv_icmd(dev, 0x0000080e, 0x00000100); nv_icmd(dev, 0x0000080f, 0x00000001); nv_icmd(dev, 0x00000823, 0x00000002); nv_icmd(dev, 0x00000824, 0x00000100); nv_icmd(dev, 0x00000825, 0x00000100); nv_icmd(dev, 0x00000826, 0x00000001); nv_icmd(dev, 0x0001e100, 0x00000001); nv_wr32(dev, 0x400208, 0x00000000); nv_wr32(dev, 0x404154, 0x00000400); nvc0_grctx_generate_9097(dev); nvc0_grctx_generate_902d(dev); nvc0_grctx_generate_9039(dev); 
nvc0_grctx_generate_90c0(dev); nv_wr32(dev, 0x000260, r000260); return 0; }
gpl-2.0
TeamBliss-Devices/android_kernel_moto_shamu
drivers/tty/serial/arc_uart.c
1957
20569
/* * ARC On-Chip(fpga) UART Driver * * Copyright (C) 2010-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * vineetg: July 10th 2012 * -Decoupled the driver from arch/arc * +Using platform_get_resource() for irq/membase (thx to bfin_uart.c) * +Using early_platform_xxx() for early console (thx to mach-shmobile/xxx) * * Vineetg: Aug 21st 2010 * -Is uart_tx_stopped() not done in tty write path as it has already been * taken care of, in serial core * * Vineetg: Aug 18th 2010 * -New Serial Core based ARC UART driver * -Derived largely from blackfin driver albiet with some major tweaks * * TODO: * -check if sysreq works */ #if defined(CONFIG_SERIAL_ARC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/serial.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/platform_device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_platform.h> /************************************* * ARC UART Hardware Specs ************************************/ #define ARC_UART_TX_FIFO_SIZE 1 /* * UART Register set (this is not a Standards Compliant IP) * Also each reg is Word aligned, but only 8 bits wide */ #define R_ID0 0 #define R_ID1 4 #define R_ID2 8 #define R_ID3 12 #define R_DATA 16 #define R_STS 20 #define R_BAUDL 24 #define R_BAUDH 28 /* Bits for UART Status Reg (R/W) */ #define RXIENB 0x04 /* Receive Interrupt Enable */ #define TXIENB 0x40 /* Transmit Interrupt Enable */ #define RXEMPTY 0x20 /* Receive FIFO Empty: No char receivede */ #define TXEMPTY 0x80 /* Transmit FIFO Empty, thus char can be written into */ #define RXFULL 0x08 /* Receive FIFO full */ #define RXFULL1 0x10 /* Receive FIFO has space for 1 char (tot space=4) */ #define 
RXFERR 0x01 /* Frame Error: Stop Bit not detected */ #define RXOERR 0x02 /* OverFlow Err: Char recv but RXFULL still set */ /* Uart bit fiddling helpers: lowest level */ #define RBASE(uart, reg) (uart->port.membase + reg) #define UART_REG_SET(u, r, v) writeb((v), RBASE(u, r)) #define UART_REG_GET(u, r) readb(RBASE(u, r)) #define UART_REG_OR(u, r, v) UART_REG_SET(u, r, UART_REG_GET(u, r) | (v)) #define UART_REG_CLR(u, r, v) UART_REG_SET(u, r, UART_REG_GET(u, r) & ~(v)) /* Uart bit fiddling helpers: API level */ #define UART_SET_DATA(uart, val) UART_REG_SET(uart, R_DATA, val) #define UART_GET_DATA(uart) UART_REG_GET(uart, R_DATA) #define UART_SET_BAUDH(uart, val) UART_REG_SET(uart, R_BAUDH, val) #define UART_SET_BAUDL(uart, val) UART_REG_SET(uart, R_BAUDL, val) #define UART_CLR_STATUS(uart, val) UART_REG_CLR(uart, R_STS, val) #define UART_GET_STATUS(uart) UART_REG_GET(uart, R_STS) #define UART_ALL_IRQ_DISABLE(uart) UART_REG_CLR(uart, R_STS, RXIENB|TXIENB) #define UART_RX_IRQ_DISABLE(uart) UART_REG_CLR(uart, R_STS, RXIENB) #define UART_TX_IRQ_DISABLE(uart) UART_REG_CLR(uart, R_STS, TXIENB) #define UART_ALL_IRQ_ENABLE(uart) UART_REG_OR(uart, R_STS, RXIENB|TXIENB) #define UART_RX_IRQ_ENABLE(uart) UART_REG_OR(uart, R_STS, RXIENB) #define UART_TX_IRQ_ENABLE(uart) UART_REG_OR(uart, R_STS, TXIENB) #define ARC_SERIAL_DEV_NAME "ttyARC" struct arc_uart_port { struct uart_port port; unsigned long baud; int is_emulated; /* H/w vs. 
Instruction Set Simulator */ }; #define to_arc_port(uport) container_of(uport, struct arc_uart_port, port) static struct arc_uart_port arc_uart_ports[CONFIG_SERIAL_ARC_NR_PORTS]; #ifdef CONFIG_SERIAL_ARC_CONSOLE static struct console arc_console; #endif #define DRIVER_NAME "arc-uart" static struct uart_driver arc_uart_driver = { .owner = THIS_MODULE, .driver_name = DRIVER_NAME, .dev_name = ARC_SERIAL_DEV_NAME, .major = 0, .minor = 0, .nr = CONFIG_SERIAL_ARC_NR_PORTS, #ifdef CONFIG_SERIAL_ARC_CONSOLE .cons = &arc_console, #endif }; static void arc_serial_stop_rx(struct uart_port *port) { struct arc_uart_port *uart = to_arc_port(port); UART_RX_IRQ_DISABLE(uart); } static void arc_serial_stop_tx(struct uart_port *port) { struct arc_uart_port *uart = to_arc_port(port); while (!(UART_GET_STATUS(uart) & TXEMPTY)) cpu_relax(); UART_TX_IRQ_DISABLE(uart); } /* * Return TIOCSER_TEMT when transmitter is not busy. */ static unsigned int arc_serial_tx_empty(struct uart_port *port) { struct arc_uart_port *uart = to_arc_port(port); unsigned int stat; stat = UART_GET_STATUS(uart); if (stat & TXEMPTY) return TIOCSER_TEMT; return 0; } /* * Driver internal routine, used by both tty(serial core) as well as tx-isr * -Called under spinlock in either cases * -also tty->stopped has already been checked * = by uart_start( ) before calling us * = tx_ist checks that too before calling */ static void arc_serial_tx_chars(struct arc_uart_port *uart) { struct circ_buf *xmit = &uart->port.state->xmit; int sent = 0; unsigned char ch; if (unlikely(uart->port.x_char)) { UART_SET_DATA(uart, uart->port.x_char); uart->port.icount.tx++; uart->port.x_char = 0; sent = 1; } else if (xmit->tail != xmit->head) { /* TODO: uart_circ_empty */ ch = xmit->buf[xmit->tail]; xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); uart->port.icount.tx++; while (!(UART_GET_STATUS(uart) & TXEMPTY)) cpu_relax(); UART_SET_DATA(uart, ch); sent = 1; } /* * If num chars in xmit buffer are too few, ask tty layer for more. 
* By Hard ISR to schedule processing in software interrupt part */ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&uart->port); if (sent) UART_TX_IRQ_ENABLE(uart); } /* * port is locked and interrupts are disabled * uart_start( ) calls us under the port spinlock irqsave */ static void arc_serial_start_tx(struct uart_port *port) { struct arc_uart_port *uart = to_arc_port(port); arc_serial_tx_chars(uart); } static void arc_serial_rx_chars(struct arc_uart_port *uart) { unsigned int status, ch, flg = 0; /* * UART has 4 deep RX-FIFO. Driver's recongnition of this fact * is very subtle. Here's how ... * Upon getting a RX-Intr, such that RX-EMPTY=0, meaning data available, * driver reads the DATA Reg and keeps doing that in a loop, until * RX-EMPTY=1. Multiple chars being avail, with a single Interrupt, * before RX-EMPTY=0, implies some sort of buffering going on in the * controller, which is indeed the Rx-FIFO. */ while (!((status = UART_GET_STATUS(uart)) & RXEMPTY)) { ch = UART_GET_DATA(uart); uart->port.icount.rx++; if (unlikely(status & (RXOERR | RXFERR))) { if (status & RXOERR) { uart->port.icount.overrun++; flg = TTY_OVERRUN; UART_CLR_STATUS(uart, RXOERR); } if (status & RXFERR) { uart->port.icount.frame++; flg = TTY_FRAME; UART_CLR_STATUS(uart, RXFERR); } } else flg = TTY_NORMAL; if (unlikely(uart_handle_sysrq_char(&uart->port, ch))) goto done; uart_insert_char(&uart->port, status, RXOERR, ch, flg); done: tty_flip_buffer_push(&uart->port.state->port); } } /* * A note on the Interrupt handling state machine of this driver * * kernel printk writes funnel thru the console driver framework and in order * to keep things simple as well as efficient, it writes to UART in polled * mode, in one shot, and exits. * * OTOH, Userland output (via tty layer), uses interrupt based writes as there * can be undeterministic delay between char writes. * * Thus Rx-interrupts are always enabled, while tx-interrupts are by default * disabled. 
* * When tty has some data to send out, serial core calls driver's start_tx * which * -checks-if-tty-buffer-has-char-to-send * -writes-data-to-uart * -enable-tx-intr * * Once data bits are pushed out, controller raises the Tx-room-avail-Interrupt. * The first thing Tx ISR does is disable further Tx interrupts (as this could * be the last char to send, before settling down into the quiet polled mode). * It then calls the exact routine used by tty layer write to send out any * more char in tty buffer. In case of sending, it re-enables Tx-intr. In case * of no data, it remains disabled. * This is how the transmit state machine is dynamically switched on/off */ static irqreturn_t arc_serial_isr(int irq, void *dev_id) { struct arc_uart_port *uart = dev_id; unsigned int status; status = UART_GET_STATUS(uart); /* * Single IRQ for both Rx (data available) Tx (room available) Interrupt * notifications from the UART Controller. * To demultiplex between the two, we check the relevant bits */ if ((status & RXIENB) && !(status & RXEMPTY)) { /* already in ISR, no need of xx_irqsave */ spin_lock(&uart->port.lock); arc_serial_rx_chars(uart); spin_unlock(&uart->port.lock); } if ((status & TXIENB) && (status & TXEMPTY)) { /* Unconditionally disable further Tx-Interrupts. * will be enabled by tx_chars() if needed. 
*/ UART_TX_IRQ_DISABLE(uart); spin_lock(&uart->port.lock); if (!uart_tx_stopped(&uart->port)) arc_serial_tx_chars(uart); spin_unlock(&uart->port.lock); } return IRQ_HANDLED; } static unsigned int arc_serial_get_mctrl(struct uart_port *port) { /* * Pretend we have a Modem status reg and following bits are * always set, to satify the serial core state machine * (DSR) Data Set Ready * (CTS) Clear To Send * (CAR) Carrier Detect */ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; } static void arc_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) { /* MCR not present */ } /* Enable Modem Status Interrupts */ static void arc_serial_enable_ms(struct uart_port *port) { /* MSR not present */ } static void arc_serial_break_ctl(struct uart_port *port, int break_state) { /* ARC UART doesn't support sending Break signal */ } static int arc_serial_startup(struct uart_port *port) { struct arc_uart_port *uart = to_arc_port(port); /* Before we hook up the ISR, Disable all UART Interrupts */ UART_ALL_IRQ_DISABLE(uart); if (request_irq(uart->port.irq, arc_serial_isr, 0, "arc uart rx-tx", uart)) { dev_warn(uart->port.dev, "Unable to attach ARC UART intr\n"); return -EBUSY; } UART_RX_IRQ_ENABLE(uart); /* Only Rx IRQ enabled to begin with */ return 0; } /* This is not really needed */ static void arc_serial_shutdown(struct uart_port *port) { struct arc_uart_port *uart = to_arc_port(port); free_irq(uart->port.irq, uart); } static void arc_serial_set_termios(struct uart_port *port, struct ktermios *new, struct ktermios *old) { struct arc_uart_port *uart = to_arc_port(port); unsigned int baud, uartl, uarth, hw_val; unsigned long flags; /* * Use the generic handler so that any specially encoded baud rates * such as SPD_xx flags or "%B0" can be handled * Max Baud I suppose will not be more than current 115K * 4 * Formula for ARC UART is: hw-val = ((CLK/(BAUD*4)) -1) * spread over two 8-bit registers */ baud = uart_get_baud_rate(port, new, old, 0, 460800); hw_val = port->uartclk / 
(uart->baud * 4) - 1; uartl = hw_val & 0xFF; uarth = (hw_val >> 8) & 0xFF; /* * UART ISS(Instruction Set simulator) emulation has a subtle bug: * A existing value of Baudh = 0 is used as a indication to startup * it's internal state machine. * Thus if baudh is set to 0, 2 times, it chokes. * This happens with BAUD=115200 and the formaula above * Until that is fixed, when running on ISS, we will set baudh to !0 */ if (uart->is_emulated) uarth = 1; spin_lock_irqsave(&port->lock, flags); UART_ALL_IRQ_DISABLE(uart); UART_SET_BAUDL(uart, uartl); UART_SET_BAUDH(uart, uarth); UART_RX_IRQ_ENABLE(uart); /* * UART doesn't support Parity/Hardware Flow Control; * Only supports 8N1 character size */ new->c_cflag &= ~(CMSPAR|CRTSCTS|CSIZE); new->c_cflag |= CS8; if (old) tty_termios_copy_hw(new, old); /* Don't rewrite B0 */ if (tty_termios_baud_rate(new)) tty_termios_encode_baud_rate(new, baud, baud); uart_update_timeout(port, new->c_cflag, baud); spin_unlock_irqrestore(&port->lock, flags); } static const char *arc_serial_type(struct uart_port *port) { struct arc_uart_port *uart = to_arc_port(port); return uart->port.type == PORT_ARC ? DRIVER_NAME : NULL; } static void arc_serial_release_port(struct uart_port *port) { } static int arc_serial_request_port(struct uart_port *port) { return 0; } /* * Verify the new serial_struct (for TIOCSSERIAL). */ static int arc_serial_verify_port(struct uart_port *port, struct serial_struct *ser) { if (port->type != PORT_UNKNOWN && ser->type != PORT_ARC) return -EINVAL; return 0; } /* * Configure/autoconfigure the port. 
*/ static void arc_serial_config_port(struct uart_port *port, int flags) { struct arc_uart_port *uart = to_arc_port(port); if (flags & UART_CONFIG_TYPE) uart->port.type = PORT_ARC; } #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_ARC_CONSOLE) static void arc_serial_poll_putchar(struct uart_port *port, unsigned char chr) { struct arc_uart_port *uart = to_arc_port(port); while (!(UART_GET_STATUS(uart) & TXEMPTY)) cpu_relax(); UART_SET_DATA(uart, chr); } #endif #ifdef CONFIG_CONSOLE_POLL static int arc_serial_poll_getchar(struct uart_port *port) { struct arc_uart_port *uart = to_arc_port(port); unsigned char chr; while (!(UART_GET_STATUS(uart) & RXEMPTY)) cpu_relax(); chr = UART_GET_DATA(uart); return chr; } #endif static struct uart_ops arc_serial_pops = { .tx_empty = arc_serial_tx_empty, .set_mctrl = arc_serial_set_mctrl, .get_mctrl = arc_serial_get_mctrl, .stop_tx = arc_serial_stop_tx, .start_tx = arc_serial_start_tx, .stop_rx = arc_serial_stop_rx, .enable_ms = arc_serial_enable_ms, .break_ctl = arc_serial_break_ctl, .startup = arc_serial_startup, .shutdown = arc_serial_shutdown, .set_termios = arc_serial_set_termios, .type = arc_serial_type, .release_port = arc_serial_release_port, .request_port = arc_serial_request_port, .config_port = arc_serial_config_port, .verify_port = arc_serial_verify_port, #ifdef CONFIG_CONSOLE_POLL .poll_put_char = arc_serial_poll_putchar, .poll_get_char = arc_serial_poll_getchar, #endif }; static int arc_uart_init_one(struct platform_device *pdev, int dev_id) { struct resource *res, *res2; unsigned long *plat_data; struct arc_uart_port *uart = &arc_uart_ports[dev_id]; plat_data = ((unsigned long *)(pdev->dev.platform_data)); if (!plat_data) return -ENODEV; uart->is_emulated = !!plat_data[0]; /* workaround ISS bug */ if (is_early_platform_device(pdev)) { uart->port.uartclk = plat_data[1]; uart->baud = plat_data[2]; } else { struct device_node *np = pdev->dev.of_node; u32 val; if (of_property_read_u32(np, "clock-frequency", 
&val)) { dev_err(&pdev->dev, "clock-frequency property NOTset\n"); return -EINVAL; } uart->port.uartclk = val; if (of_property_read_u32(np, "current-speed", &val)) { dev_err(&pdev->dev, "current-speed property NOT set\n"); return -EINVAL; } uart->baud = val; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res2) return -ENODEV; uart->port.mapbase = res->start; uart->port.membase = ioremap_nocache(res->start, resource_size(res)); if (!uart->port.membase) /* No point of dev_err since UART itself is hosed here */ return -ENXIO; uart->port.irq = res2->start; uart->port.dev = &pdev->dev; uart->port.iotype = UPIO_MEM; uart->port.flags = UPF_BOOT_AUTOCONF; uart->port.line = dev_id; uart->port.ops = &arc_serial_pops; uart->port.fifosize = ARC_UART_TX_FIFO_SIZE; /* * uart_insert_char( ) uses it in decideding whether to ignore a * char or not. Explicitly setting it here, removes the subtelty */ uart->port.ignore_status_mask = 0; return 0; } #ifdef CONFIG_SERIAL_ARC_CONSOLE static int arc_serial_console_setup(struct console *co, char *options) { struct uart_port *port; int baud = 115200; int bits = 8; int parity = 'n'; int flow = 'n'; if (co->index < 0 || co->index >= CONFIG_SERIAL_ARC_NR_PORTS) return -ENODEV; /* * The uart port backing the console (e.g. ttyARC1) might not have been * init yet. If so, defer the console setup to after the port. 
*/ port = &arc_uart_ports[co->index].port; if (!port->membase) return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); /* * Serial core will call port->ops->set_termios( ) * which will set the baud reg */ return uart_set_options(port, co, baud, parity, bits, flow); } static void arc_serial_console_putchar(struct uart_port *port, int ch) { arc_serial_poll_putchar(port, (unsigned char)ch); } /* * Interrupts are disabled on entering */ static void arc_serial_console_write(struct console *co, const char *s, unsigned int count) { struct uart_port *port = &arc_uart_ports[co->index].port; unsigned long flags; spin_lock_irqsave(&port->lock, flags); uart_console_write(port, s, count, arc_serial_console_putchar); spin_unlock_irqrestore(&port->lock, flags); } static struct console arc_console = { .name = ARC_SERIAL_DEV_NAME, .write = arc_serial_console_write, .device = uart_console_device, .setup = arc_serial_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &arc_uart_driver }; static __init void early_serial_write(struct console *con, const char *s, unsigned int n) { struct uart_port *port = &arc_uart_ports[con->index].port; unsigned int i; for (i = 0; i < n; i++, s++) { if (*s == '\n') arc_serial_poll_putchar(port, '\r'); arc_serial_poll_putchar(port, *s); } } static struct console arc_early_serial_console __initdata = { .name = "early_ARCuart", .write = early_serial_write, .flags = CON_PRINTBUFFER | CON_BOOT, .index = -1 }; static int __init arc_serial_probe_earlyprintk(struct platform_device *pdev) { int dev_id = pdev->id < 0 ? 
0 : pdev->id; int rc; arc_early_serial_console.index = dev_id; rc = arc_uart_init_one(pdev, dev_id); if (rc) panic("early console init failed\n"); arc_serial_console_setup(&arc_early_serial_console, NULL); register_console(&arc_early_serial_console); return 0; } #endif /* CONFIG_SERIAL_ARC_CONSOLE */ static int arc_serial_probe(struct platform_device *pdev) { int rc, dev_id; struct device_node *np = pdev->dev.of_node; /* no device tree device */ if (!np) return -ENODEV; dev_id = of_alias_get_id(np, "serial"); if (dev_id < 0) dev_id = 0; rc = arc_uart_init_one(pdev, dev_id); if (rc) return rc; rc = uart_add_one_port(&arc_uart_driver, &arc_uart_ports[dev_id].port); return rc; } static int arc_serial_remove(struct platform_device *pdev) { /* This will never be called */ return 0; } static const struct of_device_id arc_uart_dt_ids[] = { { .compatible = "snps,arc-uart" }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, arc_uart_dt_ids); static struct platform_driver arc_platform_driver = { .probe = arc_serial_probe, .remove = arc_serial_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = arc_uart_dt_ids, }, }; #ifdef CONFIG_SERIAL_ARC_CONSOLE static struct platform_driver early_arc_platform_driver __initdata = { .probe = arc_serial_probe_earlyprintk, .remove = arc_serial_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; /* * Register an early platform driver of "earlyprintk" class. * ARCH platform code installs the driver and probes the early devices * The installation could rely on user specifying earlyprintk=xyx in cmd line * or it could be done independently, for all "earlyprintk" class drivers. 
* [see arch/arc/plat-arcfpga/platform.c] */ early_platform_init("earlyprintk", &early_arc_platform_driver); #endif /* CONFIG_SERIAL_ARC_CONSOLE */ static int __init arc_serial_init(void) { int ret; ret = uart_register_driver(&arc_uart_driver); if (ret) return ret; ret = platform_driver_register(&arc_platform_driver); if (ret) uart_unregister_driver(&arc_uart_driver); return ret; } static void __exit arc_serial_exit(void) { platform_driver_unregister(&arc_platform_driver); uart_unregister_driver(&arc_uart_driver); } module_init(arc_serial_init); module_exit(arc_serial_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("Vineet Gupta"); MODULE_DESCRIPTION("ARC(Synopsys) On-Chip(fpga) serial driver");
gpl-2.0
aerickson/xbmc
lib/libUPnP/Neptune/ThirdParty/zlib-1.2.8/infback.c
2213
22709
/* infback.c -- inflate using a call-back interface * Copyright (C) 1995-2011 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* This code is largely copied from inflate.c. Normally either infback.o or inflate.o would be linked into an application--not both. The interface with inffast.c is retained so that optimized assembler-coded versions of inflate_fast() can be used with either inflate.c or infback.c. */ #include "zutil.h" #include "inftrees.h" #include "inflate.h" #include "inffast.h" /* function prototypes */ local void fixedtables OF((struct inflate_state FAR *state)); /* strm provides memory allocation functions in zalloc and zfree, or Z_NULL to use the library memory allocation functions. windowBits is in the range 8..15, and window is a user-supplied window and output buffer that is 2**windowBits bytes. */ int ZEXPORT inflateBackInit_(strm, windowBits, window, version, stream_size) z_streamp strm; int windowBits; unsigned char FAR *window; const char *version; int stream_size; { struct inflate_state FAR *state; if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || stream_size != (int)(sizeof(z_stream))) return Z_VERSION_ERROR; if (strm == Z_NULL || window == Z_NULL || windowBits < 8 || windowBits > 15) return Z_STREAM_ERROR; strm->msg = Z_NULL; /* in case we return an error */ if (strm->zalloc == (alloc_func)0) { #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zalloc = zcalloc; strm->opaque = (voidpf)0; #endif } if (strm->zfree == (free_func)0) #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zfree = zcfree; #endif state = (struct inflate_state FAR *)ZALLOC(strm, 1, sizeof(struct inflate_state)); if (state == Z_NULL) return Z_MEM_ERROR; Tracev((stderr, "inflate: allocated\n")); strm->state = (struct internal_state FAR *)state; state->dmax = 32768U; state->wbits = windowBits; state->wsize = 1U << windowBits; state->window = window; state->wnext = 0; state->whave = 0; return Z_OK; } /* Return state with length and 
distance decoding tables and index sizes set to fixed code decoding. Normally this returns fixed tables from inffixed.h. If BUILDFIXED is defined, then instead this routine builds the tables the first time it's called, and returns those tables the first time and thereafter. This reduces the size of the code by about 2K bytes, in exchange for a little execution time. However, BUILDFIXED should not be used for threaded applications, since the rewriting of the tables and virgin may not be thread-safe. */ local void fixedtables(state) struct inflate_state FAR *state; { #ifdef BUILDFIXED static int virgin = 1; static code *lenfix, *distfix; static code fixed[544]; /* build fixed huffman tables if first call (may not be thread safe) */ if (virgin) { unsigned sym, bits; static code *next; /* literal/length table */ sym = 0; while (sym < 144) state->lens[sym++] = 8; while (sym < 256) state->lens[sym++] = 9; while (sym < 280) state->lens[sym++] = 7; while (sym < 288) state->lens[sym++] = 8; next = fixed; lenfix = next; bits = 9; inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); /* distance table */ sym = 0; while (sym < 32) state->lens[sym++] = 5; distfix = next; bits = 5; inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); /* do this just once */ virgin = 0; } #else /* !BUILDFIXED */ # include "inffixed.h" #endif /* BUILDFIXED */ state->lencode = lenfix; state->lenbits = 9; state->distcode = distfix; state->distbits = 5; } /* Macros for inflateBack(): */ /* Load returned state from inflate_fast() */ #define LOAD() \ do { \ put = strm->next_out; \ left = strm->avail_out; \ next = strm->next_in; \ have = strm->avail_in; \ hold = state->hold; \ bits = state->bits; \ } while (0) /* Set state from registers for inflate_fast() */ #define RESTORE() \ do { \ strm->next_out = put; \ strm->avail_out = left; \ strm->next_in = next; \ strm->avail_in = have; \ state->hold = hold; \ state->bits = bits; \ } while (0) /* Clear the input bit 
accumulator */ #define INITBITS() \ do { \ hold = 0; \ bits = 0; \ } while (0) /* Assure that some input is available. If input is requested, but denied, then return a Z_BUF_ERROR from inflateBack(). */ #define PULL() \ do { \ if (have == 0) { \ have = in(in_desc, &next); \ if (have == 0) { \ next = Z_NULL; \ ret = Z_BUF_ERROR; \ goto inf_leave; \ } \ } \ } while (0) /* Get a byte of input into the bit accumulator, or return from inflateBack() with an error if there is no input available. */ #define PULLBYTE() \ do { \ PULL(); \ have--; \ hold += (unsigned long)(*next++) << bits; \ bits += 8; \ } while (0) /* Assure that there are at least n bits in the bit accumulator. If there is not enough available input to do that, then return from inflateBack() with an error. */ #define NEEDBITS(n) \ do { \ while (bits < (unsigned)(n)) \ PULLBYTE(); \ } while (0) /* Return the low n bits of the bit accumulator (n < 16) */ #define BITS(n) \ ((unsigned)hold & ((1U << (n)) - 1)) /* Remove n bits from the bit accumulator */ #define DROPBITS(n) \ do { \ hold >>= (n); \ bits -= (unsigned)(n); \ } while (0) /* Remove zero to seven bits as needed to go to a byte boundary */ #define BYTEBITS() \ do { \ hold >>= bits & 7; \ bits -= bits & 7; \ } while (0) /* Assure that some output space is available, by writing out the window if it's full. If the write fails, return from inflateBack() with a Z_BUF_ERROR. */ #define ROOM() \ do { \ if (left == 0) { \ put = state->window; \ left = state->wsize; \ state->whave = left; \ if (out(out_desc, put, left)) { \ ret = Z_BUF_ERROR; \ goto inf_leave; \ } \ } \ } while (0) /* strm provides the memory allocation functions and window buffer on input, and provides information on the unused input on return. For Z_DATA_ERROR returns, strm will also provide an error message. in() and out() are the call-back input and output functions. When inflateBack() needs more input, it calls in(). 
When inflateBack() has filled the window with output, or when it completes with data in the window, it calls out() to write out the data. The application must not change the provided input until in() is called again or inflateBack() returns. The application must not change the window/output buffer until inflateBack() returns. in() and out() are called with a descriptor parameter provided in the inflateBack() call. This parameter can be a structure that provides the information required to do the read or write, as well as accumulated information on the input and output such as totals and check values. in() should return zero on failure. out() should return non-zero on failure. If either in() or out() fails, than inflateBack() returns a Z_BUF_ERROR. strm->next_in can be checked for Z_NULL to see whether it was in() or out() that caused in the error. Otherwise, inflateBack() returns Z_STREAM_END on success, Z_DATA_ERROR for an deflate format error, or Z_MEM_ERROR if it could not allocate memory for the state. inflateBack() can also return Z_STREAM_ERROR if the input parameters are not correct, i.e. strm is Z_NULL or the state was not initialized. 
*/ int ZEXPORT inflateBack(strm, in, in_desc, out, out_desc) z_streamp strm; in_func in; void FAR *in_desc; out_func out; void FAR *out_desc; { struct inflate_state FAR *state; z_const unsigned char FAR *next; /* next input */ unsigned char FAR *put; /* next output */ unsigned have, left; /* available input and output */ unsigned long hold; /* bit buffer */ unsigned bits; /* bits in bit buffer */ unsigned copy; /* number of stored or match bytes to copy */ unsigned char FAR *from; /* where to copy match bytes from */ code here; /* current decoding table entry */ code last; /* parent table entry */ unsigned len; /* length to copy for repeats, bits to drop */ int ret; /* return code */ static const unsigned short order[19] = /* permutation of code lengths */ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; /* Check that the strm exists and that the state was initialized */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; /* Reset the state */ strm->msg = Z_NULL; state->mode = TYPE; state->last = 0; state->whave = 0; next = strm->next_in; have = next != Z_NULL ? strm->avail_in : 0; hold = 0; bits = 0; put = state->window; left = state->wsize; /* Inflate until end of block marked as last */ for (;;) switch (state->mode) { case TYPE: /* determine and dispatch block type */ if (state->last) { BYTEBITS(); state->mode = DONE; break; } NEEDBITS(3); state->last = BITS(1); DROPBITS(1); switch (BITS(2)) { case 0: /* stored block */ Tracev((stderr, "inflate: stored block%s\n", state->last ? " (last)" : "")); state->mode = STORED; break; case 1: /* fixed block */ fixedtables(state); Tracev((stderr, "inflate: fixed codes block%s\n", state->last ? " (last)" : "")); state->mode = LEN; /* decode codes */ break; case 2: /* dynamic block */ Tracev((stderr, "inflate: dynamic codes block%s\n", state->last ? 
" (last)" : "")); state->mode = TABLE; break; case 3: strm->msg = (char *)"invalid block type"; state->mode = BAD; } DROPBITS(2); break; case STORED: /* get and verify stored block length */ BYTEBITS(); /* go to byte boundary */ NEEDBITS(32); if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { strm->msg = (char *)"invalid stored block lengths"; state->mode = BAD; break; } state->length = (unsigned)hold & 0xffff; Tracev((stderr, "inflate: stored length %u\n", state->length)); INITBITS(); /* copy stored block from input to output */ while (state->length != 0) { copy = state->length; PULL(); ROOM(); if (copy > have) copy = have; if (copy > left) copy = left; zmemcpy(put, next, copy); have -= copy; next += copy; left -= copy; put += copy; state->length -= copy; } Tracev((stderr, "inflate: stored end\n")); state->mode = TYPE; break; case TABLE: /* get dynamic table entries descriptor */ NEEDBITS(14); state->nlen = BITS(5) + 257; DROPBITS(5); state->ndist = BITS(5) + 1; DROPBITS(5); state->ncode = BITS(4) + 4; DROPBITS(4); #ifndef PKZIP_BUG_WORKAROUND if (state->nlen > 286 || state->ndist > 30) { strm->msg = (char *)"too many length or distance symbols"; state->mode = BAD; break; } #endif Tracev((stderr, "inflate: table sizes ok\n")); /* get code length code lengths (not a typo) */ state->have = 0; while (state->have < state->ncode) { NEEDBITS(3); state->lens[order[state->have++]] = (unsigned short)BITS(3); DROPBITS(3); } while (state->have < 19) state->lens[order[state->have++]] = 0; state->next = state->codes; state->lencode = (code const FAR *)(state->next); state->lenbits = 7; ret = inflate_table(CODES, state->lens, 19, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid code lengths set"; state->mode = BAD; break; } Tracev((stderr, "inflate: code lengths ok\n")); /* get length and distance code code lengths */ state->have = 0; while (state->have < state->nlen + state->ndist) { for (;;) { here = state->lencode[BITS(state->lenbits)]; 
if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if (here.val < 16) { DROPBITS(here.bits); state->lens[state->have++] = here.val; } else { if (here.val == 16) { NEEDBITS(here.bits + 2); DROPBITS(here.bits); if (state->have == 0) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } len = (unsigned)(state->lens[state->have - 1]); copy = 3 + BITS(2); DROPBITS(2); } else if (here.val == 17) { NEEDBITS(here.bits + 3); DROPBITS(here.bits); len = 0; copy = 3 + BITS(3); DROPBITS(3); } else { NEEDBITS(here.bits + 7); DROPBITS(here.bits); len = 0; copy = 11 + BITS(7); DROPBITS(7); } if (state->have + copy > state->nlen + state->ndist) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } while (copy--) state->lens[state->have++] = (unsigned short)len; } } /* handle error breaks in while */ if (state->mode == BAD) break; /* check for end-of-block code (better have one) */ if (state->lens[256] == 0) { strm->msg = (char *)"invalid code -- missing end-of-block"; state->mode = BAD; break; } /* build code tables -- note: do not change the lenbits or distbits values here (9 and 6) without reading the comments in inftrees.h concerning the ENOUGH constants, which depend on those values */ state->next = state->codes; state->lencode = (code const FAR *)(state->next); state->lenbits = 9; ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid literal/lengths set"; state->mode = BAD; break; } state->distcode = (code const FAR *)(state->next); state->distbits = 6; ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, &(state->next), &(state->distbits), state->work); if (ret) { strm->msg = (char *)"invalid distances set"; state->mode = BAD; break; } Tracev((stderr, "inflate: codes ok\n")); state->mode = LEN; case LEN: /* use inflate_fast() if we have enough input and output */ if (have >= 6 && left >= 258) { RESTORE(); if (state->whave 
< state->wsize) state->whave = state->wsize - left; inflate_fast(strm, state->wsize); LOAD(); break; } /* get a literal, length, or end-of-block code */ for (;;) { here = state->lencode[BITS(state->lenbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if (here.op && (here.op & 0xf0) == 0) { last = here; for (;;) { here = state->lencode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } DROPBITS(here.bits); state->length = (unsigned)here.val; /* process literal */ if (here.op == 0) { Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? "inflate: literal '%c'\n" : "inflate: literal 0x%02x\n", here.val)); ROOM(); *put++ = (unsigned char)(state->length); left--; state->mode = LEN; break; } /* process end of block */ if (here.op & 32) { Tracevv((stderr, "inflate: end of block\n")); state->mode = TYPE; break; } /* invalid code */ if (here.op & 64) { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } /* length code -- get extra bits, if any */ state->extra = (unsigned)(here.op) & 15; if (state->extra != 0) { NEEDBITS(state->extra); state->length += BITS(state->extra); DROPBITS(state->extra); } Tracevv((stderr, "inflate: length %u\n", state->length)); /* get distance code */ for (;;) { here = state->distcode[BITS(state->distbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if ((here.op & 0xf0) == 0) { last = here; for (;;) { here = state->distcode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } DROPBITS(here.bits); if (here.op & 64) { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } state->offset = (unsigned)here.val; /* get distance extra bits, if any */ state->extra = (unsigned)(here.op) & 15; if (state->extra != 0) { NEEDBITS(state->extra); state->offset += BITS(state->extra); DROPBITS(state->extra); } 
if (state->offset > state->wsize - (state->whave < state->wsize ? left : 0)) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } Tracevv((stderr, "inflate: distance %u\n", state->offset)); /* copy match from window to output */ do { ROOM(); copy = state->wsize - state->offset; if (copy < left) { from = put + copy; copy = left - copy; } else { from = put - state->offset; copy = left; } if (copy > state->length) copy = state->length; state->length -= copy; left -= copy; do { *put++ = *from++; } while (--copy); } while (state->length != 0); break; case DONE: /* inflate stream terminated properly -- write leftover output */ ret = Z_STREAM_END; if (left < state->wsize) { if (out(out_desc, state->window, state->wsize - left)) ret = Z_BUF_ERROR; } goto inf_leave; case BAD: ret = Z_DATA_ERROR; goto inf_leave; default: /* can't happen, but makes compilers happy */ ret = Z_STREAM_ERROR; goto inf_leave; } /* Return unused input */ inf_leave: strm->next_in = next; strm->avail_in = have; return ret; } int ZEXPORT inflateBackEnd(strm) z_streamp strm; { if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0) return Z_STREAM_ERROR; ZFREE(strm, strm->state); strm->state = Z_NULL; Tracev((stderr, "inflate: end\n")); return Z_OK; }
gpl-2.0
cile381/H815_kernel
drivers/net/bonding/bond_debugfs.c
2725
3051
#include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/netdevice.h> #include "bonding.h" #include "bond_alb.h" #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS) #include <linux/debugfs.h> #include <linux/seq_file.h> static struct dentry *bonding_debug_root; /* * Show RLB hash table */ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v) { struct bonding *bond = m->private; struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct rlb_client_info *client_info; u32 hash_index; if (bond->params.mode != BOND_MODE_ALB) return 0; seq_printf(m, "SourceIP DestinationIP " "Destination MAC DEV\n"); spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->used_next) { client_info = &(bond_info->rx_hashtbl[hash_index]); seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n", &client_info->ip_src, &client_info->ip_dst, &client_info->mac_dst, client_info->slave->dev->name); } spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); return 0; } static int bond_debug_rlb_hash_open(struct inode *inode, struct file *file) { return single_open(file, bond_debug_rlb_hash_show, inode->i_private); } static const struct file_operations bond_debug_rlb_hash_fops = { .owner = THIS_MODULE, .open = bond_debug_rlb_hash_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; void bond_debug_register(struct bonding *bond) { if (!bonding_debug_root) return; bond->debug_dir = debugfs_create_dir(bond->dev->name, bonding_debug_root); if (!bond->debug_dir) { pr_warning("%s: Warning: failed to register to debugfs\n", bond->dev->name); return; } debugfs_create_file("rlb_hash_table", 0400, bond->debug_dir, bond, &bond_debug_rlb_hash_fops); } void bond_debug_unregister(struct bonding *bond) { if (!bonding_debug_root) return; debugfs_remove_recursive(bond->debug_dir); } void bond_debug_reregister(struct bonding *bond) { struct 
dentry *d; if (!bonding_debug_root) return; d = debugfs_rename(bonding_debug_root, bond->debug_dir, bonding_debug_root, bond->dev->name); if (d) { bond->debug_dir = d; } else { pr_warning("%s: Warning: failed to reregister, " "so just unregister old one\n", bond->dev->name); bond_debug_unregister(bond); } } void bond_create_debugfs(void) { bonding_debug_root = debugfs_create_dir("bonding", NULL); if (!bonding_debug_root) { pr_warning("Warning: Cannot create bonding directory" " in debugfs\n"); } } void bond_destroy_debugfs(void) { debugfs_remove_recursive(bonding_debug_root); bonding_debug_root = NULL; } #else /* !CONFIG_DEBUG_FS */ void bond_debug_register(struct bonding *bond) { } void bond_debug_unregister(struct bonding *bond) { } void bond_debug_reregister(struct bonding *bond) { } void bond_create_debugfs(void) { } void bond_destroy_debugfs(void) { } #endif /* CONFIG_DEBUG_FS */
gpl-2.0
TD-Project/android_kernel_htc_m7
drivers/usb/serial/option.c
2725
75511
/* USB Driver for GSM modems Copyright (C) 2005 Matthias Urlichs <smurf@smurf.noris.de> This driver is free software; you can redistribute it and/or modify it under the terms of Version 2 of the GNU General Public License as published by the Free Software Foundation. Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org> History: see the git log. Work sponsored by: Sigos GmbH, Germany <info@sigos.de> This driver exists because the "normal" serial driver doesn't work too well with GSM modems. Issues: - data loss -- one single Receive URB is not nearly enough - nonstandard flow (Option devices) control - controlling the baud rate doesn't make sense This driver is named "option" because the most common device it's used for is a PC-Card (with an internal OHCI-USB interface, behind which the GSM interface sits), made by Option Inc. Some of the "one port" devices actually exhibit multiple USB instances on the USB bus. This is not a bug, these ports are used for different device features. 
*/ #define DRIVER_VERSION "v0.7.2" #define DRIVER_AUTHOR "Matthias Urlichs <smurf@smurf.noris.de>" #define DRIVER_DESC "USB Driver for GSM modems" #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "usb-wwan.h" /* Function prototypes */ static int option_probe(struct usb_serial *serial, const struct usb_device_id *id); static int option_send_setup(struct usb_serial_port *port); static void option_instat_callback(struct urb *urb); /* Vendor and product IDs */ #define OPTION_VENDOR_ID 0x0AF0 #define OPTION_PRODUCT_COLT 0x5000 #define OPTION_PRODUCT_RICOLA 0x6000 #define OPTION_PRODUCT_RICOLA_LIGHT 0x6100 #define OPTION_PRODUCT_RICOLA_QUAD 0x6200 #define OPTION_PRODUCT_RICOLA_QUAD_LIGHT 0x6300 #define OPTION_PRODUCT_RICOLA_NDIS 0x6050 #define OPTION_PRODUCT_RICOLA_NDIS_LIGHT 0x6150 #define OPTION_PRODUCT_RICOLA_NDIS_QUAD 0x6250 #define OPTION_PRODUCT_RICOLA_NDIS_QUAD_LIGHT 0x6350 #define OPTION_PRODUCT_COBRA 0x6500 #define OPTION_PRODUCT_COBRA_BUS 0x6501 #define OPTION_PRODUCT_VIPER 0x6600 #define OPTION_PRODUCT_VIPER_BUS 0x6601 #define OPTION_PRODUCT_GT_MAX_READY 0x6701 #define OPTION_PRODUCT_FUJI_MODEM_LIGHT 0x6721 #define OPTION_PRODUCT_FUJI_MODEM_GT 0x6741 #define OPTION_PRODUCT_FUJI_MODEM_EX 0x6761 #define OPTION_PRODUCT_KOI_MODEM 0x6800 #define OPTION_PRODUCT_SCORPION_MODEM 0x6901 #define OPTION_PRODUCT_ETNA_MODEM 0x7001 #define OPTION_PRODUCT_ETNA_MODEM_LITE 0x7021 #define OPTION_PRODUCT_ETNA_MODEM_GT 0x7041 #define OPTION_PRODUCT_ETNA_MODEM_EX 0x7061 #define OPTION_PRODUCT_ETNA_KOI_MODEM 0x7100 #define OPTION_PRODUCT_GTM380_MODEM 0x7201 #define HUAWEI_VENDOR_ID 0x12D1 #define HUAWEI_PRODUCT_E600 0x1001 #define HUAWEI_PRODUCT_E220 0x1003 #define HUAWEI_PRODUCT_E220BIS 0x1004 #define HUAWEI_PRODUCT_E1401 0x1401 #define HUAWEI_PRODUCT_E1402 0x1402 
#define HUAWEI_PRODUCT_E1403 0x1403 #define HUAWEI_PRODUCT_E1404 0x1404 #define HUAWEI_PRODUCT_E1405 0x1405 #define HUAWEI_PRODUCT_E1406 0x1406 #define HUAWEI_PRODUCT_E1407 0x1407 #define HUAWEI_PRODUCT_E1408 0x1408 #define HUAWEI_PRODUCT_E1409 0x1409 #define HUAWEI_PRODUCT_E140A 0x140A #define HUAWEI_PRODUCT_E140B 0x140B #define HUAWEI_PRODUCT_E140C 0x140C #define HUAWEI_PRODUCT_E140D 0x140D #define HUAWEI_PRODUCT_E140E 0x140E #define HUAWEI_PRODUCT_E140F 0x140F #define HUAWEI_PRODUCT_E1410 0x1410 #define HUAWEI_PRODUCT_E1411 0x1411 #define HUAWEI_PRODUCT_E1412 0x1412 #define HUAWEI_PRODUCT_E1413 0x1413 #define HUAWEI_PRODUCT_E1414 0x1414 #define HUAWEI_PRODUCT_E1415 0x1415 #define HUAWEI_PRODUCT_E1416 0x1416 #define HUAWEI_PRODUCT_E1417 0x1417 #define HUAWEI_PRODUCT_E1418 0x1418 #define HUAWEI_PRODUCT_E1419 0x1419 #define HUAWEI_PRODUCT_E141A 0x141A #define HUAWEI_PRODUCT_E141B 0x141B #define HUAWEI_PRODUCT_E141C 0x141C #define HUAWEI_PRODUCT_E141D 0x141D #define HUAWEI_PRODUCT_E141E 0x141E #define HUAWEI_PRODUCT_E141F 0x141F #define HUAWEI_PRODUCT_E1420 0x1420 #define HUAWEI_PRODUCT_E1421 0x1421 #define HUAWEI_PRODUCT_E1422 0x1422 #define HUAWEI_PRODUCT_E1423 0x1423 #define HUAWEI_PRODUCT_E1424 0x1424 #define HUAWEI_PRODUCT_E1425 0x1425 #define HUAWEI_PRODUCT_E1426 0x1426 #define HUAWEI_PRODUCT_E1427 0x1427 #define HUAWEI_PRODUCT_E1428 0x1428 #define HUAWEI_PRODUCT_E1429 0x1429 #define HUAWEI_PRODUCT_E142A 0x142A #define HUAWEI_PRODUCT_E142B 0x142B #define HUAWEI_PRODUCT_E142C 0x142C #define HUAWEI_PRODUCT_E142D 0x142D #define HUAWEI_PRODUCT_E142E 0x142E #define HUAWEI_PRODUCT_E142F 0x142F #define HUAWEI_PRODUCT_E1430 0x1430 #define HUAWEI_PRODUCT_E1431 0x1431 #define HUAWEI_PRODUCT_E1432 0x1432 #define HUAWEI_PRODUCT_E1433 0x1433 #define HUAWEI_PRODUCT_E1434 0x1434 #define HUAWEI_PRODUCT_E1435 0x1435 #define HUAWEI_PRODUCT_E1436 0x1436 #define HUAWEI_PRODUCT_E1437 0x1437 #define HUAWEI_PRODUCT_E1438 0x1438 #define HUAWEI_PRODUCT_E1439 0x1439 #define 
HUAWEI_PRODUCT_E143A 0x143A #define HUAWEI_PRODUCT_E143B 0x143B #define HUAWEI_PRODUCT_E143C 0x143C #define HUAWEI_PRODUCT_E143D 0x143D #define HUAWEI_PRODUCT_E143E 0x143E #define HUAWEI_PRODUCT_E143F 0x143F #define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K3765 0x1465 #define HUAWEI_PRODUCT_E14AC 0x14AC #define HUAWEI_PRODUCT_K3806 0x14AE #define HUAWEI_PRODUCT_K4605 0x14C6 #define HUAWEI_PRODUCT_K3770 0x14C9 #define HUAWEI_PRODUCT_K3771 0x14CA #define HUAWEI_PRODUCT_K4510 0x14CB #define HUAWEI_PRODUCT_K4511 0x14CC #define HUAWEI_PRODUCT_ETS1220 0x1803 #define HUAWEI_PRODUCT_E353 0x1506 #define HUAWEI_PRODUCT_E173S 0x1C05 #define QUANTA_VENDOR_ID 0x0408 #define QUANTA_PRODUCT_Q101 0xEA02 #define QUANTA_PRODUCT_Q111 0xEA03 #define QUANTA_PRODUCT_GLX 0xEA04 #define QUANTA_PRODUCT_GKE 0xEA05 #define QUANTA_PRODUCT_GLE 0xEA06 #define NOVATELWIRELESS_VENDOR_ID 0x1410 /* YISO PRODUCTS */ #define YISO_VENDOR_ID 0x0EAB #define YISO_PRODUCT_U893 0xC893 /* * NOVATEL WIRELESS PRODUCTS * * Note from Novatel Wireless: * If your Novatel modem does not work on linux, don't * change the option module, but check our website. 
If * that does not help, contact ddeschepper@nvtl.com */ /* MERLIN EVDO PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_V640 0x1100 #define NOVATELWIRELESS_PRODUCT_V620 0x1110 #define NOVATELWIRELESS_PRODUCT_V740 0x1120 #define NOVATELWIRELESS_PRODUCT_V720 0x1130 /* MERLIN HSDPA/HSPA PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_U730 0x1400 #define NOVATELWIRELESS_PRODUCT_U740 0x1410 #define NOVATELWIRELESS_PRODUCT_U870 0x1420 #define NOVATELWIRELESS_PRODUCT_XU870 0x1430 #define NOVATELWIRELESS_PRODUCT_X950D 0x1450 /* EXPEDITE PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_EV620 0x2100 #define NOVATELWIRELESS_PRODUCT_ES720 0x2110 #define NOVATELWIRELESS_PRODUCT_E725 0x2120 #define NOVATELWIRELESS_PRODUCT_ES620 0x2130 #define NOVATELWIRELESS_PRODUCT_EU730 0x2400 #define NOVATELWIRELESS_PRODUCT_EU740 0x2410 #define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 /* OVATION PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 /* * Note from Novatel Wireless: * All PID in the 5xxx range are currently reserved for * auto-install CDROMs, and should not be added to this * module. 
* * #define NOVATELWIRELESS_PRODUCT_U727 0x5010 * #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 */ #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 #define NOVATELWIRELESS_PRODUCT_MC780 0x6010 #define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0x6000 #define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0x6001 #define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0x7000 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0x7001 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3 0x7003 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4 0x7004 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5 0x7005 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6 0x7006 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7 0x7007 #define NOVATELWIRELESS_PRODUCT_MC996D 0x7030 #define NOVATELWIRELESS_PRODUCT_MF3470 0x7041 #define NOVATELWIRELESS_PRODUCT_MC547 0x7042 #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0x8000 #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 #define NOVATELWIRELESS_PRODUCT_G1 0xA001 #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002 #define NOVATELWIRELESS_PRODUCT_G2 0xA010 /* AMOI PRODUCTS */ #define AMOI_VENDOR_ID 0x1614 #define AMOI_PRODUCT_H01 0x0800 #define AMOI_PRODUCT_H01A 0x7002 #define AMOI_PRODUCT_H02 0x0802 #define AMOI_PRODUCT_SKYPEPHONE_S2 0x0407 #define DELL_VENDOR_ID 0x413C /* Dell modems */ #define DELL_PRODUCT_5700_MINICARD 0x8114 #define DELL_PRODUCT_5500_MINICARD 0x8115 #define DELL_PRODUCT_5505_MINICARD 0x8116 #define DELL_PRODUCT_5700_EXPRESSCARD 0x8117 #define DELL_PRODUCT_5510_EXPRESSCARD 0x8118 #define DELL_PRODUCT_5700_MINICARD_SPRINT 0x8128 #define DELL_PRODUCT_5700_MINICARD_TELUS 0x8129 #define DELL_PRODUCT_5720_MINICARD_VZW 0x8133 #define DELL_PRODUCT_5720_MINICARD_SPRINT 0x8134 #define DELL_PRODUCT_5720_MINICARD_TELUS 0x8135 #define DELL_PRODUCT_5520_MINICARD_CINGULAR 0x8136 #define DELL_PRODUCT_5520_MINICARD_GENERIC_L 0x8137 
#define DELL_PRODUCT_5520_MINICARD_GENERIC_I 0x8138 #define DELL_PRODUCT_5730_MINICARD_SPRINT 0x8180 #define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181 #define DELL_PRODUCT_5730_MINICARD_VZW 0x8182 #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da #define KYOCERA_PRODUCT_KPC680 0x180a #define ANYDATA_VENDOR_ID 0x16d5 #define ANYDATA_PRODUCT_ADU_620UW 0x6202 #define ANYDATA_PRODUCT_ADU_E100A 0x6501 #define ANYDATA_PRODUCT_ADU_500A 0x6502 #define AXESSTEL_VENDOR_ID 0x1726 #define AXESSTEL_PRODUCT_MV110H 0x1000 #define BANDRICH_VENDOR_ID 0x1A8D #define BANDRICH_PRODUCT_C100_1 0x1002 #define BANDRICH_PRODUCT_C100_2 0x1003 #define BANDRICH_PRODUCT_1004 0x1004 #define BANDRICH_PRODUCT_1005 0x1005 #define BANDRICH_PRODUCT_1006 0x1006 #define BANDRICH_PRODUCT_1007 0x1007 #define BANDRICH_PRODUCT_1008 0x1008 #define BANDRICH_PRODUCT_1009 0x1009 #define BANDRICH_PRODUCT_100A 0x100a #define BANDRICH_PRODUCT_100B 0x100b #define BANDRICH_PRODUCT_100C 0x100c #define BANDRICH_PRODUCT_100D 0x100d #define BANDRICH_PRODUCT_100E 0x100e #define BANDRICH_PRODUCT_100F 0x100f #define BANDRICH_PRODUCT_1010 0x1010 #define BANDRICH_PRODUCT_1011 0x1011 #define BANDRICH_PRODUCT_1012 0x1012 #define QUALCOMM_VENDOR_ID 0x05C6 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6008 0x6008 #define CMOTECH_PRODUCT_6280 0x6280 #define TELIT_VENDOR_ID 0x1bc7 #define TELIT_PRODUCT_UC864E 0x1003 #define TELIT_PRODUCT_UC864G 0x1004 #define TELIT_PRODUCT_CC864_DUAL 0x1005 #define TELIT_PRODUCT_CC864_SINGLE 0x1006 #define TELIT_PRODUCT_DE910_DUAL 0x1010 /* ZTE PRODUCTS */ #define ZTE_VENDOR_ID 0x19d2 #define ZTE_PRODUCT_MF622 0x0001 #define ZTE_PRODUCT_MF628 0x0015 #define ZTE_PRODUCT_MF626 0x0031 #define ZTE_PRODUCT_CDMA_TECH 0xfffe #define ZTE_PRODUCT_AC8710 0xfff1 #define ZTE_PRODUCT_AC2726 0xfff5 #define ZTE_PRODUCT_AC8710T 0xffff #define ZTE_PRODUCT_MC2718 0xffe8 #define ZTE_PRODUCT_AD3812 0xffeb #define ZTE_PRODUCT_MC2716 0xffed #define BENQ_VENDOR_ID 0x04a5 #define 
BENQ_PRODUCT_H10 0x4068 #define DLINK_VENDOR_ID 0x1186 #define DLINK_PRODUCT_DWM_652 0x3e04 #define DLINK_PRODUCT_DWM_652_U5 0xce16 #define DLINK_PRODUCT_DWM_652_U5A 0xce1e #define QISDA_VENDOR_ID 0x1da5 #define QISDA_PRODUCT_H21_4512 0x4512 #define QISDA_PRODUCT_H21_4523 0x4523 #define QISDA_PRODUCT_H20_4515 0x4515 #define QISDA_PRODUCT_H20_4518 0x4518 #define QISDA_PRODUCT_H20_4519 0x4519 /* TLAYTECH PRODUCTS */ #define TLAYTECH_VENDOR_ID 0x20B9 #define TLAYTECH_PRODUCT_TEU800 0x1682 /* TOSHIBA PRODUCTS */ #define TOSHIBA_VENDOR_ID 0x0930 #define TOSHIBA_PRODUCT_HSDPA_MINICARD 0x1302 #define TOSHIBA_PRODUCT_G450 0x0d45 #define ALINK_VENDOR_ID 0x1e0e #define ALINK_PRODUCT_PH300 0x9100 #define ALINK_PRODUCT_3GU 0x9200 /* ALCATEL PRODUCTS */ #define ALCATEL_VENDOR_ID 0x1bbb #define ALCATEL_PRODUCT_X060S_X200 0x0000 #define PIRELLI_VENDOR_ID 0x1266 #define PIRELLI_PRODUCT_C100_1 0x1002 #define PIRELLI_PRODUCT_C100_2 0x1003 #define PIRELLI_PRODUCT_1004 0x1004 #define PIRELLI_PRODUCT_1005 0x1005 #define PIRELLI_PRODUCT_1006 0x1006 #define PIRELLI_PRODUCT_1007 0x1007 #define PIRELLI_PRODUCT_1008 0x1008 #define PIRELLI_PRODUCT_1009 0x1009 #define PIRELLI_PRODUCT_100A 0x100a #define PIRELLI_PRODUCT_100B 0x100b #define PIRELLI_PRODUCT_100C 0x100c #define PIRELLI_PRODUCT_100D 0x100d #define PIRELLI_PRODUCT_100E 0x100e #define PIRELLI_PRODUCT_100F 0x100f #define PIRELLI_PRODUCT_1011 0x1011 #define PIRELLI_PRODUCT_1012 0x1012 /* Airplus products */ #define AIRPLUS_VENDOR_ID 0x1011 #define AIRPLUS_PRODUCT_MCD650 0x3198 /* Longcheer/Longsung vendor ID; makes whitelabel devices that * many other vendors like 4G Systems, Alcatel, ChinaBird, * Mobidata, etc sell under their own brand names. */ #define LONGCHEER_VENDOR_ID 0x1c9e /* 4G Systems products */ /* This is the 4G XS Stick W14 a.k.a. 
Mobilcom Debitel Surf-Stick * * It seems to contain a Qualcomm QSC6240/6290 chipset */ #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 /* Zoom */ #define ZOOM_PRODUCT_4597 0x9607 /* Haier products */ #define HAIER_VENDOR_ID 0x201e #define HAIER_PRODUCT_CE100 0x2009 /* Cinterion (formerly Siemens) products */ #define SIEMENS_VENDOR_ID 0x0681 #define CINTERION_VENDOR_ID 0x1e2d #define CINTERION_PRODUCT_HC25_MDM 0x0047 #define CINTERION_PRODUCT_HC25_MDMNET 0x0040 #define CINTERION_PRODUCT_HC28_MDM 0x004C #define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */ #define CINTERION_PRODUCT_EU3_E 0x0051 #define CINTERION_PRODUCT_EU3_P 0x0052 #define CINTERION_PRODUCT_PH8 0x0053 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c #define OLIVETTI_PRODUCT_OLICARD100 0xc000 /* Celot products */ #define CELOT_VENDOR_ID 0x211f #define CELOT_PRODUCT_CT680M 0x6801 /* ONDA Communication vendor id */ #define ONDA_VENDOR_ID 0x1ee8 /* ONDA MT825UP HSDPA 14.2 modem */ #define ONDA_MT825UP 0x000b /* Samsung products */ #define SAMSUNG_VENDOR_ID 0x04e8 #define SAMSUNG_PRODUCT_GT_B3730 0x6889 /* YUGA products www.yuga-info.com*/ #define YUGA_VENDOR_ID 0x257A #define YUGA_PRODUCT_CEM600 0x1601 #define YUGA_PRODUCT_CEM610 0x1602 #define YUGA_PRODUCT_CEM500 0x1603 #define YUGA_PRODUCT_CEM510 0x1604 #define YUGA_PRODUCT_CEM800 0x1605 #define YUGA_PRODUCT_CEM900 0x1606 #define YUGA_PRODUCT_CEU818 0x1607 #define YUGA_PRODUCT_CEU816 0x1608 #define YUGA_PRODUCT_CEU828 0x1609 #define YUGA_PRODUCT_CEU826 0x160A #define YUGA_PRODUCT_CEU518 0x160B #define YUGA_PRODUCT_CEU516 0x160C #define YUGA_PRODUCT_CEU528 0x160D #define YUGA_PRODUCT_CEU526 0x160F #define YUGA_PRODUCT_CWM600 0x2601 #define YUGA_PRODUCT_CWM610 0x2602 #define YUGA_PRODUCT_CWM500 0x2603 #define YUGA_PRODUCT_CWM510 0x2604 #define YUGA_PRODUCT_CWM800 0x2605 #define YUGA_PRODUCT_CWM900 0x2606 #define YUGA_PRODUCT_CWU718 0x2607 #define YUGA_PRODUCT_CWU716 0x2608 #define YUGA_PRODUCT_CWU728 0x2609 #define YUGA_PRODUCT_CWU726 
0x260A
#define YUGA_PRODUCT_CWU518			0x260B
#define YUGA_PRODUCT_CWU516			0x260C
#define YUGA_PRODUCT_CWU528			0x260D
#define YUGA_PRODUCT_CWU526			0x260F

/*
 * NOTE(review): the CLM/CLU product IDs below reuse the exact values of
 * the CWM/CWU IDs above (0x2601..0x260F) — presumably aliases for the
 * same hardware under different model names; confirm against vendor
 * documentation before relying on the distinction.
 */
#define YUGA_PRODUCT_CLM600			0x2601
#define YUGA_PRODUCT_CLM610			0x2602
#define YUGA_PRODUCT_CLM500			0x2603
#define YUGA_PRODUCT_CLM510			0x2604
#define YUGA_PRODUCT_CLM800			0x2605
#define YUGA_PRODUCT_CLM900			0x2606
#define YUGA_PRODUCT_CLU718			0x2607
#define YUGA_PRODUCT_CLU716			0x2608
#define YUGA_PRODUCT_CLU728			0x2609
#define YUGA_PRODUCT_CLU726			0x260A
#define YUGA_PRODUCT_CLU518			0x260B
#define YUGA_PRODUCT_CLU516			0x260C
#define YUGA_PRODUCT_CLU528			0x260D
#define YUGA_PRODUCT_CLU526			0x260F

/* Viettel products */
#define VIETTEL_VENDOR_ID			0x2262
#define VIETTEL_PRODUCT_VT1000			0x0002

/* ZD Incorporated */
#define ZD_VENDOR_ID				0x0685
#define ZD_PRODUCT_7000				0x7000

/* LG products */
#define LG_VENDOR_ID				0x1004
#define LG_PRODUCT_L02C				0x618f

/* MediaTek products */
#define MEDIATEK_VENDOR_ID			0x0e8d

/* some device interfaces need special handling for a number of reasons */
enum option_blacklist_reason {
	OPTION_BLACKLIST_NONE = 0,
	OPTION_BLACKLIST_SENDSETUP = 1,
	OPTION_BLACKLIST_RESERVED_IF = 2
};

/*
 * NOTE(review): MAX_BL_NUM's use is not visible in this chunk —
 * presumably an upper bound on blacklisted interface numbers; verify
 * against the code that consumes these bitfields.
 */
#define MAX_BL_NUM  8

/*
 * Per-device blacklist descriptor.  Table entries in option_ids[]
 * reference one of these via their .driver_info field.  Each field is a
 * bitmask indexed by USB interface number.
 */
struct option_blacklist_info {
	/* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */
	const unsigned long sendsetup;
	/* bitfield of interface numbers for OPTION_BLACKLIST_RESERVED_IF */
	const unsigned long reserved;
};

/* send setup on interfaces 0 and 1 */
static const struct option_blacklist_info four_g_w14_blacklist = {
	.sendsetup = BIT(0) | BIT(1),
};

/* send setup on interfaces 0 and 1 */
static const struct option_blacklist_info alcatel_x200_blacklist = {
	.sendsetup = BIT(0) | BIT(1),
};

/* send setup on interfaces 0 and 1 */
static const struct option_blacklist_info zte_0037_blacklist = {
	.sendsetup = BIT(0) | BIT(1),
};

/* send setup on interfaces 0-2; interface 4 is reserved */
static const struct option_blacklist_info zte_k3765_z_blacklist = {
	.sendsetup = BIT(0) | BIT(1) | BIT(2),
	.reserved = BIT(4),
};

/* send setup on interfaces 0-2 */
static const struct option_blacklist_info zte_ad3812_z_blacklist = {
	.sendsetup = BIT(0) | BIT(1) | BIT(2),
};

static const struct option_blacklist_info
zte_mc2718_z_blacklist = {
	/* send setup on interfaces 1-4 */
	.sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
};

/* send setup on interfaces 1-3 */
static const struct option_blacklist_info zte_mc2716_z_blacklist = {
	.sendsetup = BIT(1) | BIT(2) | BIT(3),
};

/* interfaces 1 and 2 are reserved (not claimed by this driver) */
static const struct option_blacklist_info huawei_cdc12_blacklist = {
	.reserved = BIT(1) | BIT(2),
};

/*
 * Generic "reserve interface N" descriptors, shared by many option_ids[]
 * entries below.
 */
static const struct option_blacklist_info net_intf1_blacklist = {
	.reserved = BIT(1),
};

static const struct option_blacklist_info net_intf3_blacklist = {
	.reserved = BIT(3),
};

static const struct option_blacklist_info net_intf4_blacklist = {
	.reserved = BIT(4),
};

static const struct option_blacklist_info net_intf5_blacklist = {
	.reserved = BIT(5),
};

/* send setup on interfaces 0 and 1; interface 4 is reserved */
static const struct option_blacklist_info zte_mf626_blacklist = {
	.sendsetup = BIT(0) | BIT(1),
	.reserved = BIT(4),
};

/*
 * USB device ID match table.  Entries with a non-zero .driver_info carry
 * a pointer to a struct option_blacklist_info describing per-interface
 * special handling for that device.
 */
static const struct usb_device_id option_ids[] = {
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_LIGHT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_QUAD) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_QUAD_LIGHT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_LIGHT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_QUAD) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_QUAD_LIGHT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COBRA) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COBRA_BUS) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_VIPER) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_VIPER_BUS) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GT_MAX_READY) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_LIGHT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_GT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_EX) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_KOI_MODEM) },
	{ USB_DEVICE(OPTION_VENDOR_ID,
OPTION_PRODUCT_SCORPION_MODEM) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_LITE) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_GT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_EX) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_MODEM) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GTM380_MODEM) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q101) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q111) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 
HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 
NOVATELWIRELESS_PRODUCT_XU870) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 
NOVATELWIRELESS_PRODUCT_MF3470) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_SKYPEPHONE_S2) }, { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5505_MINICARD) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_EXPRESSCARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5510_EXPRESSCARD) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_SPRINT) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_TELUS) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_VZW) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, 
DELL_PRODUCT_5720_MINICARD_SPRINT) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_TELUS) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_CINGULAR) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_L) }, /* Dell Wireless HSDPA 5520 */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_I) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D) }, { 
USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mf626_blacklist }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_0037_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0079, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0088, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0089, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0090, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0091, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0092, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0093, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0095, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff), .driver_info = 
(kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff), .driver_info = 
(kernel_ulong_t)&zte_k3765_z_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist 
}, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5A) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4518) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) }, { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) }, { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist }, { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, /* Pirelli */ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1004)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1005)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1006)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1007)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1008)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1009)}, { 
USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100A)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100B) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100C) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100D) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100E) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, /* Cinterion */ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM510) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM800) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM900) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU818) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU816) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU828) }, { USB_DEVICE(YUGA_VENDOR_ID, 
YUGA_PRODUCT_CEU826) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU518) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU516) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU528) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU526) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM600) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM610) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM500) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM510) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM800) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM900) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU718) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU716) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU728) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU726) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU518) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU516) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU528) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU526) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM600) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM610) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM500) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM510) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM800) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM900) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU718) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU716) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU728) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU726) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU518) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) }, { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */ { 
USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); static struct usb_driver option_driver = { .name = "option", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, #ifdef CONFIG_PM .suspend = usb_serial_suspend, .resume = usb_serial_resume, .supports_autosuspend = 1, #endif .id_table = option_ids, }; /* The card has three separate interfaces, which the serial driver * recognizes separately, thus num_port=1. */ static struct usb_serial_driver option_1port_device = { .driver = { .owner = THIS_MODULE, .name = "option1", }, .description = "GSM modem (1-port)", .id_table = option_ids, .num_ports = 1, .probe = option_probe, .open = usb_wwan_open, .close = usb_wwan_close, .dtr_rts = usb_wwan_dtr_rts, .write = usb_wwan_write, .write_room = usb_wwan_write_room, .chars_in_buffer = usb_wwan_chars_in_buffer, .set_termios = usb_wwan_set_termios, .tiocmget = usb_wwan_tiocmget, .tiocmset = usb_wwan_tiocmset, .ioctl = usb_wwan_ioctl, .attach = usb_wwan_startup, .disconnect = usb_wwan_disconnect, .release = usb_wwan_release, .read_int_callback = option_instat_callback, #ifdef CONFIG_PM .suspend = usb_wwan_suspend, .resume = usb_wwan_resume, #endif }; static struct usb_serial_driver * const serial_drivers[] = { &option_1port_device, NULL }; static bool debug; /* per port private data */ #define N_IN_URB 4 #define N_OUT_URB 4 #define IN_BUFLEN 4096 #define OUT_BUFLEN 4096 struct option_port_private { /* Input endpoints and buffer for this port */ struct urb *in_urbs[N_IN_URB]; u8 *in_buffer[N_IN_URB]; /* Output endpoints and buffer for this port */ struct urb *out_urbs[N_OUT_URB]; u8 
*out_buffer[N_OUT_URB]; unsigned long out_busy; /* Bit vector of URBs in use */ int opened; struct usb_anchor delayed; /* Settings for the port */ int rts_state; /* Handshaking pins (outputs) */ int dtr_state; int cts_state; /* Handshaking pins (inputs) */ int dsr_state; int dcd_state; int ri_state; unsigned long tx_start_time[N_OUT_URB]; }; module_usb_serial_driver(option_driver, serial_drivers); static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason, const struct option_blacklist_info *blacklist) { unsigned long num; const unsigned long *intf_list; if (blacklist) { if (reason == OPTION_BLACKLIST_SENDSETUP) intf_list = &blacklist->sendsetup; else if (reason == OPTION_BLACKLIST_RESERVED_IF) intf_list = &blacklist->reserved; else { BUG_ON(reason); return false; } for_each_set_bit(num, intf_list, MAX_BL_NUM + 1) { if (num == ifnum) return true; } } return false; } static int option_probe(struct usb_serial *serial, const struct usb_device_id *id) { struct usb_wwan_intf_private *data; /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */ if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID && serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 && serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8) return -ENODEV; /* Bandrich modem and AT command interface is 0xff */ if ((serial->dev->descriptor.idVendor == BANDRICH_VENDOR_ID || serial->dev->descriptor.idVendor == PIRELLI_VENDOR_ID) && serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff) return -ENODEV; /* Don't bind reserved interfaces (like network ones) which often have * the same class/subclass/protocol as the serial interfaces. Look at * the Windows driver .INF files for reserved interface numbers. 
*/ if (is_blacklisted( serial->interface->cur_altsetting->desc.bInterfaceNumber, OPTION_BLACKLIST_RESERVED_IF, (const struct option_blacklist_info *) id->driver_info)) return -ENODEV; /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */ if (serial->dev->descriptor.idVendor == SAMSUNG_VENDOR_ID && serial->dev->descriptor.idProduct == SAMSUNG_PRODUCT_GT_B3730 && serial->interface->cur_altsetting->desc.bInterfaceClass != USB_CLASS_CDC_DATA) return -ENODEV; data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL); if (!data) return -ENOMEM; data->send_setup = option_send_setup; spin_lock_init(&data->susp_lock); data->private = (void *)id->driver_info; return 0; } static void option_instat_callback(struct urb *urb) { int err; int status = urb->status; struct usb_serial_port *port = urb->context; struct option_port_private *portdata = usb_get_serial_port_data(port); dbg("%s", __func__); dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); if (status == 0) { struct usb_ctrlrequest *req_pkt = (struct usb_ctrlrequest *)urb->transfer_buffer; if (!req_pkt) { dbg("%s: NULL req_pkt", __func__); return; } if ((req_pkt->bRequestType == 0xA1) && (req_pkt->bRequest == 0x20)) { int old_dcd_state; unsigned char signals = *((unsigned char *) urb->transfer_buffer + sizeof(struct usb_ctrlrequest)); dbg("%s: signal x%x", __func__, signals); old_dcd_state = portdata->dcd_state; portdata->cts_state = 1; portdata->dcd_state = ((signals & 0x01) ? 1 : 0); portdata->dsr_state = ((signals & 0x02) ? 1 : 0); portdata->ri_state = ((signals & 0x08) ? 
1 : 0); if (old_dcd_state && !portdata->dcd_state) { struct tty_struct *tty = tty_port_tty_get(&port->port); if (tty && !C_CLOCAL(tty)) tty_hangup(tty); tty_kref_put(tty); } } else { dbg("%s: type %x req %x", __func__, req_pkt->bRequestType, req_pkt->bRequest); } } else err("%s: error %d", __func__, status); /* Resubmit urb so we continue receiving IRQ data */ if (status != -ESHUTDOWN && status != -ENOENT) { err = usb_submit_urb(urb, GFP_ATOMIC); if (err) dbg("%s: resubmit intr urb failed. (%d)", __func__, err); } } /** send RTS/DTR state to the port. * * This is exactly the same as SET_CONTROL_LINE_STATE from the PSTN * CDC. */ static int option_send_setup(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct usb_wwan_intf_private *intfdata = (struct usb_wwan_intf_private *) serial->private; struct option_port_private *portdata; int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; int val = 0; dbg("%s", __func__); if (is_blacklisted(ifNum, OPTION_BLACKLIST_SENDSETUP, (struct option_blacklist_info *) intfdata->private)) { dbg("No send_setup on blacklisted interface #%d\n", ifNum); return -EIO; } portdata = usb_get_serial_port_data(port); if (portdata->dtr_state) val |= 0x01; if (portdata->rts_state) val |= 0x02; return usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT); } MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug messages");
gpl-2.0
CyanogenMod/sony-kernel-msm8960
arch/arm/mach-ks8695/irq.c
2981
4284
/*
 * arch/arm/mach-ks8695/irq.c
 *
 * Interrupt controller setup for the Micrel KS8695 SoC: masking/unmasking
 * via the INTEN register, acking via INTST, and trigger-type selection for
 * the four external GPIO interrupt lines via the IOPC register.
 *
 * Copyright (C) 2006 Ben Dooks <ben@simtec.co.uk>
 * Copyright (C) 2006 Simtec Electronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sysdev.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <asm/irq.h>

#include <asm/mach/irq.h>

#include <mach/regs-irq.h>
#include <mach/regs-gpio.h>

/*
 * Mask (disable) an interrupt: clear its bit in the INTEN enable register.
 * The IRQ number maps 1:1 onto the register bit position.
 */
static void ks8695_irq_mask(struct irq_data *d)
{
	unsigned long inten;

	inten = __raw_readl(KS8695_IRQ_VA + KS8695_INTEN);
	inten &= ~(1 << d->irq);
	__raw_writel(inten, KS8695_IRQ_VA + KS8695_INTEN);
}

/*
 * Unmask (enable) an interrupt: set its bit in the INTEN enable register.
 */
static void ks8695_irq_unmask(struct irq_data *d)
{
	unsigned long inten;

	inten = __raw_readl(KS8695_IRQ_VA + KS8695_INTEN);
	inten |= (1 << d->irq);
	__raw_writel(inten, KS8695_IRQ_VA + KS8695_INTEN);
}

/*
 * Acknowledge an interrupt: write its bit to the INTST status register
 * (write-one-to-clear semantics, as used for edge-triggered sources).
 */
static void ks8695_irq_ack(struct irq_data *d)
{
	__raw_writel((1 << d->irq), KS8695_IRQ_VA + KS8695_INTST);
}

/* Forward declarations: set_type below switches an IRQ between these. */
static struct irq_chip ks8695_irq_level_chip;
static struct irq_chip ks8695_irq_edge_chip;

/*
 * Configure the trigger type of one of the four external interrupt pins
 * (EXTERN0..EXTERN3). Only those IRQs are programmable; anything else
 * returns -EINVAL. The trigger mode is encoded into the GPIO IOPC
 * register, and the flow handler/chip is switched to match level vs.
 * edge behaviour.
 */
static int ks8695_irq_set_type(struct irq_data *d, unsigned int type)
{
	unsigned long ctrl, mode;
	unsigned short level_triggered = 0;

	ctrl = __raw_readl(KS8695_GPIO_VA + KS8695_IOPC);

	/* Translate the generic trigger type into the IOPC mode field. */
	switch (type) {
	case IRQ_TYPE_LEVEL_HIGH:
		mode = IOPC_TM_HIGH;
		level_triggered = 1;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		mode = IOPC_TM_LOW;
		level_triggered = 1;
		break;
	case IRQ_TYPE_EDGE_RISING:
		mode = IOPC_TM_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		mode = IOPC_TM_FALLING;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		mode = IOPC_TM_EDGE;
		break;
	default:
		return -EINVAL;
	}

	/* Clear the pin's old trigger-mode bits, then program the new mode. */
	switch (d->irq) {
	case KS8695_IRQ_EXTERN0:
		ctrl &= ~IOPC_IOEINT0TM;
		ctrl |= IOPC_IOEINT0_MODE(mode);
		break;
	case KS8695_IRQ_EXTERN1:
		ctrl &= ~IOPC_IOEINT1TM;
		ctrl |= IOPC_IOEINT1_MODE(mode);
		break;
	case KS8695_IRQ_EXTERN2:
		ctrl &= ~IOPC_IOEINT2TM;
		ctrl |= IOPC_IOEINT2_MODE(mode);
		break;
	case KS8695_IRQ_EXTERN3:
		ctrl &= ~IOPC_IOEINT3TM;
		ctrl |= IOPC_IOEINT3_MODE(mode);
		break;
	default:
		return -EINVAL;
	}

	/* Pick the matching chip + flow handler for the new trigger type. */
	if (level_triggered) {
		irq_set_chip_and_handler(d->irq, &ks8695_irq_level_chip,
					 handle_level_irq);
	} else {
		irq_set_chip_and_handler(d->irq, &ks8695_irq_edge_chip,
					 handle_edge_irq);
	}

	__raw_writel(ctrl, KS8695_GPIO_VA + KS8695_IOPC);
	return 0;
}

/*
 * Level-triggered sources: "ack" is a mask, since a level IRQ cannot be
 * cleared at the controller while the line is still asserted.
 */
static struct irq_chip ks8695_irq_level_chip = {
	.irq_ack	= ks8695_irq_mask,
	.irq_mask	= ks8695_irq_mask,
	.irq_unmask	= ks8695_irq_unmask,
	.irq_set_type	= ks8695_irq_set_type,
};

/* Edge-triggered sources: ack really clears the latched status bit. */
static struct irq_chip ks8695_irq_edge_chip = {
	.irq_ack	= ks8695_irq_ack,
	.irq_mask	= ks8695_irq_mask,
	.irq_unmask	= ks8695_irq_unmask,
	.irq_set_type	= ks8695_irq_set_type,
};

/*
 * Board-init entry point: disable every source, then install the proper
 * chip/handler pair for each IRQ and mark it valid.
 */
void __init ks8695_init_irq(void)
{
	unsigned int irq;

	/* Disable all interrupts initially */
	__raw_writel(0, KS8695_IRQ_VA + KS8695_INTMC);
	__raw_writel(0, KS8695_IRQ_VA + KS8695_INTEN);

	for (irq = 0; irq < NR_IRQS; irq++) {
		switch (irq) {
		/* Level-triggered interrupts */
		case KS8695_IRQ_BUS_ERROR:
		case KS8695_IRQ_UART_MODEM_STATUS:
		case KS8695_IRQ_UART_LINE_STATUS:
		case KS8695_IRQ_UART_RX:
		case KS8695_IRQ_COMM_TX:
		case KS8695_IRQ_COMM_RX:
			irq_set_chip_and_handler(irq, &ks8695_irq_level_chip,
						 handle_level_irq);
			break;

		/* Edge-triggered interrupts */
		default:
			/* clear pending bit */
			ks8695_irq_ack(irq_get_irq_data(irq));
			irq_set_chip_and_handler(irq, &ks8695_irq_edge_chip,
						 handle_edge_irq);
		}

		set_irq_flags(irq, IRQF_VALID);
	}
}
gpl-2.0
diaevd/android_kernel_samsung_sm-t325
drivers/xen/xen-pciback/pciback_ops.c
3493
10634
/*
 * PCI Backend Operations - respond to PCI requests from Frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <xen/events.h>
#include <linux/sched.h>
#include "pciback.h"

/* Module parameter: when non-zero, log every frontend request. */
int verbose_request;
module_param(verbose_request, int, 0644);

static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id);

/* Ensure a device has the fake IRQ handler "turned on/off" and is
 * ready to be exported. This MUST be run after xen_pcibk_reset_device
 * which does the actual PCI device enable/disable.
 */
static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
{
	struct xen_pcibk_dev_data *dev_data;
	int rc;
	int enable = 0;

	dev_data = pci_get_drvdata(dev);
	if (!dev_data)
		return;

	/* We don't deal with bridges */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return;

	if (reset) {
		/* A reset forces the fake handler off. */
		dev_data->enable_intx = 0;
		dev_data->ack_intr = 0;
	}
	enable = dev_data->enable_intx;

	/* Asked to disable, but ISR isn't running - nothing to do. */
	if (!enable && !dev_data->isr_on)
		return;

	/* Squirrel away the IRQs in the dev_data. We need this
	 * b/c when device transitions to MSI, the dev->irq is
	 * overwritten with the MSI vector.
	 */
	if (enable)
		dev_data->irq = dev->irq;

	/*
	 * SR-IOV devices in all use MSI-X and have no legacy
	 * interrupts, so inhibit creating a fake IRQ handler for them.
	 */
	if (dev_data->irq == 0)
		goto out;

	dev_dbg(&dev->dev, "%s: #%d %s %s%s %s-> %s\n",
		dev_data->irq_name,
		dev_data->irq,
		pci_is_enabled(dev) ? "on" : "off",
		dev->msi_enabled ? "MSI" : "",
		dev->msix_enabled ? "MSI/X" : "",
		dev_data->isr_on ? "enable" : "disable",
		enable ? "enable" : "disable");

	if (enable) {
		/* Shared, since the guest may also register a handler
		 * on the same physical line. */
		rc = request_irq(dev_data->irq, xen_pcibk_guest_interrupt,
				 IRQF_SHARED, dev_data->irq_name, dev);
		if (rc) {
			dev_err(&dev->dev, "%s: failed to install fake IRQ " \
				"handler for IRQ %d! (rc:%d)\n",
				dev_data->irq_name, dev_data->irq, rc);
			goto out;
		}
	} else {
		free_irq(dev_data->irq, dev);
		dev_data->irq = 0;
	}
	dev_data->isr_on = enable;
	dev_data->ack_intr = enable;
out:
	dev_dbg(&dev->dev, "%s: #%d %s %s%s %s\n",
		dev_data->irq_name,
		dev_data->irq,
		pci_is_enabled(dev) ? "on" : "off",
		dev->msi_enabled ? "MSI" : "",
		dev->msix_enabled ? "MSI/X" : "",
		enable ? (dev_data->isr_on ? "enabled" : "failed to enable") :
			(dev_data->isr_on ? "failed to disable" : "disabled"));
}

/* Ensure a device is "turned off" and ready to be exported.
 * (Also see xen_pcibk_config_reset to ensure virtual configuration space is
 * ready to be re-exported)
 */
void xen_pcibk_reset_device(struct pci_dev *dev)
{
	u16 cmd;

	xen_pcibk_control_isr(dev, 1 /* reset device */);

	/* Disable devices (but not bridges) */
	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
#ifdef CONFIG_PCI_MSI
		/* The guest could have been abruptly killed without
		 * disabling MSI/MSI-X interrupts.*/
		if (dev->msix_enabled)
			pci_disable_msix(dev);
		if (dev->msi_enabled)
			pci_disable_msi(dev);
#endif
		pci_disable_device(dev);

		pci_write_config_word(dev, PCI_COMMAND, 0);

		dev->is_busmaster = 0;
	} else {
		/* For bridges, only strip bus-mastering if it was set. */
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_INVALIDATE)) {
			cmd &= ~(PCI_COMMAND_INVALIDATE);
			pci_write_config_word(dev, PCI_COMMAND, cmd);

			dev->is_busmaster = 0;
		}
	}
}

#ifdef CONFIG_PCI_MSI
/*
 * Enable MSI on behalf of the frontend. On success op->value carries the
 * pirq the guest should use (0 for the SR-IOV no-legacy-IRQ case).
 */
static
int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
			 struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;
	int otherend = pdev->xdev->otherend_id;
	int status;

	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));

	status = pci_enable_msi(dev);

	if (status) {
		printk(KERN_ERR "error enable msi for guest %x status %x\n",
			otherend, status);
		op->value = 0;
		return XEN_PCI_ERR_op_failed;
	}

	/* The value the guest needs is actually the IDT vector, not
	 * the local domain's IRQ number.
	 */
	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
			op->value);

	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 0;	/* fake-IRQ acking is for INTx only */

	return 0;
}

/*
 * Disable MSI; op->value is refreshed with the (legacy) pirq, and INTx
 * acking through the fake handler is re-enabled.
 */
static
int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
			  struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;

	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
		       pci_name(dev));
	pci_disable_msi(dev);

	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
			op->value);
	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 1;
	return 0;
}

/*
 * Enable MSI-X with the vector table the frontend supplied in
 * op->msix_entries (at most SH_INFO_MAX_VEC entries). On success the
 * entries are written back with pirq numbers; op->value carries the
 * result code from pci_enable_msix().
 */
static
int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
			  struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;
	int i, result;
	struct msix_entry *entries;

	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
		       pci_name(dev));
	if (op->value > SH_INFO_MAX_VEC)
		return -EINVAL;

	entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
	if (entries == NULL)
		return -ENOMEM;

	for (i = 0; i < op->value; i++) {
		entries[i].entry = op->msix_entries[i].entry;
		entries[i].vector = op->msix_entries[i].vector;
	}

	result = pci_enable_msix(dev, entries, op->value);

	if (result == 0) {
		for (i = 0; i < op->value; i++) {
			op->msix_entries[i].entry = entries[i].entry;
			if (entries[i].vector)
				op->msix_entries[i].vector =
					xen_pirq_from_irq(entries[i].vector);
				if (unlikely(verbose_request))
					printk(KERN_DEBUG DRV_NAME ": %s: " \
						"MSI-X[%d]: %d\n",
						pci_name(dev), i,
						op->msix_entries[i].vector);
		}
	} else {
		printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n",
			pci_name(dev), result);
	}
	kfree(entries);

	op->value = result;
	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 0;

	/* pci_enable_msix() > 0 means "fewer vectors available" - treated
	 * as success here; negative values are passed through as errors. */
	return result > 0 ? 0 : result;
}

/*
 * Disable MSI-X and report back the legacy pirq (0 for SR-IOV devices,
 * which have no legacy IRQ).
 */
static
int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
			   struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;
	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
			pci_name(dev));
	pci_disable_msix(dev);

	/*
	 * SR-IOV devices (which don't have any legacy IRQ) have
	 * an undefined IRQ value of zero.
	 */
	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
			op->value);
	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 1;
	return 0;
}
#endif

/*
 * Now the same evtchn is used for both pcifront conf_read_write request
 * as well as pcie aer front end ack. We use a new work_queue to schedule
 * xen_pcibk conf_read_write service for avoiding conflict with aer_core
 * do_recovery job which also use the system default work_queue
 */
void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
{
	/* Check that frontend is requesting an operation and that we are not
	 * already processing a request */
	if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
	    && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
		queue_work(xen_pcibk_wq, &pdev->op_work);
	}
	/*_XEN_PCIB_active should have been cleared by pcifront. And also make
	sure xen_pcibk is waiting for ack by checking _PCIB_op_pending*/
	if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
	    && test_bit(_PCIB_op_pending, &pdev->flags)) {
		wake_up(&xen_pcibk_aer_wait_queue);
	}
}

/* Performing the configuration space reads/writes must not be done in atomic
 * context because some of the pci_* functions can sleep (mostly due to ACPI
 * use of semaphores). This function is intended to be called from a work
 * queue in process context taking a struct xen_pcibk_device as a parameter */

void xen_pcibk_do_op(struct work_struct *data)
{
	struct xen_pcibk_device *pdev =
		container_of(data, struct xen_pcibk_device, op_work);
	struct pci_dev *dev;
	struct xen_pcibk_dev_data *dev_data = NULL;
	struct xen_pci_op *op = &pdev->sh_info->op;
	int test_intx = 0;

	dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);

	if (dev == NULL)
		op->err = XEN_PCI_ERR_dev_not_found;
	else {
		dev_data = pci_get_drvdata(dev);
		if (dev_data)
			test_intx = dev_data->enable_intx;
		/* Dispatch on the command the frontend placed in shared info. */
		switch (op->cmd) {

		case XEN_PCI_OP_conf_read:
			op->err = xen_pcibk_config_read(dev,
				  op->offset, op->size, &op->value);
			break;

		case XEN_PCI_OP_conf_write:
			op->err = xen_pcibk_config_write(dev,
				  op->offset, op->size,	op->value);
			break;

#ifdef CONFIG_PCI_MSI
		case XEN_PCI_OP_enable_msi:
			op->err = xen_pcibk_enable_msi(pdev, dev, op);
			break;

		case XEN_PCI_OP_disable_msi:
			op->err = xen_pcibk_disable_msi(pdev, dev, op);
			break;

		case XEN_PCI_OP_enable_msix:
			op->err = xen_pcibk_enable_msix(pdev, dev, op);
			break;

		case XEN_PCI_OP_disable_msix:
			op->err = xen_pcibk_disable_msix(pdev, dev, op);
			break;
#endif
		default:
			op->err = XEN_PCI_ERR_not_implemented;
			break;
		}
	}
	if (!op->err && dev && dev_data) {
		/* Transition detected */
		if ((dev_data->enable_intx != test_intx))
			xen_pcibk_control_isr(dev, 0 /* no reset */);
	}
	/* Tell the driver domain that we're done. */
	wmb();
	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Mark that we're done. */
	smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
	clear_bit(_PDEVF_op_active, &pdev->flags);
	smp_mb__after_clear_bit(); /* /before/ final check for work */

	/* Check to see if the driver domain tried to start another request in
	 * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
	*/
	xen_pcibk_test_and_schedule_op(pdev);
}

/* Event-channel interrupt from the frontend: kick the work queue. */
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id)
{
	struct xen_pcibk_device *pdev = dev_id;

	xen_pcibk_test_and_schedule_op(pdev);

	return IRQ_HANDLED;
}

/*
 * Fake INTx handler installed by xen_pcibk_control_isr. Claims the
 * (shared) interrupt while the guest has INTx enabled so the line gets
 * acked; every 1000 invocations it re-checks whether the line is still
 * shared and stops claiming if not.
 */
static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id)
{
	struct pci_dev *dev = (struct pci_dev *)dev_id;
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);

	if (dev_data->isr_on && dev_data->ack_intr) {
		dev_data->handled++;
		if ((dev_data->handled % 1000) == 0) {
			if (xen_test_irq_shared(irq)) {
				printk(KERN_INFO "%s IRQ line is not shared "
					"with other domains. Turning ISR off\n",
					 dev_data->irq_name);
				dev_data->ack_intr = 0;
			}
		}
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
gpl-2.0
blakha/OS_TEST
arch/sh/kernel/kgdb.c
3749
10412
/*
 * SuperH KGDB support
 *
 * Copyright (C) 2008 - 2012  Paul Mundt
 *
 * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>

/* Macros for single step instruction identification */
#define OPCODE_BT(op)		(((op) & 0xff00) == 0x8900)
#define OPCODE_BF(op)		(((op) & 0xff00) == 0x8b00)
/* 8-bit signed displacement, sign-extended and scaled by 2 */
#define OPCODE_BTF_DISP(op)	(((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
				 (((op) & 0x7f ) << 1))
#define OPCODE_BFS(op)		(((op) & 0xff00) == 0x8f00)
#define OPCODE_BTS(op)		(((op) & 0xff00) == 0x8d00)
#define OPCODE_BRA(op)		(((op) & 0xf000) == 0xa000)
/* 12-bit signed displacement, sign-extended and scaled by 2 */
#define OPCODE_BRA_DISP(op)	(((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
				 (((op) & 0x7ff) << 1))
#define OPCODE_BRAF(op)		(((op) & 0xf0ff) == 0x0023)
#define OPCODE_BRAF_REG(op)	(((op) & 0x0f00) >> 8)
#define OPCODE_BSR(op)		(((op) & 0xf000) == 0xb000)
#define OPCODE_BSR_DISP(op)	(((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
				 (((op) & 0x7ff) << 1))
#define OPCODE_BSRF(op)		(((op) & 0xf0ff) == 0x0003)
#define OPCODE_BSRF_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_JMP(op)		(((op) & 0xf0ff) == 0x402b)
#define OPCODE_JMP_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_JSR(op)		(((op) & 0xf0ff) == 0x400b)
#define OPCODE_JSR_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_RTS(op)		((op) == 0xb)
#define OPCODE_RTE(op)		((op) == 0x2b)

#define SR_T_BIT_MASK		0x1
/* trapa #0x3d - the software-breakpoint instruction planted for stepping */
#define STEP_OPCODE		0xc33d

/* Calculate the new address for after a step */
static short *get_step_address(struct pt_regs *linux_regs)
{
	insn_size_t op = __raw_readw(linux_regs->pc);
	long addr;

	/* BT */
	if (OPCODE_BT(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BTS */
	else if (OPCODE_BTS(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4;	/* Not in delay slot */
	}

	/* BF */
	else if (OPCODE_BF(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BFS */
	else if (OPCODE_BFS(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4;	/* Not in delay slot */
	}

	/* BRA */
	else if (OPCODE_BRA(op))
		addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op);

	/* BRAF */
	else if (OPCODE_BRAF(op))
		addr = linux_regs->pc + 4
		       + linux_regs->regs[OPCODE_BRAF_REG(op)];

	/* BSR */
	else if (OPCODE_BSR(op))
		addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op);

	/* BSRF */
	else if (OPCODE_BSRF(op))
		addr = linux_regs->pc + 4
		       + linux_regs->regs[OPCODE_BSRF_REG(op)];

	/* JMP */
	else if (OPCODE_JMP(op))
		addr = linux_regs->regs[OPCODE_JMP_REG(op)];

	/* JSR */
	else if (OPCODE_JSR(op))
		addr = linux_regs->regs[OPCODE_JSR_REG(op)];

	/* RTS */
	else if (OPCODE_RTS(op))
		addr = linux_regs->pr;

	/* RTE */
	else if (OPCODE_RTE(op))
		/* NOTE(review): return PC is taken from the saved stack
		 * pointer slot (regs[15]); confirm against the RTE frame
		 * layout. */
		addr = linux_regs->regs[15];

	/* Other */
	else
		addr = linux_regs->pc + instruction_size(op);

	flush_icache_range(addr, addr + instruction_size(op));
	return (short *)addr;
}

/*
 * Replace the instruction immediately after the current instruction
 * (i.e. next in the expected flow of control) with a trap instruction,
 * so that returning will cause only a single instruction to be executed.
 * Note that this model is slightly broken for instructions with delay
 * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the
 * instruction in the delay slot will be executed.
 */

/* Saved original instruction + its address so the step can be undone. */
static unsigned long stepped_address;
static insn_size_t stepped_opcode;

static void do_single_step(struct pt_regs *linux_regs)
{
	/* Determine where the target instruction will send us to */
	unsigned short *addr = get_step_address(linux_regs);

	stepped_address = (int)addr;

	/* Replace it */
	stepped_opcode = __raw_readw((long)addr);
	*addr = STEP_OPCODE;

	/* Flush and return */
	flush_icache_range((long)addr, (long)addr +
			   instruction_size(stepped_opcode));
}

/* Undo a single step */
static void undo_single_step(struct pt_regs *linux_regs)
{
	/* If we have stepped, put back the old instruction */
	/* Use stepped_address in case we stopped elsewhere */
	if (stepped_opcode != 0) {
		__raw_writew(stepped_opcode, stepped_address);
		flush_icache_range(stepped_address, stepped_address + 2);
	}

	stepped_opcode = 0;
}

/* GDB register layout: name, size, and offset into struct pt_regs
 * (-1 offset means "not held in pt_regs", e.g. vbr). */
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{ "r0",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
	{ "r1",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
	{ "r2",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
	{ "r3",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
	{ "r4",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
	{ "r5",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
	{ "r6",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
	{ "r7",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
	{ "r8",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
	{ "r9",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
	{ "r10",	GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
	{ "r11",	GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
	{ "r12",	GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
	{ "r13",	GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
	{ "r14",	GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
	{ "r15",	GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
	{ "pc",		GDB_SIZEOF_REG, offsetof(struct pt_regs, pc) },
	{ "pr",		GDB_SIZEOF_REG, offsetof(struct pt_regs, pr) },
	{ "sr",		GDB_SIZEOF_REG, offsetof(struct pt_regs, sr) },
	{ "gbr",	GDB_SIZEOF_REG, offsetof(struct pt_regs, gbr) },
	{ "mach",	GDB_SIZEOF_REG, offsetof(struct pt_regs, mach) },
	{ "macl",	GDB_SIZEOF_REG, offsetof(struct pt_regs, macl) },
	{ "vbr",	GDB_SIZEOF_REG, -1 },
};

/* Write one register value from GDB into the saved pt_regs. */
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno < 0 || regno >= DBG_MAX_REG_NUM)
		return -EINVAL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
		       dbg_reg_def[regno].size);

	return 0;
}

/* Read one register value for GDB from the saved pt_regs. */
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	/* NOTE(review): guards on size != -1 while dbg_set_reg guards on
	 * offset != -1 - asymmetric sentinel checks; verify intent. */
	if (dbg_reg_def[regno].size != -1)
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);

	switch (regno) {
	case GDB_VBR:
		/* NOTE(review): this stores vbr into the local pointer
		 * variable 'mem' itself, not into the buffer it points
		 * at - confirm whether the caller ever sees this value. */
		__asm__ __volatile__ ("stc vbr, %0" : "=r" (mem));
		break;
	}

	return dbg_reg_def[regno].name;
}

void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	struct pt_regs *thread_regs = task_pt_regs(p);
	int reg;

	/* Initialize to zero */
	for (reg = 0; reg < DBG_MAX_REG_NUM; reg++)
		gdb_regs[reg] = 0;

	/*
	 * Copy out GP regs 8 to 14.
	 *
	 * switch_to() relies on SR.RB toggling, so regs 0->7 are banked
	 * and need privileged instructions to get to. The r15 value we
	 * fetch from the thread info directly.
	 */
	for (reg = GDB_R8; reg < GDB_R15; reg++)
		gdb_regs[reg] = thread_regs->regs[reg];

	gdb_regs[GDB_R15] = p->thread.sp;
	gdb_regs[GDB_PC] = p->thread.pc;

	/*
	 * Additional registers we have context for
	 */
	gdb_regs[GDB_PR] = thread_regs->pr;
	gdb_regs[GDB_GBR] = thread_regs->gbr;
}

/*
 * Handle a remote-protocol packet: 'c' (continue), 's' (step),
 * 'D' (detach), 'k' (kill). Returns 0 when the packet was consumed and
 * execution should resume, -1 to stay in the handler.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	/* Undo any stepping we may have done */
	undo_single_step(linux_regs);

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->pc = addr;
		/* fall through - 'c'/'s' share the resume path below */
	case 'D':
	case 'k':
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		if (remcomInBuffer[0] == 's') {
			do_single_step(linux_regs);
			kgdb_single_step = 1;

			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}

		return 0;
	}

	/* this means that we do not want to exit from the handler: */
	return -1;
}

unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	/* Presumably vector 60 is the breakpoint trap, where the saved PC
	 * points past the 2-byte trap instruction - TODO confirm vector. */
	if (exception == 60)
		return instruction_pointer(regs) - 2;
	return instruction_pointer(regs);
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->pc = ip;
}

/*
 * The primary entry points for the kgdb debug trap table entries.
 */
BUILD_TRAP_HANDLER(singlestep)
{
	unsigned long flags;
	TRAP_HANDLER_DECL;

	local_irq_save(flags);
	/* Rewind the PC over the planted step trap before re-entering kgdb. */
	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
	kgdb_handle_exception(0, SIGTRAP, 0, regs);
	local_irq_restore(flags);
}

/* Per-CPU NMI callback used to round up the other CPUs. */
static void kgdb_call_nmi_hook(void *ignored)
{
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}

void kgdb_roundup_cpus(unsigned long flags)
{
	local_irq_enable();
	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
	local_irq_disable();
}

/* die-notifier body; runs with interrupts disabled (see kgdb_notify). */
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	int ret;

	switch (cmd) {
	case DIE_BREAKPOINT:
		/*
		 * This means a user thread is single stepping
		 * a system call which should be ignored
		 */
		if (test_thread_flag(TIF_SINGLESTEP))
			return NOTIFY_DONE;

		ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
					    args->err, args->regs);
		if (ret)
			return NOTIFY_DONE;

		break;
	}

	return NOTIFY_STOP;
}

static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = __kgdb_notify(ptr, cmd);
	local_irq_restore(flags);

	return ret;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call	= kgdb_notify,

	/*
	 * Lowest-prio notifier priority, we want to be notified last:
	 */
	.priority	= -INT_MAX,
};

int kgdb_arch_init(void)
{
	return register_die_notifier(&kgdb_notifier);
}

void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}

struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: trapa #0x3c */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	.gdb_bpt_instr		= { 0x3c, 0xc3 },
#else
	.gdb_bpt_instr		= { 0xc3, 0x3c },
#endif
};
gpl-2.0
gchild320/kernel_lge_g3
drivers/net/ethernet/smsc/smc91x.c
4517
63678
/* * smc91x.c * This is a driver for SMSC's 91C9x/91C1xx single-chip Ethernet devices. * * Copyright (C) 1996 by Erik Stahlman * Copyright (C) 2001 Standard Microsystems Corporation * Developed by Simple Network Magic Corporation * Copyright (C) 2003 Monta Vista Software, Inc. * Unified SMC91x driver by Nicolas Pitre * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Arguments: * io = for the base address * irq = for the IRQ * nowait = 0 for normal wait states, 1 eliminates additional wait states * * original author: * Erik Stahlman <erik@vt.edu> * * hardware multicast code: * Peter Cammaert <pc@denkart.be> * * contributors: * Daris A Nevil <dnevil@snmc.com> * Nicolas Pitre <nico@fluxnic.net> * Russell King <rmk@arm.linux.org.uk> * * History: * 08/20/00 Arnaldo Melo fix kfree(skb) in smc_hardware_send_packet * 12/15/00 Christian Jullien fix "Warning: kfree_skb on hard IRQ" * 03/16/01 Daris A Nevil modified smc9194.c for use with LAN91C111 * 08/22/01 Scott Anderson merge changes from smc9194 to smc91111 * 08/21/01 Pramod B Bhardwaj added support for RevB of LAN91C111 * 12/20/01 Jeff Sutherland initial port to Xscale PXA with DMA support * 04/07/03 Nicolas Pitre unified SMC91x driver, killed irq races, * more bus abstraction, big cleanup, etc. 
* 29/09/03 Russell King - add driver model support * - ethtool support * - convert to use generic MII interface * - add link up/down notification * - don't try to handle full negotiation in * smc_phy_configure * - clean up (and fix stack overrun) in PHY * MII read/write functions * 22/09/04 Nicolas Pitre big update (see commit log for details) */ static const char version[] = "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>\n"; /* Debugging level */ #ifndef SMC_DEBUG #define SMC_DEBUG 0 #endif #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/crc32.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/workqueue.h> #include <linux/of.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <asm/io.h> #include "smc91x.h" #ifndef SMC_NOWAIT # define SMC_NOWAIT 0 #endif static int nowait = SMC_NOWAIT; module_param(nowait, int, 0400); MODULE_PARM_DESC(nowait, "set to 1 for no wait state"); /* * Transmit timeout, default 5 seconds. */ static int watchdog = 1000; module_param(watchdog, int, 0400); MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:smc91x"); /* * The internal workings of the driver. If you are changing anything * here with the SMC stuff, you should have the datasheet and know * what you are doing. */ #define CARDNAME "smc91x" /* * Use power-down feature of the chip */ #define POWER_DOWN 1 /* * Wait time for memory to be free. This probably shouldn't be * tuned that much, as waiting for this means nothing else happens * in the system */ #define MEMORY_WAIT_TIME 16 /* * The maximum number of processing loops allowed for each call to the * IRQ handler. 
*/ #define MAX_IRQ_LOOPS 8 /* * This selects whether TX packets are sent one by one to the SMC91x internal * memory and throttled until transmission completes. This may prevent * RX overruns a litle by keeping much of the memory free for RX packets * but to the expense of reduced TX throughput and increased IRQ overhead. * Note this is not a cure for a too slow data bus or too high IRQ latency. */ #define THROTTLE_TX_PKTS 0 /* * The MII clock high/low times. 2x this number gives the MII clock period * in microseconds. (was 50, but this gives 6.4ms for each MII transaction!) */ #define MII_DELAY 1 #if SMC_DEBUG > 0 #define DBG(n, args...) \ do { \ if (SMC_DEBUG >= (n)) \ printk(args); \ } while (0) #define PRINTK(args...) printk(args) #else #define DBG(n, args...) do { } while(0) #define PRINTK(args...) printk(KERN_DEBUG args) #endif #if SMC_DEBUG > 3 static void PRINT_PKT(u_char *buf, int length) { int i; int remainder; int lines; lines = length / 16; remainder = length % 16; for (i = 0; i < lines ; i ++) { int cur; for (cur = 0; cur < 8; cur++) { u_char a, b; a = *buf++; b = *buf++; printk("%02x%02x ", a, b); } printk("\n"); } for (i = 0; i < remainder/2 ; i++) { u_char a, b; a = *buf++; b = *buf++; printk("%02x%02x ", a, b); } printk("\n"); } #else #define PRINT_PKT(x...) 
do { } while(0) #endif /* this enables an interrupt in the interrupt mask register */ #define SMC_ENABLE_INT(lp, x) do { \ unsigned char mask; \ unsigned long smc_enable_flags; \ spin_lock_irqsave(&lp->lock, smc_enable_flags); \ mask = SMC_GET_INT_MASK(lp); \ mask |= (x); \ SMC_SET_INT_MASK(lp, mask); \ spin_unlock_irqrestore(&lp->lock, smc_enable_flags); \ } while (0) /* this disables an interrupt from the interrupt mask register */ #define SMC_DISABLE_INT(lp, x) do { \ unsigned char mask; \ unsigned long smc_disable_flags; \ spin_lock_irqsave(&lp->lock, smc_disable_flags); \ mask = SMC_GET_INT_MASK(lp); \ mask &= ~(x); \ SMC_SET_INT_MASK(lp, mask); \ spin_unlock_irqrestore(&lp->lock, smc_disable_flags); \ } while (0) /* * Wait while MMU is busy. This is usually in the order of a few nanosecs * if at all, but let's avoid deadlocking the system if the hardware * decides to go south. */ #define SMC_WAIT_MMU_BUSY(lp) do { \ if (unlikely(SMC_GET_MMU_CMD(lp) & MC_BUSY)) { \ unsigned long timeout = jiffies + 2; \ while (SMC_GET_MMU_CMD(lp) & MC_BUSY) { \ if (time_after(jiffies, timeout)) { \ printk("%s: timeout %s line %d\n", \ dev->name, __FILE__, __LINE__); \ break; \ } \ cpu_relax(); \ } \ } \ } while (0) /* * this does a soft reset on the device */ static void smc_reset(struct net_device *dev) { struct smc_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; unsigned int ctl, cfg; struct sk_buff *pending_skb; DBG(2, "%s: %s\n", dev->name, __func__); /* Disable all interrupts, block TX tasklet */ spin_lock_irq(&lp->lock); SMC_SELECT_BANK(lp, 2); SMC_SET_INT_MASK(lp, 0); pending_skb = lp->pending_tx_skb; lp->pending_tx_skb = NULL; spin_unlock_irq(&lp->lock); /* free any pending tx skb */ if (pending_skb) { dev_kfree_skb(pending_skb); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; } /* * This resets the registers mostly to defaults, but doesn't * affect EEPROM. 
That seems unnecessary */ SMC_SELECT_BANK(lp, 0); SMC_SET_RCR(lp, RCR_SOFTRST); /* * Setup the Configuration Register * This is necessary because the CONFIG_REG is not affected * by a soft reset */ SMC_SELECT_BANK(lp, 1); cfg = CONFIG_DEFAULT; /* * Setup for fast accesses if requested. If the card/system * can't handle it then there will be no recovery except for * a hard reset or power cycle */ if (lp->cfg.flags & SMC91X_NOWAIT) cfg |= CONFIG_NO_WAIT; /* * Release from possible power-down state * Configuration register is not affected by Soft Reset */ cfg |= CONFIG_EPH_POWER_EN; SMC_SET_CONFIG(lp, cfg); /* this should pause enough for the chip to be happy */ /* * elaborate? What does the chip _need_? --jgarzik * * This seems to be undocumented, but something the original * driver(s) have always done. Suspect undocumented timing * info/determined empirically. --rmk */ udelay(1); /* Disable transmit and receive functionality */ SMC_SELECT_BANK(lp, 0); SMC_SET_RCR(lp, RCR_CLEAR); SMC_SET_TCR(lp, TCR_CLEAR); SMC_SELECT_BANK(lp, 1); ctl = SMC_GET_CTL(lp) | CTL_LE_ENABLE; /* * Set the control register to automatically release successfully * transmitted packets, to make the best use out of our limited * memory */ if(!THROTTLE_TX_PKTS) ctl |= CTL_AUTO_RELEASE; else ctl &= ~CTL_AUTO_RELEASE; SMC_SET_CTL(lp, ctl); /* Reset the MMU */ SMC_SELECT_BANK(lp, 2); SMC_SET_MMU_CMD(lp, MC_RESET); SMC_WAIT_MMU_BUSY(lp); } /* * Enable Interrupts, Receive, and Transmit */ static void smc_enable(struct net_device *dev) { struct smc_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; int mask; DBG(2, "%s: %s\n", dev->name, __func__); /* see the header file for options in TCR/RCR DEFAULT */ SMC_SELECT_BANK(lp, 0); SMC_SET_TCR(lp, lp->tcr_cur_mode); SMC_SET_RCR(lp, lp->rcr_cur_mode); SMC_SELECT_BANK(lp, 1); SMC_SET_MAC_ADDR(lp, dev->dev_addr); /* now, enable interrupts */ mask = IM_EPH_INT|IM_RX_OVRN_INT|IM_RCV_INT; if (lp->version >= (CHIP_91100 << 4)) mask |= IM_MDINT; 
SMC_SELECT_BANK(lp, 2); SMC_SET_INT_MASK(lp, mask); /* * From this point the register bank must _NOT_ be switched away * to something else than bank 2 without proper locking against * races with any tasklet or interrupt handlers until smc_shutdown() * or smc_reset() is called. */ } /* * this puts the device in an inactive state */ static void smc_shutdown(struct net_device *dev) { struct smc_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; struct sk_buff *pending_skb; DBG(2, "%s: %s\n", CARDNAME, __func__); /* no more interrupts for me */ spin_lock_irq(&lp->lock); SMC_SELECT_BANK(lp, 2); SMC_SET_INT_MASK(lp, 0); pending_skb = lp->pending_tx_skb; lp->pending_tx_skb = NULL; spin_unlock_irq(&lp->lock); if (pending_skb) dev_kfree_skb(pending_skb); /* and tell the card to stay away from that nasty outside world */ SMC_SELECT_BANK(lp, 0); SMC_SET_RCR(lp, RCR_CLEAR); SMC_SET_TCR(lp, TCR_CLEAR); #ifdef POWER_DOWN /* finally, shut the chip down */ SMC_SELECT_BANK(lp, 1); SMC_SET_CONFIG(lp, SMC_GET_CONFIG(lp) & ~CONFIG_EPH_POWER_EN); #endif } /* * This is the procedure to handle the receipt of a packet. 
 */
static inline void smc_rcv(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int packet_number, status, packet_len;

	DBG(3, "%s: %s\n", dev->name, __func__);

	packet_number = SMC_GET_RXFIFO(lp);
	if (unlikely(packet_number & RXFIFO_REMPTY)) {
		PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name);
		return;
	}

	/* read from start of packet */
	SMC_SET_PTR(lp, PTR_READ | PTR_RCV | PTR_AUTOINC);

	/* First two words are status and packet length */
	SMC_GET_PKT_HDR(lp, status, packet_len);
	packet_len &= 0x07ff;  /* mask off top bits */
	DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
		dev->name, packet_number, status,
		packet_len, packet_len);

back:
	if (unlikely(packet_len < 6 || status & RS_ERRORS)) {
		/*
		 * RS_TOOLONG alone may just be an oversize VLAN frame
		 * (1514 + 4 byte tag + 6 byte chip overhead): clear the
		 * flag and re-evaluate the remaining error bits.
		 */
		if (status & RS_TOOLONG && packet_len <= (1514 + 4 + 6)) {
			/* accept VLAN packets */
			status &= ~RS_TOOLONG;
			goto back;
		}
		if (packet_len < 6) {
			/* bloody hardware */
			/* NOTE(review): format string lacks a closing ')' */
			printk(KERN_ERR "%s: fubar (rxlen %u status %x\n",
					dev->name, packet_len, status);
			status |= RS_TOOSHORT;
		}
		SMC_WAIT_MMU_BUSY(lp);
		SMC_SET_MMU_CMD(lp, MC_RELEASE);
		dev->stats.rx_errors++;
		if (status & RS_ALGNERR)
			dev->stats.rx_frame_errors++;
		if (status & (RS_TOOSHORT | RS_TOOLONG))
			dev->stats.rx_length_errors++;
		if (status & RS_BADCRC)
			dev->stats.rx_crc_errors++;
	} else {
		struct sk_buff *skb;
		unsigned char *data;
		unsigned int data_len;

		/* set multicast stats */
		if (status & RS_MULTICAST)
			dev->stats.multicast++;

		/*
		 * Actual payload is packet_len - 6 (or 5 if odd byte).
		 * We want skb_reserve(2) and the final ctrl word
		 * (2 bytes, possibly containing the payload odd byte).
		 * Furthermore, we add 2 bytes to allow rounding up to
		 * multiple of 4 bytes on 32 bit buses.
		 * Hence packet_len - 6 + 2 + 2 + 2.
		 */
		skb = netdev_alloc_skb(dev, packet_len);
		if (unlikely(skb == NULL)) {
			printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
				dev->name);
			/* must still release the chip buffer or RX stalls */
			SMC_WAIT_MMU_BUSY(lp);
			SMC_SET_MMU_CMD(lp, MC_RELEASE);
			dev->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 32 bits */
		skb_reserve(skb, 2);

		/* BUG: the LAN91C111 rev A never sets this bit. Force it. */
		if (lp->version == 0x90)
			status |= RS_ODDFRAME;

		/*
		 * If odd length: packet_len - 5,
		 * otherwise packet_len - 6.
		 * With the trailing ctrl byte it's packet_len - 4.
		 */
		data_len = packet_len - ((status & RS_ODDFRAME) ? 5 : 6);
		data = skb_put(skb, data_len);
		SMC_PULL_DATA(lp, data, packet_len - 4);

		SMC_WAIT_MMU_BUSY(lp);
		SMC_SET_MMU_CMD(lp, MC_RELEASE);

		PRINT_PKT(data, packet_len - 4);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += data_len;
	}
}

#ifdef CONFIG_SMP
/*
 * On SMP we have the following problem:
 *
 * 	A = smc_hardware_send_pkt()
 * 	B = smc_hard_start_xmit()
 * 	C = smc_interrupt()
 *
 * A and B can never be executed simultaneously.  However, at least on UP,
 * it is possible (and even desirable) for C to interrupt execution of
 * A or B in order to have better RX reliability and avoid overruns.
 * C, just like A and B, must have exclusive access to the chip and
 * each of them must lock against any other concurrent access.
 * Unfortunately it is not possible to have C suspend execution of A or
 * B taking place on another CPU. On UP this is not an issue since A and B
 * are run from softirq context and C from hard IRQ context, and there is
 * no other CPU where concurrent access can happen.
 * If ever there is a way to force at least B and C to always be executed
 * on the same CPU then we could use read/write locks to protect against
 * any other concurrent access and C would always interrupt B. But life
 * isn't that easy in a SMP world...
 */
#define smc_special_trylock(lock, flags)				\
({									\
	int __ret;							\
	local_irq_save(flags);						\
	__ret = spin_trylock(lock);					\
	if (!__ret)							\
		local_irq_restore(flags);				\
	__ret;								\
})
#define smc_special_lock(lock, flags)	spin_lock_irqsave(lock, flags)
#define smc_special_unlock(lock, flags)	spin_unlock_irqrestore(lock, flags)
#else
/* UP: IRQ-off context already excludes the ISR; these keep flags "used" */
#define smc_special_trylock(lock, flags)	(flags == flags)
#define smc_special_lock(lock, flags)	do { flags = 0; } while (0)
#define smc_special_unlock(lock, flags)	do { flags = 0; } while (0)
#endif

/*
 * This is called to actually send a packet to the chip.
 * Runs as a tasklet (hence the unsigned long argument); copies the
 * pending skb into the chip packet buffer previously allocated with
 * MC_ALLOC and queues it for transmission.
 */
static void smc_hardware_send_pkt(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	struct sk_buff *skb;
	unsigned int packet_no, len;
	unsigned char *buf;
	unsigned long flags;

	DBG(3, "%s: %s\n", dev->name, __func__);

	/* if someone else holds the lock, retry from tasklet context */
	if (!smc_special_trylock(&lp->lock, flags)) {
		netif_stop_queue(dev);
		tasklet_schedule(&lp->tx_task);
		return;
	}

	skb = lp->pending_tx_skb;
	if (unlikely(!skb)) {
		smc_special_unlock(&lp->lock, flags);
		return;
	}
	lp->pending_tx_skb = NULL;

	packet_no = SMC_GET_AR(lp);
	if (unlikely(packet_no & AR_FAILED)) {
		printk("%s: Memory allocation failed.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_fifo_errors++;
		smc_special_unlock(&lp->lock, flags);
		goto done;
	}

	/* point to the beginning of the packet */
	SMC_SET_PN(lp, packet_no);
	SMC_SET_PTR(lp, PTR_AUTOINC);

	buf = skb->data;
	len = skb->len;
	DBG(2, "%s: TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
		dev->name, packet_no, len, len, buf);
	PRINT_PKT(buf, len);

	/*
	 * Send the packet length (+6 for status words, length, and ctl.
	 * The card will pad to 64 bytes with zeroes if packet is too small.
	 */
	SMC_PUT_PKT_HDR(lp, 0, len + 6);

	/* send the actual data */
	SMC_PUSH_DATA(lp, buf, len & ~1);

	/* Send final ctl word with the last byte if there is one */
	SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG(lp));

	/*
	 * If THROTTLE_TX_PKTS is set, we stop the queue here. This will
	 * have the effect of having at most one packet queued for TX
	 * in the chip's memory at all time.
	 *
	 * If THROTTLE_TX_PKTS is not set then the queue is stopped only
	 * when memory allocation (MC_ALLOC) does not succeed right away.
	 */
	if (THROTTLE_TX_PKTS)
		netif_stop_queue(dev);

	/* queue the packet for TX */
	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
	smc_special_unlock(&lp->lock, flags);

	dev->trans_start = jiffies;
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	SMC_ENABLE_INT(lp, IM_TX_INT | IM_TX_EMPTY_INT);

done:
	if (!THROTTLE_TX_PKTS)
		netif_wake_queue(dev);

	dev_kfree_skb(skb);
}

/*
 * Since I am not sure if I will have enough room in the chip's ram
 * to store the packet, I call this routine which either sends it
 * now, or set the card to generate an interrupt when ready
 * for the packet.
 */
static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int numPages, poll_count, status;
	unsigned long flags;

	DBG(3, "%s: %s\n", dev->name, __func__);

	BUG_ON(lp->pending_tx_skb != NULL);

	/*
	 * The MMU wants the number of pages to be the number of 256 bytes
	 * 'pages', minus 1 (since a packet can't ever have 0 pages :))
	 *
	 * The 91C111 ignores the size bits, but earlier models don't.
	 *
	 * Pkt size for allocating is data length +6 (for additional status
	 * words, length and ctl)
	 *
	 * If odd size then last byte is included in ctl word.
	 */
	numPages = ((skb->len & ~1) + (6 - 1)) >> 8;
	if (unlikely(numPages > 7)) {
		printk("%s: Far too big packet error.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	smc_special_lock(&lp->lock, flags);

	/* now, try to allocate the memory */
	SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);

	/*
	 * Poll the chip for a short amount of time in case the
	 * allocation succeeds quickly.
	 */
	poll_count = MEMORY_WAIT_TIME;
	do {
		status = SMC_GET_INT(lp);
		if (status & IM_ALLOC_INT) {
			SMC_ACK_INT(lp, IM_ALLOC_INT);
			break;
		}
	} while (--poll_count);

	smc_special_unlock(&lp->lock, flags);

	/*
	 * NOTE(review): pending_tx_skb is assigned after the lock is
	 * dropped; presumably safe because the ALLOC interrupt is only
	 * enabled below -- confirm against smc_interrupt()/tx_task.
	 */
	lp->pending_tx_skb = skb;
	if (!poll_count) {
		/* oh well, wait until the chip finds memory later */
		netif_stop_queue(dev);
		DBG(2, "%s: TX memory allocation deferred.\n", dev->name);
		SMC_ENABLE_INT(lp, IM_ALLOC_INT);
	} else {
		/*
		 * Allocation succeeded: push packet to the chip's own memory
		 * immediately.
		 */
		smc_hardware_send_pkt((unsigned long)dev);
	}

	return NETDEV_TX_OK;
}

/*
 * This handles a TX interrupt, which is only called when:
 * - a TX error occurred, or
 * - CTL_AUTO_RELEASE is not set and TX of a packet completed.
 */
static void smc_tx(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int saved_packet, packet_no, tx_status, pkt_len;

	DBG(3, "%s: %s\n", dev->name, __func__);

	/* If the TX FIFO is empty then nothing to do */
	packet_no = SMC_GET_TXFIFO(lp);
	if (unlikely(packet_no & TXFIFO_TEMPTY)) {
		PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name);
		return;
	}

	/* select packet to read from */
	saved_packet = SMC_GET_PN(lp);
	SMC_SET_PN(lp, packet_no);

	/* read the first word (status word) from this packet */
	SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ);
	SMC_GET_PKT_HDR(lp, tx_status, pkt_len);

	DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n",
		dev->name, tx_status, packet_no);

	if (!(tx_status & ES_TX_SUC))
		dev->stats.tx_errors++;

	if (tx_status & ES_LOSTCARR)
		dev->stats.tx_carrier_errors++;

	if (tx_status & (ES_LATCOL | ES_16COL)) {
		PRINTK("%s: %s occurred on last xmit\n", dev->name,
		       (tx_status & ES_LATCOL) ?
			"late collision" : "too many collisions");
		dev->stats.tx_window_errors++;
		if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) {
			printk(KERN_INFO "%s: unexpectedly large number of "
			       "bad collisions. Please check duplex "
			       "setting.\n", dev->name);
		}
	}

	/* kill the packet */
	SMC_WAIT_MMU_BUSY(lp);
	SMC_SET_MMU_CMD(lp, MC_FREEPKT);

	/* Don't restore Packet Number Reg until busy bit is cleared */
	SMC_WAIT_MMU_BUSY(lp);
	SMC_SET_PN(lp, saved_packet);

	/* re-enable transmit */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_TCR(lp, lp->tcr_cur_mode);
	SMC_SELECT_BANK(lp, 2);
}


/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/

/*
 * Bit-bang @val (MSB first, @bits wide) out on the MII management
 * data line, clocking MDC between writes.
 */
static void smc_mii_out(struct net_device *dev, unsigned int val, int bits)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int mii_reg, mask;

	mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO);
	mii_reg |= MII_MDOE;

	for (mask = 1 << (bits - 1); mask; mask >>= 1) {
		if (val & mask)
			mii_reg |= MII_MDO;
		else
			mii_reg &= ~MII_MDO;

		SMC_SET_MII(lp, mii_reg);
		udelay(MII_DELAY);
		SMC_SET_MII(lp, mii_reg | MII_MCLK);
		udelay(MII_DELAY);
	}
}

/*
 * Bit-bang @bits bits (MSB first) in from the MII management data
 * line, sampling MDI before each clock pulse.  Returns the value read.
 */
static unsigned int smc_mii_in(struct net_device *dev, int bits)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int mii_reg, mask, val;

	mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO);
	SMC_SET_MII(lp, mii_reg);

	for (mask = 1 << (bits - 1), val = 0; mask; mask >>= 1) {
		if (SMC_GET_MII(lp) & MII_MDI)
			val |= mask;

		SMC_SET_MII(lp, mii_reg);
		udelay(MII_DELAY);
		SMC_SET_MII(lp, mii_reg | MII_MCLK);
		udelay(MII_DELAY);
	}

	return val;
}

/*
 * Reads a register from the MII Management serial interface
 */
static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int phydata;

	SMC_SELECT_BANK(lp, 3);

	/* Idle - 32 ones */
	smc_mii_out(dev, 0xffffffff, 32);

	/* Start code (01) + read (10) + phyaddr + phyreg */
	smc_mii_out(dev, 6 << 10 | phyaddr << 5 | phyreg, 14);

	/* Turnaround (2bits) + phydata */
	phydata = smc_mii_in(dev, 18);

	/* Return to idle state */
	SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));

	DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
		__func__, phyaddr, phyreg, phydata);

	SMC_SELECT_BANK(lp, 2);
	return phydata;
}

/*
 * Writes a register to the MII Management serial interface
 */
static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
			  int phydata)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	SMC_SELECT_BANK(lp, 3);

	/* Idle - 32 ones */
	smc_mii_out(dev, 0xffffffff, 32);

	/* Start code (01) + write (01) + phyaddr + phyreg + turnaround + phydata */
	smc_mii_out(dev, 5 << 28 | phyaddr << 23 | phyreg << 18 | 2 << 16 | phydata, 32);

	/* Return to idle state */
	SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));

	DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
		__func__, phyaddr, phyreg, phydata);

	SMC_SELECT_BANK(lp, 2);
}

/*
 * Finds and reports the PHY address
 */
static void smc_phy_detect(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	int phyaddr;

	DBG(2, "%s: %s\n", dev->name, __func__);

	lp->phy_type = 0;

	/*
	 * Scan all 32 PHY addresses if necessary, starting at
	 * PHY#1 to PHY#31, and then PHY#0 last.
	 * (the "& 31" below makes address 32 wrap to PHY#0)
	 */
	for (phyaddr = 1; phyaddr < 33; ++phyaddr) {
		unsigned int id1, id2;

		/* Read the PHY identifiers */
		id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1);
		id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2);

		DBG(3, "%s: phy_id1=0x%x, phy_id2=0x%x\n",
			dev->name, id1, id2);

		/* Make sure it is a valid identifier */
		if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 &&
		    id2 != 0x0000 && id2 != 0xffff && id2 != 0x8000) {
			/* Save the PHY's address */
			lp->mii.phy_id = phyaddr & 31;
			lp->phy_type = id1 << 16 | id2;
			break;
		}
	}
}

/*
 * Sets the PHY to a configuration as determined by the user
 */
static int smc_phy_fixed(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int phyaddr = lp->mii.phy_id;
	int bmcr, cfg1;

	DBG(3, "%s: %s\n", dev->name, __func__);

	/* Enter Link Disable state */
	cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
	cfg1 |= PHY_CFG1_LNKDIS;
	smc_phy_write(dev, phyaddr, PHY_CFG1_REG, cfg1);

	/*
	 * Set our fixed capabilities
	 * Disable auto-negotiation
	 */
	bmcr = 0;

	if (lp->ctl_rfduplx)
		bmcr |= BMCR_FULLDPLX;

	if (lp->ctl_rspeed == 100)
		bmcr |= BMCR_SPEED100;

	/* Write our capabilities to the phy control register */
	smc_phy_write(dev, phyaddr, MII_BMCR, bmcr);

	/* Re-Configure the Receive/Phy Control register */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RPC(lp, lp->rpc_cur_mode);
	SMC_SELECT_BANK(lp, 2);

	return 1;
}

/*
 * smc_phy_reset - reset the phy
 * @dev: net device
 * @phy: phy address
 *
 * Issue a software reset for the specified PHY and
 * wait up to 100ms for the reset to complete.  We should
 * not access the PHY for 50ms after issuing the reset.
 *
 * The time to wait appears to be dependent on the PHY.
 *
 * Must be called with lp->lock locked.
*/ static int smc_phy_reset(struct net_device *dev, int phy) { struct smc_local *lp = netdev_priv(dev); unsigned int bmcr; int timeout; smc_phy_write(dev, phy, MII_BMCR, BMCR_RESET); for (timeout = 2; timeout; timeout--) { spin_unlock_irq(&lp->lock); msleep(50); spin_lock_irq(&lp->lock); bmcr = smc_phy_read(dev, phy, MII_BMCR); if (!(bmcr & BMCR_RESET)) break; } return bmcr & BMCR_RESET; } /* * smc_phy_powerdown - powerdown phy * @dev: net device * * Power down the specified PHY */ static void smc_phy_powerdown(struct net_device *dev) { struct smc_local *lp = netdev_priv(dev); unsigned int bmcr; int phy = lp->mii.phy_id; if (lp->phy_type == 0) return; /* We need to ensure that no calls to smc_phy_configure are pending. */ cancel_work_sync(&lp->phy_configure); bmcr = smc_phy_read(dev, phy, MII_BMCR); smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN); } /* * smc_phy_check_media - check the media status and adjust TCR * @dev: net device * @init: set true for initialisation * * Select duplex mode depending on negotiation state. This * also updates our carrier state. */ static void smc_phy_check_media(struct net_device *dev, int init) { struct smc_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { /* duplex state has changed */ if (lp->mii.full_duplex) { lp->tcr_cur_mode |= TCR_SWFDUP; } else { lp->tcr_cur_mode &= ~TCR_SWFDUP; } SMC_SELECT_BANK(lp, 0); SMC_SET_TCR(lp, lp->tcr_cur_mode); } } /* * Configures the specified PHY through the MII management interface * using Autonegotiation. * Calls smc_phy_fixed() if the user has requested a certain config. * If RPC ANEG bit is set, the media selection is dependent purely on * the selection by the MII (either in the MII BMCR reg or the result * of autonegotiation.) If the RPC ANEG bit is cleared, the selection * is controlled by the RPC SPEED and RPC DPLX bits. 
 */
static void smc_phy_configure(struct work_struct *work)
{
	struct smc_local *lp =
		container_of(work, struct smc_local, phy_configure);
	struct net_device *dev = lp->dev;
	void __iomem *ioaddr = lp->base;
	int phyaddr = lp->mii.phy_id;
	int my_phy_caps; /* My PHY capabilities */
	int my_ad_caps; /* My Advertised capabilities */
	int status;

	DBG(3, "%s:smc_program_phy()\n", dev->name);

	spin_lock_irq(&lp->lock);

	/*
	 * We should not be called if phy_type is zero.
	 */
	if (lp->phy_type == 0)
		goto smc_phy_configure_exit;

	if (smc_phy_reset(dev, phyaddr)) {
		printk("%s: PHY reset timed out\n", dev->name);
		goto smc_phy_configure_exit;
	}

	/*
	 * Enable PHY Interrupts (for register 18)
	 * Interrupts listed here are disabled
	 */
	smc_phy_write(dev, phyaddr, PHY_MASK_REG,
		PHY_INT_LOSSSYNC | PHY_INT_CWRD | PHY_INT_SSD |
		PHY_INT_ESD | PHY_INT_RPOL | PHY_INT_JAB |
		PHY_INT_SPDDET | PHY_INT_DPLXDET);

	/* Configure the Receive/Phy Control register */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RPC(lp, lp->rpc_cur_mode);

	/* If the user requested no auto neg, then go set his request */
	if (lp->mii.force_media) {
		smc_phy_fixed(dev);
		goto smc_phy_configure_exit;
	}

	/* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
	my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR);

	if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
		printk(KERN_INFO "Auto negotiation NOT supported\n");
		smc_phy_fixed(dev);
		goto smc_phy_configure_exit;
	}

	my_ad_caps = ADVERTISE_CSMA; /* I am CSMA capable */

	if (my_phy_caps & BMSR_100BASE4)
		my_ad_caps |= ADVERTISE_100BASE4;
	if (my_phy_caps & BMSR_100FULL)
		my_ad_caps |= ADVERTISE_100FULL;
	if (my_phy_caps & BMSR_100HALF)
		my_ad_caps |= ADVERTISE_100HALF;
	if (my_phy_caps & BMSR_10FULL)
		my_ad_caps |= ADVERTISE_10FULL;
	if (my_phy_caps & BMSR_10HALF)
		my_ad_caps |= ADVERTISE_10HALF;

	/* Disable capabilities not selected by our user */
	if (lp->ctl_rspeed != 100)
		my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);

	if (!lp->ctl_rfduplx)
		my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);

	/* Update our Auto-Neg Advertisement Register */
	smc_phy_write(dev, phyaddr, MII_ADVERTISE, my_ad_caps);
	lp->mii.advertising = my_ad_caps;

	/*
	 * Read the register back.  Without this, it appears that when
	 * auto-negotiation is restarted, sometimes it isn't ready and
	 * the link does not come up.
	 */
	status = smc_phy_read(dev, phyaddr, MII_ADVERTISE);

	DBG(2, "%s: phy caps=%x\n", dev->name, my_phy_caps);
	DBG(2, "%s: phy advertised caps=%x\n", dev->name, my_ad_caps);

	/* Restart auto-negotiation process in order to advertise my caps */
	smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);

	smc_phy_check_media(dev, 1);

smc_phy_configure_exit:
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
}

/*
 * smc_phy_interrupt
 *
 * Purpose:  Handle interrupts relating to PHY register 18. This is
 *  called from the "hard" interrupt handler under our private spinlock.
 */
static void smc_phy_interrupt(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	int phyaddr = lp->mii.phy_id;
	int phy18;

	DBG(2, "%s: %s\n", dev->name, __func__);

	if (lp->phy_type == 0)
		return;

	/* loop until the PHY stops asserting its interrupt bit */
	for(;;) {
		smc_phy_check_media(dev, 0);

		/* Read PHY Register 18, Status Output */
		phy18 = smc_phy_read(dev, phyaddr, PHY_INT_REG);
		if ((phy18 & PHY_INT_INT) == 0)
			break;
	}
}

/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/

/*
 * Track link state via the EPH status register (for chips without an
 * MII PHY) and update the carrier flag accordingly.
 */
static void smc_10bt_check_media(struct net_device *dev, int init)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int old_carrier, new_carrier;

	old_carrier = netif_carrier_ok(dev) ? 1 : 0;

	SMC_SELECT_BANK(lp, 0);
	new_carrier = (SMC_GET_EPH_STATUS(lp) & ES_LINK_OK) ? 1 : 0;
	SMC_SELECT_BANK(lp, 2);

	if (init || (old_carrier != new_carrier)) {
		if (!new_carrier) {
			netif_carrier_off(dev);
		} else {
			netif_carrier_on(dev);
		}
		if (netif_msg_link(lp))
			printk(KERN_INFO "%s: link %s\n", dev->name,
			       new_carrier ? "up" : "down");
	}
}

/*
 * EPH interrupt: re-check 10BaseT media and pulse the link-error
 * enable bit to re-arm the EPH interrupt source.
 */
static void smc_eph_interrupt(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int ctl;

	smc_10bt_check_media(dev, 0);

	SMC_SELECT_BANK(lp, 1);
	ctl = SMC_GET_CTL(lp);
	SMC_SET_CTL(lp, ctl & ~CTL_LE_ENABLE);
	SMC_SET_CTL(lp, ctl);
	SMC_SELECT_BANK(lp, 2);
}

/*
 * This is the main routine of the driver, to handle the device when
 * it needs some attention.
 */
static irqreturn_t smc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int status, mask, timeout, card_stats;
	int saved_pointer;

	DBG(3, "%s: %s\n", dev->name, __func__);

	spin_lock(&lp->lock);

	/* A preamble may be used when there is a potential race
	 * between the interruptible transmit functions and this
	 * ISR. */
	SMC_INTERRUPT_PREAMBLE;

	saved_pointer = SMC_GET_PTR(lp);
	mask = SMC_GET_INT_MASK(lp);
	SMC_SET_INT_MASK(lp, 0);

	/* set a timeout value, so I don't stay here forever */
	timeout = MAX_IRQ_LOOPS;

	do {
		status = SMC_GET_INT(lp);

		DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
			dev->name, status, mask,
			({ int meminfo; SMC_SELECT_BANK(lp, 0);
			   meminfo = SMC_GET_MIR(lp);
			   SMC_SELECT_BANK(lp, 2); meminfo; }),
			SMC_GET_FIFO(lp));

		status &= mask;
		if (!status)
			break;

		if (status & IM_TX_INT) {
			/* do this before RX as it will free memory quickly */
			DBG(3, "%s: TX int\n", dev->name);
			smc_tx(dev);
			SMC_ACK_INT(lp, IM_TX_INT);
			if (THROTTLE_TX_PKTS)
				netif_wake_queue(dev);
		} else if (status & IM_RCV_INT) {
			DBG(3, "%s: RX irq\n", dev->name);
			smc_rcv(dev);
		} else if (status & IM_ALLOC_INT) {
			DBG(3, "%s: Allocation irq\n", dev->name);
			/* defer the actual TX to the tasklet */
			tasklet_hi_schedule(&lp->tx_task);
			mask &= ~IM_ALLOC_INT;
		} else if (status & IM_TX_EMPTY_INT) {
			DBG(3, "%s: TX empty\n", dev->name);
			mask &= ~IM_TX_EMPTY_INT;

			/* update stats */
			SMC_SELECT_BANK(lp, 0);
			card_stats = SMC_GET_COUNTER(lp);
			SMC_SELECT_BANK(lp, 2);

			/* single collisions */
			dev->stats.collisions += card_stats & 0xF;
			card_stats >>= 4;

			/* multiple collisions */
			dev->stats.collisions += card_stats & 0xF;
		} else if (status & IM_RX_OVRN_INT) {
			DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name,
			       ({ int eph_st; SMC_SELECT_BANK(lp, 0);
				  eph_st = SMC_GET_EPH_STATUS(lp);
				  SMC_SELECT_BANK(lp, 2); eph_st; }));
			SMC_ACK_INT(lp, IM_RX_OVRN_INT);
			dev->stats.rx_errors++;
			dev->stats.rx_fifo_errors++;
		} else if (status & IM_EPH_INT) {
			smc_eph_interrupt(dev);
		} else if (status & IM_MDINT) {
			SMC_ACK_INT(lp, IM_MDINT);
			smc_phy_interrupt(dev);
		} else if (status & IM_ERCV_INT) {
			SMC_ACK_INT(lp, IM_ERCV_INT);
			PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
		}
	} while (--timeout);

	/* restore register states */
	SMC_SET_PTR(lp, saved_pointer);
	SMC_SET_INT_MASK(lp, mask);
	spin_unlock(&lp->lock);

#ifndef CONFIG_NET_POLL_CONTROLLER
	if (timeout == MAX_IRQ_LOOPS)
		PRINTK("%s: spurious interrupt (mask = 0x%02x)\n",
		       dev->name, mask);
#endif
	DBG(3, "%s: Interrupt done (%d loops)\n",
	       dev->name, MAX_IRQ_LOOPS - timeout);

	/*
	 * We return IRQ_HANDLED unconditionally here even if there was
	 * nothing to do.  There is a possibility that a packet might
	 * get enqueued into the chip right after TX_EMPTY_INT is raised
	 * but just before the CPU acknowledges the IRQ.
	 * Better take an unneeded IRQ in some occasions than complexifying
	 * the code for all cases.
	 */
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void smc_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	smc_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/* Our watchdog timed out.
   Called by the networking layer */
static void smc_timeout(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int status, mask, eph_st, meminfo, fifo;

	DBG(2, "%s: %s\n", dev->name, __func__);

	/* snapshot chip state for the diagnostic printout below */
	spin_lock_irq(&lp->lock);
	status = SMC_GET_INT(lp);
	mask = SMC_GET_INT_MASK(lp);
	fifo = SMC_GET_FIFO(lp);
	SMC_SELECT_BANK(lp, 0);
	eph_st = SMC_GET_EPH_STATUS(lp);
	meminfo = SMC_GET_MIR(lp);
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
	PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x "
		"MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
		dev->name, status, mask, meminfo, fifo, eph_st );

	smc_reset(dev);
	smc_enable(dev);

	/*
	 * Reconfiguring the PHY doesn't seem like a bad idea here, but
	 * smc_phy_configure() calls msleep() which calls schedule_timeout()
	 * which calls schedule().  Hence we use a work queue.
	 */
	if (lp->phy_type != 0)
		schedule_work(&lp->phy_configure);

	/* We can accept TX packets again */
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}

/*
 * This routine will, depending on the values passed to it,
 * either make it accept multicast packets, go into
 * promiscuous mode (for TCPDUMP and cousins) or accept
 * a select set of multicast packets
 */
static void smc_set_multicast_list(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned char multicast_table[8];
	int update_multicast = 0;

	DBG(2, "%s: %s\n", dev->name, __func__);

	if (dev->flags & IFF_PROMISC) {
		DBG(2, "%s: RCR_PRMS\n", dev->name);
		lp->rcr_cur_mode |= RCR_PRMS;
	}

/* BUG? I never disable promiscuous mode if multicasting was turned on.
   Now, I turn off promiscuous mode, but I don't do anything to multicasting
   when promiscuous mode is turned on.
 */

	/*
	 * Here, I am setting this to accept all multicast packets.
	 * I don't need to zero the multicast table, because the flag is
	 * checked before the table is
	 */
	else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
		DBG(2, "%s: RCR_ALMUL\n", dev->name);
		lp->rcr_cur_mode |= RCR_ALMUL;
	}

	/*
	 * This sets the internal hardware table to filter out unwanted
	 * multicast packets before they take up memory.
	 *
	 * The SMC chip uses a hash table where the high 6 bits of the CRC of
	 * address are the offset into the table.  If that bit is 1, then the
	 * multicast packet is accepted.  Otherwise, it's dropped silently.
	 *
	 * To use the 6 bits as an offset into the table, the high 3 bits are
	 * the number of the 8 bit register, while the low 3 bits are the bit
	 * within that register.
	 */
	else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		/* table for flipping the order of 3 bits */
		static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};

		/* start with a table of all zeros: reject all */
		memset(multicast_table, 0, sizeof(multicast_table));

		netdev_for_each_mc_addr(ha, dev) {
			int position;

			/* only use the low order bits */
			position = crc32_le(~0, ha->addr, 6) & 0x3f;

			/* do some messy swapping to put the bit in the right spot */
			multicast_table[invert3[position&7]] |=
				(1<<invert3[(position>>3)&7]);
		}

		/* be sure I get rid of flags I might have set */
		lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);

		/* now, the table can be loaded into the chipset */
		update_multicast = 1;
	} else {
		DBG(2, "%s: ~(RCR_PRMS|RCR_ALMUL)\n", dev->name);
		lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);

		/*
		 * since I'm disabling all multicast entirely, I need to
		 * clear the multicast list
		 */
		memset(multicast_table, 0, sizeof(multicast_table));
		update_multicast = 1;
	}

	spin_lock_irq(&lp->lock);
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RCR(lp, lp->rcr_cur_mode);
	if (update_multicast) {
		SMC_SELECT_BANK(lp, 3);
		SMC_SET_MCAST(lp, multicast_table);
	}
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
}

/*
 * Open and Initialize the board
 *
 * Set up everything, reset the card,
etc.. */ static int smc_open(struct net_device *dev) { struct smc_local *lp = netdev_priv(dev); DBG(2, "%s: %s\n", dev->name, __func__); /* * Check that the address is valid. If its not, refuse * to bring the device up. The user must specify an * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ if (!is_valid_ether_addr(dev->dev_addr)) { PRINTK("%s: no valid ethernet hw addr\n", __func__); return -EINVAL; } /* Setup the default Register Modes */ lp->tcr_cur_mode = TCR_DEFAULT; lp->rcr_cur_mode = RCR_DEFAULT; lp->rpc_cur_mode = RPC_DEFAULT | lp->cfg.leda << RPC_LSXA_SHFT | lp->cfg.ledb << RPC_LSXB_SHFT; /* * If we are not using a MII interface, we need to * monitor our own carrier signal to detect faults. */ if (lp->phy_type == 0) lp->tcr_cur_mode |= TCR_MON_CSN; /* reset the hardware */ smc_reset(dev); smc_enable(dev); /* Configure the PHY, initialize the link state */ if (lp->phy_type != 0) smc_phy_configure(&lp->phy_configure); else { spin_lock_irq(&lp->lock); smc_10bt_check_media(dev, 1); spin_unlock_irq(&lp->lock); } netif_start_queue(dev); return 0; } /* * smc_close * * this makes the board clean up everything that it can * and not talk to the outside world. 
 * Caused by
 * an 'ifconfig ethX down'
 */
static int smc_close(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);

	DBG(2, "%s: %s\n", dev->name, __func__);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	/* clear everything */
	smc_shutdown(dev);
	tasklet_kill(&lp->tx_task);
	smc_phy_powerdown(dev);
	return 0;
}

/*
 * Ethtool support
 */

/* report link settings; for the no-PHY case, synthesize them from the
 * driver's current TCR mode and user-set speed */
static int
smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct smc_local *lp = netdev_priv(dev);
	int ret;

	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	if (lp->phy_type != 0) {
		spin_lock_irq(&lp->lock);
		ret = mii_ethtool_gset(&lp->mii, cmd);
		spin_unlock_irq(&lp->lock);
	} else {
		cmd->supported = SUPPORTED_10baseT_Half |
				 SUPPORTED_10baseT_Full |
				 SUPPORTED_TP | SUPPORTED_AUI;

		if (lp->ctl_rspeed == 10)
			ethtool_cmd_speed_set(cmd, SPEED_10);
		else if (lp->ctl_rspeed == 100)
			ethtool_cmd_speed_set(cmd, SPEED_100);

		cmd->autoneg = AUTONEG_DISABLE;
		cmd->transceiver = XCVR_INTERNAL;
		cmd->port = 0;
		cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF;

		ret = 0;
	}

	return ret;
}

/* apply link settings; without a PHY only fixed 10 Mbit TP/AUI is valid */
static int
smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct smc_local *lp = netdev_priv(dev);
	int ret;

	if (lp->phy_type != 0) {
		spin_lock_irq(&lp->lock);
		ret = mii_ethtool_sset(&lp->mii, cmd);
		spin_unlock_irq(&lp->lock);
	} else {
		if (cmd->autoneg != AUTONEG_DISABLE ||
		    cmd->speed != SPEED_10 ||
		    (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
		    (cmd->port != PORT_TP && cmd->port != PORT_AUI))
			return -EINVAL;

//		lp->port = cmd->port;
		lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;

//		if (netif_running(dev))
//			smc_set_port(dev);

		ret = 0;
	}

	return ret;
}

/*
 * NOTE(review): strncpy does not guarantee NUL termination when the
 * source fills the buffer -- mainline later moved to strlcpy here.
 */
static void
smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strncpy(info->driver, CARDNAME, sizeof(info->driver));
	strncpy(info->version, version, sizeof(info->version));
	strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
}

/* restart autonegotiation (only meaningful with a real PHY) */
static int smc_ethtool_nwayreset(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	int ret = -EINVAL;

	if (lp->phy_type != 0) {
		spin_lock_irq(&lp->lock);
		ret = mii_nway_restart(&lp->mii);
		spin_unlock_irq(&lp->lock);
	}

	return ret;
}

static u32 smc_ethtool_getmsglevel(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	return lp->msg_enable;
}

static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	struct smc_local *lp = netdev_priv(dev);
	lp->msg_enable = level;
}

/*
 * Write one 16-bit word to the chip's serial EEPROM: load it into the
 * GP register, set the EEPROM address in the pointer register, then
 * trigger CTL_STORE and busy-wait (udelay) for completion.
 */
static int smc_write_eeprom_word(struct net_device *dev, u16 addr, u16 word)
{
	u16 ctl;
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	spin_lock_irq(&lp->lock);
	/* load word into GP register */
	SMC_SELECT_BANK(lp, 1);
	SMC_SET_GP(lp, word);
	/* set the address to put the data in EEPROM */
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_PTR(lp, addr);
	/* tell it to write */
	SMC_SELECT_BANK(lp, 1);
	ctl = SMC_GET_CTL(lp);
	SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_STORE));
	/* wait for it to finish */
	do {
		udelay(1);
	} while (SMC_GET_CTL(lp) & CTL_STORE);
	/* clean up */
	SMC_SET_CTL(lp, ctl);
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
	return 0;
}

/*
 * Read one 16-bit word from the chip's serial EEPROM via CTL_RELOAD
 * into the GP register; mirrors smc_write_eeprom_word() above.
 */
static int smc_read_eeprom_word(struct net_device *dev, u16 addr, u16 *word)
{
	u16 ctl;
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	spin_lock_irq(&lp->lock);
	/* set the EEPROM address to get the data from */
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_PTR(lp, addr | PTR_READ);
	/* tell it to load */
	SMC_SELECT_BANK(lp, 1);
	SMC_SET_GP(lp, 0xffff);	/* init to known */
	ctl = SMC_GET_CTL(lp);
	SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_RELOAD));
	/* wait for it to finish */
	do {
		udelay(1);
	} while (SMC_GET_CTL(lp) & CTL_RELOAD);
	/* read word from GP register */
	*word = SMC_GET_GP(lp);
	/* clean up */
	SMC_SET_CTL(lp, ctl);
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
	return 0;
}

/* EEPROM is 0x23 16-bit words */
static int smc_ethtool_geteeprom_len(struct net_device *dev)
{
	return 0x23 * 2;
}

/* copy @eeprom->len bytes of EEPROM content into @data, big-endian per word */
static int smc_ethtool_geteeprom(struct net_device *dev,
		struct ethtool_eeprom *eeprom, u8 *data)
{
	int i;
	int imax;

	DBG(1, "Reading %d bytes at %d(0x%x)\n",
		eeprom->len, eeprom->offset, eeprom->offset);
	imax = smc_ethtool_geteeprom_len(dev);
	for (i = 0; i < eeprom->len; i += 2) {
		int ret;
		u16 wbuf;
		int offset = i + eeprom->offset;
		if (offset > imax)
			break;
		ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf);
		if (ret != 0)
			return ret;
		DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
		data[i] = (wbuf >> 8) & 0xff;
		data[i+1] = wbuf & 0xff;
	}
	return 0;
}

/* write @eeprom->len bytes from @data into the EEPROM, big-endian per word */
static int smc_ethtool_seteeprom(struct net_device *dev,
		struct ethtool_eeprom *eeprom, u8 *data)
{
	int i;
	int imax;

	DBG(1, "Writing %d bytes to %d(0x%x)\n",
		eeprom->len, eeprom->offset, eeprom->offset);
	imax = smc_ethtool_geteeprom_len(dev);
	for (i = 0; i < eeprom->len; i += 2) {
		int ret;
		u16 wbuf;
		int offset = i + eeprom->offset;
		if (offset > imax)
			break;
		wbuf = (data[i] << 8) | data[i + 1];
		DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
		ret = smc_write_eeprom_word(dev, offset >> 1, wbuf);
		if (ret != 0)
			return ret;
	}
	return 0;
}


static const struct ethtool_ops smc_ethtool_ops = {
	.get_settings	= smc_ethtool_getsettings,
	.set_settings	= smc_ethtool_setsettings,
	.get_drvinfo	= smc_ethtool_getdrvinfo,

	.get_msglevel	= smc_ethtool_getmsglevel,
	.set_msglevel	= smc_ethtool_setmsglevel,
	.nway_reset	= smc_ethtool_nwayreset,
	.get_link	= ethtool_op_get_link,
	.get_eeprom_len = smc_ethtool_geteeprom_len,
	.get_eeprom	= smc_ethtool_geteeprom,
	.set_eeprom	= smc_ethtool_seteeprom,
};

static const struct net_device_ops smc_netdev_ops = {
	.ndo_open		= smc_open,
	.ndo_stop		= smc_close,
	.ndo_start_xmit		= smc_hard_start_xmit,
	.ndo_tx_timeout		= smc_timeout,
	.ndo_set_rx_mode	= smc_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address 	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= smc_poll_controller,
#endif
};

/*
 * smc_findirq
 *
 * This routine has a simple purpose -- make the SMC chip generate an
 * interrupt, so an auto-detect routine can detect it, and find the IRQ,
 */
/*
 * does this still work?
 *
 * I just deleted auto_irq.c, since it was never built...
 *   --jgarzik
 */
static int __devinit smc_findirq(struct smc_local *lp)
{
	void __iomem *ioaddr = lp->base;
	int timeout = 20;
	unsigned long cookie;

	DBG(2, "%s: %s\n", CARDNAME, __func__);

	cookie = probe_irq_on();

	/*
	 * What I try to do here is trigger an ALLOC_INT. This is done
	 * by allocating a small chunk of memory, which will give an interrupt
	 * when done.
	 */
	/* enable ALLOCation interrupts ONLY */
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_INT_MASK(lp, IM_ALLOC_INT);

	/*
	 * Allocate 512 bytes of memory.  Note that the chip was just
	 * reset so all the memory is available
	 */
	SMC_SET_MMU_CMD(lp, MC_ALLOC | 1);

	/*
	 * Wait until positive that the interrupt has been generated
	 */
	do {
		int int_status;
		udelay(10);
		int_status = SMC_GET_INT(lp);
		if (int_status & IM_ALLOC_INT)
			break;		/* got the interrupt */
	} while (--timeout);

	/*
	 * there is really nothing that I can do here if timeout fails,
	 * as autoirq_report will return a 0 anyway, which is what I
	 * want in this case.   Plus, the clean up is needed in both
	 * cases.
	 */

	/* and disable all interrupts again */
	SMC_SET_INT_MASK(lp, 0);

	/* and return what I found */
	return probe_irq_off(cookie);
}

/*
 * Function: smc_probe(unsigned long ioaddr)
 *
 * Purpose:
 *	Tests to see if a given ioaddr points to an SMC91x chip.
 *	Returns a 0 on success
 *
 * Algorithm:
 *	(1) see if the high byte of BANK_SELECT is 0x33
 *	(2) compare the ioaddr with the base register's address
 *	(3) see if I recognize the chip ID in the appropriate register
 *
 * Here I do typical initialization tasks.
 *
 * o  Initialize the structure if needed
 * o  print out my vanity message if not done so already
 * o  print out what type of hardware is detected
 * o  print out the ethernet address
 * o  find the IRQ
 * o  set up my private data
 * o  configure the dev structure with my subroutines
 * o  actually GRAB the irq.
* o GRAB the region */ static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr, unsigned long irq_flags) { struct smc_local *lp = netdev_priv(dev); static int version_printed = 0; int retval; unsigned int val, revision_register; const char *version_string; DBG(2, "%s: %s\n", CARDNAME, __func__); /* First, see if the high byte is 0x33 */ val = SMC_CURRENT_BANK(lp); DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val); if ((val & 0xFF00) != 0x3300) { if ((val & 0xFF) == 0x33) { printk(KERN_WARNING "%s: Detected possible byte-swapped interface" " at IOADDR %p\n", CARDNAME, ioaddr); } retval = -ENODEV; goto err_out; } /* * The above MIGHT indicate a device, but I need to write to * further test this. */ SMC_SELECT_BANK(lp, 0); val = SMC_CURRENT_BANK(lp); if ((val & 0xFF00) != 0x3300) { retval = -ENODEV; goto err_out; } /* * well, we've already written once, so hopefully another * time won't hurt. This time, I need to switch the bank * register to bank 1, so I can access the base address * register */ SMC_SELECT_BANK(lp, 1); val = SMC_GET_BASE(lp); val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT; if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) { printk("%s: IOADDR %p doesn't match configuration (%x).\n", CARDNAME, ioaddr, val); } /* * check if the revision register is something that I * recognize. These might need to be added to later, * as future revisions could be added. */ SMC_SELECT_BANK(lp, 3); revision_register = SMC_GET_REV(lp); DBG(2, "%s: revision = 0x%04x\n", CARDNAME, revision_register); version_string = chip_ids[ (revision_register >> 4) & 0xF]; if (!version_string || (revision_register & 0xff00) != 0x3300) { /* I don't recognize this chip, so... */ printk("%s: IO %p: Unrecognized revision register 0x%04x" ", Contact author.\n", CARDNAME, ioaddr, revision_register); retval = -ENODEV; goto err_out; } /* At this point I'll assume that the chip is an SMC91x. 
*/ if (version_printed++ == 0) printk("%s", version); /* fill in some of the fields */ dev->base_addr = (unsigned long)ioaddr; lp->base = ioaddr; lp->version = revision_register & 0xff; spin_lock_init(&lp->lock); /* Get the MAC address */ SMC_SELECT_BANK(lp, 1); SMC_GET_MAC_ADDR(lp, dev->dev_addr); /* now, reset the chip, and put it into a known state */ smc_reset(dev); /* * If dev->irq is 0, then the device has to be banged on to see * what the IRQ is. * * This banging doesn't always detect the IRQ, for unknown reasons. * a workaround is to reset the chip and try again. * * Interestingly, the DOS packet driver *SETS* the IRQ on the card to * be what is requested on the command line. I don't do that, mostly * because the card that I have uses a non-standard method of accessing * the IRQs, and because this _should_ work in most configurations. * * Specifying an IRQ is done with the assumption that the user knows * what (s)he is doing. No checking is done!!!! */ if (dev->irq < 1) { int trials; trials = 3; while (trials--) { dev->irq = smc_findirq(lp); if (dev->irq) break; /* kick the card and try again */ smc_reset(dev); } } if (dev->irq == 0) { printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n", dev->name); retval = -ENODEV; goto err_out; } dev->irq = irq_canonicalize(dev->irq); /* Fill in the fields of the device structure with ethernet values. */ ether_setup(dev); dev->watchdog_timeo = msecs_to_jiffies(watchdog); dev->netdev_ops = &smc_netdev_ops; dev->ethtool_ops = &smc_ethtool_ops; tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); INIT_WORK(&lp->phy_configure, smc_phy_configure); lp->dev = dev; lp->mii.phy_id_mask = 0x1f; lp->mii.reg_num_mask = 0x1f; lp->mii.force_media = 0; lp->mii.full_duplex = 0; lp->mii.dev = dev; lp->mii.mdio_read = smc_phy_read; lp->mii.mdio_write = smc_phy_write; /* * Locate the phy, if any. 
*/ if (lp->version >= (CHIP_91100 << 4)) smc_phy_detect(dev); /* then shut everything down to save power */ smc_shutdown(dev); smc_phy_powerdown(dev); /* Set default parameters */ lp->msg_enable = NETIF_MSG_LINK; lp->ctl_rfduplx = 0; lp->ctl_rspeed = 10; if (lp->version >= (CHIP_91100 << 4)) { lp->ctl_rfduplx = 1; lp->ctl_rspeed = 100; } /* Grab the IRQ */ retval = request_irq(dev->irq, smc_interrupt, irq_flags, dev->name, dev); if (retval) goto err_out; #ifdef CONFIG_ARCH_PXA # ifdef SMC_USE_PXA_DMA lp->cfg.flags |= SMC91X_USE_DMA; # endif if (lp->cfg.flags & SMC91X_USE_DMA) { int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW, smc_pxa_dma_irq, NULL); if (dma >= 0) dev->dma = dma; } #endif retval = register_netdev(dev); if (retval == 0) { /* now, print out the card info, in a short format.. */ printk("%s: %s (rev %d) at %p IRQ %d", dev->name, version_string, revision_register & 0x0f, lp->base, dev->irq); if (dev->dma != (unsigned char)-1) printk(" DMA %d", dev->dma); printk("%s%s\n", lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "", THROTTLE_TX_PKTS ? " [throttle_tx]" : ""); if (!is_valid_ether_addr(dev->dev_addr)) { printk("%s: Invalid ethernet MAC address. 
Please " "set using ifconfig\n", dev->name); } else { /* Print the Ethernet address */ printk("%s: Ethernet addr: %pM\n", dev->name, dev->dev_addr); } if (lp->phy_type == 0) { PRINTK("%s: No PHY found\n", dev->name); } else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) { PRINTK("%s: PHY LAN83C183 (LAN91C111 Internal)\n", dev->name); } else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) { PRINTK("%s: PHY LAN83C180\n", dev->name); } } err_out: #ifdef CONFIG_ARCH_PXA if (retval && dev->dma != (unsigned char)-1) pxa_free_dma(dev->dma); #endif return retval; } static int smc_enable_device(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct smc_local *lp = netdev_priv(ndev); unsigned long flags; unsigned char ecor, ecsr; void __iomem *addr; struct resource * res; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); if (!res) return 0; /* * Map the attribute space. This is overkill, but clean. */ addr = ioremap(res->start, ATTRIB_SIZE); if (!addr) return -ENOMEM; /* * Reset the device. We must disable IRQs around this * since a reset causes the IRQ line become active. */ local_irq_save(flags); ecor = readb(addr + (ECOR << SMC_IO_SHIFT)) & ~ECOR_RESET; writeb(ecor | ECOR_RESET, addr + (ECOR << SMC_IO_SHIFT)); readb(addr + (ECOR << SMC_IO_SHIFT)); /* * Wait 100us for the chip to reset. */ udelay(100); /* * The device will ignore all writes to the enable bit while * reset is asserted, even if the reset bit is cleared in the * same write. Must clear reset first, then enable the device. */ writeb(ecor, addr + (ECOR << SMC_IO_SHIFT)); writeb(ecor | ECOR_ENABLE, addr + (ECOR << SMC_IO_SHIFT)); /* * Set the appropriate byte/word mode. */ ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8; if (!SMC_16BIT(lp)) ecsr |= ECSR_IOIS8; writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT)); local_irq_restore(flags); iounmap(addr); /* * Wait for the chip to wake up. 
We could poll the control * register in the main register space, but that isn't mapped * yet. We know this is going to take 750us. */ msleep(1); return 0; } static int smc_request_attrib(struct platform_device *pdev, struct net_device *ndev) { struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); struct smc_local *lp __maybe_unused = netdev_priv(ndev); if (!res) return 0; if (!request_mem_region(res->start, ATTRIB_SIZE, CARDNAME)) return -EBUSY; return 0; } static void smc_release_attrib(struct platform_device *pdev, struct net_device *ndev) { struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); struct smc_local *lp __maybe_unused = netdev_priv(ndev); if (res) release_mem_region(res->start, ATTRIB_SIZE); } static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) { if (SMC_CAN_USE_DATACS) { struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); struct smc_local *lp = netdev_priv(ndev); if (!res) return; if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) { printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME); return; } lp->datacs = ioremap(res->start, SMC_DATA_EXTENT); } } static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) { if (SMC_CAN_USE_DATACS) { struct smc_local *lp = netdev_priv(ndev); struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); if (lp->datacs) iounmap(lp->datacs); lp->datacs = NULL; if (res) release_mem_region(res->start, SMC_DATA_EXTENT); } } /* * smc_init(void) * Input parameters: * dev->base_addr == 0, try to find all possible locations * dev->base_addr > 0x1ff, this is the address to check * dev->base_addr == <anything else>, return failure code * * Output: * 0 --> there is a device * anything else, error */ static int __devinit smc_drv_probe(struct platform_device *pdev) { struct 
smc91x_platdata *pd = pdev->dev.platform_data; struct smc_local *lp; struct net_device *ndev; struct resource *res, *ires; unsigned int __iomem *addr; unsigned long irq_flags = SMC_IRQ_FLAGS; int ret; ndev = alloc_etherdev(sizeof(struct smc_local)); if (!ndev) { ret = -ENOMEM; goto out; } SET_NETDEV_DEV(ndev, &pdev->dev); /* get configuration from platform data, only allow use of * bus width if both SMC_CAN_USE_xxx and SMC91X_USE_xxx are set. */ lp = netdev_priv(ndev); if (pd) { memcpy(&lp->cfg, pd, sizeof(lp->cfg)); lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); } else { lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0; lp->cfg.flags |= (nowait) ? SMC91X_NOWAIT : 0; } if (!lp->cfg.leda && !lp->cfg.ledb) { lp->cfg.leda = RPC_LSA_DEFAULT; lp->cfg.ledb = RPC_LSB_DEFAULT; } ndev->dma = (unsigned char)-1; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); if (!res) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENODEV; goto out_free_netdev; } if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) { ret = -EBUSY; goto out_free_netdev; } ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!ires) { ret = -ENODEV; goto out_release_io; } ndev->irq = ires->start; if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) irq_flags = ires->flags & IRQF_TRIGGER_MASK; ret = smc_request_attrib(pdev, ndev); if (ret) goto out_release_io; #if defined(CONFIG_SA1100_ASSABET) neponset_ncr_set(NCR_ENET_OSC_EN); #endif platform_set_drvdata(pdev, ndev); ret = smc_enable_device(pdev); if (ret) goto out_release_attrib; addr = ioremap(res->start, SMC_IO_EXTENT); if (!addr) { ret = -ENOMEM; goto out_release_attrib; } #ifdef CONFIG_ARCH_PXA { struct smc_local *lp = netdev_priv(ndev); lp->device = &pdev->dev; lp->physaddr = res->start; } #endif ret = smc_probe(ndev, addr, irq_flags); if (ret != 0) goto 
out_iounmap; smc_request_datacs(pdev, ndev); return 0; out_iounmap: platform_set_drvdata(pdev, NULL); iounmap(addr); out_release_attrib: smc_release_attrib(pdev, ndev); out_release_io: release_mem_region(res->start, SMC_IO_EXTENT); out_free_netdev: free_netdev(ndev); out: printk("%s: not found (%d).\n", CARDNAME, ret); return ret; } static int __devexit smc_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct smc_local *lp = netdev_priv(ndev); struct resource *res; platform_set_drvdata(pdev, NULL); unregister_netdev(ndev); free_irq(ndev->irq, ndev); #ifdef CONFIG_ARCH_PXA if (ndev->dma != (unsigned char)-1) pxa_free_dma(ndev->dma); #endif iounmap(lp->base); smc_release_datacs(pdev,ndev); smc_release_attrib(pdev,ndev); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); if (!res) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, SMC_IO_EXTENT); free_netdev(ndev); return 0; } static int smc_drv_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); if (ndev) { if (netif_running(ndev)) { netif_device_detach(ndev); smc_shutdown(ndev); smc_phy_powerdown(ndev); } } return 0; } static int smc_drv_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); if (ndev) { struct smc_local *lp = netdev_priv(ndev); smc_enable_device(pdev); if (netif_running(ndev)) { smc_reset(ndev); smc_enable(ndev); if (lp->phy_type != 0) smc_phy_configure(&lp->phy_configure); netif_device_attach(ndev); } } return 0; } #ifdef CONFIG_OF static const struct of_device_id smc91x_match[] = { { .compatible = "smsc,lan91c94", }, { .compatible = "smsc,lan91c111", }, {}, }; MODULE_DEVICE_TABLE(of, smc91x_match); #else #define smc91x_match NULL #endif static struct dev_pm_ops smc_drv_pm_ops = { .suspend = smc_drv_suspend, .resume = 
smc_drv_resume, }; static struct platform_driver smc_driver = { .probe = smc_drv_probe, .remove = __devexit_p(smc_drv_remove), .driver = { .name = CARDNAME, .owner = THIS_MODULE, .pm = &smc_drv_pm_ops, .of_match_table = smc91x_match, }, }; module_platform_driver(smc_driver);
gpl-2.0
ausdim/GE-Edition-I9505-jfltexx-new
net/ipv4/tcp_memcontrol.c
4773
6740
#include <net/tcp.h> #include <net/tcp_memcontrol.h> #include <net/sock.h> #include <net/ip.h> #include <linux/nsproxy.h> #include <linux/memcontrol.h> #include <linux/module.h> static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft); static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft, const char *buffer); static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event); static struct cftype tcp_files[] = { { .name = "kmem.tcp.limit_in_bytes", .write_string = tcp_cgroup_write, .read_u64 = tcp_cgroup_read, .private = RES_LIMIT, }, { .name = "kmem.tcp.usage_in_bytes", .read_u64 = tcp_cgroup_read, .private = RES_USAGE, }, { .name = "kmem.tcp.failcnt", .private = RES_FAILCNT, .trigger = tcp_cgroup_reset, .read_u64 = tcp_cgroup_read, }, { .name = "kmem.tcp.max_usage_in_bytes", .private = RES_MAX_USAGE, .trigger = tcp_cgroup_reset, .read_u64 = tcp_cgroup_read, }, }; static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto) { return container_of(cg_proto, struct tcp_memcontrol, cg_proto); } static void memcg_tcp_enter_memory_pressure(struct sock *sk) { if (sk->sk_cgrp->memory_pressure) *sk->sk_cgrp->memory_pressure = 1; } EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure); int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss) { /* * The root cgroup does not use res_counters, but rather, * rely on the data already collected by the network * subsystem */ struct res_counter *res_parent = NULL; struct cg_proto *cg_proto, *parent_cg; struct tcp_memcontrol *tcp; struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); struct mem_cgroup *parent = parent_mem_cgroup(memcg); struct net *net = current->nsproxy->net_ns; cg_proto = tcp_prot.proto_cgroup(memcg); if (!cg_proto) goto create_files; tcp = tcp_from_cgproto(cg_proto); tcp->tcp_prot_mem[0] = net->ipv4.sysctl_tcp_mem[0]; tcp->tcp_prot_mem[1] = net->ipv4.sysctl_tcp_mem[1]; tcp->tcp_prot_mem[2] = net->ipv4.sysctl_tcp_mem[2]; tcp->tcp_memory_pressure = 0; parent_cg = 
tcp_prot.proto_cgroup(parent); if (parent_cg) res_parent = parent_cg->memory_allocated; res_counter_init(&tcp->tcp_memory_allocated, res_parent); percpu_counter_init(&tcp->tcp_sockets_allocated, 0); cg_proto->enter_memory_pressure = memcg_tcp_enter_memory_pressure; cg_proto->memory_pressure = &tcp->tcp_memory_pressure; cg_proto->sysctl_mem = tcp->tcp_prot_mem; cg_proto->memory_allocated = &tcp->tcp_memory_allocated; cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated; cg_proto->memcg = memcg; create_files: return cgroup_add_files(cgrp, ss, tcp_files, ARRAY_SIZE(tcp_files)); } EXPORT_SYMBOL(tcp_init_cgroup); void tcp_destroy_cgroup(struct cgroup *cgrp) { struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); struct cg_proto *cg_proto; struct tcp_memcontrol *tcp; u64 val; cg_proto = tcp_prot.proto_cgroup(memcg); if (!cg_proto) return; tcp = tcp_from_cgproto(cg_proto); percpu_counter_destroy(&tcp->tcp_sockets_allocated); val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT); if (val != RESOURCE_MAX) static_key_slow_dec(&memcg_socket_limit_enabled); } EXPORT_SYMBOL(tcp_destroy_cgroup); static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) { struct net *net = current->nsproxy->net_ns; struct tcp_memcontrol *tcp; struct cg_proto *cg_proto; u64 old_lim; int i; int ret; cg_proto = tcp_prot.proto_cgroup(memcg); if (!cg_proto) return -EINVAL; if (val > RESOURCE_MAX) val = RESOURCE_MAX; tcp = tcp_from_cgproto(cg_proto); old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT); ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val); if (ret) return ret; for (i = 0; i < 3; i++) tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT, net->ipv4.sysctl_tcp_mem[i]); if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX) static_key_slow_dec(&memcg_socket_limit_enabled); else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX) static_key_slow_inc(&memcg_socket_limit_enabled); return 0; } static int tcp_cgroup_write(struct cgroup *cont, struct 
cftype *cft, const char *buffer) { struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); unsigned long long val; int ret = 0; switch (cft->private) { case RES_LIMIT: /* see memcontrol.c */ ret = res_counter_memparse_write_strategy(buffer, &val); if (ret) break; ret = tcp_update_limit(memcg, val); break; default: ret = -EINVAL; break; } return ret; } static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val) { struct tcp_memcontrol *tcp; struct cg_proto *cg_proto; cg_proto = tcp_prot.proto_cgroup(memcg); if (!cg_proto) return default_val; tcp = tcp_from_cgproto(cg_proto); return res_counter_read_u64(&tcp->tcp_memory_allocated, type); } static u64 tcp_read_usage(struct mem_cgroup *memcg) { struct tcp_memcontrol *tcp; struct cg_proto *cg_proto; cg_proto = tcp_prot.proto_cgroup(memcg); if (!cg_proto) return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT; tcp = tcp_from_cgproto(cg_proto); return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE); } static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft) { struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); u64 val; switch (cft->private) { case RES_LIMIT: val = tcp_read_stat(memcg, RES_LIMIT, RESOURCE_MAX); break; case RES_USAGE: val = tcp_read_usage(memcg); break; case RES_FAILCNT: case RES_MAX_USAGE: val = tcp_read_stat(memcg, cft->private, 0); break; default: BUG(); } return val; } static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event) { struct mem_cgroup *memcg; struct tcp_memcontrol *tcp; struct cg_proto *cg_proto; memcg = mem_cgroup_from_cont(cont); cg_proto = tcp_prot.proto_cgroup(memcg); if (!cg_proto) return 0; tcp = tcp_from_cgproto(cg_proto); switch (event) { case RES_MAX_USAGE: res_counter_reset_max(&tcp->tcp_memory_allocated); break; case RES_FAILCNT: res_counter_reset_failcnt(&tcp->tcp_memory_allocated); break; } return 0; } unsigned long long tcp_max_memory(const struct mem_cgroup *memcg) { struct tcp_memcontrol *tcp; struct cg_proto 
*cg_proto; cg_proto = tcp_prot.proto_cgroup((struct mem_cgroup *)memcg); if (!cg_proto) return 0; tcp = tcp_from_cgproto(cg_proto); return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT); } void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx) { struct tcp_memcontrol *tcp; struct cg_proto *cg_proto; cg_proto = tcp_prot.proto_cgroup(memcg); if (!cg_proto) return; tcp = tcp_from_cgproto(cg_proto); tcp->tcp_prot_mem[idx] = val; }
gpl-2.0
TeamNDVRu/htc-kernel-endeavoru
drivers/input/joystick/stinger.c
9893
5817
/* * Copyright (c) 2000-2001 Vojtech Pavlik * Copyright (c) 2000 Mark Fletcher */ /* * Gravis Stinger gamepad driver for Linux */ /* * This program is free warftware; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #define DRIVER_DESC "Gravis Stinger gamepad driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Constants. */ #define STINGER_MAX_LENGTH 8 /* * Per-Stinger data. */ struct stinger { struct input_dev *dev; int idx; unsigned char data[STINGER_MAX_LENGTH]; char phys[32]; }; /* * stinger_process_packet() decodes packets the driver receives from the * Stinger. It updates the data accordingly. 
*/ static void stinger_process_packet(struct stinger *stinger) { struct input_dev *dev = stinger->dev; unsigned char *data = stinger->data; if (!stinger->idx) return; input_report_key(dev, BTN_A, ((data[0] & 0x20) >> 5)); input_report_key(dev, BTN_B, ((data[0] & 0x10) >> 4)); input_report_key(dev, BTN_C, ((data[0] & 0x08) >> 3)); input_report_key(dev, BTN_X, ((data[0] & 0x04) >> 2)); input_report_key(dev, BTN_Y, ((data[3] & 0x20) >> 5)); input_report_key(dev, BTN_Z, ((data[3] & 0x10) >> 4)); input_report_key(dev, BTN_TL, ((data[3] & 0x08) >> 3)); input_report_key(dev, BTN_TR, ((data[3] & 0x04) >> 2)); input_report_key(dev, BTN_SELECT, ((data[3] & 0x02) >> 1)); input_report_key(dev, BTN_START, (data[3] & 0x01)); input_report_abs(dev, ABS_X, (data[1] & 0x3F) - ((data[0] & 0x01) << 6)); input_report_abs(dev, ABS_Y, ((data[0] & 0x02) << 5) - (data[2] & 0x3F)); input_sync(dev); return; } /* * stinger_interrupt() is called by the low level driver when characters * are ready for us. We then buffer them for further processing, or call the * packet processing routine. */ static irqreturn_t stinger_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct stinger *stinger = serio_get_drvdata(serio); /* All Stinger packets are 4 bytes */ if (stinger->idx < STINGER_MAX_LENGTH) stinger->data[stinger->idx++] = data; if (stinger->idx == 4) { stinger_process_packet(stinger); stinger->idx = 0; } return IRQ_HANDLED; } /* * stinger_disconnect() is the opposite of stinger_connect() */ static void stinger_disconnect(struct serio *serio) { struct stinger *stinger = serio_get_drvdata(serio); serio_close(serio); serio_set_drvdata(serio, NULL); input_unregister_device(stinger->dev); kfree(stinger); } /* * stinger_connect() is the routine that is called when someone adds a * new serio device that supports Stinger protocol and registers it as * an input device. 
*/ static int stinger_connect(struct serio *serio, struct serio_driver *drv) { struct stinger *stinger; struct input_dev *input_dev; int err = -ENOMEM; stinger = kmalloc(sizeof(struct stinger), GFP_KERNEL); input_dev = input_allocate_device(); if (!stinger || !input_dev) goto fail1; stinger->dev = input_dev; snprintf(stinger->phys, sizeof(stinger->phys), "%s/serio0", serio->phys); input_dev->name = "Gravis Stinger"; input_dev->phys = stinger->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_STINGER; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_A)] = BIT_MASK(BTN_A) | BIT_MASK(BTN_B) | BIT_MASK(BTN_C) | BIT_MASK(BTN_X) | BIT_MASK(BTN_Y) | BIT_MASK(BTN_Z) | BIT_MASK(BTN_TL) | BIT_MASK(BTN_TR) | BIT_MASK(BTN_START) | BIT_MASK(BTN_SELECT); input_set_abs_params(input_dev, ABS_X, -64, 64, 0, 4); input_set_abs_params(input_dev, ABS_Y, -64, 64, 0, 4); serio_set_drvdata(serio, stinger); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(stinger->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(stinger); return err; } /* * The serio driver structure. */ static struct serio_device_id stinger_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_STINGER, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, stinger_serio_ids); static struct serio_driver stinger_drv = { .driver = { .name = "stinger", }, .description = DRIVER_DESC, .id_table = stinger_serio_ids, .interrupt = stinger_interrupt, .connect = stinger_connect, .disconnect = stinger_disconnect, }; /* * The functions for inserting/removing us as a module. 
*/ static int __init stinger_init(void) { return serio_register_driver(&stinger_drv); } static void __exit stinger_exit(void) { serio_unregister_driver(&stinger_drv); } module_init(stinger_init); module_exit(stinger_exit);
gpl-2.0
Hardslog/nvidia-tegra
drivers/input/touchscreen/inexio.c
9893
4900
/* * iNexio serial touchscreen driver * * Copyright (c) 2008 Richard Lemon * Based on the mtouch driver (c) Vojtech Pavlik and Dan Streetman * */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ /* * 2008/06/19 Richard Lemon <richard@codelemon.com> * Copied mtouch.c and edited for iNexio protocol */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #define DRIVER_DESC "iNexio serial touchscreen driver" MODULE_AUTHOR("Richard Lemon <richard@codelemon.com>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Definitions & global arrays. */ #define INEXIO_FORMAT_TOUCH_BIT 0x01 #define INEXIO_FORMAT_LENGTH 5 #define INEXIO_RESPONSE_BEGIN_BYTE 0x80 /* todo: check specs for max length of all responses */ #define INEXIO_MAX_LENGTH 16 #define INEXIO_MIN_XC 0 #define INEXIO_MAX_XC 0x3fff #define INEXIO_MIN_YC 0 #define INEXIO_MAX_YC 0x3fff #define INEXIO_GET_XC(data) (((data[1])<<7) | data[2]) #define INEXIO_GET_YC(data) (((data[3])<<7) | data[4]) #define INEXIO_GET_TOUCHED(data) (INEXIO_FORMAT_TOUCH_BIT & data[0]) /* * Per-touchscreen data. 
*/ struct inexio { struct input_dev *dev; struct serio *serio; int idx; unsigned char data[INEXIO_MAX_LENGTH]; char phys[32]; }; static void inexio_process_data(struct inexio *pinexio) { struct input_dev *dev = pinexio->dev; if (INEXIO_FORMAT_LENGTH == ++pinexio->idx) { input_report_abs(dev, ABS_X, INEXIO_GET_XC(pinexio->data)); input_report_abs(dev, ABS_Y, INEXIO_GET_YC(pinexio->data)); input_report_key(dev, BTN_TOUCH, INEXIO_GET_TOUCHED(pinexio->data)); input_sync(dev); pinexio->idx = 0; } } static irqreturn_t inexio_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct inexio* pinexio = serio_get_drvdata(serio); pinexio->data[pinexio->idx] = data; if (INEXIO_RESPONSE_BEGIN_BYTE&pinexio->data[0]) inexio_process_data(pinexio); else printk(KERN_DEBUG "inexio.c: unknown/unsynchronized data from device, byte %x\n",pinexio->data[0]); return IRQ_HANDLED; } /* * inexio_disconnect() is the opposite of inexio_connect() */ static void inexio_disconnect(struct serio *serio) { struct inexio* pinexio = serio_get_drvdata(serio); input_get_device(pinexio->dev); input_unregister_device(pinexio->dev); serio_close(serio); serio_set_drvdata(serio, NULL); input_put_device(pinexio->dev); kfree(pinexio); } /* * inexio_connect() is the routine that is called when someone adds a * new serio device that supports iNexio protocol and registers it as * an input device. This is usually accomplished using inputattach. 
*/ static int inexio_connect(struct serio *serio, struct serio_driver *drv) { struct inexio *pinexio; struct input_dev *input_dev; int err; pinexio = kzalloc(sizeof(struct inexio), GFP_KERNEL); input_dev = input_allocate_device(); if (!pinexio || !input_dev) { err = -ENOMEM; goto fail1; } pinexio->serio = serio; pinexio->dev = input_dev; snprintf(pinexio->phys, sizeof(pinexio->phys), "%s/input0", serio->phys); input_dev->name = "iNexio Serial TouchScreen"; input_dev->phys = pinexio->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_INEXIO; input_dev->id.product = 0; input_dev->id.version = 0x0001; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(pinexio->dev, ABS_X, INEXIO_MIN_XC, INEXIO_MAX_XC, 0, 0); input_set_abs_params(pinexio->dev, ABS_Y, INEXIO_MIN_YC, INEXIO_MAX_YC, 0, 0); serio_set_drvdata(serio, pinexio); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(pinexio->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(pinexio); return err; } /* * The serio driver structure. */ static struct serio_device_id inexio_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_INEXIO, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, inexio_serio_ids); static struct serio_driver inexio_drv = { .driver = { .name = "inexio", }, .description = DRIVER_DESC, .id_table = inexio_serio_ids, .interrupt = inexio_interrupt, .connect = inexio_connect, .disconnect = inexio_disconnect, }; /* * The functions for inserting/removing us as a module. */ static int __init inexio_init(void) { return serio_register_driver(&inexio_drv); } static void __exit inexio_exit(void) { serio_unregister_driver(&inexio_drv); } module_init(inexio_init); module_exit(inexio_exit);
gpl-2.0
drgroovestarr/kernel_samsung_manta
drivers/ptp/ptp_sysfs.c
10405
5915
/*
 * PTP 1588 clock support - sysfs interface.
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/capability.h>

#include "ptp_private.h"

/* Show the name string of the clock's driver. */
static ssize_t clock_name_show(struct device *dev,
			       struct device_attribute *attr, char *page)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);
	return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name);
}

/* Generate a sysfs show routine for an integer field of ptp_clock_info. */
#define PTP_SHOW_INT(name)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct ptp_clock *ptp = dev_get_drvdata(dev);			\
	return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->name);	\
}

PTP_SHOW_INT(max_adj);
PTP_SHOW_INT(n_alarm);
PTP_SHOW_INT(n_ext_ts);
PTP_SHOW_INT(n_per_out);
PTP_SHOW_INT(pps);

/* Build a read-only device attribute bound to _var##_show above. */
#define PTP_RO_ATTR(_var, _name) {				\
	.attr	= { .name = __stringify(_name), .mode = 0444 },	\
	.show	= _var##_show,					\
}

/* Attributes created unconditionally for every PTP clock device. */
struct device_attribute ptp_dev_attrs[] = {
	PTP_RO_ATTR(clock_name,	clock_name),
	PTP_RO_ATTR(max_adj,	max_adjustment),
	PTP_RO_ATTR(n_alarm,	n_alarms),
	PTP_RO_ATTR(n_ext_ts,	n_external_timestamps),
	PTP_RO_ATTR(n_per_out,	n_periodic_outputs),
	PTP_RO_ATTR(pps,	pps_available),
	__ATTR_NULL,
};

/*
 * Parse "<channel index> <enable flag>" and forward the external
 * timestamp enable/disable request to the clock driver.
 */
static ssize_t extts_enable_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_clock_request req = { .type = PTP_CLK_REQ_EXTTS };
	int cnt, enable;
	int err = -EINVAL;

	cnt = sscanf(buf, "%u %d", &req.extts.index, &enable);
	if (cnt != 2)
		goto out;
	if (req.extts.index >= ops->n_ext_ts)
		goto out;

	err = ops->enable(ops, &req, enable ? 1 : 0);
	if (err)
		goto out;

	return count;
out:
	return err;
}

/*
 * Pop one external timestamp event from the clock's FIFO (if any) and
 * print it as "<index> <sec> <nsec>".  Returns 0 bytes when empty.
 */
static ssize_t extts_fifo_show(struct device *dev,
			       struct device_attribute *attr, char *page)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);
	struct timestamp_event_queue *queue = &ptp->tsevq;
	struct ptp_extts_event event;
	unsigned long flags;
	size_t qcnt;
	int cnt = 0;

	memset(&event, 0, sizeof(event));

	/* serialize readers of the FIFO against each other */
	if (mutex_lock_interruptible(&ptp->tsevq_mux))
		return -ERESTARTSYS;

	/* the spinlock protects the queue against the event producer */
	spin_lock_irqsave(&queue->lock, flags);
	qcnt = queue_cnt(queue);
	if (qcnt) {
		event = queue->buf[queue->head];
		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
	}
	spin_unlock_irqrestore(&queue->lock, flags);

	if (!qcnt)
		goto out;

	cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n",
		       event.index, event.t.sec, event.t.nsec);
out:
	mutex_unlock(&ptp->tsevq_mux);
	return cnt;
}

/*
 * Parse "<index> <start sec> <start nsec> <period sec> <period nsec>"
 * and program a periodic output; a zero period disables the output.
 */
static ssize_t period_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_clock_request req = { .type = PTP_CLK_REQ_PEROUT };
	int cnt, enable, err = -EINVAL;

	cnt = sscanf(buf, "%u %lld %u %lld %u",
		     &req.perout.index,
		     &req.perout.start.sec,
		     &req.perout.start.nsec,
		     &req.perout.period.sec,
		     &req.perout.period.nsec);
	if (cnt != 5)
		goto out;
	if (req.perout.index >= ops->n_per_out)
		goto out;

	enable = req.perout.period.sec || req.perout.period.nsec;
	err = ops->enable(ops, &req, enable);
	if (err)
		goto out;

	return count;
out:
	return err;
}

/*
 * Enable or disable PPS event delivery from this clock to the kernel.
 * Requires CAP_SYS_TIME since PPS can steer system time.
 */
static ssize_t pps_enable_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS };
	int cnt, enable;
	int err = -EINVAL;

	if (!capable(CAP_SYS_TIME))
		return -EPERM;

	cnt = sscanf(buf, "%d", &enable);
	if (cnt != 1)
		goto out;

	err = ops->enable(ops, &req, enable ? 1 : 0);
	if (err)
		goto out;

	return count;
out:
	return err;
}

static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
static DEVICE_ATTR(fifo,         0444, extts_fifo_show, NULL);
static DEVICE_ATTR(period,       0220, NULL, period_store);
static DEVICE_ATTR(pps_enable,   0220, NULL, pps_enable_store);

/* Remove the capability-dependent attributes created at populate time. */
int ptp_cleanup_sysfs(struct ptp_clock *ptp)
{
	struct device *dev = ptp->dev;
	struct ptp_clock_info *info = ptp->info;

	if (info->n_ext_ts) {
		device_remove_file(dev, &dev_attr_extts_enable);
		device_remove_file(dev, &dev_attr_fifo);
	}
	if (info->n_per_out)
		device_remove_file(dev, &dev_attr_period);

	if (info->pps)
		device_remove_file(dev, &dev_attr_pps_enable);

	return 0;
}

/*
 * Create the optional attributes, depending on which capabilities the
 * clock driver advertises; unwinds everything already created on error.
 */
int ptp_populate_sysfs(struct ptp_clock *ptp)
{
	struct device *dev = ptp->dev;
	struct ptp_clock_info *info = ptp->info;
	int err;

	if (info->n_ext_ts) {
		err = device_create_file(dev, &dev_attr_extts_enable);
		if (err)
			goto out1;
		err = device_create_file(dev, &dev_attr_fifo);
		if (err)
			goto out2;
	}
	if (info->n_per_out) {
		err = device_create_file(dev, &dev_attr_period);
		if (err)
			goto out3;
	}
	if (info->pps) {
		err = device_create_file(dev, &dev_attr_pps_enable);
		if (err)
			goto out4;
	}
	return 0;
out4:
	if (info->n_per_out)
		device_remove_file(dev, &dev_attr_period);
out3:
	if (info->n_ext_ts)
		device_remove_file(dev, &dev_attr_fifo);
out2:
	if (info->n_ext_ts)
		device_remove_file(dev, &dev_attr_extts_enable);
out1:
	return err;
}
gpl-2.0
gchild320/shamu-old
arch/alpha/kernel/irq_i8259.c
11941
3962
/*
 *	linux/arch/alpha/kernel/irq_i8259.c
 *
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes.
 *
 * Started hacking from linux-2.3.30pre6/arch/i386/kernel/i8259.c.
 */

#include <linux/init.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/interrupt.h>

#include <asm/io.h>

#include "proto.h"
#include "irq_impl.h"


/* Note mask bit is true for DISABLED irqs.  */
static unsigned int cached_irq_mask = 0xffff;
static DEFINE_SPINLOCK(i8259_irq_lock);

/*
 * Write the relevant half of the cached mask to the PIC that owns the
 * irq: master (port 0x21) for irqs 0-7, slave (port 0xA1) for 8-15.
 * Caller must hold i8259_irq_lock.
 */
static inline void
i8259_update_irq_hw(unsigned int irq, unsigned long mask)
{
	int port = 0x21;
	if (irq & 8) mask >>= 8;
	if (irq & 8) port = 0xA1;
	outb(mask, port);
}

/* Unmask an irq, updating both the cached mask and the hardware. */
inline void
i8259a_enable_irq(struct irq_data *d)
{
	spin_lock(&i8259_irq_lock);
	i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
	spin_unlock(&i8259_irq_lock);
}

/* Mask an irq; lock must already be held by the caller. */
static inline void
__i8259a_disable_irq(unsigned int irq)
{
	i8259_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
}

void
i8259a_disable_irq(struct irq_data *d)
{
	spin_lock(&i8259_irq_lock);
	__i8259a_disable_irq(d->irq);
	spin_unlock(&i8259_irq_lock);
}

/* Mask the irq and issue specific-EOI to the PIC(s) that raised it. */
void
i8259a_mask_and_ack_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	spin_lock(&i8259_irq_lock);
	__i8259a_disable_irq(irq);

	/* Ack the interrupt making it the lowest priority.  */
	if (irq >= 8) {
		outb(0xE0 | (irq - 8), 0xa0);   /* ack the slave */
		irq = 2;	/* then ack the cascade line on the master */
	}
	outb(0xE0 | irq, 0x20);		/* ack the master */
	spin_unlock(&i8259_irq_lock);
}

struct irq_chip i8259a_irq_type = {
	.name		= "XT-PIC",
	.irq_unmask	= i8259a_enable_irq,
	.irq_mask	= i8259a_disable_irq,
	.irq_mask_ack	= i8259a_mask_and_ack_irq,
};

/*
 * Mask everything, hook up the 16 legacy irqs to the XT-PIC chip with
 * level handling, and claim irq 2 for the master->slave cascade.
 */
void __init
init_i8259a_irqs(void)
{
	static struct irqaction cascade = {
		.handler	= no_action,
		.name		= "cascade",
	};

	long i;

	outb(0xff, 0x21);	/* mask all of 8259A-1 */
	outb(0xff, 0xA1);	/* mask all of 8259A-2 */

	for (i = 0; i < 16; i++) {
		irq_set_chip_and_handler(i, &i8259a_irq_type, handle_level_irq);
	}

	setup_irq(2, &cascade);
}


#if defined(CONFIG_ALPHA_GENERIC)
# define IACK_SC	alpha_mv.iack_sc
#elif defined(CONFIG_ALPHA_APECS)
# define IACK_SC	APECS_IACK_SC
#elif defined(CONFIG_ALPHA_LCA)
# define IACK_SC	LCA_IACK_SC
#elif defined(CONFIG_ALPHA_CIA)
# define IACK_SC	CIA_IACK_SC
#elif defined(CONFIG_ALPHA_PYXIS)
# define IACK_SC	PYXIS_IACK_SC
#elif defined(CONFIG_ALPHA_TITAN)
# define IACK_SC	TITAN_IACK_SC
#elif defined(CONFIG_ALPHA_TSUNAMI)
# define IACK_SC	TSUNAMI_IACK_SC
#elif defined(CONFIG_ALPHA_IRONGATE)
# define IACK_SC	IRONGATE_IACK_SC
#endif
/* Note that CONFIG_ALPHA_POLARIS is intentionally left out here, since
   sys_rx164 wants to use isa_no_iack_sc_device_interrupt for some reason.  */

#if defined(IACK_SC)
void
isa_device_interrupt(unsigned long vector)
{
	/*
	 * Generate a PCI interrupt acknowledge cycle.  The PIC will
	 * respond with the interrupt vector of the highest priority
	 * interrupt that is pending.  The PALcode sets up the
	 * interrupts vectors such that irq level L generates vector L.
	 */
	int j = *(vuip) IACK_SC;
	j &= 0xff;
	handle_irq(j);
}
#endif

#if defined(CONFIG_ALPHA_GENERIC) || !defined(IACK_SC)
void
isa_no_iack_sc_device_interrupt(unsigned long vector)
{
	unsigned long pic;

	/*
	 * It seems to me that the probability of two or more *device*
	 * interrupts occurring at almost exactly the same time is
	 * pretty low.  So why pay the price of checking for
	 * additional interrupts here if the common case can be
	 * handled so much easier?
	 */
	/*
	 *  The first read of gives you *all* interrupting lines.
	 *  Therefore, read the mask register and and out those lines
	 *  not enabled.  Note that some documentation has 21 and a1
	 *  write only.  This is not true.
	 */
	pic = inb(0x20) | (inb(0xA0) << 8);	/* read isr */
	pic &= 0xFFFB;				/* mask out cascade & hibits */

	while (pic) {
		int j = ffz(~pic);	/* lowest pending line */
		pic &= pic - 1;		/* clear that bit */
		handle_irq(j);
	}
}
#endif
gpl-2.0
Fechinator/FechdaKernelReloaded6.0
arch/mn10300/proc-mn103e010/proc-init.c
11941
2921
/* MN103E010 Processor initialisation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <asm/fpu.h>
#include <asm/rtc.h>
#include <asm/busctl-regs.h>

/*
 * initialise the on-silicon processor peripherals
 */
asmlinkage void __init processor_init(void)
{
	int loop;

	/* set up the exception table first */
	/* point every 8-byte vector slot at the common handler, then
	 * overwrite the vectors we handle specially */
	for (loop = 0x000; loop < 0x400; loop += 8)
		__set_intr_stub(loop, __common_exception);

	__set_intr_stub(EXCEP_ITLBMISS,		itlb_miss);
	__set_intr_stub(EXCEP_DTLBMISS,		dtlb_miss);
	__set_intr_stub(EXCEP_IAERROR,		itlb_aerror);
	__set_intr_stub(EXCEP_DAERROR,		dtlb_aerror);
	__set_intr_stub(EXCEP_BUSERROR,		raw_bus_error);
	__set_intr_stub(EXCEP_DOUBLE_FAULT,	double_fault);
	__set_intr_stub(EXCEP_FPU_DISABLED,	fpu_disabled);
	__set_intr_stub(EXCEP_SYSCALL0,		system_call);

	__set_intr_stub(EXCEP_NMI,		nmi_handler);
	__set_intr_stub(EXCEP_WDT,		nmi_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL0,	irq_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL1,	irq_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL2,	irq_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL3,	irq_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL4,	irq_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL5,	irq_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL6,	irq_handler);

	/* route each interrupt priority level to its vector */
	IVAR0 = EXCEP_IRQ_LEVEL0;
	IVAR1 = EXCEP_IRQ_LEVEL1;
	IVAR2 = EXCEP_IRQ_LEVEL2;
	IVAR3 = EXCEP_IRQ_LEVEL3;
	IVAR4 = EXCEP_IRQ_LEVEL4;
	IVAR5 = EXCEP_IRQ_LEVEL5;
	IVAR6 = EXCEP_IRQ_LEVEL6;

	/* make sure the freshly written vector table is visible to
	 * instruction fetch */
	mn10300_dcache_flush_inv();
	mn10300_icache_inv();

	/* disable all interrupts and set to priority 6 (lowest) */
	for (loop = 0; loop < NR_IRQS; loop++)
		GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;

	/* clear the timers */
	TM0MD	= 0;
	TM1MD	= 0;
	TM2MD	= 0;
	TM3MD	= 0;
	TM4MD	= 0;
	TM5MD	= 0;
	TM6MD	= 0;
	TM6MDA	= 0;
	TM6MDB	= 0;
	TM7MD	= 0;
	TM8MD	= 0;
	TM9MD	= 0;
	TM10MD	= 0;
	TM11MD	= 0;

	calibrate_clock();
}

/*
 * determine the memory size and base from the memory controller regs
 */
void __init get_mem_info(unsigned long *mem_base, unsigned long *mem_size)
{
	unsigned long base, size;

	*mem_base = 0;
	*mem_size = 0;

	base = SDBASE(0);
	if (base & SDBASE_CE) {
		/* decode the address mask into a byte size
		 * (two's-complement of the shifted mask) */
		size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
		size = ~size + 1;
		base &= SDBASE_CBA;

		printk(KERN_INFO "SDRAM[0]: %luMb @%08lx\n", size >> 20, base);
		*mem_size += size;
		*mem_base = base;
	}

	base = SDBASE(1);
	if (base & SDBASE_CE) {
		size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
		size = ~size + 1;
		base &= SDBASE_CBA;

		printk(KERN_INFO "SDRAM[1]: %luMb @%08lx\n", size >> 20, base);
		*mem_size += size;
		/* bank 0 (if enabled) defines the base address */
		if (*mem_base == 0)
			*mem_base = base;
	}
}
gpl-2.0
septazzz/Lonas_KL_GT-I9300
arch/mn10300/proc-mn103e010/proc-init.c
11941
2921
/* MN103E010 Processor initialisation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <asm/fpu.h>
#include <asm/rtc.h>
#include <asm/busctl-regs.h>

/*
 * initialise the on-silicon processor peripherals
 */
asmlinkage void __init processor_init(void)
{
	int loop;

	/* set up the exception table first */
	/* point every 8-byte vector slot at the common handler, then
	 * overwrite the vectors we handle specially */
	for (loop = 0x000; loop < 0x400; loop += 8)
		__set_intr_stub(loop, __common_exception);

	__set_intr_stub(EXCEP_ITLBMISS,		itlb_miss);
	__set_intr_stub(EXCEP_DTLBMISS,		dtlb_miss);
	__set_intr_stub(EXCEP_IAERROR,		itlb_aerror);
	__set_intr_stub(EXCEP_DAERROR,		dtlb_aerror);
	__set_intr_stub(EXCEP_BUSERROR,		raw_bus_error);
	__set_intr_stub(EXCEP_DOUBLE_FAULT,	double_fault);
	__set_intr_stub(EXCEP_FPU_DISABLED,	fpu_disabled);
	__set_intr_stub(EXCEP_SYSCALL0,		system_call);

	__set_intr_stub(EXCEP_NMI,		nmi_handler);
	__set_intr_stub(EXCEP_WDT,		nmi_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL0,	irq_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL1,	irq_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL2,	irq_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL3,	irq_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL4,	irq_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL5,	irq_handler);
	__set_intr_stub(EXCEP_IRQ_LEVEL6,	irq_handler);

	/* route each interrupt priority level to its vector */
	IVAR0 = EXCEP_IRQ_LEVEL0;
	IVAR1 = EXCEP_IRQ_LEVEL1;
	IVAR2 = EXCEP_IRQ_LEVEL2;
	IVAR3 = EXCEP_IRQ_LEVEL3;
	IVAR4 = EXCEP_IRQ_LEVEL4;
	IVAR5 = EXCEP_IRQ_LEVEL5;
	IVAR6 = EXCEP_IRQ_LEVEL6;

	/* make sure the freshly written vector table is visible to
	 * instruction fetch */
	mn10300_dcache_flush_inv();
	mn10300_icache_inv();

	/* disable all interrupts and set to priority 6 (lowest) */
	for (loop = 0; loop < NR_IRQS; loop++)
		GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;

	/* clear the timers */
	TM0MD	= 0;
	TM1MD	= 0;
	TM2MD	= 0;
	TM3MD	= 0;
	TM4MD	= 0;
	TM5MD	= 0;
	TM6MD	= 0;
	TM6MDA	= 0;
	TM6MDB	= 0;
	TM7MD	= 0;
	TM8MD	= 0;
	TM9MD	= 0;
	TM10MD	= 0;
	TM11MD	= 0;

	calibrate_clock();
}

/*
 * determine the memory size and base from the memory controller regs
 */
void __init get_mem_info(unsigned long *mem_base, unsigned long *mem_size)
{
	unsigned long base, size;

	*mem_base = 0;
	*mem_size = 0;

	base = SDBASE(0);
	if (base & SDBASE_CE) {
		/* decode the address mask into a byte size
		 * (two's-complement of the shifted mask) */
		size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
		size = ~size + 1;
		base &= SDBASE_CBA;

		printk(KERN_INFO "SDRAM[0]: %luMb @%08lx\n", size >> 20, base);
		*mem_size += size;
		*mem_base = base;
	}

	base = SDBASE(1);
	if (base & SDBASE_CE) {
		size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
		size = ~size + 1;
		base &= SDBASE_CBA;

		printk(KERN_INFO "SDRAM[1]: %luMb @%08lx\n", size >> 20, base);
		*mem_size += size;
		/* bank 0 (if enabled) defines the base address */
		if (*mem_base == 0)
			*mem_base = base;
	}
}
gpl-2.0
AndroidRoot/android_kernel_asus_tf101
fs/befs/datastream.c
11941
15907
/*
 * linux/fs/befs/datastream.c
 *
 * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com>
 *
 * Based on portions of file.c by Makoto Kato <m_kato@ga2.so-net.ne.jp>
 *
 * Many thanks to Dominic Giampaolo, author of "Practical File System
 * Design with the Be File System", for such a helpful book.
 *
 */

#include <linux/kernel.h>
#include <linux/buffer_head.h>
#include <linux/string.h>

#include "befs.h"
#include "datastream.h"
#include "io.h"

const befs_inode_addr BAD_IADDR = { 0, 0, 0 };

static int befs_find_brun_direct(struct super_block *sb,
				 befs_data_stream * data,
				 befs_blocknr_t blockno, befs_block_run * run);

static int befs_find_brun_indirect(struct super_block *sb,
				   befs_data_stream * data,
				   befs_blocknr_t blockno,
				   befs_block_run * run);

static int befs_find_brun_dblindirect(struct super_block *sb,
				      befs_data_stream * data,
				      befs_blocknr_t blockno,
				      befs_block_run * run);

/**
 * befs_read_datastream - get buffer_head containing data, starting from pos.
 * @sb: Filesystem superblock
 * @ds: datastrem to find data with
 * @pos: start of data
 * @off: offset of data in buffer_head->b_data
 *
 * Returns pointer to buffer_head containing data starting with offset @off,
 * if you don't need to know offset just set @off = NULL.
 */
struct buffer_head *
befs_read_datastream(struct super_block *sb, befs_data_stream * ds,
		     befs_off_t pos, uint * off)
{
	struct buffer_head *bh = NULL;
	befs_block_run run;
	befs_blocknr_t block;	/* block coresponding to pos */

	befs_debug(sb, "---> befs_read_datastream() %Lu", pos);
	block = pos >> BEFS_SB(sb)->block_shift;
	if (off)
		*off = pos - (block << BEFS_SB(sb)->block_shift);

	if (befs_fblock2brun(sb, ds, block, &run) != BEFS_OK) {
		befs_error(sb, "BeFS: Error finding disk addr of block %lu",
			   block);
		befs_debug(sb, "<--- befs_read_datastream() ERROR");
		return NULL;
	}
	bh = befs_bread_iaddr(sb, run);
	if (!bh) {
		befs_error(sb, "BeFS: Error reading block %lu from datastream",
			   block);
		return NULL;
	}

	befs_debug(sb, "<--- befs_read_datastream() read data, starting at %Lu",
		   pos);

	return bh;
}

/*
 * Takes a file position and gives back a brun who's starting block
 * is block number fblock of the file.
 *
 * Returns BEFS_OK or BEFS_ERR.
 *
 * Calls specialized functions for each of the three possible
 * datastream regions (direct, indirect, double-indirect),
 * dispatched by comparing the byte position against each
 * region's maximum range.
 *
 * 2001-11-15 Will Dyson
 */
int
befs_fblock2brun(struct super_block *sb, befs_data_stream * data,
		 befs_blocknr_t fblock, befs_block_run * run)
{
	int err;
	befs_off_t pos = fblock << BEFS_SB(sb)->block_shift;

	if (pos < data->max_direct_range) {
		err = befs_find_brun_direct(sb, data, fblock, run);

	} else if (pos < data->max_indirect_range) {
		err = befs_find_brun_indirect(sb, data, fblock, run);

	} else if (pos < data->max_double_indirect_range) {
		err = befs_find_brun_dblindirect(sb, data, fblock, run);

	} else {
		befs_error(sb,
			   "befs_fblock2brun() was asked to find block %lu, "
			   "which is not mapped by the datastream\n", fblock);
		err = BEFS_ERR;
	}
	return err;
}

/**
 * befs_read_lsymlink - read long symlink from datastream.
 * @sb: Filesystem superblock
 * @ds: Datastrem to read from
 * @buff: Buffer in which to place long symlink data
 * @len: Length of the long symlink in bytes
 *
 * Returns the number of bytes read
 */
size_t
befs_read_lsymlink(struct super_block * sb, befs_data_stream * ds, void *buff,
		   befs_off_t len)
{
	befs_off_t bytes_read = 0;	/* bytes readed */
	u16 plen;
	struct buffer_head *bh = NULL;
	befs_debug(sb, "---> befs_read_lsymlink() length: %Lu", len);

	while (bytes_read < len) {
		bh = befs_read_datastream(sb, ds, bytes_read, NULL);
		if (!bh) {
			befs_error(sb, "BeFS: Error reading datastream block "
				   "starting from %Lu", bytes_read);
			befs_debug(sb, "<--- befs_read_lsymlink() ERROR");
			return bytes_read;

		}
		/* copy either a whole block, or the tail of the link */
		plen = ((bytes_read + BEFS_SB(sb)->block_size) < len) ?
		    BEFS_SB(sb)->block_size : len - bytes_read;
		memcpy(buff + bytes_read, bh->b_data, plen);
		brelse(bh);
		bytes_read += plen;
	}

	befs_debug(sb, "<--- befs_read_lsymlink() read %u bytes", bytes_read);
	return bytes_read;
}

/**
 * befs_count_blocks - blocks used by a file
 * @sb: Filesystem superblock
 * @ds: Datastream of the file
 *
 * Counts the number of fs blocks that the file represented by
 * inode occupies on the filesystem, counting both regular file
 * data and filesystem metadata (and eventually attribute data
 * when we support attributes)
 */
befs_blocknr_t
befs_count_blocks(struct super_block * sb, befs_data_stream * ds)
{
	befs_blocknr_t blocks;
	befs_blocknr_t datablocks;	/* File data blocks */
	befs_blocknr_t metablocks;	/* FS metadata blocks */
	befs_sb_info *befs_sb = BEFS_SB(sb);

	befs_debug(sb, "---> befs_count_blocks()");

	/* round the data size up to whole blocks */
	datablocks = ds->size >> befs_sb->block_shift;
	if (ds->size & (befs_sb->block_size - 1))
		datablocks += 1;

	metablocks = 1;		/* Start with 1 block for inode */

	/* Size of indirect block */
	if (ds->size > ds->max_direct_range)
		metablocks += ds->indirect.len;

	/*
	   Double indir block, plus all the indirect blocks it mapps
	   In the double-indirect range, all block runs of data
	   are BEFS_DBLINDIR_BRUN_LEN blocks long. Therefore, we know
	   how many data block runs are in the double-indirect region,
	   and from that we know how many indirect blocks it takes to
	   map them. We assume that the indirect blocks are also
	   BEFS_DBLINDIR_BRUN_LEN blocks long.
	 */
	if (ds->size > ds->max_indirect_range && ds->max_indirect_range != 0) {
		uint dbl_bytes;
		uint dbl_bruns;
		uint indirblocks;

		dbl_bytes =
		    ds->max_double_indirect_range - ds->max_indirect_range;
		dbl_bruns =
		    dbl_bytes / (befs_sb->block_size * BEFS_DBLINDIR_BRUN_LEN);
		indirblocks = dbl_bruns / befs_iaddrs_per_block(sb);

		metablocks += ds->double_indirect.len;
		metablocks += indirblocks;
	}

	blocks = datablocks + metablocks;
	befs_debug(sb, "<--- befs_count_blocks() %u blocks", blocks);

	return blocks;
}

/*
	Finds the block run that starts at file block number blockno
	in the file represented by the datastream data, if that
	blockno is in the direct region of the datastream.

	sb: the superblock
	data: the datastream
	blockno: the blocknumber to find
	run: The found run is passed back through this pointer

	Return value is BEFS_OK if the blockrun is found, BEFS_ERR
	otherwise.

	Algorithm:
	Linear search. Checks each element of array[] to see if it
	contains the blockno-th filesystem block. This is necessary
	because the block runs map variable amounts of data. Simply
	keeps a count of the number of blocks searched so far (sum),
	incrementing this by the length of each block run as we come
	across it. Adds sum to *count before returning (this is so
	you can search multiple arrays that are logicaly one array,
	as in the indirect region code).

	When/if blockno is found, if blockno is inside of a block
	run as stored on disk, we offset the start and length members
	of the block run, so that blockno is the start and len is
	still valid (the run ends in the same place).

	2001-11-15 Will Dyson
*/
static int
befs_find_brun_direct(struct super_block *sb, befs_data_stream * data,
		      befs_blocknr_t blockno, befs_block_run * run)
{
	int i;
	befs_block_run *array = data->direct;
	befs_blocknr_t sum;
	befs_blocknr_t max_block =
	    data->max_direct_range >> BEFS_SB(sb)->block_shift;

	befs_debug(sb, "---> befs_find_brun_direct(), find %lu", blockno);

	if (blockno > max_block) {
		befs_error(sb, "befs_find_brun_direct() passed block outside of"
			   "direct region");
		return BEFS_ERR;
	}

	for (i = 0, sum = 0; i < BEFS_NUM_DIRECT_BLOCKS;
	     sum += array[i].len, i++) {
		if (blockno >= sum && blockno < sum + (array[i].len)) {
			int offset = blockno - sum;
			run->allocation_group = array[i].allocation_group;
			run->start = array[i].start + offset;
			run->len = array[i].len - offset;

			befs_debug(sb, "---> befs_find_brun_direct(), "
				   "found %lu at direct[%d]", blockno, i);
			return BEFS_OK;
		}
	}

	befs_debug(sb, "---> befs_find_brun_direct() ERROR");
	return BEFS_ERR;
}

/*
	Finds the block run that starts at file block number blockno
	in the file represented by the datastream data, if that
	blockno is in the indirect region of the datastream.

	sb: the superblock
	data: the datastream
	blockno: the blocknumber to find
	run: The found run is passed back through this pointer

	Return value is BEFS_OK if the blockrun is found, BEFS_ERR
	otherwise.

	Algorithm:
	For each block in the indirect run of the datastream, read
	it in and search through it for	search_blk.

	XXX:
	Really should check to make sure blockno is inside indirect
	region.

	2001-11-15 Will Dyson
*/
static int
befs_find_brun_indirect(struct super_block *sb,
			befs_data_stream * data, befs_blocknr_t blockno,
			befs_block_run * run)
{
	int i, j;
	befs_blocknr_t sum = 0;
	befs_blocknr_t indir_start_blk;
	befs_blocknr_t search_blk;
	struct buffer_head *indirblock;
	befs_disk_block_run *array;

	befs_block_run indirect = data->indirect;
	befs_blocknr_t indirblockno = iaddr2blockno(sb, &indirect);
	int arraylen = befs_iaddrs_per_block(sb);

	befs_debug(sb, "---> befs_find_brun_indirect(), find %lu", blockno);

	/* convert the file block number to an offset into this region */
	indir_start_blk = data->max_direct_range >> BEFS_SB(sb)->block_shift;
	search_blk = blockno - indir_start_blk;

	/* Examine blocks of the indirect run one at a time */
	for (i = 0; i < indirect.len; i++) {
		indirblock = befs_bread(sb, indirblockno + i);
		if (indirblock == NULL) {
			befs_debug(sb,
				   "---> befs_find_brun_indirect() failed to "
				   "read disk block %lu from the indirect brun",
				   indirblockno + i);
			return BEFS_ERR;
		}

		array = (befs_disk_block_run *) indirblock->b_data;

		for (j = 0; j < arraylen; ++j) {
			/* on-disk runs are little-endian; convert as we go */
			int len = fs16_to_cpu(sb, array[j].len);

			if (search_blk >= sum && search_blk < sum + len) {
				int offset = search_blk - sum;
				run->allocation_group =
				    fs32_to_cpu(sb, array[j].allocation_group);
				run->start =
				    fs16_to_cpu(sb, array[j].start) + offset;
				run->len =
				    fs16_to_cpu(sb, array[j].len) - offset;

				brelse(indirblock);
				befs_debug(sb,
					   "<--- befs_find_brun_indirect() found "
					   "file block %lu at indirect[%d]",
					   blockno, j + (i * arraylen));
				return BEFS_OK;
			}
			sum += len;
		}

		brelse(indirblock);
	}

	/* Only fallthrough is an error */
	befs_error(sb, "BeFS: befs_find_brun_indirect() failed to find "
		   "file block %lu", blockno);

	befs_debug(sb, "<--- befs_find_brun_indirect() ERROR");
	return BEFS_ERR;
}

/*
	Finds the block run that starts at file block number blockno
	in the file represented by the datastream data, if that
	blockno is in the double-indirect region of the datastream.

	sb: the superblock
	data: the datastream
	blockno: the blocknumber to find
	run: The found run is passed back through this pointer

	Return value is BEFS_OK if the blockrun is found, BEFS_ERR
	otherwise.

	Algorithm:
	The block runs in the double-indirect region are different.
	They are always allocated 4 fs blocks at a time, so each
	block run maps a constant amount of file data. This means
	that we can directly calculate how many block runs into the
	double-indirect region we need to go to get to the one that
	maps a particular filesystem block.

	We do this in two stages. First we calculate which of the
	inode addresses in the double-indirect block will point us
	to the indirect block that contains the mapping for the data,
	then we calculate which of the inode addresses in that
	indirect block maps the data block we are after.

	Oh, and once we've done that, we actually read in the blocks
	that contain the inode addresses we calculated above. Even
	though the double-indirect run may be several blocks long,
	we can calculate which of those blocks will contain the index
	we are after and only read that one. We then follow it to
	the indirect block and perform a  similar process to find
	the actual block run that maps the data block we are interested
	in.

	Then we offset the run as in befs_find_brun_array() and we are
	done.

	2001-11-15 Will Dyson
*/
static int
befs_find_brun_dblindirect(struct super_block *sb,
			   befs_data_stream * data,
			   befs_blocknr_t blockno, befs_block_run * run)
{
	int dblindir_indx;
	int indir_indx;
	int offset;
	int dbl_which_block;
	int which_block;
	int dbl_block_indx;
	int block_indx;
	off_t dblindir_leftover;
	befs_blocknr_t blockno_at_run_start;
	struct buffer_head *dbl_indir_block;
	struct buffer_head *indir_block;
	befs_block_run indir_run;
	befs_disk_inode_addr *iaddr_array = NULL;
	befs_sb_info *befs_sb = BEFS_SB(sb);

	befs_blocknr_t indir_start_blk =
	    data->max_indirect_range >> befs_sb->block_shift;

	off_t dbl_indir_off = blockno - indir_start_blk;

	/* number of data blocks mapped by each of the iaddrs in
	 * the indirect block pointed to by the double indirect block
	 */
	size_t iblklen = BEFS_DBLINDIR_BRUN_LEN;

	/* number of data blocks mapped by each of the iaddrs in
	 * the double indirect block
	 */
	size_t diblklen = iblklen * befs_iaddrs_per_block(sb)
	    * BEFS_DBLINDIR_BRUN_LEN;

	befs_debug(sb, "---> befs_find_brun_dblindirect() find %lu", blockno);

	/* First, discover which of the double_indir->indir blocks
	 * contains pos. Then figure out how much of pos that
	 * accounted for. Then discover which of the iaddrs in
	 * the indirect block contains pos.
	 */

	dblindir_indx = dbl_indir_off / diblklen;
	dblindir_leftover = dbl_indir_off % diblklen;
	indir_indx = dblindir_leftover / diblklen;

	/* Read double indirect block */
	dbl_which_block = dblindir_indx / befs_iaddrs_per_block(sb);
	if (dbl_which_block > data->double_indirect.len) {
		befs_error(sb, "The double-indirect index calculated by "
			   "befs_read_brun_dblindirect(), %d, is outside the range "
			   "of the double-indirect block", dblindir_indx);
		return BEFS_ERR;
	}

	dbl_indir_block =
	    befs_bread(sb, iaddr2blockno(sb, &data->double_indirect) +
					dbl_which_block);
	if (dbl_indir_block == NULL) {
		befs_error(sb, "befs_read_brun_dblindirect() couldn't read the "
			   "double-indirect block at blockno %lu",
			   iaddr2blockno(sb,
					 &data->double_indirect) +
			   dbl_which_block);
		brelse(dbl_indir_block);
		return BEFS_ERR;
	}

	dbl_block_indx =
	    dblindir_indx - (dbl_which_block * befs_iaddrs_per_block(sb));
	iaddr_array = (befs_disk_inode_addr *) dbl_indir_block->b_data;
	indir_run = fsrun_to_cpu(sb, iaddr_array[dbl_block_indx]);
	brelse(dbl_indir_block);
	iaddr_array = NULL;

	/* Read indirect block */
	which_block = indir_indx / befs_iaddrs_per_block(sb);
	if (which_block > indir_run.len) {
		befs_error(sb, "The indirect index calculated by "
			   "befs_read_brun_dblindirect(), %d, is outside the range "
			   "of the indirect block", indir_indx);
		return BEFS_ERR;
	}

	indir_block =
	    befs_bread(sb, iaddr2blockno(sb, &indir_run) + which_block);
	if (indir_block == NULL) {
		befs_error(sb, "befs_read_brun_dblindirect() couldn't read the "
			   "indirect block at blockno %lu",
			   iaddr2blockno(sb, &indir_run) + which_block);
		brelse(indir_block);
		return BEFS_ERR;
	}

	block_indx = indir_indx - (which_block * befs_iaddrs_per_block(sb));
	iaddr_array = (befs_disk_inode_addr *) indir_block->b_data;
	*run = fsrun_to_cpu(sb, iaddr_array[block_indx]);
	brelse(indir_block);
	iaddr_array = NULL;

	/* offset the run so it starts exactly at blockno */
	blockno_at_run_start = indir_start_blk;
	blockno_at_run_start += diblklen * dblindir_indx;
	blockno_at_run_start += iblklen * indir_indx;
	offset = blockno - blockno_at_run_start;
	run->start += offset;
	run->len -= offset;

	befs_debug(sb, "Found file block %lu in double_indirect[%d][%d],"
		   " double_indirect_leftover = %lu",
		   blockno, dblindir_indx, indir_indx, dblindir_leftover);

	return BEFS_OK;
}
gpl-2.0
hroark13/n861_two_n860
net/netfilter/xt_state.c
12709
2008
/* Kernel module to match connection tracking information. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2005 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/skbuff.h> #include <net/netfilter/nf_conntrack.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_state.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>"); MODULE_DESCRIPTION("ip[6]_tables connection tracking state match module"); MODULE_ALIAS("ipt_state"); MODULE_ALIAS("ip6t_state"); static bool state_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_state_info *sinfo = par->matchinfo; enum ip_conntrack_info ctinfo; unsigned int statebit; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); if (!ct) statebit = XT_STATE_INVALID; else { if (nf_ct_is_untracked(ct)) statebit = XT_STATE_UNTRACKED; else statebit = XT_STATE_BIT(ctinfo); } return (sinfo->statemask & statebit); } static int state_mt_check(const struct xt_mtchk_param *par) { int ret; ret = nf_ct_l3proto_try_module_get(par->family); if (ret < 0) pr_info("cannot load conntrack support for proto=%u\n", par->family); return ret; } static void state_mt_destroy(const struct xt_mtdtor_param *par) { nf_ct_l3proto_module_put(par->family); } static struct xt_match state_mt_reg __read_mostly = { .name = "state", .family = NFPROTO_UNSPEC, .checkentry = state_mt_check, .match = state_mt, .destroy = state_mt_destroy, .matchsize = sizeof(struct xt_state_info), .me = THIS_MODULE, }; static int __init state_mt_init(void) { return xt_register_match(&state_mt_reg); } static void __exit state_mt_exit(void) { xt_unregister_match(&state_mt_reg); } module_init(state_mt_init); module_exit(state_mt_exit);
gpl-2.0
junkie2100/android_kernel_zte_quantum
arch/alpha/kernel/asm-offsets.c
13733
1456
/* * Generate definitions needed by assembly language modules. * This code generates raw asm output which is post-processed to extract * and format the required data. */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/kbuild.h> #include <asm/io.h> void foo(void) { DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); BLANK(); DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); DEFINE(TASK_CRED, offsetof(struct task_struct, cred)); DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent)); DEFINE(TASK_GROUP_LEADER, offsetof(struct task_struct, group_leader)); DEFINE(TASK_TGID, offsetof(struct task_struct, tgid)); BLANK(); DEFINE(CRED_UID, offsetof(struct cred, uid)); DEFINE(CRED_EUID, offsetof(struct cred, euid)); DEFINE(CRED_GID, offsetof(struct cred, gid)); DEFINE(CRED_EGID, offsetof(struct cred, egid)); BLANK(); DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs)); DEFINE(PT_PTRACED, PT_PTRACED); DEFINE(CLONE_VM, CLONE_VM); DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); DEFINE(SIGCHLD, SIGCHLD); BLANK(); DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache)); DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register)); }
gpl-2.0
robertobinsely/prd
sound/soc/sh/rcar/src.c
422
16087
/*
 * Renesas R-Car SRC support
 *
 * Copyright (C) 2013 Renesas Solutions Corp.
 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "rsnd.h"

#define SRC_NAME "src"

/* Per-SRC-instance state: platform info, generic module core, module clock. */
struct rsnd_src {
	struct rsnd_src_platform_info *info; /* rcar_snd.h */
	struct rsnd_mod mod;
	struct clk *clk;
};

#define RSND_SRC_NAME_SIZE 16

#define rsnd_src_convert_rate(p) ((p)->info->convert_rate)
#define rsnd_mod_to_src(_mod) \
	container_of((_mod), struct rsnd_src, mod)
#define rsnd_src_dma_available(src) \
	rsnd_dma_available(rsnd_mod_to_dma(&(src)->mod))

#define for_each_rsnd_src(pos, priv, i)	\
	for ((i) = 0;			\
	     ((i) < rsnd_src_nr(priv)) &&	\
	     ((pos) = (struct rsnd_src *)(priv)->src + i);	\
	     i++)

/*
 *		image of SRC (Sampling Rate Converter)
 *
 * 96kHz   <-> +-----+	48kHz	+-----+	48kHz	+-------+
 * 48kHz   <-> | SRC | <------>	| SSI |	<----->	| codec |
 * 44.1kHz <-> +-----+		+-----+		+-------+
 * ...
 *
 */

/*
 * src.c is caring...
 *
 * Gen1
 *
 * [mem] -> [SRU] -> [SSI]
 *        |--------|
 *
 * Gen2
 *
 * [mem] -> [SRC] -> [SSIU] -> [SSI]
 *        |-----------------|
 */

/*
 * How to use SRC bypass mode for debugging
 *
 * SRC has bypass mode, and it is useful for debugging.
 * In Gen2 case,
 * SRCm_MODE  controls whether SRC is used or not
 * SSI_MODE0  controls whether SSIU which receives SRC data
 *	      is used or not.
 * Both SRCm_MODE/SSI_MODE0 settings are needed if you use SRC,
 * but SRC bypass mode needs SSI_MODE0 only.
 *
 * This driver request
 * struct rsnd_src_platform_info {
 *	u32 convert_rate;
 *	int dma_id;
 * }
 *
 * rsnd_src_convert_rate() indicates
 * above convert_rate, and it controls
 * whether SRC is used or not.
 *
 * ex) doesn't use SRC
 * static struct rsnd_dai_platform_info rsnd_dai = {
 *	.playback = { .ssi = &rsnd_ssi[0], },
 * };
 *
 * ex) uses SRC
 * static struct rsnd_src_platform_info rsnd_src[] = {
 *	RSND_SCU(48000, 0),
 *	...
 * };
 * static struct rsnd_dai_platform_info rsnd_dai = {
 *	.playback = { .ssi = &rsnd_ssi[0], .src = &rsnd_src[0] },
 * };
 *
 * ex) uses SRC bypass mode
 * static struct rsnd_src_platform_info rsnd_src[] = {
 *	RSND_SCU(0, 0),
 *	...
 * };
 * static struct rsnd_dai_platform_info rsnd_dai = {
 *	.playback = { .ssi = &rsnd_ssi[0], .src = &rsnd_src[0] },
 * };
 *
 */

/*
 *		Gen1/Gen2 common functions
 */

/*
 * Configure the SSIU side of an SSI for (non-)BUSIF operation:
 * SSI_MODE0/SSI_MODE1 routing bits, and when use_busif is set, the
 * BUSIF DMA registers including the channel-dependent data alignment.
 */
int rsnd_src_ssiu_start(struct rsnd_mod *ssi_mod,
			struct rsnd_dai *rdai,
			int use_busif)
{
	struct rsnd_dai_stream *io = rsnd_mod_to_io(ssi_mod);
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	int ssi_id = rsnd_mod_id(ssi_mod);

	/*
	 * SSI_MODE0
	 */
	/* bit set = SSIU bypassed, so the bit is the inverse of use_busif */
	rsnd_mod_bset(ssi_mod, SSI_MODE0, (1 << ssi_id),
		      !use_busif << ssi_id);

	/*
	 * SSI_MODE1
	 */
	if (rsnd_ssi_is_pin_sharing(ssi_mod)) {
		int shift = -1;

		/* only SSI 1/2/4 have pin-sharing fields in SSI_MODE1 */
		switch (ssi_id) {
		case 1:
			shift = 0;
			break;
		case 2:
			shift = 2;
			break;
		case 4:
			shift = 16;
			break;
		}

		if (shift >= 0)
			rsnd_mod_bset(ssi_mod, SSI_MODE1,
				      0x3 << shift,
				      rsnd_dai_is_clk_master(rdai) ?
				      0x2 << shift : 0x1 << shift);
	}

	/*
	 * DMA settings for SSIU
	 */
	if (use_busif) {
		u32 val = 0x76543210;
		u32 mask = ~0;

		rsnd_mod_write(ssi_mod, SSI_BUSIF_ADINR,
			       rsnd_get_adinr(ssi_mod));
		rsnd_mod_write(ssi_mod, SSI_BUSIF_MODE, 1);
		rsnd_mod_write(ssi_mod, SSI_CTRL, 0x1);

		/*
		 * Build the BUSIF_DALIGN value: keep the default nibbles for
		 * unused channel slots, and pick the per-sample-width byte
		 * ordering for the active channels.
		 */
		mask <<= runtime->channels * 4;
		val = val & mask;

		switch (runtime->sample_bits) {
		case 16:
			val |= 0x67452301 & ~mask;
			break;
		case 32:
			val |= 0x76543210 & ~mask;
			break;
		}
		rsnd_mod_write(ssi_mod, BUSIF_DALIGN, val);
	}

	return 0;
}

/* Counterpart of rsnd_src_ssiu_start(): stop the BUSIF transfer if used. */
int rsnd_src_ssiu_stop(struct rsnd_mod *ssi_mod,
		       struct rsnd_dai *rdai,
		       int use_busif)
{
	/*
	 * DMA settings for SSIU
	 */
	if (use_busif)
		rsnd_mod_write(ssi_mod, SSI_CTRL, 0);

	return 0;
}

/* Enable the SSI PIO interrupt (Gen2 only; Gen1 is a no-op here). */
int rsnd_src_enable_ssi_irq(struct rsnd_mod *ssi_mod,
			    struct rsnd_dai *rdai)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(ssi_mod);

	/* enable PIO interrupt if Gen2 */
	if (rsnd_is_gen2(priv))
		rsnd_mod_write(ssi_mod, INT_ENABLE, 0x0f000000);

	return 0;
}

/*
 * Rate seen by the SSI: the SRC convert rate when a SRC is attached to
 * this stream and has a non-zero convert_rate, otherwise runtime->rate.
 */
unsigned int rsnd_src_get_ssi_rate(struct rsnd_priv *priv,
				   struct rsnd_dai_stream *io,
				   struct snd_pcm_runtime *runtime)
{
	struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io);
	struct rsnd_src *src;
	unsigned int rate = 0;

	if (src_mod) {
		src = rsnd_mod_to_src(src_mod);

		/*
		 * return convert rate if SRC is used,
		 * otherwise, return runtime->rate as usual
		 */
		rate = rsnd_src_convert_rate(src);
	}

	if (!rate)
		rate = runtime->rate;

	return rate;
}

/*
 * Common (Gen1/Gen2) SRC setup: soft reset, hold the SRC initialized
 * (SRC_SRCIR=1, released later by rsnd_src_start()), program channel
 * format and, when converting, the initial FS ratio.
 */
static int rsnd_src_set_convert_rate(struct rsnd_mod *mod,
				     struct rsnd_dai *rdai)
{
	struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	struct rsnd_src *src = rsnd_mod_to_src(mod);
	u32 convert_rate = rsnd_src_convert_rate(src);
	u32 fsrate = 0;

	if (convert_rate)
		fsrate = 0x0400000 / convert_rate * runtime->rate;

	/* set/clear soft reset */
	rsnd_mod_write(mod, SRC_SWRSR, 0);
	rsnd_mod_write(mod, SRC_SWRSR, 1);

	/*
	 * Initialize the operation of the SRC internal circuits
	 * see rsnd_src_start()
	 */
	rsnd_mod_write(mod, SRC_SRCIR, 1);

	/* Set channel number and output bit length */
	rsnd_mod_write(mod, SRC_ADINR, rsnd_get_adinr(mod));

	/* Enable the initial value of IFS */
	if (fsrate) {
		rsnd_mod_write(mod, SRC_IFSCR, 1);

		/* Set initial value of IFS */
		rsnd_mod_write(mod, SRC_IFSVR, fsrate);
	}

	/* use DMA transfer */
	rsnd_mod_write(mod, SRC_BUSIF_MODE, 1);

	return 0;
}

/* Enable the SRC module clock. */
static int rsnd_src_init(struct rsnd_mod *mod,
			 struct rsnd_dai *rdai)
{
	struct rsnd_src *src = rsnd_mod_to_src(mod);

	clk_prepare_enable(src->clk);

	return 0;
}

/* Disable the SRC module clock (pairs with rsnd_src_init()). */
static int rsnd_src_quit(struct rsnd_mod *mod,
			 struct rsnd_dai *rdai)
{
	struct rsnd_src *src = rsnd_mod_to_src(mod);

	clk_disable_unprepare(src->clk);

	return 0;
}

/* Release the SRC from its initialized state and start conversion. */
static int rsnd_src_start(struct rsnd_mod *mod,
			  struct rsnd_dai *rdai)
{
	struct rsnd_src *src = rsnd_mod_to_src(mod);

	/*
	 * Cancel the initialization and operate the SRC function
	 * see rsnd_src_set_convert_rate()
	 */
	rsnd_mod_write(mod, SRC_SRCIR, 0);

	if (rsnd_src_convert_rate(src))
		rsnd_mod_write(mod, SRC_ROUTE_MODE0, 1);

	return 0;
}

/* Stop conversion routing (only relevant when a convert rate is set). */
static int rsnd_src_stop(struct rsnd_mod *mod,
			 struct rsnd_dai *rdai)
{
	struct rsnd_src *src = rsnd_mod_to_src(mod);

	if (rsnd_src_convert_rate(src))
		rsnd_mod_write(mod, SRC_ROUTE_MODE0, 0);

	return 0;
}

/*
 *		Gen1 functions
 */

/*
 * Program the SRC_ROUTE_SEL field for this SRU: 0x1 selects playback,
 * 0x2 capture.  Field width/position depend on the module id (table below).
 */
static int rsnd_src_set_route_gen1(struct rsnd_mod *mod,
				   struct rsnd_dai *rdai)
{
	struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
	struct src_route_config {
		u32 mask;
		int shift;
	} routes[] = {
		{ 0xF,  0, }, /* 0 */
		{ 0xF,  4, }, /* 1 */
		{ 0xF,  8, }, /* 2 */
		{ 0x7, 12, }, /* 3 */
		{ 0x7, 16, }, /* 4 */
		{ 0x7, 20, }, /* 5 */
		{ 0x7, 24, }, /* 6 */
		{ 0x3, 28, }, /* 7 */
		{ 0x3, 30, }, /* 8 */
	};
	u32 mask;
	u32 val;
	int id;

	id = rsnd_mod_id(mod);
	if (id < 0 || id >= ARRAY_SIZE(routes))
		return -EIO;

	/*
	 * SRC_ROUTE_SELECT
	 */
	val = rsnd_dai_is_play(rdai, io) ? 0x1 : 0x2;
	val = val << routes[id].shift;
	mask = routes[id].mask << routes[id].shift;

	rsnd_mod_bset(mod, SRC_ROUTE_SEL, mask, val);

	return 0;
}

/*
 * Select the conversion timing clock source for a Gen1 SRU: ADG when a
 * convert rate is configured, otherwise the SSI word select.
 */
static int rsnd_src_set_convert_timing_gen1(struct rsnd_mod *mod,
					    struct rsnd_dai *rdai)
{
	struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_src *src = rsnd_mod_to_src(mod);
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	u32 convert_rate = rsnd_src_convert_rate(src);
	u32 mask;
	u32 val;
	int shift;
	int id = rsnd_mod_id(mod);
	int ret;

	/*
	 * SRC_TIMING_SELECT
	 */
	shift = (id % 4) * 8;	/* four 8-bit fields per SRC_TMG_SELx register */
	mask = 0x1F << shift;

	/*
	 * ADG is used as source clock if SRC was used,
	 * then, SSI WS is used as destination clock.
	 * SSI WS is used as source clock if SRC is not used
	 * (when playback, source/destination become reverse when capture)
	 */
	ret = 0;
	if (convert_rate) {
		/* use ADG */
		val = 0;
		ret = rsnd_adg_set_convert_clk_gen1(priv, mod,
						    runtime->rate,
						    convert_rate);
	} else if (8 == id) {
		/* use SSI WS, but SRU8 is special */
		val = id << shift;
	} else {
		/* use SSI WS */
		val = (id + 1) << shift;
	}

	if (ret < 0)
		return ret;

	switch (id / 4) {
	case 0:
		rsnd_mod_bset(mod, SRC_TMG_SEL0, mask, val);
		break;
	case 1:
		rsnd_mod_bset(mod, SRC_TMG_SEL1, mask, val);
		break;
	case 2:
		rsnd_mod_bset(mod, SRC_TMG_SEL2, mask, val);
		break;
	}

	return 0;
}

/* Gen1 rate setup: common setup plus Gen1-specific SRCCR/MNFSR values. */
static int rsnd_src_set_convert_rate_gen1(struct rsnd_mod *mod,
					  struct rsnd_dai *rdai)
{
	int ret;

	ret = rsnd_src_set_convert_rate(mod, rdai);
	if (ret < 0)
		return ret;

	/* Select SRC mode (fixed value) */
	rsnd_mod_write(mod, SRC_SRCCR, 0x00010110);

	/* Set the restriction value of the FS ratio (98%) */
	rsnd_mod_write(mod, SRC_MNFSR,
		       rsnd_mod_read(mod, SRC_IFSVR) / 100 * 98);

	/* no SRC_BFSSR settings, since SRC_SRCCR::BUFMD is 0 */

	return 0;
}

/* Gen1 probe: nothing to set up beyond a debug trace. */
static int rsnd_src_probe_gen1(struct rsnd_mod *mod,
			       struct rsnd_dai *rdai)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "%s (Gen1) is probed\n", rsnd_mod_name(mod));

	return 0;
}

/* Gen1 init: clock on, then route / rate / timing, failing fast. */
static int rsnd_src_init_gen1(struct rsnd_mod *mod,
			      struct rsnd_dai *rdai)
{
	int ret;

	ret = rsnd_src_init(mod, rdai);
	if (ret < 0)
		return ret;

	ret = rsnd_src_set_route_gen1(mod, rdai);
	if (ret < 0)
		return ret;

	ret = rsnd_src_set_convert_rate_gen1(mod, rdai);
	if (ret < 0)
		return ret;

	ret = rsnd_src_set_convert_timing_gen1(mod, rdai);
	if (ret < 0)
		return ret;

	return 0;
}

/* Gen1 start: enable this SRU's bit in SRC_ROUTE_CTRL, then common start. */
static int rsnd_src_start_gen1(struct rsnd_mod *mod,
			       struct rsnd_dai *rdai)
{
	int id = rsnd_mod_id(mod);

	rsnd_mod_bset(mod, SRC_ROUTE_CTRL, (1 << id), (1 << id));

	return rsnd_src_start(mod, rdai);
}

/* Gen1 stop: clear this SRU's SRC_ROUTE_CTRL bit, then common stop. */
static int rsnd_src_stop_gen1(struct rsnd_mod *mod,
			      struct rsnd_dai *rdai)
{
	int id = rsnd_mod_id(mod);

	rsnd_mod_bset(mod, SRC_ROUTE_CTRL, (1 << id), 0);

	return rsnd_src_stop(mod, rdai);
}

static struct rsnd_mod_ops rsnd_src_gen1_ops = {
	.name	= SRC_NAME,
	.probe	= rsnd_src_probe_gen1,
	.init	= rsnd_src_init_gen1,
	.quit	= rsnd_src_quit,
	.start	= rsnd_src_start_gen1,
	.stop	= rsnd_src_stop_gen1,
};

/*
 *		Gen2 functions
 */

/*
 * Gen2 rate setup: validate the FSO/FSI ratio (max 6x either way), run the
 * common setup, then program Gen2 SRCCR/BSDSR/BSISR.  BSDSR differs for
 * SRC 5-8 versus the rest.
 */
static int rsnd_src_set_convert_rate_gen2(struct rsnd_mod *mod,
					  struct rsnd_dai *rdai)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	struct rsnd_src *src = rsnd_mod_to_src(mod);
	uint ratio;
	int ret;

	/* 6 - 1/6 are very enough ratio for SRC_BSDSR */
	if (!rsnd_src_convert_rate(src))
		ratio = 0;
	else if (rsnd_src_convert_rate(src) > runtime->rate)
		ratio = 100 * rsnd_src_convert_rate(src) / runtime->rate;
	else
		ratio = 100 * runtime->rate / rsnd_src_convert_rate(src);

	if (ratio > 600) {
		dev_err(dev, "FSO/FSI ratio error\n");
		return -EINVAL;
	}

	ret = rsnd_src_set_convert_rate(mod, rdai);
	if (ret < 0)
		return ret;

	rsnd_mod_write(mod, SRC_SRCCR, 0x00011110);

	switch (rsnd_mod_id(mod)) {
	case 5:
	case 6:
	case 7:
	case 8:
		rsnd_mod_write(mod, SRC_BSDSR, 0x02400000);
		break;
	default:
		rsnd_mod_write(mod, SRC_BSDSR, 0x01800000);
		break;
	}

	rsnd_mod_write(mod, SRC_BSISR, 0x00100060);

	return 0;
}

/*
 * Gen2 timing: route the convert clock through ADG when converting,
 * otherwise use ADG's default timing for this stream.
 */
static int rsnd_src_set_convert_timing_gen2(struct rsnd_mod *mod,
					    struct rsnd_dai *rdai)
{
	struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	struct rsnd_src *src = rsnd_mod_to_src(mod);
	u32 convert_rate = rsnd_src_convert_rate(src);
	int ret;

	if (convert_rate)
		ret = rsnd_adg_set_convert_clk_gen2(mod, rdai, io,
						    runtime->rate,
						    convert_rate);
	else
		ret = rsnd_adg_set_convert_timing_gen2(mod, rdai, io);

	return ret;
}

/* Gen2 probe: set up the SRC's DMA channel. */
static int rsnd_src_probe_gen2(struct rsnd_mod *mod,
			       struct rsnd_dai *rdai)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_src *src = rsnd_mod_to_src(mod);
	struct device *dev = rsnd_priv_to_dev(priv);
	int ret;

	ret = rsnd_dma_init(priv,
			    rsnd_mod_to_dma(mod),
			    rsnd_info_is_playback(priv, src),
			    src->info->dma_id);
	if (ret < 0)
		dev_err(dev, "SRC DMA failed\n");

	dev_dbg(dev, "%s (Gen2) is probed\n", rsnd_mod_name(mod));

	return ret;
}

/* Gen2 remove: release the DMA channel taken in probe. */
static int rsnd_src_remove_gen2(struct rsnd_mod *mod,
				struct rsnd_dai *rdai)
{
	rsnd_dma_quit(rsnd_mod_to_priv(mod), rsnd_mod_to_dma(mod));

	return 0;
}

/* Gen2 init: clock on, then rate and timing setup, failing fast. */
static int rsnd_src_init_gen2(struct rsnd_mod *mod,
			      struct rsnd_dai *rdai)
{
	int ret;

	ret = rsnd_src_init(mod, rdai);
	if (ret < 0)
		return ret;

	ret = rsnd_src_set_convert_rate_gen2(mod, rdai);
	if (ret < 0)
		return ret;

	ret = rsnd_src_set_convert_timing_gen2(mod, rdai);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Gen2 start: kick DMA, then SRC_CTRL.  The 0x01/0x11 choice depends on
 * whether a DVC is attached to the stream — presumably selecting the
 * output path; NOTE(review): confirm against the Gen2 datasheet.
 */
static int rsnd_src_start_gen2(struct rsnd_mod *mod,
			       struct rsnd_dai *rdai)
{
	struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
	struct rsnd_src *src = rsnd_mod_to_src(mod);
	u32 val = rsnd_io_to_mod_dvc(io) ? 0x01 : 0x11;

	rsnd_dma_start(rsnd_mod_to_dma(&src->mod));

	rsnd_mod_write(mod, SRC_CTRL, val);

	return rsnd_src_start(mod, rdai);
}

/* Gen2 stop: SRC_CTRL off, stop DMA, then common stop. */
static int rsnd_src_stop_gen2(struct rsnd_mod *mod,
			      struct rsnd_dai *rdai)
{
	struct rsnd_src *src = rsnd_mod_to_src(mod);

	rsnd_mod_write(mod, SRC_CTRL, 0);

	rsnd_dma_stop(rsnd_mod_to_dma(&src->mod));

	return rsnd_src_stop(mod, rdai);
}

static struct rsnd_mod_ops rsnd_src_gen2_ops = {
	.name	= SRC_NAME,
	.probe	= rsnd_src_probe_gen2,
	.remove	= rsnd_src_remove_gen2,
	.init	= rsnd_src_init_gen2,
	.quit	= rsnd_src_quit,
	.start	= rsnd_src_start_gen2,
	.stop	= rsnd_src_stop_gen2,
};

/* Look up SRC module by index; out-of-range ids warn and fall back to 0. */
struct rsnd_mod *rsnd_src_mod_get(struct rsnd_priv *priv, int id)
{
	if (WARN_ON(id < 0 || id >= rsnd_src_nr(priv)))
		id = 0;

	return &((struct rsnd_src *)(priv->src) + id)->mod;
}

/*
 * Fill info->src_info / src_info_nr from the "rcar_sound,src" DT node,
 * if of_data was supplied.  Allocation failure is logged and tolerated
 * (src_info_nr stays 0, so rsnd_src_probe() sets up no SRCs).
 */
static void rsnd_of_parse_src(struct platform_device *pdev,
			      const struct rsnd_of_data *of_data,
			      struct rsnd_priv *priv)
{
	struct device_node *src_node;
	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
	struct rsnd_src_platform_info *src_info;
	struct device *dev = &pdev->dev;
	int nr;

	if (!of_data)
		return;

	src_node = of_get_child_by_name(dev->of_node, "rcar_sound,src");
	if (!src_node)
		return;

	nr = of_get_child_count(src_node);
	if (!nr)
		goto rsnd_of_parse_src_end;

	src_info = devm_kzalloc(dev,
				sizeof(struct rsnd_src_platform_info) * nr,
				GFP_KERNEL);
	if (!src_info) {
		dev_err(dev, "src info allocation error\n");
		goto rsnd_of_parse_src_end;
	}

	info->src_info		= src_info;
	info->src_info_nr	= nr;

rsnd_of_parse_src_end:
	of_node_put(src_node);
}

/*
 * Top-level SRC probe: pick the Gen1/Gen2 ops table, parse DT, allocate
 * one rsnd_src per configured instance and bind its clock ("src.N") and
 * module core.  Returns 0 when no SRC is configured.
 */
int rsnd_src_probe(struct platform_device *pdev,
		   const struct rsnd_of_data *of_data,
		   struct rsnd_priv *priv)
{
	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_src *src;
	struct rsnd_mod_ops *ops;
	struct clk *clk;
	char name[RSND_SRC_NAME_SIZE];
	int i, nr;

	ops = NULL;
	if (rsnd_is_gen1(priv))
		ops = &rsnd_src_gen1_ops;
	if (rsnd_is_gen2(priv))
		ops = &rsnd_src_gen2_ops;
	if (!ops) {
		dev_err(dev, "unknown Generation\n");
		return -EIO;
	}

	rsnd_of_parse_src(pdev, of_data, priv);

	/*
	 * init SRC
	 */
	nr = info->src_info_nr;
	if (!nr)
		return 0;

	src = devm_kzalloc(dev, sizeof(*src) * nr, GFP_KERNEL);
	if (!src) {
		dev_err(dev, "SRC allocate failed\n");
		return -ENOMEM;
	}

	priv->src_nr	= nr;
	priv->src	= src;

	for_each_rsnd_src(src, priv, i) {
		snprintf(name, RSND_SRC_NAME_SIZE, "%s.%d",
			 SRC_NAME, i);

		clk = devm_clk_get(dev, name);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		src->info	= &info->src_info[i];
		src->clk	= clk;

		rsnd_mod_init(priv, &src->mod, ops, RSND_MOD_SRC, i);

		dev_dbg(dev, "SRC%d probed\n", i);
	}

	return 0;
}
gpl-2.0
synexxus/synnix
drivers/gpu/drm/nouveau/nouveau_bios.c
422
60387
/* * Copyright 2005-2006 Erik Waling * Copyright 2006 Stephane Marchesin * Copyright 2007-2009 Stuart Bennett * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <drm/drmP.h> #include "nouveau_drm.h" #include "nouveau_reg.h" #include "dispnv04/hw.h" #include "nouveau_encoder.h" #include <linux/io-mapping.h> #include <linux/firmware.h> /* these defines are made up */ #define NV_CIO_CRE_44_HEADA 0x0 #define NV_CIO_CRE_44_HEADB 0x3 #define FEATURE_MOBILE 0x10 /* also FEATURE_QUADRO for BMP */ #define EDID1_LEN 128 #define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg) #define LOG_OLD_VALUE(x) struct init_exec { bool execute; bool repeat; }; static bool nv_cksum(const uint8_t *data, unsigned int length) { /* * There's a few checksums in the BIOS, so here's a generic checking * function. 
*/ int i; uint8_t sum = 0; for (i = 0; i < length; i++) sum += data[i]; if (sum) return true; return false; } static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk) { int compare_record_len, i = 0; uint16_t compareclk, scriptptr = 0; if (bios->major_version < 5) /* pre BIT */ compare_record_len = 3; else compare_record_len = 4; do { compareclk = ROM16(bios->data[clktable + compare_record_len * i]); if (pxclk >= compareclk * 10) { if (bios->major_version < 5) { uint8_t tmdssub = bios->data[clktable + 2 + compare_record_len * i]; scriptptr = ROM16(bios->data[bios->init_script_tbls_ptr + tmdssub * 2]); } else scriptptr = ROM16(bios->data[clktable + 2 + compare_record_len * i]); break; } i++; } while (compareclk); return scriptptr; } static void run_digital_op_script(struct drm_device *dev, uint16_t scriptptr, struct dcb_output *dcbent, int head, bool dl) { struct nouveau_drm *drm = nouveau_drm(dev); NV_INFO(drm, "0x%04X: Parsing digital output script table\n", scriptptr); NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, head ? NV_CIO_CRE_44_HEADB : NV_CIO_CRE_44_HEADA); nouveau_bios_run_init_table(dev, scriptptr, dcbent, head); nv04_dfp_bind_head(dev, dcbent, head, dl); } static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvbios *bios = &drm->vbios; uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & DCB_OUTPUT_C ? 
1 : 0); uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]); if (!bios->fp.xlated_entry || !sub || !scriptofs) return -EINVAL; run_digital_op_script(dev, scriptofs, dcbent, head, bios->fp.dual_link); if (script == LVDS_PANEL_OFF) { /* off-on delay in ms */ mdelay(ROM16(bios->data[bios->fp.xlated_entry + 7])); } #ifdef __powerpc__ /* Powerbook specific quirks */ if (script == LVDS_RESET && (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329)) nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); #endif return 0; } static int run_lvds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk) { /* * The BIT LVDS table's header has the information to setup the * necessary registers. Following the standard 4 byte header are: * A bitmask byte and a dual-link transition pxclk value for use in * selecting the init script when not using straps; 4 script pointers * for panel power, selected by output and on/off; and 8 table pointers * for panel init, the needed one determined by output, and bits in the * conf byte. These tables are similar to the TMDS tables, consisting * of a list of pxclks and script pointers. */ struct nouveau_drm *drm = nouveau_drm(dev); struct nvbios *bios = &drm->vbios; unsigned int outputset = (dcbent->or == 4) ? 
1 : 0; uint16_t scriptptr = 0, clktable; /* * For now we assume version 3.0 table - g80 support will need some * changes */ switch (script) { case LVDS_INIT: return -ENOSYS; case LVDS_BACKLIGHT_ON: case LVDS_PANEL_ON: scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 7 + outputset * 2]); break; case LVDS_BACKLIGHT_OFF: case LVDS_PANEL_OFF: scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]); break; case LVDS_RESET: clktable = bios->fp.lvdsmanufacturerpointer + 15; if (dcbent->or == 4) clktable += 8; if (dcbent->lvdsconf.use_straps_for_mode) { if (bios->fp.dual_link) clktable += 4; if (bios->fp.if_is_24bit) clktable += 2; } else { /* using EDID */ int cmpval_24bit = (dcbent->or == 4) ? 4 : 1; if (bios->fp.dual_link) { clktable += 4; cmpval_24bit <<= 1; } if (bios->fp.strapless_is_24bit & cmpval_24bit) clktable += 2; } clktable = ROM16(bios->data[clktable]); if (!clktable) { NV_ERROR(drm, "Pixel clock comparison table not found\n"); return -ENOENT; } scriptptr = clkcmptable(bios, clktable, pxclk); } if (!scriptptr) { NV_ERROR(drm, "LVDS output init script not found\n"); return -ENOENT; } run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link); return 0; } int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk) { /* * LVDS operations are multiplexed in an effort to present a single API * which works with two vastly differing underlying structures. 
* This acts as the demux */ struct nouveau_drm *drm = nouveau_drm(dev); struct nvif_object *device = &drm->device.object; struct nvbios *bios = &drm->vbios; uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; uint32_t sel_clk_binding, sel_clk; int ret; if (bios->fp.last_script_invoc == (script << 1 | head) || !lvds_ver || (lvds_ver >= 0x30 && script == LVDS_INIT)) return 0; if (!bios->fp.lvds_init_run) { bios->fp.lvds_init_run = true; call_lvds_script(dev, dcbent, head, LVDS_INIT, pxclk); } if (script == LVDS_PANEL_ON && bios->fp.reset_after_pclk_change) call_lvds_script(dev, dcbent, head, LVDS_RESET, pxclk); if (script == LVDS_RESET && bios->fp.power_off_for_reset) call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk); NV_INFO(drm, "Calling LVDS script %d:\n", script); /* don't let script change pll->head binding */ sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000; if (lvds_ver < 0x30) ret = call_lvds_manufacturer_script(dev, dcbent, head, script); else ret = run_lvds_table(dev, dcbent, head, script, pxclk); bios->fp.last_script_invoc = (script << 1 | head); sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); /* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */ nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0); return ret; } struct lvdstableheader { uint8_t lvds_ver, headerlen, recordlen; }; static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct nvbios *bios, struct lvdstableheader *lth) { /* * BMP version (0xa) LVDS table has a simple header of version and * record length. The BIT LVDS table has the typical BIT table header: * version byte, header length byte, record length byte, and a byte for * the maximum number of records that can be held in the table. 
*/ struct nouveau_drm *drm = nouveau_drm(dev); uint8_t lvds_ver, headerlen, recordlen; memset(lth, 0, sizeof(struct lvdstableheader)); if (bios->fp.lvdsmanufacturerpointer == 0x0) { NV_ERROR(drm, "Pointer to LVDS manufacturer table invalid\n"); return -EINVAL; } lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; switch (lvds_ver) { case 0x0a: /* pre NV40 */ headerlen = 2; recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1]; break; case 0x30: /* NV4x */ headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1]; if (headerlen < 0x1f) { NV_ERROR(drm, "LVDS table header not understood\n"); return -EINVAL; } recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2]; break; case 0x40: /* G80/G90 */ headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1]; if (headerlen < 0x7) { NV_ERROR(drm, "LVDS table header not understood\n"); return -EINVAL; } recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2]; break; default: NV_ERROR(drm, "LVDS table revision %d.%d not currently supported\n", lvds_ver >> 4, lvds_ver & 0xf); return -ENOSYS; } lth->lvds_ver = lvds_ver; lth->headerlen = headerlen; lth->recordlen = recordlen; return 0; } static int get_fp_strap(struct drm_device *dev, struct nvbios *bios) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvif_object *device = &drm->device.object; /* * The fp strap is normally dictated by the "User Strap" in * PEXTDEV_BOOT_0[20:16], but on BMP cards when bit 2 of the * Internal_Flags struct at 0x48 is set, the user strap gets overriden * by the PCI subsystem ID during POST, but not before the previous user * strap has been committed to CR58 for CR57=0xf on head A, which may be * read and used instead */ if (bios->major_version < 5 && bios->data[0x48] & 0x4) return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; else return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; } static int 
parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) { struct nouveau_drm *drm = nouveau_drm(dev); uint8_t *fptable; uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex; int ret, ofs, fpstrapping; struct lvdstableheader lth; if (bios->fp.fptablepointer == 0x0) { /* Apple cards don't have the fp table; the laptops use DDC */ /* The table is also missing on some x86 IGPs */ #ifndef __powerpc__ NV_ERROR(drm, "Pointer to flat panel table invalid\n"); #endif bios->digital_min_front_porch = 0x4b; return 0; } fptable = &bios->data[bios->fp.fptablepointer]; fptable_ver = fptable[0]; switch (fptable_ver) { /* * BMP version 0x5.0x11 BIOSen have version 1 like tables, but no * version field, and miss one of the spread spectrum/PWM bytes. * This could affect early GF2Go parts (not seen any appropriate ROMs * though). Here we assume that a version of 0x05 matches this case * (combining with a BMP version check would be better), as the * common case for the panel type field is 0x0005, and that is in * fact what we are reading the first byte of. 
*/ case 0x05: /* some NV10, 11, 15, 16 */ recordlen = 42; ofs = -1; break; case 0x10: /* some NV15/16, and NV11+ */ recordlen = 44; ofs = 0; break; case 0x20: /* NV40+ */ headerlen = fptable[1]; recordlen = fptable[2]; fpentries = fptable[3]; /* * fptable[4] is the minimum * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap */ bios->digital_min_front_porch = fptable[4]; ofs = -7; break; default: NV_ERROR(drm, "FP table revision %d.%d not currently supported\n", fptable_ver >> 4, fptable_ver & 0xf); return -ENOSYS; } if (!bios->is_mobile) /* !mobile only needs digital_min_front_porch */ return 0; ret = parse_lvds_manufacturer_table_header(dev, bios, &lth); if (ret) return ret; if (lth.lvds_ver == 0x30 || lth.lvds_ver == 0x40) { bios->fp.fpxlatetableptr = bios->fp.lvdsmanufacturerpointer + lth.headerlen + 1; bios->fp.xlatwidth = lth.recordlen; } if (bios->fp.fpxlatetableptr == 0x0) { NV_ERROR(drm, "Pointer to flat panel xlat table invalid\n"); return -EINVAL; } fpstrapping = get_fp_strap(dev, bios); fpindex = bios->data[bios->fp.fpxlatetableptr + fpstrapping * bios->fp.xlatwidth]; if (fpindex > fpentries) { NV_ERROR(drm, "Bad flat panel table index\n"); return -ENOENT; } /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */ if (lth.lvds_ver > 0x10) bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf; /* * If either the strap or xlated fpindex value are 0xf there is no * panel using a strap-derived bios mode present. 
this condition * includes, but is different from, the DDC panel indicator above */ if (fpstrapping == 0xf || fpindex == 0xf) return 0; bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen + recordlen * fpindex + ofs; NV_INFO(drm, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n", ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1, ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1, ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10); return 0; } bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvbios *bios = &drm->vbios; uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr]; if (!mode) /* just checking whether we can produce a mode */ return bios->fp.mode_ptr; memset(mode, 0, sizeof(struct drm_display_mode)); /* * For version 1.0 (version in byte 0): * bytes 1-2 are "panel type", including bits on whether Colour/mono, * single/dual link, and type (TFT etc.) * bytes 3-6 are bits per colour in RGBX */ mode->clock = ROM16(mode_entry[7]) * 10; /* bytes 9-10 is HActive */ mode->hdisplay = ROM16(mode_entry[11]) + 1; /* * bytes 13-14 is HValid Start * bytes 15-16 is HValid End */ mode->hsync_start = ROM16(mode_entry[17]) + 1; mode->hsync_end = ROM16(mode_entry[19]) + 1; mode->htotal = ROM16(mode_entry[21]) + 1; /* bytes 23-24, 27-30 similarly, but vertical */ mode->vdisplay = ROM16(mode_entry[25]) + 1; mode->vsync_start = ROM16(mode_entry[31]) + 1; mode->vsync_end = ROM16(mode_entry[33]) + 1; mode->vtotal = ROM16(mode_entry[35]) + 1; mode->flags |= (mode_entry[37] & 0x10) ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; mode->flags |= (mode_entry[37] & 0x1) ? 
					DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
	/*
	 * bytes 38-39 relate to spread spectrum settings
	 * bytes 40-43 are something to do with PWM
	 */

	mode->status = MODE_OK;
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(mode);
	return bios->fp.mode_ptr;
}

/*
 * Returns 0 on success or a negative errno; on success the dual-link flag is
 * written through *dl, and (for the BMP 0x0a table) the 24-bit panel flag
 * through *if_is_24bit.  Also caches the selected entry in bios->fp.
 */
int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, bool *if_is_24bit)
{
	/*
	 * The LVDS table header is (mostly) described in
	 * parse_lvds_manufacturer_table_header(): the BIT header additionally
	 * contains the dual-link transition pxclk (in 10s kHz), at byte 5 - if
	 * straps are not being used for the panel, this specifies the frequency
	 * at which modes should be set up in the dual link style.
	 *
	 * Following the header, the BMP (ver 0xa) table has several records,
	 * indexed by a separate xlat table, indexed in turn by the fp strap in
	 * EXTDEV_BOOT. Each record had a config byte, followed by 6 script
	 * numbers for use by INIT_SUB which controlled panel init and power,
	 * and finally a dword of ms to sleep between power off and on
	 * operations.
	 *
	 * In the BIT versions, the table following the header serves as an
	 * integrated config and xlat table: the records in the table are
	 * indexed by the FP strap nibble in EXTDEV_BOOT, and each record has
	 * two bytes - the first as a config byte, the second for indexing the
	 * fp mode table pointed to by the BIT 'D' table
	 *
	 * DDC is not used until after card init, so selecting the correct table
	 * entry and setting the dual link flag for EDID equipped panels,
	 * requiring tests against the native-mode pixel clock, cannot be done
	 * until later, when this function should be called with non-zero pxclk
	 */
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvbios *bios = &drm->vbios;
	int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
	struct lvdstableheader lth;
	uint16_t lvdsofs;
	int ret, chip_version = bios->chip_version;

	ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
	if (ret)
		return ret;

	switch (lth.lvds_ver) {
	case 0x0a:	/* pre NV40 */
		lvdsmanufacturerindex = bios->data[
					bios->fp.fpxlatemanufacturertableptr +
					fpstrapping];

		/* we're done if this isn't the EDID panel case */
		if (!pxclk)
			break;

		if (chip_version < 0x25) {
			/* nv17 behaviour
			 *
			 * It seems the old style lvds script pointer is reused
			 * to select 18/24 bit colour depth for EDID panels.
			 */
			lvdsmanufacturerindex =
				(bios->legacy.lvds_single_a_script_ptr & 1) ?
									2 : 0;
			if (pxclk >= bios->fp.duallink_transition_clk)
				lvdsmanufacturerindex++;
		} else if (chip_version < 0x30) {
			/* nv28 behaviour (off-chip encoder)
			 *
			 * nv28 does a complex dance of first using byte 121 of
			 * the EDID to choose the lvdsmanufacturerindex, then
			 * later attempting to match the EDID manufacturer and
			 * product IDs in a table (signature 'pidt' (panel id
			 * table?)), setting an lvdsmanufacturerindex of 0 and
			 * an fp strap of the match index (or 0xf if none)
			 */
			lvdsmanufacturerindex = 0;
		} else {
			/* nv31, nv34 behaviour */
			lvdsmanufacturerindex = 0;
			if (pxclk >= bios->fp.duallink_transition_clk)
				lvdsmanufacturerindex = 2;
			if (pxclk >= 140000)
				lvdsmanufacturerindex = 3;
		}

		/*
		 * nvidia set the high nibble of (cr57=f, cr58) to
		 * lvdsmanufacturerindex in this case; we don't
		 */
		break;
	case 0x30:	/* NV4x */
	case 0x40:	/* G80/G90 */
		lvdsmanufacturerindex = fpstrapping;
		break;
	default:
		NV_ERROR(drm, "LVDS table revision not currently supported\n");
		return -ENOSYS;
	}

	lvdsofs = bios->fp.xlated_entry = bios->fp.lvdsmanufacturerpointer + lth.headerlen + lth.recordlen * lvdsmanufacturerindex;
	switch (lth.lvds_ver) {
	case 0x0a:
		bios->fp.power_off_for_reset = bios->data[lvdsofs] & 1;
		bios->fp.reset_after_pclk_change = bios->data[lvdsofs] & 2;
		bios->fp.dual_link = bios->data[lvdsofs] & 4;
		bios->fp.link_c_increment = bios->data[lvdsofs] & 8;
		*if_is_24bit = bios->data[lvdsofs] & 16;
		break;
	case 0x30:
	case 0x40:
		/*
		 * No sign of the "power off for reset" or "reset for panel
		 * on" bits, but it's safer to assume we should
		 */
		bios->fp.power_off_for_reset = true;
		bios->fp.reset_after_pclk_change = true;

		/*
		 * It's ok lvdsofs is wrong for nv4x edid case; dual_link is
		 * over-written, and if_is_24bit isn't used
		 */
		bios->fp.dual_link = bios->data[lvdsofs] & 1;
		bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
		bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
		bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
		break;
	}

	/* set dual_link flag for EDID case */
	if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
		bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);

	*dl = bios->fp.dual_link;

	return 0;
}

int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
{
	/*
	 * the pxclk parameter is in kHz
	 *
	 * This runs the TMDS regs setting code found on BIT bios cards
	 *
	 * For ffs(or) == 1 use the first table, for ffs(or) == 2 and
	 * ffs(or) == 3, use the second.
	 */

	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_object *device = &drm->device.object;
	struct nvbios *bios = &drm->vbios;
	int cv = bios->chip_version;
	uint16_t clktable = 0, scriptptr;
	uint32_t sel_clk_binding, sel_clk;

	/* pre-nv17 off-chip tmds uses scripts, post nv17 doesn't;
	 * nothing to do here for post-nv17 off-chip TMDS, so report success */
	if (cv >= 0x17 && cv != 0x1a && cv != 0x20 &&
	    dcbent->location != DCB_LOC_ON_CHIP)
		return 0;

	switch (ffs(dcbent->or)) {
	case 1:
		clktable = bios->tmds.output0_script_ptr;
		break;
	case 2:
	case 3:
		clktable = bios->tmds.output1_script_ptr;
		break;
	}

	if (!clktable) {
		NV_ERROR(drm, "Pixel clock comparison table not found\n");
		return -EINVAL;
	}

	scriptptr = clkcmptable(bios, clktable, pxclk);
	if (!scriptptr) {
		NV_ERROR(drm, "TMDS output init script not found\n");
		return -ENOENT;
	}

	/* don't let script change pll->head binding */
	sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
	/* 165 MHz is the single-link TMDS limit; above it run the
	 * dual-link variant of the script */
	run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);

	return 0;
}

static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
{
	/*
	 * Parses the init table segment for pointers used in script execution.
	 *
	 * offset + 0  (16 bits): init script tables pointer
	 * offset + 2  (16 bits): macro index table pointer
	 * offset + 4  (16 bits): macro table pointer
	 * offset + 6  (16 bits): condition table pointer
	 * offset + 8  (16 bits): io condition table pointer
	 * offset + 10 (16 bits): io flag condition table pointer
	 * offset + 12 (16 bits): init function table pointer
	 */

	/* only the init script tables pointer is captured here; the other
	 * documented pointers above are not stored by this function */
	bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
}

static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
	/*
	 * Parses the load detect values for g80 cards.
	 *
	 * offset + 0 (16 bits): loadval table pointer
	 */

	struct nouveau_drm *drm = nouveau_drm(dev);
	uint16_t load_table_ptr;
	uint8_t version, headerlen, entrylen, num_entries;

	if (bitentry->length != 3) {
		NV_ERROR(drm, "Do not understand BIT A table\n");
		return -EINVAL;
	}

	load_table_ptr = ROM16(bios->data[bitentry->offset]);

	if (load_table_ptr == 0x0) {
		NV_DEBUG(drm, "Pointer to BIT loadval table invalid\n");
		return -EINVAL;
	}

	version = bios->data[load_table_ptr];

	if (version != 0x10) {
		NV_ERROR(drm, "BIT loadval table version %d.%d not supported\n",
			 version >> 4, version & 0xF);
		return -ENOSYS;
	}

	headerlen = bios->data[load_table_ptr + 1];
	entrylen = bios->data[load_table_ptr + 2];
	num_entries = bios->data[load_table_ptr + 3];

	if (headerlen != 4 || entrylen != 4 || num_entries != 2) {
		NV_ERROR(drm, "Do not understand BIT loadval table\n");
		return -EINVAL;
	}

	/* First entry is normal dac, 2nd tv-out perhaps? */
	bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;

	return 0;
}

static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios,
				       struct bit_entry *bitentry)
{
	/*
	 * Parses the flat panel table segment that the bit entry points to.
	 * Starting at bitentry->offset:
	 *
	 * offset + 0  (16 bits): ??? table pointer - seems to have 18 byte
	 * records beginning with a freq.
	 * offset + 2  (16 bits): mode table pointer
	 */
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (bitentry->length != 4) {
		NV_ERROR(drm, "Do not understand BIT display table\n");
		return -EINVAL;
	}

	bios->fp.fptablepointer = ROM16(bios->data[bitentry->offset + 2]);

	return 0;
}

static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios,
				    struct bit_entry *bitentry)
{
	/*
	 * Parses the init table segment that the bit entry points to.
	 *
	 * See parse_script_table_pointers for layout
	 */
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (bitentry->length < 14) {
		NV_ERROR(drm, "Do not understand init table\n");
		return -EINVAL;
	}

	parse_script_table_pointers(bios, bitentry->offset);
	return 0;
}

static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios,
				 struct bit_entry *bitentry)
{
	/*
	 * BIT 'i' (info?) table
	 *
	 * offset + 0  (32 bits): BIOS version dword (as in B table)
	 * offset + 5  (8  bits): BIOS feature byte (same as for BMP?)
	 * offset + 13 (16 bits): pointer to table containing DAC load
	 * detection comparison values
	 *
	 * There's other things in the table, purpose unknown
	 */
	struct nouveau_drm *drm = nouveau_drm(dev);
	uint16_t daccmpoffset;
	uint8_t dacver, dacheaderlen;

	if (bitentry->length < 6) {
		NV_ERROR(drm, "BIT i table too short for needed information\n");
		return -EINVAL;
	}

	/*
	 * bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's
	 * Quadro identity crisis), other bits possibly as for BMP feature byte
	 */
	bios->feature_byte = bios->data[bitentry->offset + 5];
	bios->is_mobile = bios->feature_byte & FEATURE_MOBILE;

	/* note: feature byte / mobile flag above have already been stored
	 * even when the table is too short for the DAC comparison pointer */
	if (bitentry->length < 15) {
		NV_WARN(drm, "BIT i table not long enough for DAC load "
			"detection comparison table\n");
		return -EINVAL;
	}

	daccmpoffset = ROM16(bios->data[bitentry->offset + 13]);

	/* doesn't exist on g80 */
	if (!daccmpoffset)
		return 0;

	/*
	 * The first value in the table, following the header, is the
	 * comparison value, the second entry is a comparison value for
	 * TV load detection.
	 */

	dacver = bios->data[daccmpoffset];
	dacheaderlen = bios->data[daccmpoffset + 1];

	if (dacver != 0x00 && dacver != 0x10) {
		NV_WARN(drm, "DAC load detection comparison table version "
			"%d.%d not known\n", dacver >> 4, dacver & 0xf);
		return -ENOSYS;
	}

	bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
	bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);

	return 0;
}

static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
				    struct bit_entry *bitentry)
{
	/*
	 * Parses the LVDS table segment that the bit entry points to.
	 * Starting at bitentry->offset:
	 *
	 * offset + 0  (16 bits): LVDS strap xlate table pointer
	 */
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (bitentry->length != 2) {
		NV_ERROR(drm, "Do not understand BIT LVDS table\n");
		return -EINVAL;
	}

	/*
	 * No idea if it's still called the LVDS manufacturer table, but
	 * the concept's close enough.
	 */
	bios->fp.lvdsmanufacturerpointer = ROM16(bios->data[bitentry->offset]);

	return 0;
}

static int
parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
		      struct bit_entry *bitentry)
{
	/*
	 * offset + 2  (8  bits): number of options in an
	 * 	INIT_RAM_RESTRICT_ZM_REG_GROUP opcode option set
	 * offset + 3  (16 bits): pointer to strap xlate table for RAM
	 * 	restrict option selection
	 *
	 * There's a bunch of bits in this table other than the RAM restrict
	 * stuff that we don't use - their use currently unknown
	 */

	/*
	 * Older bios versions don't have a sufficiently long table for
	 * what we want
	 */
	if (bitentry->length < 0x5)
		return 0;

	/* field offsets moved between table versions 1 and 2 */
	if (bitentry->version < 2) {
		bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
		bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
	} else {
		bios->ram_restrict_group_count = bios->data[bitentry->offset + 0];
		bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 1]);
	}

	return 0;
}

static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
				    struct bit_entry *bitentry)
{
	/*
	 * Parses the
	 * pointer to the TMDS table
	 *
	 * Starting at bitentry->offset:
	 *
	 * offset + 0  (16 bits): TMDS table pointer
	 *
	 * The TMDS table is typically found just before the DCB table, with a
	 * characteristic signature of 0x11,0x13 (1.1 being version, 0x13 being
	 * length?)
	 *
	 * At offset +7 is a pointer to a script, which I don't know how to
	 * run yet.
	 * At offset +9 is a pointer to another script, likewise
	 * Offset +11 has a pointer to a table where the first word is a pxclk
	 * frequency and the second word a pointer to a script, which should be
	 * run if the comparison pxclk frequency is less than the pxclk desired.
	 * This repeats for decreasing comparison frequencies
	 * Offset +13 has a pointer to a similar table
	 * The selection of table (and possibly +7/+9 script) is dictated by
	 * "or" from the DCB.
	 */
	struct nouveau_drm *drm = nouveau_drm(dev);
	uint16_t tmdstableptr, script1, script2;

	if (bitentry->length != 2) {
		NV_ERROR(drm, "Do not understand BIT TMDS table\n");
		return -EINVAL;
	}

	tmdstableptr = ROM16(bios->data[bitentry->offset]);
	if (!tmdstableptr) {
		NV_ERROR(drm, "Pointer to TMDS table invalid\n");
		return -EINVAL;
	}

	NV_INFO(drm, "TMDS table version %d.%d\n",
		bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);

	/* nv50+ has v2.0, but we don't parse it atm */
	if (bios->data[tmdstableptr] != 0x11)
		return -ENOSYS;

	/*
	 * These two scripts are odd: they don't seem to get run even when
	 * they are not stubbed.
	 */
	/* NOTE(review): script1/script2 are used as indices into bios->data
	 * without checking them for zero first; a zero pointer reads byte 0
	 * of the image.  Harmless (warn-only path) but worth confirming. */
	script1 = ROM16(bios->data[tmdstableptr + 7]);
	script2 = ROM16(bios->data[tmdstableptr + 9]);
	if (bios->data[script1] != 'q' || bios->data[script2] != 'q')
		NV_WARN(drm, "TMDS table script pointers not stubbed\n");

	bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]);
	bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]);

	return 0;
}

/* association of a BIT table id character with its parser */
struct bit_table {
	const char id;
	int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
};

#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })

/*
 * Look up BIT table entry 'id' in the BIT structure at bios->offset and fill
 * *bit with its version/length/offset/data.  Returns 0 on success, -ENODEV
 * for non-BIT images, -ENOENT when the id is not present.
 */
int
bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvbios *bios = &drm->vbios;
	u8 entries, *entry;

	if (bios->type != NVBIOS_BIT)
		return -ENODEV;

	/* BIT header: entry count at +10, entries start at +12, entry
	 * stride stored at +9 */
	entries = bios->data[bios->offset + 10];
	entry = &bios->data[bios->offset + 12];
	while (entries--) {
		if (entry[0] == id) {
			bit->id = entry[0];
			bit->version = entry[1];
			bit->length = ROM16(entry[2]);
			bit->offset = ROM16(entry[4]);
			bit->data = ROMPTR(dev, entry[4]);
			return 0;
		}

		entry += bios->data[bios->offset + 9];
	}

	return -ENOENT;
}

static int
parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
		struct bit_table *table)
{
	struct drm_device *dev = bios->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct bit_entry bitentry;

	if (bit_table(dev, table->id, &bitentry) == 0)
		return table->parse_fn(dev, bios, &bitentry);

	NV_INFO(drm, "BIT table '%c' not found\n", table->id);
	return -ENOSYS;
}

static int
parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
{
	int ret;

	/*
	 * The only restriction on parsing order currently is having 'i' first
	 * for use of bios->*_version or bios->feature_byte while parsing;
	 * functions shouldn't be actually *doing* anything apart from pulling
	 * data from the image into the bios struct, thus no interdependencies
	 */
	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('i', i));
	if (ret) /* info? */
		return ret;
	if (bios->major_version >= 0x60) /* g80+ */
		parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
	parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
	if (ret)
		return ret;
	/* failures of the remaining optional tables are deliberately
	 * ignored */
	parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
	parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
	parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));

	return 0;
}

static int
parse_bmp_structure(struct drm_device *dev, struct nvbios *bios,
		    unsigned int offset)
{
	/*
	 * Parses the BMP structure for useful things, but does not act on them
	 *
	 * offset +   5: BMP major version
	 * offset +   6: BMP minor version
	 * offset +   9: BMP feature byte
	 * offset +  10: BCD encoded BIOS version
	 *
	 * offset +  18: init script table pointer (for bios versions < 5.10h)
	 * offset +  20: extra init script table pointer (for bios
	 * versions < 5.10h)
	 *
	 * offset +  24: memory init table pointer (used on early bios versions)
	 * offset +  26: SDR memory sequencing setup data table
	 * offset +  28: DDR memory sequencing setup data table
	 *
	 * offset +  54: index of I2C CRTC pair to use for CRT output
	 * offset +  55: index of I2C CRTC pair to use for TV output
	 * offset +  56: index of I2C CRTC pair to use for flat panel output
	 * offset +  58: write CRTC index for I2C pair 0
	 * offset +  59: read CRTC index for I2C pair 0
	 * offset +  60: write CRTC index for I2C pair 1
	 * offset +  61: read CRTC index for I2C pair 1
	 *
	 * offset +  67: maximum internal PLL frequency (single stage PLL)
	 * offset +  71: minimum internal PLL frequency (single stage PLL)
	 *
	 * offset +  75: script table pointers, as described in
	 * parse_script_table_pointers
	 *
	 * offset +  89: TMDS single link output A table pointer
	 * offset +  91: TMDS single link output B table pointer
	 * offset +  95: LVDS single link output A table pointer
	 * offset + 105: flat panel timings table pointer
	 * offset + 107: flat panel strapping translation table pointer
	 * offset + 117: LVDS manufacturer
	 * panel config table pointer
	 * offset + 119: LVDS manufacturer strapping translation table pointer
	 *
	 * offset + 142: PLL limits table pointer
	 *
	 * offset + 156: minimum pixel clock for LVDS dual link
	 */

	struct nouveau_drm *drm = nouveau_drm(dev);
	uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor;
	uint16_t bmplength;
	uint16_t legacy_scripts_offset, legacy_i2c_offset;

	/* load needed defaults in case we can't parse this info */
	bios->digital_min_front_porch = 0x4b;
	bios->fmaxvco = 256000;
	bios->fminvco = 128000;
	bios->fp.duallink_transition_clk = 90000;

	bmp_version_major = bmp[5];
	bmp_version_minor = bmp[6];

	NV_INFO(drm, "BMP version %d.%d\n", bmp_version_major, bmp_version_minor);

	/*
	 * Make sure that 0x36 is blank and can't be mistaken for a DCB
	 * pointer on early versions
	 */
	if (bmp_version_major < 5)
		*(uint16_t *)&bios->data[0x36] = 0;

	/*
	 * Seems that the minor version was 1 for all major versions prior
	 * to 5. Version 6 could theoretically exist, but I suspect BIT
	 * happened instead.
	 */
	if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) {
		NV_ERROR(drm, "You have an unsupported BMP version. "
				"Please send in your bios\n");
		return -ENOSYS;
	}

	/* derive the usable table length from the version; exact where
	 * known, otherwise the minimum needed for the members we read */
	if (bmp_version_major == 0)
		/* nothing that's currently useful in this version */
		return 0;
	else if (bmp_version_major == 1)
		bmplength = 44; /* exact for 1.01 */
	else if (bmp_version_major == 2)
		bmplength = 48; /* exact for 2.01 */
	else if (bmp_version_major == 3)
		bmplength = 54;
		/* guessed - mem init tables added in this version */
	else if (bmp_version_major == 4 || bmp_version_minor < 0x1)
		/* don't know if 5.0 exists... */
		bmplength = 62;
		/* guessed - BMP I2C indices added in version 4*/
	else if (bmp_version_minor < 0x6)
		bmplength = 67; /* exact for 5.01 */
	else if (bmp_version_minor < 0x10)
		bmplength = 75; /* exact for 5.06 */
	else if (bmp_version_minor == 0x10)
		bmplength = 89; /* exact for 5.10h */
	else if (bmp_version_minor < 0x14)
		bmplength = 118; /* exact for 5.11h */
	else if (bmp_version_minor < 0x24)
		/*
		 * Not sure of version where pll limits came in;
		 * certainly exist by 0x24 though.
		 */
		/* length not exact: this is long enough to get lvds members */
		bmplength = 123;
	else if (bmp_version_minor < 0x27)
		/*
		 * Length not exact: this is long enough to get pll limit
		 * member
		 */
		bmplength = 144;
	else
		/*
		 * Length not exact: this is long enough to get dual link
		 * transition clock.
		 */
		bmplength = 158;

	/* checksum */
	if (nv_cksum(bmp, 8)) {
		NV_ERROR(drm, "Bad BMP checksum\n");
		return -EINVAL;
	}

	/*
	 * Bit 4 seems to indicate either a mobile bios or a quadro card --
	 * mobile behaviour consistent (nv11+), quadro only seen nv18gl-nv36gl
	 * (not nv10gl), bit 5 that the flat panel tables are present, and
	 * bit 6 a tv bios.
	 */
	bios->feature_byte = bmp[9];

	if (bmp_version_major < 5 || bmp_version_minor < 0x10)
		bios->old_style_init = true;
	legacy_scripts_offset = 18;
	if (bmp_version_major < 2)
		legacy_scripts_offset -= 4;
	bios->init_script_tbls_ptr = ROM16(bmp[legacy_scripts_offset]);
	bios->extra_init_script_tbl_ptr = ROM16(bmp[legacy_scripts_offset + 2]);

	if (bmp_version_major > 2) {	/* appears in BMP 3 */
		bios->legacy.mem_init_tbl_ptr = ROM16(bmp[24]);
		bios->legacy.sdr_seq_tbl_ptr = ROM16(bmp[26]);
		bios->legacy.ddr_seq_tbl_ptr = ROM16(bmp[28]);
	}

	legacy_i2c_offset = 0x48;	/* BMP version 2 & 3 */
	if (bmplength > 61)
		legacy_i2c_offset = offset + 54;
	bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
	bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
	bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];

	if (bmplength > 74) {
		bios->fmaxvco = ROM32(bmp[67]);
		bios->fminvco = ROM32(bmp[71]);
	}
	if (bmplength > 88)
		parse_script_table_pointers(bios, offset + 75);
	if (bmplength > 94) {
		bios->tmds.output0_script_ptr = ROM16(bmp[89]);
		bios->tmds.output1_script_ptr = ROM16(bmp[91]);
		/*
		 * Never observed in use with lvds scripts, but is reused for
		 * 18/24 bit panel interface default for EDID equipped panels
		 * (if_is_24bit not set directly to avoid any oscillation).
		 */
		bios->legacy.lvds_single_a_script_ptr = ROM16(bmp[95]);
	}
	if (bmplength > 108) {
		bios->fp.fptablepointer = ROM16(bmp[105]);
		bios->fp.fpxlatetableptr = ROM16(bmp[107]);
		bios->fp.xlatwidth = 1;
	}
	if (bmplength > 120) {
		bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
		bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
	}
#if 0
	if (bmplength > 143)
		bios->pll_limit_tbl_ptr = ROM16(bmp[142]);
#endif

	if (bmplength > 157)
		bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;

	return 0;
}

/*
 * Naive substring search: returns the offset of the first occurrence of
 * str (len bytes) within data (n bytes), or 0 when not found.  Note a
 * genuine match at offset 0 is indistinguishable from "not found".
 */
static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
{
	int i, j;

	for (i = 0; i <= (n - len); i++) {
		for (j = 0; j < len; j++)
			if (data[i + j] != str[j])
				break;
		if (j == len)
			return i;
	}

	return 0;
}

/*
 * Locate and validate the DCB header in the VBIOS image (pointer at image
 * offset 0x36).  Returns the header pointer, or NULL when absent/invalid.
 */
void *
olddcb_table(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	u8 *dcb = NULL;

	if (drm->device.info.family > NV_DEVICE_INFO_V0_TNT)
		dcb = ROMPTR(dev, drm->vbios.data[0x36]);
	if (!dcb) {
		NV_WARN(drm, "No DCB data found in VBIOS\n");
		return NULL;
	}

	if (dcb[0] >= 0x42) {
		NV_WARN(drm, "DCB version 0x%02x unknown\n", dcb[0]);
		return NULL;
	} else
	if (dcb[0] >= 0x30) {
		/* v3.0+ carries the 0x4edcbdcb signature at +6 */
		if (ROM32(dcb[6]) == 0x4edcbdcb)
			return dcb;
	} else
	if (dcb[0] >= 0x20) {
		/* v2.x has the same signature at +4 */
		if (ROM32(dcb[4]) == 0x4edcbdcb)
			return dcb;
	} else
	if (dcb[0] >= 0x15) {
		/* v1.5 is identified by "DEV_REC" just before the table */
		if (!memcmp(&dcb[-7], "DEV_REC", 7))
			return dcb;
	} else {
		/*
		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
		 * always has the same single (crt) entry, even when tv-out
		 * present, so the conclusion is this version cannot really
		 * be used.
		 *
		 * v1.2 tables (some NV6/10, and NV15+) normally have the
		 * same 5 entries, which are not specific to the card and so
		 * no use.
		 *
		 * v1.2 does have an I2C table that read_dcb_i2c_table can
		 * handle, but cards exist (nv11 in #14821) with a bad i2c
		 * table pointer, so use the indices parsed in
		 * parse_bmp_structure.
		 *
		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
		 */
		NV_WARN(drm, "No useful DCB data in VBIOS\n");
		return NULL;
	}

	NV_WARN(drm, "DCB header validation failed\n");
	return NULL;
}

/* return a pointer to DCB output entry idx, or NULL when out of range */
void *
olddcb_outp(struct drm_device *dev, u8 idx)
{
	u8 *dcb = olddcb_table(dev);
	if (dcb && dcb[0] >= 0x30) {
		if (idx < dcb[2])
			return dcb + dcb[1] + (idx * dcb[3]);
	} else
	if (dcb && dcb[0] >= 0x20) {
		/* pre-v3.0: entries run from the fixed header up to the
		 * i2c table; entry size 8 (v2.x) or 10 (v1.5) bytes */
		u8 *i2c = ROMPTR(dev, dcb[2]);
		u8 *ent = dcb + 8 + (idx * 8);
		if (i2c && ent < i2c)
			return ent;
	} else
	if (dcb && dcb[0] >= 0x15) {
		u8 *i2c = ROMPTR(dev, dcb[2]);
		u8 *ent = dcb + 4 + (idx * 10);
		if (i2c && ent < i2c)
			return ent;
	}

	return NULL;
}

/*
 * Iterate DCB output entries, invoking exec() for each used entry; stops
 * early on known terminator patterns or a non-zero return from exec().
 */
int
olddcb_outp_foreach(struct drm_device *dev, void *data,
		    int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
{
	int ret, idx = -1;
	u8 *outp = NULL;
	while ((outp = olddcb_outp(dev, ++idx))) {
		if (ROM32(outp[0]) == 0x00000000)
			break; /* seen on an NV11 with DCB v1.5 */
		if (ROM32(outp[0]) == 0xffffffff)
			break; /* seen on an NV17 with DCB v2.0 */

		if ((outp[0] & 0x0f) == DCB_OUTPUT_UNUSED)
			continue;
		if ((outp[0] & 0x0f) == DCB_OUTPUT_EOL)
			break;

		ret = exec(dev, data, idx, outp);
		if (ret)
			return ret;
	}

	return 0;
}

/* connector table header (DCB v3.0+, header len >= 0x16), or NULL */
u8 *
olddcb_conntab(struct drm_device *dev)
{
	u8 *dcb = olddcb_table(dev);
	if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
		u8 *conntab = ROMPTR(dev, dcb[0x14]);
		if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
			return conntab;
	}
	return NULL;
}

/* connector table entry idx, or NULL when out of range */
u8 *
olddcb_conn(struct drm_device *dev, u8 idx)
{
	u8 *conntab = olddcb_conntab(dev);
	if (conntab && idx < conntab[2])
		return conntab + conntab[1] + (idx * conntab[3]);
	return NULL;
}

static struct dcb_output *new_dcb_entry(struct dcb_table *dcb)
{
	/* NOTE(review): no bounds check against the entry[] capacity here;
	 * presumably DCB entry counts are always small enough - confirm
	 * against the dcb_table declaration. */
	struct dcb_output *entry = &dcb->entry[dcb->entries];

	memset(entry, 0, sizeof(struct dcb_output));
	entry->index = dcb->entries++;

	return entry;
}

/* append a synthesised DCB output entry for boards with missing/bad DCBs */
static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
				 int heads, int or)
{
	struct dcb_output *entry = new_dcb_entry(dcb);

	entry->type = type;
	entry->i2c_index = i2c;
	entry->heads = heads;
	if (type != DCB_OUTPUT_ANALOG)
		entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
	entry->or = or;
}

/*
 * Decode a DCB v2.0+ output entry (conn/conf dwords) into *entry.
 * Returns false when the entry terminates parsing (EOL type).
 */
static bool
parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
		  uint32_t conn, uint32_t conf, struct dcb_output *entry)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int link = 0;

	entry->type = conn & 0xf;
	entry->i2c_index = (conn >> 4) & 0xf;
	entry->heads = (conn >> 8) & 0xf;
	entry->connector = (conn >> 12) & 0xf;
	entry->bus = (conn >> 16) & 0xf;
	entry->location = (conn >> 20) & 0x3;
	entry->or = (conn >> 24) & 0xf;

	switch (entry->type) {
	case DCB_OUTPUT_ANALOG:
		/*
		 * Although the rest of a CRT conf dword is usually
		 * zeros, mac biosen have stuff there so we must mask
		 */
		entry->crtconf.maxfreq = (dcb->version < 0x30) ?
					 (conf & 0xffff) * 10 :
					 (conf & 0xff) * 10000;
		break;
	case DCB_OUTPUT_LVDS:
		{
		uint32_t mask;
		if (conf & 0x1)
			entry->lvdsconf.use_straps_for_mode = true;
		if (dcb->version < 0x22) {
			mask = ~0xd;
			/*
			 * The laptop in bug 14567 lies and claims to not use
			 * straps when it does, so assume all DCB 2.0 laptops
			 * use straps, until a broken EDID using one is produced
			 */
			entry->lvdsconf.use_straps_for_mode = true;
			/*
			 * Both 0x4 and 0x8 show up in v2.0 tables; assume they
			 * mean the same thing (probably wrong, but might work)
			 */
			if (conf & 0x4 || conf & 0x8)
				entry->lvdsconf.use_power_scripts = true;
		} else {
			mask = ~0x7;
			if (conf & 0x2)
				entry->lvdsconf.use_acpi_for_edid = true;
			if (conf & 0x4)
				entry->lvdsconf.use_power_scripts = true;
			entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4;
			link = entry->lvdsconf.sor.link;
		}
		if (conf & mask) {
			/*
			 * Until we even try to use these on G8x, it's
			 * useless reporting unknown bits.  They all are.
			 */
			if (dcb->version >= 0x40)
				break;

			NV_ERROR(drm, "Unknown LVDS configuration bits, "
				      "please report\n");
		}
		break;
		}
	case DCB_OUTPUT_TV:
	{
		if (dcb->version >= 0x30)
			entry->tvconf.has_component_output = conf & (0x8 << 4);
		else
			entry->tvconf.has_component_output = false;

		break;
	}
	case DCB_OUTPUT_DP:
		entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
		entry->extdev = (conf & 0x0000ff00) >> 8;
		/* link bandwidth in 10s of kHz (162/270/540 MHz symbol rate) */
		switch ((conf & 0x00e00000) >> 21) {
		case 0:
			entry->dpconf.link_bw = 162000;
			break;
		case 1:
			entry->dpconf.link_bw = 270000;
			break;
		default:
			entry->dpconf.link_bw = 540000;
			break;
		}
		/* lane count */
		switch ((conf & 0x0f000000) >> 24) {
		case 0xf:
		case 0x4:
			entry->dpconf.link_nr = 4;
			break;
		case 0x3:
		case 0x2:
			entry->dpconf.link_nr = 2;
			break;
		default:
			entry->dpconf.link_nr = 1;
			break;
		}
		link = entry->dpconf.sor.link;
		break;
	case DCB_OUTPUT_TMDS:
		if (dcb->version >= 0x40) {
			entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
			entry->extdev = (conf & 0x0000ff00) >> 8;
			link = entry->tmdsconf.sor.link;
		}
		else if (dcb->version >= 0x30)
			entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8;
		else if (dcb->version >= 0x22)
			entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
		break;
	case DCB_OUTPUT_EOL:
		/* weird g80 mobile type that "nv" treats as a terminator */
		dcb->entries--;
		return false;
	default:
		break;
	}

	if (dcb->version < 0x40) {
		/* Normal entries consist of a single bit, but dual link has
		 * the next most significant bit set too
		 */
		entry->duallink_possible =
			((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
	} else {
		entry->duallink_possible = (entry->sorconf.link == 3);
	}

	/* unsure what DCB version introduces this, 3.0? */
	if (conf & 0x100000)
		entry->i2c_upper_default = true;

	entry->hasht = (entry->location << 4) | entry->type;
	entry->hashm = (entry->heads << 8) | (link << 6) | entry->or;
	return true;
}

/*
 * Decode a DCB v1.5 output entry into *entry.  Returns false on an
 * unrecognised output type (stops parsing).
 */
static bool
parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
		  uint32_t conn, uint32_t conf, struct dcb_output *entry)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	switch (conn & 0x0000000f) {
	case 0:
		entry->type = DCB_OUTPUT_ANALOG;
		break;
	case 1:
		entry->type = DCB_OUTPUT_TV;
		break;
	case 2:
	case 4:
		if (conn & 0x10)
			entry->type = DCB_OUTPUT_LVDS;
		else
			entry->type = DCB_OUTPUT_TMDS;
		break;
	case 3:
		entry->type = DCB_OUTPUT_LVDS;
		break;
	default:
		NV_ERROR(drm, "Unknown DCB type %d\n", conn & 0x0000000f);
		return false;
	}

	entry->i2c_index = (conn & 0x0003c000) >> 14;
	entry->heads = ((conn & 0x001c0000) >> 18) + 1;
	entry->or = entry->heads; /* same as heads, hopefully safe enough */
	entry->location = (conn & 0x01e00000) >> 21;
	entry->bus = (conn & 0x0e000000) >> 25;
	entry->duallink_possible = false;

	switch (entry->type) {
	case DCB_OUTPUT_ANALOG:
		entry->crtconf.maxfreq = (conf & 0xffff) * 10;
		break;
	case DCB_OUTPUT_TV:
		entry->tvconf.has_component_output = false;
		break;
	case DCB_OUTPUT_LVDS:
		if ((conn & 0x00003f00) >> 8 != 0x10)
			entry->lvdsconf.use_straps_for_mode = true;
		entry->lvdsconf.use_power_scripts = true;
		break;
	default:
		break;
	}

	return true;
}

static
void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
{
	/*
	 * DCB v2.0 lists each output combination separately.
	 * Here we merge compatible entries to have fewer outputs, with
	 * more options
	 */

	struct nouveau_drm *drm = nouveau_drm(dev);
	int i, newentries = 0;

	for (i = 0; i < dcb->entries; i++) {
		struct dcb_output *ient = &dcb->entry[i];
		int j;

		for (j = i + 1; j < dcb->entries; j++) {
			struct dcb_output *jent = &dcb->entry[j];

			if (jent->type == 100) /* already merged entry */
				continue;

			/* merge heads field when all other fields the same */
			if (jent->i2c_index == ient->i2c_index &&
			    jent->type == ient->type &&
			    jent->location == ient->location &&
			    jent->or == ient->or) {
				NV_INFO(drm, "Merging DCB entries %d and %d\n",
					i, j);
				ient->heads |= jent->heads;
				jent->type = 100; /* dummy value */
			}
		}
	}

	/* Compact entries merged into others out of dcb */
	for (i = 0; i < dcb->entries; i++) {
		if (dcb->entry[i].type == 100)
			continue;

		if (newentries != i) {
			dcb->entry[newentries] = dcb->entry[i];
			dcb->entry[newentries].index = newentries;
		}
		newentries++;
	}

	dcb->entries = newentries;
}

/*
 * Per-board DCB fixups, keyed on PCI device/subvendor/subdevice.  May
 * rewrite *conn/*conf in place; returns false to drop the entry entirely.
 */
static bool
apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct dcb_table *dcb = &drm->vbios.dcb;

	/* Dell Precision M6300
	 *   DCB entry 2: 02025312 00000010
	 *   DCB entry 3: 02026312 00000020
	 *
	 * Identical, except apparently a different connector on a
	 * different SOR link.  Not a clue how we're supposed to know
	 * which one is in use if it even shares an i2c line...
	 *
	 * Ignore the connector on the second SOR link to prevent
	 * nasty problems until this is sorted (assuming it's not a
	 * VBIOS bug).
	 */
	if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) {
		if (*conn == 0x02026312 && *conf == 0x00000020)
			return false;
	}

	/* GeForce3 Ti 200
	 *
	 * DCB reports an LVDS output that should be TMDS:
	 *   DCB entry 1: f2005014 ffffffff
	 */
	if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
		if (*conn == 0xf2005014 && *conf == 0xffffffff) {
			fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
			return false;
		}
	}

	/* XFX GT-240X-YA
	 *
	 * So many things wrong here, replace the entire encoder table..
	 */
	if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) {
		if (idx == 0) {
			*conn = 0x02001300; /* VGA, connector 1 */
			*conf = 0x00000028;
		} else
		if (idx == 1) {
			*conn = 0x01010312; /* DVI, connector 0 */
			*conf = 0x00020030;
		} else
		if (idx == 2) {
			*conn = 0x01010310; /* VGA, connector 0 */
			*conf = 0x00000028;
		} else
		if (idx == 3) {
			*conn = 0x02022362; /* HDMI, connector 2 */
			*conf = 0x00020010;
		} else {
			*conn = 0x0000000e; /* EOL */
			*conf = 0x00000000;
		}
	}

	/* Some other twisted XFX board (rhbz#694914)
	 *
	 * The DVI/VGA encoder combo that's supposed to represent the
	 * DVI-I connector actually point at two different ones, and
	 * the HDMI connector ends up paired with the VGA instead.
	 *
	 * Connector table is missing anything for VGA at all, pointing it
	 * an invalid conntab entry 2 so we figure it out ourself.
	 */
	if (nv_match_device(dev, 0x0615, 0x1682, 0x2605)) {
		if (idx == 0) {
			*conn = 0x02002300; /* VGA, connector 2 */
			*conf = 0x00000028;
		} else
		if (idx == 1) {
			*conn = 0x01010312; /* DVI, connector 0 */
			*conf = 0x00020030;
		} else
		if (idx == 2) {
			*conn = 0x04020310; /* VGA, connector 0 */
			*conf = 0x00000028;
		} else
		if (idx == 3) {
			*conn = 0x02021322; /* HDMI, connector 1 */
			*conf = 0x00020010;
		} else {
			*conn = 0x0000000e; /* EOL */
			*conf = 0x00000000;
		}
	}

	/* fdo#50830: connector indices for VGA and DVI-I are backwards */
	if (nv_match_device(dev, 0x0421, 0x3842, 0xc793)) {
		if (idx == 0 && *conn == 0x02000300)
			*conn = 0x02011300;
		else
		if (idx == 1 && *conn == 0x04011310)
			*conn = 0x04000310;
		else
		if (idx == 2 && *conn == 0x02011312)
			*conn = 0x02000312;
	}

	return true;
}

/* build a minimal DCB for pre-DCB (BMP-only) boards */
static void
fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
{
	struct dcb_table *dcb = &bios->dcb;
	/* heads bitmask: 3 = both heads on twin-head hardware */
	int all_heads = (nv_two_heads(dev) ? 3 : 1);

#ifdef __powerpc__
	/* Apple iMac G4 NV17 */
	if (of_machine_is_compatible("PowerMac4,5")) {
		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
		fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
		return;
	}
#endif

	/* Make up some sane defaults */
	fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
			     bios->legacy.i2c_indices.crt, 1, 1);

	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
		fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
				     bios->legacy.i2c_indices.tv,
				     all_heads, 0);

	else if (bios->tmds.output0_script_ptr ||
		 bios->tmds.output1_script_ptr)
		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
				     bios->legacy.i2c_indices.panel,
				     all_heads, 1);
}

/* olddcb_outp_foreach() callback: decode one raw DCB entry */
static int
parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct dcb_table *dcb = &drm->vbios.dcb;
	u32 conf = (dcb->version >= 0x20) ?
ROM32(outp[4]) : ROM32(outp[6]); u32 conn = ROM32(outp[0]); bool ret; if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) { struct dcb_output *entry = new_dcb_entry(dcb); NV_INFO(drm, "DCB outp %02d: %08x %08x\n", idx, conn, conf); if (dcb->version >= 0x20) ret = parse_dcb20_entry(dev, dcb, conn, conf, entry); else ret = parse_dcb15_entry(dev, dcb, conn, conf, entry); if (!ret) return 1; /* stop parsing */ /* Ignore the I2C index for on-chip TV-out, as there * are cards with bogus values (nv31m in bug 23212), * and it's otherwise useless. */ if (entry->type == DCB_OUTPUT_TV && entry->location == DCB_LOC_ON_CHIP) entry->i2c_index = 0x0f; } return 0; } static void dcb_fake_connectors(struct nvbios *bios) { struct dcb_table *dcbt = &bios->dcb; u8 map[16] = { }; int i, idx = 0; /* heuristic: if we ever get a non-zero connector field, assume * that all the indices are valid and we don't need fake them. * * and, as usual, a blacklist of boards with bad bios data.. */ if (!nv_match_device(bios->dev, 0x0392, 0x107d, 0x20a2)) { for (i = 0; i < dcbt->entries; i++) { if (dcbt->entry[i].connector) return; } } /* no useful connector info available, we need to make it up * ourselves. the rule here is: anything on the same i2c bus * is considered to be on the same connector. any output * without an associated i2c bus is assigned its own unique * connector index. */ for (i = 0; i < dcbt->entries; i++) { u8 i2c = dcbt->entry[i].i2c_index; if (i2c == 0x0f) { dcbt->entry[i].connector = idx++; } else { if (!map[i2c]) map[i2c] = ++idx; dcbt->entry[i].connector = map[i2c] - 1; } } /* if we created more than one connector, destroy the connector * table - just in case it has random, rather than stub, entries. 
*/ if (i > 1) { u8 *conntab = olddcb_conntab(bios->dev); if (conntab) conntab[0] = 0x00; } } static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios) { struct nouveau_drm *drm = nouveau_drm(dev); struct dcb_table *dcb = &bios->dcb; u8 *dcbt, *conn; int idx; dcbt = olddcb_table(dev); if (!dcbt) { /* handle pre-DCB boards */ if (bios->type == NVBIOS_BMP) { fabricate_dcb_encoder_table(dev, bios); return 0; } return -EINVAL; } NV_INFO(drm, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf); dcb->version = dcbt[0]; olddcb_outp_foreach(dev, NULL, parse_dcb_entry); /* * apart for v2.1+ not being known for requiring merging, this * guarantees dcbent->index is the index of the entry in the rom image */ if (dcb->version < 0x21) merge_like_dcb_entries(dev, dcb); /* dump connector table entries to log, if any exist */ idx = -1; while ((conn = olddcb_conn(dev, ++idx))) { if (conn[0] != 0xff) { if (olddcb_conntab(dev)[3] < 4) NV_INFO(drm, "DCB conn %02d: %04x\n", idx, ROM16(conn[0])); else NV_INFO(drm, "DCB conn %02d: %08x\n", idx, ROM32(conn[0])); } } dcb_fake_connectors(bios); return 0; } static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry) { /* * The header following the "HWSQ" signature has the number of entries, * and the entry size * * An entry consists of a dword to write to the sequencer control reg * (0x00001304), followed by the ucode bytes, written sequentially, * starting at reg 0x00001400 */ struct nouveau_drm *drm = nouveau_drm(dev); struct nvif_object *device = &drm->device.object; uint8_t bytes_to_write; uint16_t hwsq_entry_offset; int i; if (bios->data[hwsq_offset] <= entry) { NV_ERROR(drm, "Too few entries in HW sequencer table for " "requested entry\n"); return -ENOENT; } bytes_to_write = bios->data[hwsq_offset + 1]; if (bytes_to_write != 36) { NV_ERROR(drm, "Unknown HW sequencer entry size\n"); return -EINVAL; } NV_INFO(drm, "Loading NV17 power sequencing microcode\n"); 
hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write; /* set sequencer control */ nvif_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset])); bytes_to_write -= 4; /* write ucode */ for (i = 0; i < bytes_to_write; i += 4) nvif_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4])); /* twiddle NV_PBUS_DEBUG_4 */ nvif_wr32(device, NV_PBUS_DEBUG_4, nvif_rd32(device, NV_PBUS_DEBUG_4) | 0x18); return 0; } static int load_nv17_hw_sequencer_ucode(struct drm_device *dev, struct nvbios *bios) { /* * BMP based cards, from NV17, need a microcode loading to correctly * control the GPIO etc for LVDS panels * * BIT based cards seem to do this directly in the init scripts * * The microcode entries are found by the "HWSQ" signature. */ const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' }; const int sz = sizeof(hwsq_signature); int hwsq_offset; hwsq_offset = findstr(bios->data, bios->length, hwsq_signature, sz); if (!hwsq_offset) return 0; /* always use entry 0? */ return load_nv17_hwsq_ucode_entry(dev, bios, hwsq_offset + sz, 0); } uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvbios *bios = &drm->vbios; const uint8_t edid_sig[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; uint16_t offset = 0; uint16_t newoffset; int searchlen = NV_PROM_SIZE; if (bios->fp.edid) return bios->fp.edid; while (searchlen) { newoffset = findstr(&bios->data[offset], searchlen, edid_sig, 8); if (!newoffset) return NULL; offset += newoffset; if (!nv_cksum(&bios->data[offset], EDID1_LEN)) break; searchlen -= offset; offset++; } NV_INFO(drm, "Found EDID in BIOS\n"); return bios->fp.edid = &bios->data[offset]; } static bool NVInitVBIOS(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvkm_bios *bios = nvxx_bios(&drm->device); struct nvbios *legacy = &drm->vbios; memset(legacy, 0, sizeof(struct nvbios)); spin_lock_init(&legacy->lock); legacy->dev = dev; legacy->data = 
bios->data; legacy->length = bios->size; legacy->major_version = bios->version.major; legacy->chip_version = bios->version.chip; if (bios->bit_offset) { legacy->type = NVBIOS_BIT; legacy->offset = bios->bit_offset; return !parse_bit_structure(legacy, legacy->offset + 6); } else if (bios->bmp_offset) { legacy->type = NVBIOS_BMP; legacy->offset = bios->bmp_offset; return !parse_bmp_structure(dev, legacy, legacy->offset); } return false; } int nouveau_run_vbios_init(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvbios *bios = &drm->vbios; int ret = 0; /* Reset the BIOS head to 0. */ bios->state.crtchead = 0; if (bios->major_version < 5) /* BMP only */ load_nv17_hw_sequencer_ucode(dev, bios); if (bios->execute) { bios->fp.last_script_invoc = 0; bios->fp.lvds_init_run = false; } return ret; } static bool nouveau_bios_posted(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); unsigned htotal; if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) return true; htotal = NVReadVgaCrtc(dev, 0, 0x06); htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8; htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4; htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10; htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11; return (htotal != 0); } int nouveau_bios_init(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvbios *bios = &drm->vbios; int ret; /* only relevant for PCI devices */ if (!dev->pdev) return 0; if (!NVInitVBIOS(dev)) return -ENODEV; ret = parse_dcb_table(dev, bios); if (ret) return ret; if (!bios->major_version) /* we don't run version 0 bios */ return 0; /* init script execution disabled */ bios->execute = false; /* ... 
unless card isn't POSTed already */ if (!nouveau_bios_posted(dev)) { NV_INFO(drm, "Adaptor not initialised, " "running VBIOS init tables.\n"); bios->execute = true; } ret = nouveau_run_vbios_init(dev); if (ret) return ret; /* feature_byte on BMP is poor, but init always sets CR4B */ if (bios->major_version < 5) bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40; /* all BIT systems need p_f_m_t for digital_min_front_porch */ if (bios->is_mobile || bios->major_version >= 5) ret = parse_fp_mode_table(dev, bios); /* allow subsequent scripts to execute */ bios->execute = true; return 0; } void nouveau_bios_takedown(struct drm_device *dev) { }
gpl-2.0
randomblame/3.1.10_a50x
drivers/net/arcnet/arc-rimi.c
678
10953
/* * Linux ARCnet driver - "RIM I" (entirely mem-mapped) cards * * Written 1994-1999 by Avery Pennarun. * Written 1999-2000 by Martin Mares <mj@ucw.cz>. * Derived from skeleton.c by Donald Becker. * * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com) * for sponsoring the further development of this driver. * * ********************** * * The original copyright of skeleton.c was as follows: * * skeleton.c Written 1993 by Donald Becker. * Copyright 1993 United States Government as represented by the * Director, National Security Agency. This software may only be used * and distributed according to the terms of the GNU General Public License as * modified by SRC, incorporated herein by reference. * * ********************** * * For more details, see drivers/net/arcnet.c * * ********************** */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/bootmem.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/io.h> #include <linux/arcdevice.h> #define VERSION "arcnet: RIM I (entirely mem-mapped) support\n" /* Internal function declarations */ static int arcrimi_probe(struct net_device *dev); static int arcrimi_found(struct net_device *dev); static void arcrimi_command(struct net_device *dev, int command); static int arcrimi_status(struct net_device *dev); static void arcrimi_setmask(struct net_device *dev, int mask); static int arcrimi_reset(struct net_device *dev, int really_reset); static void arcrimi_copy_to_card(struct net_device *dev, int bufnum, int offset, void *buf, int count); static void arcrimi_copy_from_card(struct net_device *dev, int bufnum, int offset, void *buf, int count); /* Handy defines for ARCnet specific stuff */ /* Amount of I/O memory used by the card */ #define BUFFER_SIZE (512) #define MIRROR_SIZE (BUFFER_SIZE*4) /* COM 9026 controller chip --> ARCnet register addresses */ #define 
_INTMASK (ioaddr+0) /* writable */ #define _STATUS (ioaddr+0) /* readable */ #define _COMMAND (ioaddr+1) /* writable, returns random vals on read (?) */ #define _RESET (ioaddr+8) /* software reset (on read) */ #define _MEMDATA (ioaddr+12) /* Data port for IO-mapped memory */ #define _ADDR_HI (ioaddr+15) /* Control registers for said */ #define _ADDR_LO (ioaddr+14) #define _CONFIG (ioaddr+2) /* Configuration register */ #undef ASTATUS #undef ACOMMAND #undef AINTMASK #define ASTATUS() readb(_STATUS) #define ACOMMAND(cmd) writeb((cmd),_COMMAND) #define AINTMASK(msk) writeb((msk),_INTMASK) #define SETCONF() writeb(lp->config,_CONFIG) /* * We cannot probe for a RIM I card; one reason is I don't know how to reset * them. In fact, we can't even get their node ID automatically. So, we * need to be passed a specific shmem address, IRQ, and node ID. */ static int __init arcrimi_probe(struct net_device *dev) { BUGLVL(D_NORMAL) printk(VERSION); BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n"); BUGMSG(D_NORMAL, "Given: node %02Xh, shmem %lXh, irq %d\n", dev->dev_addr[0], dev->mem_start, dev->irq); if (dev->mem_start <= 0 || dev->irq <= 0) { BUGMSG(D_NORMAL, "No autoprobe for RIM I; you " "must specify the shmem and irq!\n"); return -ENODEV; } if (dev->dev_addr[0] == 0) { BUGMSG(D_NORMAL, "You need to specify your card's station " "ID!\n"); return -ENODEV; } /* * Grab the memory region at mem_start for MIRROR_SIZE bytes. * Later in arcrimi_found() the real size will be determined * and this reserve will be released and the correct size * will be taken. 
*/ if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) { BUGMSG(D_NORMAL, "Card memory already allocated\n"); return -ENODEV; } return arcrimi_found(dev); } static int check_mirror(unsigned long addr, size_t size) { void __iomem *p; int res = -1; if (!request_mem_region(addr, size, "arcnet (90xx)")) return -1; p = ioremap(addr, size); if (p) { if (readb(p) == TESTvalue) res = 1; else res = 0; iounmap(p); } release_mem_region(addr, size); return res; } /* * Set up the struct net_device associated with this card. Called after * probing succeeds. */ static int __init arcrimi_found(struct net_device *dev) { struct arcnet_local *lp; unsigned long first_mirror, last_mirror, shmem; void __iomem *p; int mirror_size; int err; p = ioremap(dev->mem_start, MIRROR_SIZE); if (!p) { release_mem_region(dev->mem_start, MIRROR_SIZE); BUGMSG(D_NORMAL, "Can't ioremap\n"); return -ENODEV; } /* reserve the irq */ if (request_irq(dev->irq, arcnet_interrupt, 0, "arcnet (RIM I)", dev)) { iounmap(p); release_mem_region(dev->mem_start, MIRROR_SIZE); BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq); return -ENODEV; } shmem = dev->mem_start; writeb(TESTvalue, p); writeb(dev->dev_addr[0], p + 1); /* actually the node ID */ /* find the real shared memory start/end points, including mirrors */ /* guess the actual size of one "memory mirror" - the number of * bytes between copies of the shared memory. On most cards, it's * 2k (or there are no mirrors at all) but on some, it's 4k. 
*/ mirror_size = MIRROR_SIZE; if (readb(p) == TESTvalue && check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 && check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1) mirror_size = 2 * MIRROR_SIZE; first_mirror = shmem - mirror_size; while (check_mirror(first_mirror, mirror_size) == 1) first_mirror -= mirror_size; first_mirror += mirror_size; last_mirror = shmem + mirror_size; while (check_mirror(last_mirror, mirror_size) == 1) last_mirror += mirror_size; last_mirror -= mirror_size; dev->mem_start = first_mirror; dev->mem_end = last_mirror + MIRROR_SIZE - 1; /* initialize the rest of the device structure. */ lp = netdev_priv(dev); lp->card_name = "RIM I"; lp->hw.command = arcrimi_command; lp->hw.status = arcrimi_status; lp->hw.intmask = arcrimi_setmask; lp->hw.reset = arcrimi_reset; lp->hw.owner = THIS_MODULE; lp->hw.copy_to_card = arcrimi_copy_to_card; lp->hw.copy_from_card = arcrimi_copy_from_card; /* * re-reserve the memory region - arcrimi_probe() alloced this reqion * but didn't know the real size. Free that region and then re-get * with the correct size. There is a VERY slim chance this could * fail. 
*/ iounmap(p); release_mem_region(shmem, MIRROR_SIZE); if (!request_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1, "arcnet (90xx)")) { BUGMSG(D_NORMAL, "Card memory already allocated\n"); goto err_free_irq; } lp->mem_start = ioremap(dev->mem_start, dev->mem_end - dev->mem_start + 1); if (!lp->mem_start) { BUGMSG(D_NORMAL, "Can't remap device memory!\n"); goto err_release_mem; } /* get and check the station ID from offset 1 in shmem */ dev->dev_addr[0] = readb(lp->mem_start + 1); BUGMSG(D_NORMAL, "ARCnet RIM I: station %02Xh found at IRQ %d, " "ShMem %lXh (%ld*%d bytes).\n", dev->dev_addr[0], dev->irq, dev->mem_start, (dev->mem_end - dev->mem_start + 1) / mirror_size, mirror_size); err = register_netdev(dev); if (err) goto err_unmap; return 0; err_unmap: iounmap(lp->mem_start); err_release_mem: release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); err_free_irq: free_irq(dev->irq, dev); return -EIO; } /* * Do a hardware reset on the card, and set up necessary registers. * * This should be called as little as possible, because it disrupts the * token on the network (causes a RECON) and requires a significant delay. * * However, it does make sure the card is in a defined state. */ static int arcrimi_reset(struct net_device *dev, int really_reset) { struct arcnet_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->mem_start + 0x800; BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", dev->name, ASTATUS()); if (really_reset) { writeb(TESTvalue, ioaddr - 0x800); /* fake reset */ return 0; } ACOMMAND(CFLAGScmd | RESETclear); /* clear flags & end reset */ ACOMMAND(CFLAGScmd | CONFIGclear); /* enable extended (512-byte) packets */ ACOMMAND(CONFIGcmd | EXTconf); /* done! return success. 
*/ return 0; } static void arcrimi_setmask(struct net_device *dev, int mask) { struct arcnet_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->mem_start + 0x800; AINTMASK(mask); } static int arcrimi_status(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->mem_start + 0x800; return ASTATUS(); } static void arcrimi_command(struct net_device *dev, int cmd) { struct arcnet_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->mem_start + 0x800; ACOMMAND(cmd); } static void arcrimi_copy_to_card(struct net_device *dev, int bufnum, int offset, void *buf, int count) { struct arcnet_local *lp = netdev_priv(dev); void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset; TIME("memcpy_toio", count, memcpy_toio(memaddr, buf, count)); } static void arcrimi_copy_from_card(struct net_device *dev, int bufnum, int offset, void *buf, int count) { struct arcnet_local *lp = netdev_priv(dev); void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset; TIME("memcpy_fromio", count, memcpy_fromio(buf, memaddr, count)); } static int node; static int io; /* use the insmod io= irq= node= options */ static int irq; static char device[9]; /* use eg. 
device=arc1 to change name */ module_param(node, int, 0); module_param(io, int, 0); module_param(irq, int, 0); module_param_string(device, device, sizeof(device), 0); MODULE_LICENSE("GPL"); static struct net_device *my_dev; static int __init arc_rimi_init(void) { struct net_device *dev; dev = alloc_arcdev(device); if (!dev) return -ENOMEM; if (node && node != 0xff) dev->dev_addr[0] = node; dev->mem_start = io; dev->irq = irq; if (dev->irq == 2) dev->irq = 9; if (arcrimi_probe(dev)) { free_netdev(dev); return -EIO; } my_dev = dev; return 0; } static void __exit arc_rimi_exit(void) { struct net_device *dev = my_dev; struct arcnet_local *lp = netdev_priv(dev); unregister_netdev(dev); iounmap(lp->mem_start); release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); free_irq(dev->irq, dev); free_netdev(dev); } #ifndef MODULE static int __init arcrimi_setup(char *s) { int ints[8]; s = get_options(s, 8, ints); if (!ints[0]) return 1; switch (ints[0]) { default: /* ERROR */ printk("arcrimi: Too many arguments.\n"); case 3: /* Node ID */ node = ints[3]; case 2: /* IRQ */ irq = ints[2]; case 1: /* IO address */ io = ints[1]; } if (*s) snprintf(device, sizeof(device), "%s", s); return 1; } __setup("arcrimi=", arcrimi_setup); #endif /* MODULE */ module_init(arc_rimi_init) module_exit(arc_rimi_exit)
gpl-2.0
CurtisMJ/g800f_custom_kernel
drivers/leds/leds-lp5521.c
1446
23575
/* * LP5521 LED chip driver. * * Copyright (C) 2010 Nokia Corporation * * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/ctype.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/leds.h> #include <linux/leds-lp5521.h> #include <linux/workqueue.h> #include <linux/slab.h> #define LP5521_PROGRAM_LENGTH 32 /* in bytes */ #define LP5521_MAX_LEDS 3 /* Maximum number of LEDs */ #define LP5521_MAX_ENGINES 3 /* Maximum number of engines */ #define LP5521_ENG_MASK_BASE 0x30 /* 00110000 */ #define LP5521_ENG_STATUS_MASK 0x07 /* 00000111 */ #define LP5521_CMD_LOAD 0x15 /* 00010101 */ #define LP5521_CMD_RUN 0x2a /* 00101010 */ #define LP5521_CMD_DIRECT 0x3f /* 00111111 */ #define LP5521_CMD_DISABLED 0x00 /* 00000000 */ /* Registers */ #define LP5521_REG_ENABLE 0x00 #define LP5521_REG_OP_MODE 0x01 #define LP5521_REG_R_PWM 0x02 #define LP5521_REG_G_PWM 0x03 #define LP5521_REG_B_PWM 0x04 #define LP5521_REG_R_CURRENT 0x05 #define LP5521_REG_G_CURRENT 0x06 #define LP5521_REG_B_CURRENT 0x07 #define LP5521_REG_CONFIG 0x08 #define LP5521_REG_R_CHANNEL_PC 0x09 #define LP5521_REG_G_CHANNEL_PC 0x0A #define LP5521_REG_B_CHANNEL_PC 0x0B #define 
LP5521_REG_STATUS 0x0C #define LP5521_REG_RESET 0x0D #define LP5521_REG_GPO 0x0E #define LP5521_REG_R_PROG_MEM 0x10 #define LP5521_REG_G_PROG_MEM 0x30 #define LP5521_REG_B_PROG_MEM 0x50 #define LP5521_PROG_MEM_BASE LP5521_REG_R_PROG_MEM #define LP5521_PROG_MEM_SIZE 0x20 /* Base register to set LED current */ #define LP5521_REG_LED_CURRENT_BASE LP5521_REG_R_CURRENT /* Base register to set the brightness */ #define LP5521_REG_LED_PWM_BASE LP5521_REG_R_PWM /* Bits in ENABLE register */ #define LP5521_MASTER_ENABLE 0x40 /* Chip master enable */ #define LP5521_LOGARITHMIC_PWM 0x80 /* Logarithmic PWM adjustment */ #define LP5521_EXEC_RUN 0x2A #define LP5521_ENABLE_DEFAULT \ (LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM) #define LP5521_ENABLE_RUN_PROGRAM \ (LP5521_ENABLE_DEFAULT | LP5521_EXEC_RUN) /* Status */ #define LP5521_EXT_CLK_USED 0x08 /* default R channel current register value */ #define LP5521_REG_R_CURR_DEFAULT 0xAF /* Pattern Mode */ #define PATTERN_OFF 0 struct lp5521_engine { int id; u8 mode; u8 prog_page; u8 engine_mask; }; struct lp5521_led { int id; u8 chan_nr; u8 led_current; u8 max_current; struct led_classdev cdev; struct work_struct brightness_work; u8 brightness; }; struct lp5521_chip { struct lp5521_platform_data *pdata; struct mutex lock; /* Serialize control */ struct i2c_client *client; struct lp5521_engine engines[LP5521_MAX_ENGINES]; struct lp5521_led leds[LP5521_MAX_LEDS]; u8 num_channels; u8 num_leds; }; static inline struct lp5521_led *cdev_to_led(struct led_classdev *cdev) { return container_of(cdev, struct lp5521_led, cdev); } static inline struct lp5521_chip *engine_to_lp5521(struct lp5521_engine *engine) { return container_of(engine, struct lp5521_chip, engines[engine->id - 1]); } static inline struct lp5521_chip *led_to_lp5521(struct lp5521_led *led) { return container_of(led, struct lp5521_chip, leds[led->id]); } static void lp5521_led_brightness_work(struct work_struct *work); static inline int lp5521_write(struct i2c_client *client, 
u8 reg, u8 value) { return i2c_smbus_write_byte_data(client, reg, value); } static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf) { s32 ret; ret = i2c_smbus_read_byte_data(client, reg); if (ret < 0) return -EIO; *buf = ret; return 0; } static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode) { struct lp5521_chip *chip = engine_to_lp5521(engine); struct i2c_client *client = chip->client; int ret; u8 engine_state; /* Only transition between RUN and DIRECT mode are handled here */ if (mode == LP5521_CMD_LOAD) return 0; if (mode == LP5521_CMD_DISABLED) mode = LP5521_CMD_DIRECT; ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state); if (ret < 0) return ret; /* set mode only for this engine */ engine_state &= ~(engine->engine_mask); mode &= engine->engine_mask; engine_state |= mode; return lp5521_write(client, LP5521_REG_OP_MODE, engine_state); } static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern) { struct lp5521_chip *chip = engine_to_lp5521(eng); struct i2c_client *client = chip->client; int ret; int addr; u8 mode; /* move current engine to direct mode and remember the state */ ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT); if (ret) return ret; /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ usleep_range(1000, 2000); ret = lp5521_read(client, LP5521_REG_OP_MODE, &mode); if (ret) return ret; /* For loading, all the engines to load mode */ lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ usleep_range(1000, 2000); lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD); /* Mode change requires min 500 us delay. 
1 - 2 ms with margin */ usleep_range(1000, 2000); addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE; i2c_smbus_write_i2c_block_data(client, addr, LP5521_PROG_MEM_SIZE, pattern); return lp5521_write(client, LP5521_REG_OP_MODE, mode); } static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr) { return lp5521_write(chip->client, LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr, curr); } static void lp5521_init_engine(struct lp5521_chip *chip) { int i; for (i = 0; i < ARRAY_SIZE(chip->engines); i++) { chip->engines[i].id = i + 1; chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2); chip->engines[i].prog_page = i; } } static int lp5521_configure(struct i2c_client *client) { struct lp5521_chip *chip = i2c_get_clientdata(client); int ret; u8 cfg; lp5521_init_engine(chip); /* Set all PWMs to direct control mode */ ret = lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); cfg = chip->pdata->update_config ? : (LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT); ret |= lp5521_write(client, LP5521_REG_CONFIG, cfg); /* Initialize all channels PWM to zero -> leds off */ ret |= lp5521_write(client, LP5521_REG_R_PWM, 0); ret |= lp5521_write(client, LP5521_REG_G_PWM, 0); ret |= lp5521_write(client, LP5521_REG_B_PWM, 0); /* Set engines are set to run state when OP_MODE enables engines */ ret |= lp5521_write(client, LP5521_REG_ENABLE, LP5521_ENABLE_RUN_PROGRAM); /* enable takes 500us. 
1 - 2 ms leaves some margin */ usleep_range(1000, 2000); return ret; } static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf) { int ret; u8 status; ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status); if (ret < 0) return ret; /* Check that ext clock is really in use if requested */ if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT) if ((status & LP5521_EXT_CLK_USED) == 0) return -EIO; return 0; } static void lp5521_set_brightness(struct led_classdev *cdev, enum led_brightness brightness) { struct lp5521_led *led = cdev_to_led(cdev); led->brightness = (u8)brightness; schedule_work(&led->brightness_work); } static void lp5521_led_brightness_work(struct work_struct *work) { struct lp5521_led *led = container_of(work, struct lp5521_led, brightness_work); struct lp5521_chip *chip = led_to_lp5521(led); struct i2c_client *client = chip->client; mutex_lock(&chip->lock); lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr, led->brightness); mutex_unlock(&chip->lock); } /* Detect the chip by setting its ENABLE register and reading it back. */ static int lp5521_detect(struct i2c_client *client) { int ret; u8 buf; ret = lp5521_write(client, LP5521_REG_ENABLE, LP5521_ENABLE_DEFAULT); if (ret) return ret; /* enable takes 500us. 1 - 2 ms leaves some margin */ usleep_range(1000, 2000); ret = lp5521_read(client, LP5521_REG_ENABLE, &buf); if (ret) return ret; if (buf != LP5521_ENABLE_DEFAULT) return -ENODEV; return 0; } /* Set engine mode and create appropriate sysfs attributes, if required. 
*/ static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode) { int ret = 0; /* if in that mode already do nothing, except for run */ if (mode == engine->mode && mode != LP5521_CMD_RUN) return 0; if (mode == LP5521_CMD_RUN) { ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN); } else if (mode == LP5521_CMD_LOAD) { lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED); lp5521_set_engine_mode(engine, LP5521_CMD_LOAD); } else if (mode == LP5521_CMD_DISABLED) { lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED); } engine->mode = mode; return ret; } static int lp5521_do_store_load(struct lp5521_engine *engine, const char *buf, size_t len) { struct lp5521_chip *chip = engine_to_lp5521(engine); struct i2c_client *client = chip->client; int ret, nrchars, offset = 0, i = 0; char c[3]; unsigned cmd; u8 pattern[LP5521_PROGRAM_LENGTH] = {0}; while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) { /* separate sscanfs because length is working only for %s */ ret = sscanf(buf + offset, "%2s%n ", c, &nrchars); if (ret != 2) goto fail; ret = sscanf(c, "%2x", &cmd); if (ret != 1) goto fail; pattern[i] = (u8)cmd; offset += nrchars; i++; } /* Each instruction is 16bit long. 
Check that length is even */ if (i % 2) goto fail; mutex_lock(&chip->lock); if (engine->mode == LP5521_CMD_LOAD) ret = lp5521_load_program(engine, pattern); else ret = -EINVAL; mutex_unlock(&chip->lock); if (ret) { dev_err(&client->dev, "failed loading pattern\n"); return ret; } return len; fail: dev_err(&client->dev, "wrong pattern format\n"); return -EINVAL; } static ssize_t store_engine_load(struct device *dev, struct device_attribute *attr, const char *buf, size_t len, int nr) { struct i2c_client *client = to_i2c_client(dev); struct lp5521_chip *chip = i2c_get_clientdata(client); return lp5521_do_store_load(&chip->engines[nr - 1], buf, len); } #define store_load(nr) \ static ssize_t store_engine##nr##_load(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t len) \ { \ return store_engine_load(dev, attr, buf, len, nr); \ } store_load(1) store_load(2) store_load(3) static ssize_t show_engine_mode(struct device *dev, struct device_attribute *attr, char *buf, int nr) { struct i2c_client *client = to_i2c_client(dev); struct lp5521_chip *chip = i2c_get_clientdata(client); switch (chip->engines[nr - 1].mode) { case LP5521_CMD_RUN: return sprintf(buf, "run\n"); case LP5521_CMD_LOAD: return sprintf(buf, "load\n"); case LP5521_CMD_DISABLED: return sprintf(buf, "disabled\n"); default: return sprintf(buf, "disabled\n"); } } #define show_mode(nr) \ static ssize_t show_engine##nr##_mode(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ return show_engine_mode(dev, attr, buf, nr); \ } show_mode(1) show_mode(2) show_mode(3) static ssize_t store_engine_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len, int nr) { struct i2c_client *client = to_i2c_client(dev); struct lp5521_chip *chip = i2c_get_clientdata(client); struct lp5521_engine *engine = &chip->engines[nr - 1]; mutex_lock(&chip->lock); if (!strncmp(buf, "run", 3)) lp5521_set_mode(engine, LP5521_CMD_RUN); else if (!strncmp(buf, "load", 4)) 
lp5521_set_mode(engine, LP5521_CMD_LOAD); else if (!strncmp(buf, "disabled", 8)) lp5521_set_mode(engine, LP5521_CMD_DISABLED); mutex_unlock(&chip->lock); return len; } #define store_mode(nr) \ static ssize_t store_engine##nr##_mode(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t len) \ { \ return store_engine_mode(dev, attr, buf, len, nr); \ } store_mode(1) store_mode(2) store_mode(3) static ssize_t show_max_current(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lp5521_led *led = cdev_to_led(led_cdev); return sprintf(buf, "%d\n", led->max_current); } static ssize_t show_current(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lp5521_led *led = cdev_to_led(led_cdev); return sprintf(buf, "%d\n", led->led_current); } static ssize_t store_current(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lp5521_led *led = cdev_to_led(led_cdev); struct lp5521_chip *chip = led_to_lp5521(led); ssize_t ret; unsigned long curr; if (kstrtoul(buf, 0, &curr)) return -EINVAL; if (curr > led->max_current) return -EINVAL; mutex_lock(&chip->lock); ret = lp5521_set_led_current(chip, led->id, curr); mutex_unlock(&chip->lock); if (ret < 0) return ret; led->led_current = (u8)curr; return len; } static ssize_t lp5521_selftest(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct lp5521_chip *chip = i2c_get_clientdata(client); int ret; mutex_lock(&chip->lock); ret = lp5521_run_selftest(chip, buf); mutex_unlock(&chip->lock); return sprintf(buf, "%s\n", ret ? 
"FAIL" : "OK"); } static void lp5521_clear_program_memory(struct i2c_client *cl) { int i; u8 rgb_mem[] = { LP5521_REG_R_PROG_MEM, LP5521_REG_G_PROG_MEM, LP5521_REG_B_PROG_MEM, }; for (i = 0; i < ARRAY_SIZE(rgb_mem); i++) { lp5521_write(cl, rgb_mem[i], 0); lp5521_write(cl, rgb_mem[i] + 1, 0); } } static void lp5521_write_program_memory(struct i2c_client *cl, u8 base, u8 *rgb, int size) { int i; if (!rgb || size <= 0) return; for (i = 0; i < size; i++) lp5521_write(cl, base + i, *(rgb + i)); lp5521_write(cl, base + i, 0); lp5521_write(cl, base + i + 1, 0); } static inline struct lp5521_led_pattern *lp5521_get_pattern (struct lp5521_chip *chip, u8 offset) { struct lp5521_led_pattern *ptn; ptn = chip->pdata->patterns + (offset - 1); return ptn; } static void lp5521_run_led_pattern(int mode, struct lp5521_chip *chip) { struct lp5521_led_pattern *ptn; struct i2c_client *cl = chip->client; int num_patterns = chip->pdata->num_patterns; if (mode > num_patterns || !(chip->pdata->patterns)) return; if (mode == PATTERN_OFF) { lp5521_write(cl, LP5521_REG_ENABLE, LP5521_ENABLE_DEFAULT); usleep_range(1000, 2000); lp5521_write(cl, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); } else { ptn = lp5521_get_pattern(chip, mode); if (!ptn) return; lp5521_write(cl, LP5521_REG_OP_MODE, LP5521_CMD_LOAD); usleep_range(1000, 2000); lp5521_clear_program_memory(cl); lp5521_write_program_memory(cl, LP5521_REG_R_PROG_MEM, ptn->r, ptn->size_r); lp5521_write_program_memory(cl, LP5521_REG_G_PROG_MEM, ptn->g, ptn->size_g); lp5521_write_program_memory(cl, LP5521_REG_B_PROG_MEM, ptn->b, ptn->size_b); lp5521_write(cl, LP5521_REG_OP_MODE, LP5521_CMD_RUN); usleep_range(1000, 2000); lp5521_write(cl, LP5521_REG_ENABLE, LP5521_ENABLE_RUN_PROGRAM); } } static ssize_t store_led_pattern(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct lp5521_chip *chip = i2c_get_clientdata(to_i2c_client(dev)); unsigned long val; int ret; ret = strict_strtoul(buf, 16, &val); if (ret) return ret; 
lp5521_run_led_pattern(val, chip); return len; } /* led class device attributes */ static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current); static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); static struct attribute *lp5521_led_attributes[] = { &dev_attr_led_current.attr, &dev_attr_max_current.attr, NULL, }; static struct attribute_group lp5521_led_attribute_group = { .attrs = lp5521_led_attributes }; /* device attributes */ static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR, show_engine1_mode, store_engine1_mode); static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR, show_engine2_mode, store_engine2_mode); static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR, show_engine3_mode, store_engine3_mode); static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load); static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load); static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load); static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL); static DEVICE_ATTR(led_pattern, S_IWUSR, NULL, store_led_pattern); static struct attribute *lp5521_attributes[] = { &dev_attr_engine1_mode.attr, &dev_attr_engine2_mode.attr, &dev_attr_engine3_mode.attr, &dev_attr_selftest.attr, &dev_attr_engine1_load.attr, &dev_attr_engine2_load.attr, &dev_attr_engine3_load.attr, &dev_attr_led_pattern.attr, NULL }; static const struct attribute_group lp5521_group = { .attrs = lp5521_attributes, }; static int lp5521_register_sysfs(struct i2c_client *client) { struct device *dev = &client->dev; return sysfs_create_group(&dev->kobj, &lp5521_group); } static void lp5521_unregister_sysfs(struct i2c_client *client) { struct lp5521_chip *chip = i2c_get_clientdata(client); struct device *dev = &client->dev; int i; sysfs_remove_group(&dev->kobj, &lp5521_group); for (i = 0; i < chip->num_leds; i++) sysfs_remove_group(&chip->leds[i].cdev.dev->kobj, &lp5521_led_attribute_group); } static int __devinit lp5521_init_led(struct lp5521_led *led, struct 
i2c_client *client, int chan, struct lp5521_platform_data *pdata) { struct device *dev = &client->dev; char name[32]; int res; if (chan >= LP5521_MAX_LEDS) return -EINVAL; if (pdata->led_config[chan].led_current == 0) return 0; led->led_current = pdata->led_config[chan].led_current; led->max_current = pdata->led_config[chan].max_current; led->chan_nr = pdata->led_config[chan].chan_nr; if (led->chan_nr >= LP5521_MAX_LEDS) { dev_err(dev, "Use channel numbers between 0 and %d\n", LP5521_MAX_LEDS - 1); return -EINVAL; } led->cdev.brightness_set = lp5521_set_brightness; if (pdata->led_config[chan].name) { led->cdev.name = pdata->led_config[chan].name; } else { snprintf(name, sizeof(name), "%s:channel%d", pdata->label ?: client->name, chan); led->cdev.name = name; } res = led_classdev_register(dev, &led->cdev); if (res < 0) { dev_err(dev, "couldn't register led on channel %d\n", chan); return res; } res = sysfs_create_group(&led->cdev.dev->kobj, &lp5521_led_attribute_group); if (res < 0) { dev_err(dev, "couldn't register current attribute\n"); led_classdev_unregister(&led->cdev); return res; } return 0; } static int __devinit lp5521_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct lp5521_chip *chip; struct lp5521_platform_data *pdata; int ret, i, led; u8 buf; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; i2c_set_clientdata(client, chip); chip->client = client; pdata = client->dev.platform_data; if (!pdata) { dev_err(&client->dev, "no platform data\n"); ret = -EINVAL; goto fail1; } mutex_init(&chip->lock); chip->pdata = pdata; if (pdata->setup_resources) { ret = pdata->setup_resources(); if (ret < 0) goto fail1; } if (pdata->enable) { pdata->enable(0); usleep_range(1000, 2000); /* Keep enable down at least 1ms */ pdata->enable(1); usleep_range(1000, 2000); /* 500us abs min. */ } lp5521_write(client, LP5521_REG_RESET, 0xff); usleep_range(10000, 20000); /* * Exact value is not available. 
10 - 20ms * appears to be enough for reset. */ /* * Make sure that the chip is reset by reading back the r channel * current reg. This is dummy read is required on some platforms - * otherwise further access to the R G B channels in the * LP5521_REG_ENABLE register will not have any effect - strange! */ ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf); if (ret || buf != LP5521_REG_R_CURR_DEFAULT) { dev_err(&client->dev, "error in resetting chip\n"); goto fail2; } usleep_range(10000, 20000); ret = lp5521_detect(client); if (ret) { dev_err(&client->dev, "Chip not found\n"); goto fail2; } dev_info(&client->dev, "%s programmable led chip found\n", id->name); ret = lp5521_configure(client); if (ret < 0) { dev_err(&client->dev, "error configuring chip\n"); goto fail2; } /* Initialize leds */ chip->num_channels = pdata->num_channels; chip->num_leds = 0; led = 0; for (i = 0; i < pdata->num_channels; i++) { /* Do not initialize channels that are not connected */ if (pdata->led_config[i].led_current == 0) continue; ret = lp5521_init_led(&chip->leds[led], client, i, pdata); if (ret) { dev_err(&client->dev, "error initializing leds\n"); goto fail3; } chip->num_leds++; chip->leds[led].id = led; /* Set initial LED current */ lp5521_set_led_current(chip, led, chip->leds[led].led_current); INIT_WORK(&(chip->leds[led].brightness_work), lp5521_led_brightness_work); led++; } ret = lp5521_register_sysfs(client); if (ret) { dev_err(&client->dev, "registering sysfs failed\n"); goto fail3; } return ret; fail3: for (i = 0; i < chip->num_leds; i++) { led_classdev_unregister(&chip->leds[i].cdev); cancel_work_sync(&chip->leds[i].brightness_work); } fail2: if (pdata->enable) pdata->enable(0); if (pdata->release_resources) pdata->release_resources(); fail1: kfree(chip); return ret; } static int __devexit lp5521_remove(struct i2c_client *client) { struct lp5521_chip *chip = i2c_get_clientdata(client); int i; lp5521_run_led_pattern(PATTERN_OFF, chip); lp5521_unregister_sysfs(client); for (i 
= 0; i < chip->num_leds; i++) { led_classdev_unregister(&chip->leds[i].cdev); cancel_work_sync(&chip->leds[i].brightness_work); } if (chip->pdata->enable) chip->pdata->enable(0); if (chip->pdata->release_resources) chip->pdata->release_resources(); kfree(chip); return 0; } static const struct i2c_device_id lp5521_id[] = { { "lp5521", 0 }, /* Three channel chip */ { } }; MODULE_DEVICE_TABLE(i2c, lp5521_id); static struct i2c_driver lp5521_driver = { .driver = { .name = "lp5521", }, .probe = lp5521_probe, .remove = __devexit_p(lp5521_remove), .id_table = lp5521_id, }; module_i2c_driver(lp5521_driver); MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo"); MODULE_DESCRIPTION("LP5521 LED engine"); MODULE_LICENSE("GPL v2");
gpl-2.0
sndnvaps/linux-1
arch/mips/loongson64/common/pm.c
1958
3281
/* * loongson-specific suspend support * * Copyright (C) 2009 Lemote Inc. * Author: Wu Zhangjin <wuzhangjin@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/suspend.h> #include <linux/interrupt.h> #include <linux/pm.h> #include <asm/i8259.h> #include <asm/mipsregs.h> #include <loongson.h> static unsigned int __maybe_unused cached_master_mask; /* i8259A */ static unsigned int __maybe_unused cached_slave_mask; static unsigned int __maybe_unused cached_bonito_irq_mask; /* bonito */ void arch_suspend_disable_irqs(void) { /* disable all mips events */ local_irq_disable(); #ifdef CONFIG_I8259 /* disable all events of i8259A */ cached_slave_mask = inb(PIC_SLAVE_IMR); cached_master_mask = inb(PIC_MASTER_IMR); outb(0xff, PIC_SLAVE_IMR); inb(PIC_SLAVE_IMR); outb(0xff, PIC_MASTER_IMR); inb(PIC_MASTER_IMR); #endif /* disable all events of bonito */ cached_bonito_irq_mask = LOONGSON_INTEN; LOONGSON_INTENCLR = 0xffff; (void)LOONGSON_INTENCLR; } void arch_suspend_enable_irqs(void) { /* enable all mips events */ local_irq_enable(); #ifdef CONFIG_I8259 /* only enable the cached events of i8259A */ outb(cached_slave_mask, PIC_SLAVE_IMR); outb(cached_master_mask, PIC_MASTER_IMR); #endif /* enable all cached events of bonito */ LOONGSON_INTENSET = cached_bonito_irq_mask; (void)LOONGSON_INTENSET; } /* * Setup the board-specific events for waking up loongson from wait mode */ void __weak setup_wakeup_events(void) { } /* * Check wakeup events */ int __weak wakeup_loongson(void) { return 1; } /* * If the events are really what we want to wakeup the CPU, wake it up * otherwise put the CPU asleep again. 
*/ static void wait_for_wakeup_events(void) { while (!wakeup_loongson()) LOONGSON_CHIPCFG(0) &= ~0x7; } /* * Stop all perf counters * * $24 is the control register of Loongson perf counter */ static inline void stop_perf_counters(void) { __write_64bit_c0_register($24, 0, 0); } static void loongson_suspend_enter(void) { static unsigned int cached_cpu_freq; /* setup wakeup events via enabling the IRQs */ setup_wakeup_events(); stop_perf_counters(); cached_cpu_freq = LOONGSON_CHIPCFG(0); /* Put CPU into wait mode */ LOONGSON_CHIPCFG(0) &= ~0x7; /* wait for the given events to wakeup cpu from wait mode */ wait_for_wakeup_events(); LOONGSON_CHIPCFG(0) = cached_cpu_freq; mmiowb(); } void __weak mach_suspend(void) { } void __weak mach_resume(void) { } static int loongson_pm_enter(suspend_state_t state) { mach_suspend(); /* processor specific suspend */ loongson_suspend_enter(); mach_resume(); return 0; } static int loongson_pm_valid_state(suspend_state_t state) { switch (state) { case PM_SUSPEND_ON: case PM_SUSPEND_STANDBY: case PM_SUSPEND_MEM: return 1; default: return 0; } } static const struct platform_suspend_ops loongson_pm_ops = { .valid = loongson_pm_valid_state, .enter = loongson_pm_enter, }; static int __init loongson_pm_init(void) { suspend_set_ops(&loongson_pm_ops); return 0; } arch_initcall(loongson_pm_init);
gpl-2.0
pio-masaki/kernel_at1s0
arch/um/os-Linux/main.c
1958
5967
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <signal.h> #include <string.h> #include <sys/resource.h> #include "as-layout.h" #include "init.h" #include "kern_constants.h" #include "kern_util.h" #include "os.h" #include "um_malloc.h" #define PGD_BOUND (4 * 1024 * 1024) #define STACKSIZE (8 * 1024 * 1024) #define THREAD_NAME_LEN (256) static void set_stklim(void) { struct rlimit lim; if (getrlimit(RLIMIT_STACK, &lim) < 0) { perror("getrlimit"); exit(1); } if ((lim.rlim_cur == RLIM_INFINITY) || (lim.rlim_cur > STACKSIZE)) { lim.rlim_cur = STACKSIZE; if (setrlimit(RLIMIT_STACK, &lim) < 0) { perror("setrlimit"); exit(1); } } } static __init void do_uml_initcalls(void) { initcall_t *call; call = &__uml_initcall_start; while (call < &__uml_initcall_end) { (*call)(); call++; } } static void last_ditch_exit(int sig) { uml_cleanup(); exit(1); } static void install_fatal_handler(int sig) { struct sigaction action; /* All signals are enabled in this handler ... */ sigemptyset(&action.sa_mask); /* * ... including the signal being handled, plus we want the * handler reset to the default behavior, so that if an exit * handler is hanging for some reason, the UML will just die * after this signal is sent a second time. 
*/ action.sa_flags = SA_RESETHAND | SA_NODEFER; action.sa_restorer = NULL; action.sa_handler = last_ditch_exit; if (sigaction(sig, &action, NULL) < 0) { printf("failed to install handler for signal %d - errno = %d\n", sig, errno); exit(1); } } #define UML_LIB_PATH ":/usr/lib/uml" static void setup_env_path(void) { char *new_path = NULL; char *old_path = NULL; int path_len = 0; old_path = getenv("PATH"); /* * if no PATH variable is set or it has an empty value * just use the default + /usr/lib/uml */ if (!old_path || (path_len = strlen(old_path)) == 0) { if (putenv("PATH=:/bin:/usr/bin/" UML_LIB_PATH)) perror("couldn't putenv"); return; } /* append /usr/lib/uml to the existing path */ path_len += strlen("PATH=" UML_LIB_PATH) + 1; new_path = malloc(path_len); if (!new_path) { perror("couldn't malloc to set a new PATH"); return; } snprintf(new_path, path_len, "PATH=%s" UML_LIB_PATH, old_path); if (putenv(new_path)) { perror("couldn't putenv to set a new PATH"); free(new_path); } } extern void scan_elf_aux( char **envp); int __init main(int argc, char **argv, char **envp) { char **new_argv; int ret, i, err; set_stklim(); setup_env_path(); new_argv = malloc((argc + 1) * sizeof(char *)); if (new_argv == NULL) { perror("Mallocing argv"); exit(1); } for (i = 0; i < argc; i++) { new_argv[i] = strdup(argv[i]); if (new_argv[i] == NULL) { perror("Mallocing an arg"); exit(1); } } new_argv[argc] = NULL; /* * Allow these signals to bring down a UML if all other * methods of control fail. */ install_fatal_handler(SIGINT); install_fatal_handler(SIGTERM); install_fatal_handler(SIGHUP); scan_elf_aux(envp); do_uml_initcalls(); ret = linux_main(argc, argv); /* * Disable SIGPROF - I have no idea why libc doesn't do this or turn * off the profiling time, but UML dies with a SIGPROF just before * exiting when profiling is active. */ change_sig(SIGPROF, 0); /* * This signal stuff used to be in the reboot case. 
However, * sometimes a SIGVTALRM can come in when we're halting (reproducably * when writing out gcov information, presumably because that takes * some time) and cause a segfault. */ /* stop timers and set SIGVTALRM to be ignored */ disable_timer(); /* disable SIGIO for the fds and set SIGIO to be ignored */ err = deactivate_all_fds(); if (err) printf("deactivate_all_fds failed, errno = %d\n", -err); /* * Let any pending signals fire now. This ensures * that they won't be delivered after the exec, when * they are definitely not expected. */ unblock_signals(); /* Reboot */ if (ret) { printf("\n"); execvp(new_argv[0], new_argv); perror("Failed to exec kernel"); ret = 1; } printf("\n"); return uml_exitcode; } extern void *__real_malloc(int); void *__wrap_malloc(int size) { void *ret; if (!kmalloc_ok) return __real_malloc(size); else if (size <= UM_KERN_PAGE_SIZE) /* finding contiguous pages can be hard*/ ret = uml_kmalloc(size, UM_GFP_KERNEL); else ret = vmalloc(size); /* * glibc people insist that if malloc fails, errno should be * set by malloc as well. So we do. */ if (ret == NULL) errno = ENOMEM; return ret; } void *__wrap_calloc(int n, int size) { void *ptr = __wrap_malloc(n * size); if (ptr == NULL) return NULL; memset(ptr, 0, n * size); return ptr; } extern void __real_free(void *); extern unsigned long high_physmem; void __wrap_free(void *ptr) { unsigned long addr = (unsigned long) ptr; /* * We need to know how the allocation happened, so it can be correctly * freed. This is done by seeing what region of memory the pointer is * in - * physical memory - kmalloc/kfree * kernel virtual memory - vmalloc/vfree * anywhere else - malloc/free * If kmalloc is not yet possible, then either high_physmem and/or * end_vm are still 0 (as at startup), in which case we call free, or * we have set them, but anyway addr has not been allocated from those * areas. So, in both cases __real_free is called. 
* * CAN_KMALLOC is checked because it would be bad to free a buffer * with kmalloc/vmalloc after they have been turned off during * shutdown. * XXX: However, we sometimes shutdown CAN_KMALLOC temporarily, so * there is a possibility for memory leaks. */ if ((addr >= uml_physmem) && (addr < high_physmem)) { if (kmalloc_ok) kfree(ptr); } else if ((addr >= start_vm) && (addr < end_vm)) { if (kmalloc_ok) vfree(ptr); } else __real_free(ptr); }
gpl-2.0
01org/prd
arch/mips/kernel/vpe-mt.c
2214
11295
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2013 Imagination Technologies Ltd. */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/export.h> #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/mips_mt.h> #include <asm/vpe.h> static int major; /* The number of TCs and VPEs physically available on the core */ static int hw_tcs, hw_vpes; /* We are prepared so configure and start the VPE... */ int vpe_run(struct vpe *v) { unsigned long flags, val, dmt_flag; struct vpe_notifications *notifier; unsigned int vpeflags; struct tc *t; /* check we are the Master VPE */ local_irq_save(flags); val = read_c0_vpeconf0(); if (!(val & VPECONF0_MVP)) { pr_warn("VPE loader: only Master VPE's are able to config MT\n"); local_irq_restore(flags); return -1; } dmt_flag = dmt(); vpeflags = dvpe(); if (list_empty(&v->tc)) { evpe(vpeflags); emt(dmt_flag); local_irq_restore(flags); pr_warn("VPE loader: No TC's associated with VPE %d\n", v->minor); return -ENOEXEC; } t = list_first_entry(&v->tc, struct tc, tc); /* Put MVPE's into 'configuration state' */ set_c0_mvpcontrol(MVPCONTROL_VPC); settc(t->index); /* should check it is halted, and not activated */ if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) { evpe(vpeflags); emt(dmt_flag); local_irq_restore(flags); pr_warn("VPE loader: TC %d is already active!\n", t->index); return -ENOEXEC; } /* * Write the address we want it to start running from in the TCPC * register. 
*/ write_tc_c0_tcrestart((unsigned long)v->__start); write_tc_c0_tccontext((unsigned long)0); /* * Mark the TC as activated, not interrupt exempt and not dynamically * allocatable */ val = read_tc_c0_tcstatus(); val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A; write_tc_c0_tcstatus(val); write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); /* * The sde-kit passes 'memsize' to __start in $a3, so set something * here... Or set $a3 to zero and define DFLT_STACK_SIZE and * DFLT_HEAP_SIZE when you compile your program */ mttgpr(6, v->ntcs); mttgpr(7, physical_memsize); /* set up VPE1 */ /* * bind the TC to VPE 1 as late as possible so we only have the final * VPE registers to set up, and so an EJTAG probe can trigger on it */ write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1); write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA)); back_to_back_c0_hazard(); /* Set up the XTC bit in vpeconf0 to point at our tc */ write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) | (t->index << VPECONF0_XTC_SHIFT)); back_to_back_c0_hazard(); /* enable this VPE */ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); /* clear out any left overs from a previous program */ write_vpe_c0_status(0); write_vpe_c0_cause(0); /* take system out of configuration state */ clear_c0_mvpcontrol(MVPCONTROL_VPC); /* * SMVP kernels manage VPE enable independently, but uniprocessor * kernels need to turn it on, even if that wasn't the pre-dvpe() state. 
*/ #ifdef CONFIG_SMP evpe(vpeflags); #else evpe(EVPE_ENABLE); #endif emt(dmt_flag); local_irq_restore(flags); list_for_each_entry(notifier, &v->notify, list) notifier->start(VPE_MODULE_MINOR); return 0; } void cleanup_tc(struct tc *tc) { unsigned long flags; unsigned int mtflags, vpflags; int tmp; local_irq_save(flags); mtflags = dmt(); vpflags = dvpe(); /* Put MVPE's into 'configuration state' */ set_c0_mvpcontrol(MVPCONTROL_VPC); settc(tc->index); tmp = read_tc_c0_tcstatus(); /* mark not allocated and not dynamically allocatable */ tmp &= ~(TCSTATUS_A | TCSTATUS_DA); tmp |= TCSTATUS_IXMT; /* interrupt exempt */ write_tc_c0_tcstatus(tmp); write_tc_c0_tchalt(TCHALT_H); mips_ihb(); clear_c0_mvpcontrol(MVPCONTROL_VPC); evpe(vpflags); emt(mtflags); local_irq_restore(flags); } /* module wrapper entry points */ /* give me a vpe */ void *vpe_alloc(void) { int i; struct vpe *v; /* find a vpe */ for (i = 1; i < MAX_VPES; i++) { v = get_vpe(i); if (v != NULL) { v->state = VPE_STATE_INUSE; return v; } } return NULL; } EXPORT_SYMBOL(vpe_alloc); /* start running from here */ int vpe_start(void *vpe, unsigned long start) { struct vpe *v = vpe; v->__start = start; return vpe_run(v); } EXPORT_SYMBOL(vpe_start); /* halt it for now */ int vpe_stop(void *vpe) { struct vpe *v = vpe; struct tc *t; unsigned int evpe_flags; evpe_flags = dvpe(); t = list_entry(v->tc.next, struct tc, tc); if (t != NULL) { settc(t->index); write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); } evpe(evpe_flags); return 0; } EXPORT_SYMBOL(vpe_stop); /* I've done with it thank you */ int vpe_free(void *vpe) { struct vpe *v = vpe; struct tc *t; unsigned int evpe_flags; t = list_entry(v->tc.next, struct tc, tc); if (t == NULL) return -ENOEXEC; evpe_flags = dvpe(); /* Put MVPE's into 'configuration state' */ set_c0_mvpcontrol(MVPCONTROL_VPC); settc(t->index); write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); /* halt the TC */ write_tc_c0_tchalt(TCHALT_H); mips_ihb(); /* mark the TC 
unallocated */ write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A); v->state = VPE_STATE_UNUSED; clear_c0_mvpcontrol(MVPCONTROL_VPC); evpe(evpe_flags); return 0; } EXPORT_SYMBOL(vpe_free); static ssize_t store_kill(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct vpe *vpe = get_vpe(aprp_cpu_index()); struct vpe_notifications *notifier; list_for_each_entry(notifier, &vpe->notify, list) notifier->stop(aprp_cpu_index()); release_progmem(vpe->load_addr); cleanup_tc(get_tc(aprp_cpu_index())); vpe_stop(vpe); vpe_free(vpe); return len; } static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill); static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr, char *buf) { struct vpe *vpe = get_vpe(aprp_cpu_index()); return sprintf(buf, "%d\n", vpe->ntcs); } static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct vpe *vpe = get_vpe(aprp_cpu_index()); unsigned long new; int ret; ret = kstrtoul(buf, 0, &new); if (ret < 0) return ret; if (new == 0 || new > (hw_tcs - aprp_cpu_index())) return -EINVAL; vpe->ntcs = new; return len; } static DEVICE_ATTR_RW(ntcs); static struct attribute *vpe_attrs[] = { &dev_attr_kill.attr, &dev_attr_ntcs.attr, NULL, }; ATTRIBUTE_GROUPS(vpe); static void vpe_device_release(struct device *cd) { kfree(cd); } static struct class vpe_class = { .name = "vpe", .owner = THIS_MODULE, .dev_release = vpe_device_release, .dev_groups = vpe_groups, }; static struct device vpe_device; int __init vpe_module_init(void) { unsigned int mtflags, vpflags; unsigned long flags, val; struct vpe *v = NULL; struct tc *t; int tc, err; if (!cpu_has_mipsmt) { pr_warn("VPE loader: not a MIPS MT capable processor\n"); return -ENODEV; } if (vpelimit == 0) { pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n" "Pass maxvpes=<n> argument as kernel argument\n"); return -ENODEV; } if (aprp_cpu_index() == 0) { pr_warn("No TCs reserved for AP/SP, not initialize 
VPE loader\n" "Pass maxtcs=<n> argument as kernel argument\n"); return -ENODEV; } major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops); if (major < 0) { pr_warn("VPE loader: unable to register character device\n"); return major; } err = class_register(&vpe_class); if (err) { pr_err("vpe_class registration failed\n"); goto out_chrdev; } device_initialize(&vpe_device); vpe_device.class = &vpe_class, vpe_device.parent = NULL, dev_set_name(&vpe_device, "vpe1"); vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR); err = device_add(&vpe_device); if (err) { pr_err("Adding vpe_device failed\n"); goto out_class; } local_irq_save(flags); mtflags = dmt(); vpflags = dvpe(); /* Put MVPE's into 'configuration state' */ set_c0_mvpcontrol(MVPCONTROL_VPC); val = read_c0_mvpconf0(); hw_tcs = (val & MVPCONF0_PTC) + 1; hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) { /* * Must re-enable multithreading temporarily or in case we * reschedule send IPIs or similar we might hang. */ clear_c0_mvpcontrol(MVPCONTROL_VPC); evpe(vpflags); emt(mtflags); local_irq_restore(flags); t = alloc_tc(tc); if (!t) { err = -ENOMEM; goto out_dev; } local_irq_save(flags); mtflags = dmt(); vpflags = dvpe(); set_c0_mvpcontrol(MVPCONTROL_VPC); /* VPE's */ if (tc < hw_tcs) { settc(tc); v = alloc_vpe(tc); if (v == NULL) { pr_warn("VPE: unable to allocate VPE\n"); goto out_reenable; } v->ntcs = hw_tcs - aprp_cpu_index(); /* add the tc to the list of this vpe's tc's. 
*/ list_add(&t->tc, &v->tc); /* deactivate all but vpe0 */ if (tc >= aprp_cpu_index()) { unsigned long tmp = read_vpe_c0_vpeconf0(); tmp &= ~VPECONF0_VPA; /* master VPE */ tmp |= VPECONF0_MVP; write_vpe_c0_vpeconf0(tmp); } /* disable multi-threading with TC's */ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); if (tc >= vpelimit) { /* * Set config to be the same as vpe0, * particularly kseg0 coherency alg */ write_vpe_c0_config(read_c0_config()); } } /* TC's */ t->pvpe = v; /* set the parent vpe */ if (tc >= aprp_cpu_index()) { unsigned long tmp; settc(tc); /* * A TC that is bound to any other VPE gets bound to * VPE0, ideally I'd like to make it homeless but it * doesn't appear to let me bind a TC to a non-existent * VPE. Which is perfectly reasonable. * * The (un)bound state is visible to an EJTAG probe so * may notify GDB... */ tmp = read_tc_c0_tcbind(); if (tmp & TCBIND_CURVPE) { /* tc is bound >vpe0 */ write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE); t->pvpe = get_vpe(0); /* set the parent vpe */ } /* halt the TC */ write_tc_c0_tchalt(TCHALT_H); mips_ihb(); tmp = read_tc_c0_tcstatus(); /* mark not activated and not dynamically allocatable */ tmp &= ~(TCSTATUS_A | TCSTATUS_DA); tmp |= TCSTATUS_IXMT; /* interrupt exempt */ write_tc_c0_tcstatus(tmp); } } out_reenable: /* release config state */ clear_c0_mvpcontrol(MVPCONTROL_VPC); evpe(vpflags); emt(mtflags); local_irq_restore(flags); return 0; out_dev: device_del(&vpe_device); out_class: class_unregister(&vpe_class); out_chrdev: unregister_chrdev(major, VPE_MODULE_NAME); return err; } void __exit vpe_module_exit(void) { struct vpe *v, *n; device_del(&vpe_device); class_unregister(&vpe_class); unregister_chrdev(major, VPE_MODULE_NAME); /* No locking needed here */ list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) { if (v->state != VPE_STATE_UNUSED) release_vpe(v); } }
gpl-2.0
droidroidz/USCC_R970_kernel
drivers/block/floppy.c
3238
119429
/* * linux/drivers/block/floppy.c * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1993, 1994 Alain Knaff * Copyright (C) 1998 Alan Cox */ /* * 02.12.91 - Changed to static variables to indicate need for reset * and recalibrate. This makes some things easier (output_byte reset * checking etc), and means less interrupt jumping in case of errors, * so the code is hopefully easier to understand. */ /* * This file is certainly a mess. I've tried my best to get it working, * but I don't like programming floppies, and I have only one anyway. * Urgel. I should check for more errors, and do more graceful error * recovery. Seems there are problems with several drives. I've tried to * correct them. No promises. */ /* * As with hd.c, all routines within this file can (and will) be called * by interrupts, so extreme caution is needed. A hardware interrupt * handler may not sleep, or a kernel panic will happen. Thus I cannot * call "floppy-on" directly, but have to set a special timer interrupt * etc. */ /* * 28.02.92 - made track-buffering routines, based on the routines written * by entropy@wintermute.wpi.edu (Lawrence Foard). Linus. */ /* * Automatic floppy-detection and formatting written by Werner Almesberger * (almesber@nessie.cs.id.ethz.ch), who also corrected some problems with * the floppy-change signal detection. */ /* * 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed * FDC data overrun bug, added some preliminary stuff for vertical * recording support. * * 1992/9/17: Added DMA allocation & DMA functions. -- hhb. * * TODO: Errors are still not counted properly. */ /* 1992/9/20 * Modifications for ``Sector Shifting'' by Rob Hooft (hooft@chem.ruu.nl) * modeled after the freeware MS-DOS program fdformat/88 V1.8 by * Christoph H. Hochst\"atter. * I have fixed the shift values to the ones I always use. Maybe a new * ioctl() should be created to be able to modify them. 
* There is a bug in the driver that makes it impossible to format a * floppy as the first thing after bootup. */ /* * 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and * this helped the floppy driver as well. Much cleaner, and still seems to * work. */ /* 1994/6/24 --bbroad-- added the floppy table entries and made * minor modifications to allow 2.88 floppies to be run. */ /* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more * disk types. */ /* * 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger * format bug fixes, but unfortunately some new bugs too... */ /* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write * errors to allow safe writing by specialized programs. */ /* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks * by defining bit 1 of the "stretch" parameter to mean put sectors on the * opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's * drives are "upside-down"). */ /* * 1995/8/26 -- Andreas Busse -- added Mips support. */ /* * 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent * features to asm/floppy.h. */ /* * 1998/1/21 -- Richard Gooch <rgooch@atnf.csiro.au> -- devfs support */ /* * 1998/05/07 -- Russell King -- More portability cleanups; moved definition of * interrupt and dma channel to asm/floppy.h. Cleaned up some formatting & * use of '0' for NULL. */ /* * 1998/06/07 -- Alan Cox -- Merged the 2.0.34 fixes for resource allocation * failures. */ /* * 1998/09/20 -- David Weinehall -- Added slow-down code for buggy PS/2-drives. */ /* * 1999/08/13 -- Paul Slootman -- floppy stopped working on Alpha after 24 * days, 6 hours, 32 minutes and 32 seconds (i.e. MAXINT jiffies; ints were * being used to store jiffies, which are unsigned longs). 
*/ /* * 2000/08/28 -- Arnaldo Carvalho de Melo <acme@conectiva.com.br> * - get rid of check_region * - s/suser/capable/ */ /* * 2001/08/26 -- Paul Gortmaker - fix insmod oops on machines with no * floppy controller (lingering task on list after module is gone... boom.) */ /* * 2002/02/07 -- Anton Altaparmakov - Fix io ports reservation to correct range * (0x3f2-0x3f5, 0x3f7). This fix is a bit of a hack but the proper fix * requires many non-obvious changes in arch dependent code. */ /* 2003/07/28 -- Daniele Bellucci <bellucda@tiscali.it>. * Better audit of register_blkdev. */ #undef FLOPPY_SILENT_DCL_CLEAR #define REALLY_SLOW_IO #define DEBUGT 2 #define DPRINT(format, args...) \ pr_info("floppy%d: " format, current_drive, ##args) #define DCL_DEBUG /* debug disk change line */ #ifdef DCL_DEBUG #define debug_dcl(test, fmt, args...) \ do { if ((test) & FD_DEBUG) DPRINT(fmt, ##args); } while (0) #else #define debug_dcl(test, fmt, args...) \ do { if (0) DPRINT(fmt, ##args); } while (0) #endif /* do print messages for unexpected interrupts */ static int print_unex = 1; #include <linux/module.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/workqueue.h> #define FDPATCHES #include <linux/fdreg.h> #include <linux/fd.h> #include <linux/hdreg.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/bio.h> #include <linux/string.h> #include <linux/jiffies.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/mc146818rtc.h> /* CMOS defines */ #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mod_devicetable.h> #include <linux/mutex.h> #include <linux/io.h> #include <linux/uaccess.h> /* * PS/2 floppies have much slower step rates than regular floppies. * It's been recommended that take about 1/4 of the default speed * in some more extreme cases. 
*/ static DEFINE_MUTEX(floppy_mutex); static int slow_floppy; #include <asm/dma.h> #include <asm/irq.h> static int FLOPPY_IRQ = 6; static int FLOPPY_DMA = 2; static int can_use_virtual_dma = 2; /* ======= * can use virtual DMA: * 0 = use of virtual DMA disallowed by config * 1 = use of virtual DMA prescribed by config * 2 = no virtual DMA preference configured. By default try hard DMA, * but fall back on virtual DMA when not enough memory available */ static int use_virtual_dma; /* ======= * use virtual DMA * 0 using hard DMA * 1 using virtual DMA * This variable is set to virtual when a DMA mem problem arises, and * reset back in floppy_grab_irq_and_dma. * It is not safe to reset it in other circumstances, because the floppy * driver may have several buffers in use at once, and we do currently not * record each buffers capabilities */ static DEFINE_SPINLOCK(floppy_lock); static unsigned short virtual_dma_port = 0x3f0; irqreturn_t floppy_interrupt(int irq, void *dev_id); static int set_dor(int fdc, char mask, char data); #define K_64 0x10000 /* 64KB */ /* the following is the mask of allowed drives. By default units 2 and * 3 of both floppy controllers are disabled, because switching on the * motor of these drives causes system hangs on some PCI computers. drive * 0 is the low bit (0x1), and drive 7 is the high bit (0x80). Bits are on if * a drive is allowed. * * NOTE: This must come before we include the arch floppy header because * some ports reference this variable from there. 
-DaveM */ static int allowed_drive_mask = 0x33; #include <asm/floppy.h> static int irqdma_allocated; #include <linux/blkdev.h> #include <linux/blkpg.h> #include <linux/cdrom.h> /* for the compatibility eject ioctl */ #include <linux/completion.h> static struct request *current_req; static void do_fd_request(struct request_queue *q); static int set_next_request(void); #ifndef fd_get_dma_residue #define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA) #endif /* Dma Memory related stuff */ #ifndef fd_dma_mem_free #define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size)) #endif #ifndef fd_dma_mem_alloc #define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL, get_order(size)) #endif static inline void fallback_on_nodma_alloc(char **addr, size_t l) { #ifdef FLOPPY_CAN_FALLBACK_ON_NODMA if (*addr) return; /* we have the memory */ if (can_use_virtual_dma != 2) return; /* no fallback allowed */ pr_info("DMA memory shortage. Temporarily falling back on virtual DMA\n"); *addr = (char *)nodma_mem_alloc(l); #else return; #endif } /* End dma memory related stuff */ static unsigned long fake_change; static bool initialized; #define ITYPE(x) (((x) >> 2) & 0x1f) #define TOMINOR(x) ((x & 3) | ((x & 4) << 5)) #define UNIT(x) ((x) & 0x03) /* drive on fdc */ #define FDC(x) (((x) & 0x04) >> 2) /* fdc of drive */ /* reverse mapping from unit and fdc to drive */ #define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2)) #define DP (&drive_params[current_drive]) #define DRS (&drive_state[current_drive]) #define DRWE (&write_errors[current_drive]) #define FDCS (&fdc_state[fdc]) #define UDP (&drive_params[drive]) #define UDRS (&drive_state[drive]) #define UDRWE (&write_errors[drive]) #define UFDCS (&fdc_state[FDC(drive)]) #define PH_HEAD(floppy, head) (((((floppy)->stretch & 2) >> 1) ^ head) << 2) #define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH) /* read/write */ #define COMMAND (raw_cmd->cmd[0]) #define DR_SELECT (raw_cmd->cmd[1]) #define TRACK (raw_cmd->cmd[2]) #define HEAD 
(raw_cmd->cmd[3]) #define SECTOR (raw_cmd->cmd[4]) #define SIZECODE (raw_cmd->cmd[5]) #define SECT_PER_TRACK (raw_cmd->cmd[6]) #define GAP (raw_cmd->cmd[7]) #define SIZECODE2 (raw_cmd->cmd[8]) #define NR_RW 9 /* format */ #define F_SIZECODE (raw_cmd->cmd[2]) #define F_SECT_PER_TRACK (raw_cmd->cmd[3]) #define F_GAP (raw_cmd->cmd[4]) #define F_FILL (raw_cmd->cmd[5]) #define NR_F 6 /* * Maximum disk size (in kilobytes). * This default is used whenever the current disk size is unknown. * [Now it is rather a minimum] */ #define MAX_DISK_SIZE 4 /* 3984 */ /* * globals used by 'result()' */ #define MAX_REPLIES 16 static unsigned char reply_buffer[MAX_REPLIES]; static int inr; /* size of reply buffer, when called from interrupt */ #define ST0 (reply_buffer[0]) #define ST1 (reply_buffer[1]) #define ST2 (reply_buffer[2]) #define ST3 (reply_buffer[0]) /* result of GETSTATUS */ #define R_TRACK (reply_buffer[3]) #define R_HEAD (reply_buffer[4]) #define R_SECTOR (reply_buffer[5]) #define R_SIZECODE (reply_buffer[6]) #define SEL_DLY (2 * HZ / 100) /* * this struct defines the different floppy drive types. */ static struct { struct floppy_drive_params params; const char *name; /* name printed while booting */ } default_drive_params[] = { /* NOTE: the time values in jiffies should be in msec! CMOS drive type | Maximum data rate supported by drive type | | Head load time, msec | | | Head unload time, msec (not used) | | | | Step rate interval, usec | | | | | Time needed for spinup time (jiffies) | | | | | | Timeout for spinning down (jiffies) | | | | | | | Spindown offset (where disk stops) | | | | | | | | Select delay | | | | | | | | | RPS | | | | | | | | | | Max number of tracks | | | | | | | | | | | Interrupt timeout | | | | | | | | | | | | Max nonintlv. 
sectors | | | | | | | | | | | | | -Max Errors- flags */ {{0, 500, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 80, 3*HZ, 20, {3,1,2,0,2}, 0, 0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" }, {{1, 300, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 40, 3*HZ, 17, {3,1,2,0,2}, 0, 0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/ {{2, 500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6, 83, 3*HZ, 17, {3,1,2,0,2}, 0, 0, { 2, 5, 6,23,10,20,12, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/ {{3, 250, 16, 16, 3000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0, 0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/ {{4, 500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0, 0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/ {{5, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0, 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/ {{6, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0, 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/ /* | --autodetected formats--- | | | * read_track | | Name printed when booting * | Native format * Frequency of disk change checks */ }; static struct floppy_drive_params drive_params[N_DRIVE]; static struct floppy_drive_struct drive_state[N_DRIVE]; static struct floppy_write_errors write_errors[N_DRIVE]; static struct timer_list motor_off_timer[N_DRIVE]; static struct gendisk *disks[N_DRIVE]; static struct block_device *opened_bdev[N_DRIVE]; static DEFINE_MUTEX(open_lock); static struct floppy_raw_cmd *raw_cmd, default_raw_cmd; static int fdc_queue; /* * This struct defines the different floppy types. * * Bit 0 of 'stretch' tells if the tracks need to be doubled for some * types (e.g. 360kB diskette in 1.2MB drive, etc.). 
Bit 1 of 'stretch' * tells if the disk is in Commodore 1581 format, which means side 0 sectors * are located on side 1 of the disk but with a side 0 ID, and vice-versa. * This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical * side 0 is on physical side 0 (but with the misnamed sector IDs). * 'stretch' should probably be renamed to something more general, like * 'options'. * * Bits 2 through 9 of 'stretch' tell the number of the first sector. * The LSB (bit 2) is flipped. For most disks, the first sector * is 1 (represented by 0x00<<2). For some CP/M and music sampler * disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2). * For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2). * * Other parameters should be self-explanatory (see also setfdprm(8)). */ /* Size | Sectors per track | | Head | | | Tracks | | | | Stretch | | | | | Gap 1 size | | | | | | Data rate, | 0x40 for perp | | | | | | | Spec1 (stepping rate, head unload | | | | | | | | /fmt gap (gap2) */ static struct floppy_struct floppy_type[32] = { { 0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL }, /* 0 no testing */ { 720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360" }, /* 1 360KB PC */ { 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" }, /* 2 1.2MB AT */ { 720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360" }, /* 3 360KB SS 3.5" */ { 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720" }, /* 4 720KB 3.5" */ { 720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360" }, /* 5 360KB AT */ { 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720" }, /* 6 720KB AT */ { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" }, /* 7 1.44MB 3.5" */ { 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" }, /* 8 2.88MB 3.5" */ { 6240,39,2,80,0,0x1B,0x43,0xAF,0x28,"E3120" }, /* 9 3.12MB 3.5" */ { 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25" */ { 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5" */ { 820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410" }, /* 12 410KB 5.25" */ { 
1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820" }, /* 13 820KB 3.5" */ { 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" }, /* 14 1.48MB 5.25" */ { 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" }, /* 15 1.72MB 3.5" */ { 840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420" }, /* 16 420KB 5.25" */ { 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830" }, /* 17 830KB 3.5" */ { 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" }, /* 18 1.49MB 5.25" */ { 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74 MB 3.5" */ { 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880" }, /* 20 880KB 5.25" */ { 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5" */ { 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5" */ { 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25" */ { 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5" */ { 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5" */ { 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5" */ { 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5" */ { 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5" */ { 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5" */ { 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800" }, /* 30 800KB 3.5" */ { 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */ }; #define SECTSIZE (_FD_SECTSIZE(*floppy)) /* Auto-detection: Disk type used until the next media change occurs. */ static struct floppy_struct *current_type[N_DRIVE]; /* * User-provided type information. current_type points to * the respective entry of this array. */ static struct floppy_struct user_params[N_DRIVE]; static sector_t floppy_sizes[256]; static char floppy_device_name[] = "floppy"; /* * The driver is trying to determine the correct media format * while probing is set. rw_interrupt() clears it after a * successful access. */ static int probing; /* Synchronization of FDC access. 
*/ #define FD_COMMAND_NONE -1 #define FD_COMMAND_ERROR 2 #define FD_COMMAND_OKAY 3 static volatile int command_status = FD_COMMAND_NONE; static unsigned long fdc_busy; static DECLARE_WAIT_QUEUE_HEAD(fdc_wait); static DECLARE_WAIT_QUEUE_HEAD(command_done); /* Errors during formatting are counted here. */ static int format_errors; /* Format request descriptor. */ static struct format_descr format_req; /* * Rate is 0 for 500kb/s, 1 for 300kbps, 2 for 250kbps * Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc), * H is head unload time (1=16ms, 2=32ms, etc) */ /* * Track buffer * Because these are written to by the DMA controller, they must * not contain a 64k byte boundary crossing, or data will be * corrupted/lost. */ static char *floppy_track_buffer; static int max_buffer_sectors; static int *errors; typedef void (*done_f)(int); static const struct cont_t { void (*interrupt)(void); /* this is called after the interrupt of the * main command */ void (*redo)(void); /* this is called to retry the operation */ void (*error)(void); /* this is called to tally an error */ done_f done; /* this is called to say if the operation has * succeeded/failed */ } *cont; static void floppy_ready(void); static void floppy_start(void); static void process_fd_request(void); static void recalibrate_floppy(void); static void floppy_shutdown(unsigned long); static int floppy_request_regions(int); static void floppy_release_regions(int); static int floppy_grab_irq_and_dma(void); static void floppy_release_irq_and_dma(void); /* * The "reset" variable should be tested whenever an interrupt is scheduled, * after the commands have been sent. This is to ensure that the driver doesn't * get wedged when the interrupt doesn't come because of a failed command. * reset doesn't need to be tested before sending commands, because * output_byte is automatically disabled when reset is set. 
*/
static void reset_fdc(void);

/*
 * These are global variables, as that's the easiest way to give
 * information to interrupts. They are the data used for the current
 * request.
 */
#define NO_TRACK	-1
#define NEED_1_RECAL	-2
#define NEED_2_RECAL	-3

/* number of users currently holding the irq/dma resources */
static atomic_t usage_count = ATOMIC_INIT(0);

/* buffer related variables */
static int buffer_track = -1;
static int buffer_drive = -1;
static int buffer_min = -1;
static int buffer_max = -1;

/* fdc related variables, should end up in a struct */
static struct floppy_fdc_state fdc_state[N_FDC];
static int fdc;			/* current fdc */

static struct floppy_struct *_floppy = floppy_type;
static unsigned char current_drive;
static long current_count_sectors;
static unsigned char fsector_t;	/* sector in track */
static unsigned char in_sector_offset;	/* offset within physical sector,
					 * expressed in units of 512 bytes */

/* true if the drive has neither a user-supplied nor a probed disk type */
static inline bool drive_no_geom(int drive)
{
	return !current_type[drive] && !ITYPE(UDRS->fd_device);
}

#ifndef fd_eject
/* fallback for arches whose floppy.h provides no eject support */
static inline int fd_eject(int drive)
{
	return -EINVAL;
}
#endif

/*
 * Debugging
 * =========
 */
#ifdef DEBUGT
static long unsigned debugtimer;

/* start the debug stopwatch */
static inline void set_debugt(void)
{
	debugtimer = jiffies;
}

/* print elapsed jiffies since set_debugt() when FD_DEBUG-style tracing is on */
static inline void debugt(const char *func, const char *msg)
{
	if (DP->flags & DEBUGT)
		pr_info("%s:%s dtime=%lu\n", func, msg, jiffies - debugtimer);
}
#else
static inline void set_debugt(void) { }

static inline void debugt(const char *func, const char *msg) { }
#endif /* DEBUGT */

typedef void (*timeout_fn)(unsigned long);

/* watchdog timer: fires floppy_shutdown() if a command never completes */
static DEFINE_TIMER(fd_timeout, floppy_shutdown, 0, 0);

static const char *timeout_message;

/*
 * Sanity check: warn if the driver is mid-command (fdc_busy set,
 * command not yet finished) but the watchdog timer is no longer pending,
 * i.e. the timeout handler died without releasing the driver.
 */
static void is_alive(const char *func, const char *message)
{
	/* this routine checks whether the floppy driver is "alive" */
	if (test_bit(0, &fdc_busy) && command_status < 2 &&
	    !timer_pending(&fd_timeout)) {
		DPRINT("%s: timeout handler died. %s\n", func, message);
	}
}

/* handler the interrupt service routine should dispatch to, if any */
static void (*do_floppy)(void) = NULL;

#define OLOGSIZE 20

static void (*lasthandler)(void);
static unsigned long interruptjiffies;
static unsigned long resultjiffies;
static int resultsize;
static unsigned long lastredo;

/* ring buffer logging the last OLOGSIZE bytes sent to the FDC (debug aid) */
static struct output_log {
	unsigned char data;
	unsigned char status;
	unsigned long jiffies;
} output_log[OLOGSIZE];

static int output_log_pos;

#define current_reqD -1
#define MAXTIMEOUT -2

/*
 * Re-arm the fd_timeout watchdog for 'drive' (current_reqD selects the
 * current drive). Caller must hold floppy_lock; out-of-range drives get
 * a generous 20 s default timeout.
 */
static void __reschedule_timeout(int drive, const char *message)
{
	if (drive == current_reqD)
		drive = current_drive;
	del_timer(&fd_timeout);
	if (drive < 0 || drive >= N_DRIVE) {
		fd_timeout.expires = jiffies + 20UL * HZ;
		drive = 0;
	} else
		fd_timeout.expires = jiffies + UDP->timeout;
	add_timer(&fd_timeout);
	if (UDP->flags & FD_DEBUG)
		DPRINT("reschedule timeout %s\n", message);
	timeout_message = message;
}

/* locked wrapper around __reschedule_timeout() */
static void reschedule_timeout(int drive, const char *message)
{
	unsigned long flags;

	spin_lock_irqsave(&floppy_lock, flags);
	__reschedule_timeout(drive, message);
	spin_unlock_irqrestore(&floppy_lock, flags);
}

/* clamp helpers: INFBOUND raises 'a' to at least 'b', SUPBOUND caps it at 'b' */
#define INFBOUND(a, b) (a) = max_t(int, a, b)
#define SUPBOUND(a, b) (a) = min_t(int, a, b)

/*
 * Bottom half floppy driver.
 * ==========================
 *
 * This part of the file contains the code talking directly to the hardware,
 * and also the main service loop (seek-configure-spinup-command)
 */

/*
 * disk change.
 * This routine is responsible for maintaining the FD_DISK_CHANGE flag,
 * and the last_checked date.
 *
 * last_checked is the date of the last check which showed 'no disk change'
 * FD_DISK_CHANGE is set under two conditions:
 * 1. The floppy has been changed after some i/o to that floppy already
 *    took place.
 * 2. No floppy disk is in the drive. This is done in order to ensure that
 *    requests are quickly flushed in case there is no disk in the drive. It
 *    follows that FD_DISK_CHANGE can only be cleared if there is a disk in
 *    the drive.
 *
 * For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet.
 * For 2., FD_DISK_NEWCHANGE is watched. FD_DISK_NEWCHANGE is cleared on
 * each seek. If a disk is present, the disk change line should also be
 * cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk
 * change line is set, this means either that no disk is in the drive, or
 * that it has been removed since the last seek.
 *
 * This means that we really have a third possibility too:
 *  The floppy has been changed after the last seek.
 */

/* Returns 1 if the change line is asserted (disk changed or absent), else 0. */
static int disk_change(int drive)
{
	int fdc = FDC(drive);	/* shadows the global 'fdc' so FDCS maps to this drive */

	if (time_before(jiffies, UDRS->select_date + UDP->select_delay))
		DPRINT("WARNING disk change called early\n");
	/* the change line is only meaningful for the selected, motor-on drive */
	if (!(FDCS->dor & (0x10 << UNIT(drive))) ||
	    (FDCS->dor & 3) != UNIT(drive) || fdc != FDC(drive)) {
		DPRINT("probing disk change on unselected drive\n");
		DPRINT("drive=%d fdc=%d dor=%x\n", drive, FDC(drive),
		       (unsigned int)FDCS->dor);
	}

	debug_dcl(UDP->flags,
		  "checking disk change line for drive %d\n", drive);
	debug_dcl(UDP->flags, "jiffies=%lu\n", jiffies);
	debug_dcl(UDP->flags, "disk change line=%x\n", fd_inb(FD_DIR) & 0x80);
	debug_dcl(UDP->flags, "flags=%lx\n", UDRS->flags);

	/* broken DCL hardware: trust only the software flag */
	if (UDP->flags & FD_BROKEN_DCL)
		return test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
	/* bit 7 of FD_DIR is the change line; UDP->flags bit 7 gives its polarity */
	if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80) {
		set_bit(FD_VERIFY_BIT, &UDRS->flags);
					/* verify write protection */

		if (UDRS->maxblock)	/* mark it changed */
			set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);

		/* invalidate its geometry */
		if (UDRS->keep_data >= 0) {
			if ((UDP->flags & FTD_MSG) &&
			    current_type[drive] != NULL)
				DPRINT("Disk type is undefined after disk change\n");
			current_type[drive] = NULL;
			floppy_sizes[TOMINOR(drive)] = MAX_DISK_SIZE << 1;
		}

		return 1;
	} else {
		UDRS->last_checked = jiffies;
		clear_bit(FD_DISK_NEWCHANGE_BIT, &UDRS->flags);
	}
	return 0;
}

/* true if 'unit' is the selected drive with its motor bit on in 'dor' */
static inline int is_selected(int dor, int unit)
{
	return ((dor & (0x10 << unit)) && (dor & 3) == unit);
}

/* FDC main status: ready for a new command byte, not busy with DIR/DMA */
static bool is_ready_state(int status)
{
	int state = status & (STATUS_READY | STATUS_DIR | STATUS_DMA);
	return state == STATUS_READY;
}

/*
 * Update the Digital Output Register: new = (old & mask) | data.
 * Tracks drive select/deselect dates and fires disk_change() when a
 * selected drive gets deselected. Returns the previous DOR value, or
 * -1 if this controller has no I/O address.
 */
static int set_dor(int fdc, char mask, char data)
{
	unsigned char unit;
	unsigned char drive;
	unsigned char newdor;
	unsigned char olddor;

	if (FDCS->address == -1)
		return -1;

	olddor = FDCS->dor;
	newdor = (olddor & mask) | data;
	if (newdor != olddor) {
		unit = olddor & 0x3;
		/* last chance to sample the change line before deselect */
		if (is_selected(olddor, unit) && !is_selected(newdor, unit)) {
			drive = REVDRIVE(fdc, unit);
			debug_dcl(UDP->flags,
				  "calling disk change from set_dor\n");
			disk_change(drive);
		}
		FDCS->dor = newdor;
		fd_outb(newdor, FD_DOR);

		unit = newdor & 0x3;
		if (!is_selected(olddor, unit) && is_selected(newdor, unit)) {
			drive = REVDRIVE(fdc, unit);
			UDRS->select_date = jiffies;
		}
	}
	return olddor;
}

/*
 * Pulse the current drive's motor-select bit off and back on
 * (only done when the drive needs no select delay).
 */
static void twaddle(void)
{
	if (DP->select_delay)
		return;
	fd_outb(FDCS->dor & ~(0x10 << UNIT(current_drive)), FD_DOR);
	fd_outb(FDCS->dor, FD_DOR);
	DRS->select_date = jiffies;
}

/*
 * Reset all driver information about the current fdc.
 * This is needed after a reset, and after a raw command.
 */
static void reset_fdc_info(int mode)
{
	int drive;

	FDCS->spec1 = FDCS->spec2 = -1;
	FDCS->need_configure = 1;
	FDCS->perp_mode = 1;
	FDCS->rawcmd = 0;
	/* force recalibration of every drive on this controller */
	for (drive = 0; drive < N_DRIVE; drive++)
		if (FDC(drive) == fdc &&
		    (mode || UDRS->track != NEED_1_RECAL))
			UDRS->track = NEED_2_RECAL;
}

/* selects the fdc and drive, and enables the fdc's input/dma.
*/ static void set_fdc(int drive) { if (drive >= 0 && drive < N_DRIVE) { fdc = FDC(drive); current_drive = drive; } if (fdc != 1 && fdc != 0) { pr_info("bad fdc value\n"); return; } set_dor(fdc, ~0, 8); #if N_FDC > 1 set_dor(1 - fdc, ~8, 0); #endif if (FDCS->rawcmd == 2) reset_fdc_info(1); if (fd_inb(FD_STATUS) != STATUS_READY) FDCS->reset = 1; } /* locks the driver */ static int lock_fdc(int drive, bool interruptible) { if (WARN(atomic_read(&usage_count) == 0, "Trying to lock fdc while usage count=0\n")) return -1; if (wait_event_interruptible(fdc_wait, !test_and_set_bit(0, &fdc_busy))) return -EINTR; command_status = FD_COMMAND_NONE; __reschedule_timeout(drive, "lock fdc"); set_fdc(drive); return 0; } /* unlocks the driver */ static void unlock_fdc(void) { unsigned long flags; raw_cmd = NULL; if (!test_bit(0, &fdc_busy)) DPRINT("FDC access conflict!\n"); if (do_floppy) DPRINT("device interrupt still active at FDC release: %pf!\n", do_floppy); command_status = FD_COMMAND_NONE; spin_lock_irqsave(&floppy_lock, flags); del_timer(&fd_timeout); cont = NULL; clear_bit(0, &fdc_busy); if (current_req || set_next_request()) do_fd_request(current_req->q); spin_unlock_irqrestore(&floppy_lock, flags); wake_up(&fdc_wait); } /* switches the motor off after a given timeout */ static void motor_off_callback(unsigned long nr) { unsigned char mask = ~(0x10 << UNIT(nr)); set_dor(FDC(nr), mask, 0); } /* schedules motor off */ static void floppy_off(unsigned int drive) { unsigned long volatile delta; int fdc = FDC(drive); if (!(FDCS->dor & (0x10 << UNIT(drive)))) return; del_timer(motor_off_timer + drive); /* make spindle stop in a position which minimizes spinup time * next time */ if (UDP->rps) { delta = jiffies - UDRS->first_read_date + HZ - UDP->spindown_offset; delta = ((delta * UDP->rps) % HZ) / UDP->rps; motor_off_timer[drive].expires = jiffies + UDP->spindown - delta; } add_timer(motor_off_timer + drive); } /* * cycle through all N_DRIVE floppy drives, for disk change testing. 
 * stopping at current drive. This is done before any long operation, to
 * be sure to have up to date disk change information.
 */
static void scandrives(void)
{
	int i;
	int drive;
	int saved_drive;

	if (DP->select_delay)
		return;

	saved_drive = current_drive;
	for (i = 0; i < N_DRIVE; i++) {
		drive = (saved_drive + i + 1) % N_DRIVE;
		if (UDRS->fd_ref == 0 || UDP->select_delay != 0)
			continue;	/* skip closed drives */
		set_fdc(drive);
		if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) &
		      (0x10 << UNIT(drive))))
			/* switch the motor off again, if it was off to
			 * begin with */
			set_dor(fdc, ~(0x10 << UNIT(drive)), 0);
	}
	set_fdc(saved_drive);
}

/* no-op work handler, used to neutralize floppy_work */
static void empty(void)
{
}

static DECLARE_WORK(floppy_work, NULL);

/* run 'handler' in process context via the shared floppy workqueue item */
static void schedule_bh(void (*handler)(void))
{
	PREPARE_WORK(&floppy_work, (work_func_t)handler);
	schedule_work(&floppy_work);
}

static DEFINE_TIMER(fd_timer, NULL, 0, 0);

/* stop all pending interrupt/timer/work activity for the current command */
static void cancel_activity(void)
{
	unsigned long flags;

	spin_lock_irqsave(&floppy_lock, flags);
	do_floppy = NULL;
	PREPARE_WORK(&floppy_work, (work_func_t)empty);
	del_timer(&fd_timer);
	spin_unlock_irqrestore(&floppy_lock, flags);
}

/* this function makes sure that the disk stays in the drive during the
 * transfer */
static void fd_watchdog(void)
{
	debug_dcl(DP->flags, "calling disk change from watchdog\n");

	if (disk_change(current_drive)) {
		DPRINT("disk removed during i/o\n");
		cancel_activity();
		cont->done(0);
		reset_fdc();
	} else {
		/* re-arm ourselves to poll again in 100 ms */
		del_timer(&fd_timer);
		fd_timer.function = (timeout_fn)fd_watchdog;
		fd_timer.expires = jiffies + HZ / 10;
		add_timer(&fd_timer);
	}
}

/* stop the watchdog and hand control to the current command's handler */
static void main_command_interrupt(void)
{
	del_timer(&fd_timer);
	cont->interrupt();
}

/* waits for a delay (spinup or select) to pass */
/* Returns 1 if 'function' was scheduled for later (or a reset was needed),
 * 0 if the delay has already elapsed and the caller may proceed now. */
static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
{
	if (FDCS->reset) {
		reset_fdc();	/* do the reset during sleep to win time
				 * if we don't need to sleep, it's a good
				 * occasion anyways */
		return 1;
	}

	if (time_before(jiffies, delay)) {
		del_timer(&fd_timer);
		fd_timer.function = function;
		fd_timer.expires = delay;
		add_timer(&fd_timer);
		return 1;
	}
	return 0;
}

/* program the (real or virtual) DMA controller for the current raw_cmd */
static void setup_DMA(void)
{
	unsigned long f;

	if (raw_cmd->length == 0) {
		int i;

		pr_info("zero dma transfer size:");
		for (i = 0; i < raw_cmd->cmd_count; i++)
			pr_cont("%x,", raw_cmd->cmd[i]);
		pr_cont("\n");
		cont->done(0);
		FDCS->reset = 1;
		return;
	}
	/* DMA buffers must be 512-byte aligned */
	if (((unsigned long)raw_cmd->kernel_data) % 512) {
		pr_info("non aligned address: %p\n", raw_cmd->kernel_data);
		cont->done(0);
		FDCS->reset = 1;
		return;
	}
	f = claim_dma_lock();
	fd_disable_dma();
#ifdef fd_dma_setup
	/* arch provides a one-shot setup helper */
	if (fd_dma_setup(raw_cmd->kernel_data, raw_cmd->length,
			 (raw_cmd->flags & FD_RAW_READ) ?
			 DMA_MODE_READ : DMA_MODE_WRITE, FDCS->address) < 0) {
		release_dma_lock(f);
		cont->done(0);
		FDCS->reset = 1;
		return;
	}
	release_dma_lock(f);
#else
	/* classic i8237-style programming sequence */
	fd_clear_dma_ff();
	fd_cacheflush(raw_cmd->kernel_data, raw_cmd->length);
	fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ) ?
			DMA_MODE_READ : DMA_MODE_WRITE);
	fd_set_dma_addr(raw_cmd->kernel_data);
	fd_set_dma_count(raw_cmd->length);
	virtual_dma_port = FDCS->address;
	fd_enable_dma();
	release_dma_lock(f);
#endif
}

static void show_floppy(void);

/* waits until the fdc becomes ready */
/* Returns the main status register value, or -1 on timeout/reset. */
static int wait_til_ready(void)
{
	int status;
	int counter;

	if (FDCS->reset)
		return -1;
	/* busy-poll the main status register; bounded to 10000 reads */
	for (counter = 0; counter < 10000; counter++) {
		status = fd_inb(FD_STATUS);
		if (status & STATUS_READY)
			return status;
	}
	if (initialized) {
		DPRINT("Getstatus times out (%x) on fdc %d\n", status, fdc);
		show_floppy();
	}
	FDCS->reset = 1;
	return -1;
}

/* sends a command byte to the fdc */
/* Returns 0 on success, -1 on failure (and flags the controller for reset). */
static int output_byte(char byte)
{
	int status = wait_til_ready();

	if (status < 0)
		return -1;

	if (is_ready_state(status)) {
		fd_outb(byte, FD_DATA);
		/* record the byte in the debug ring buffer */
		output_log[output_log_pos].data = byte;
		output_log[output_log_pos].status = status;
		output_log[output_log_pos].jiffies = jiffies;
		output_log_pos = (output_log_pos + 1) % OLOGSIZE;
		return 0;
	}
	FDCS->reset = 1;
	if (initialized) {
		DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
		       byte, fdc, status);
		show_floppy();
	}
	return -1;
}

/* gets the response from the fdc */
/* Reads result bytes into reply_buffer[]; returns their count, or -1 on error. */
static int result(void)
{
	int i;
	int status = 0;

	for (i = 0; i < MAX_REPLIES; i++) {
		status = wait_til_ready();
		if (status < 0)
			break;
		status &= STATUS_DIR | STATUS_READY | STATUS_BUSY | STATUS_DMA;
		if ((status & ~STATUS_BUSY) == STATUS_READY) {
			/* result phase is over */
			resultjiffies = jiffies;
			resultsize = i;
			return i;
		}
		if (status == (STATUS_DIR | STATUS_READY | STATUS_BUSY))
			reply_buffer[i] = fd_inb(FD_DATA);
		else
			break;
	}
	if (initialized) {
		DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
		       fdc, status, i);
		show_floppy();
	}
	FDCS->reset = 1;
	return -1;
}

#define MORE_OUTPUT -2
/* does the fdc need more output? */
/* Returns MORE_OUTPUT if the FDC wants more command bytes, else result()/-1. */
static int need_more_output(void)
{
	int status = wait_til_ready();

	if (status < 0)
		return -1;

	if (is_ready_state(status))
		return MORE_OUTPUT;

	return result();
}

/* Set perpendicular mode as required, based on data rate, if supported.
 * 82077 Now tested. 1Mbps data rate only possible with 82077-1.
 */
static void perpendicular_mode(void)
{
	unsigned char perp_mode;

	if (raw_cmd->rate & 0x40) {	/* 0x40 = perpendicular recording flag */
		switch (raw_cmd->rate & 3) {
		case 0:
			perp_mode = 2;
			break;
		case 3:
			perp_mode = 3;
			break;
		default:
			DPRINT("Invalid data rate for perpendicular mode!\n");
			cont->done(0);
			FDCS->reset = 1;
			/*
			 * convenient way to return to
			 * redo without too much hassle
			 * (deep stack et al.)
*/ return; } } else perp_mode = 0; if (FDCS->perp_mode == perp_mode) return; if (FDCS->version >= FDC_82077_ORIG) { output_byte(FD_PERPENDICULAR); output_byte(perp_mode); FDCS->perp_mode = perp_mode; } else if (perp_mode) { DPRINT("perpendicular mode not supported by this FDC.\n"); } } /* perpendicular_mode */ static int fifo_depth = 0xa; static int no_fifo; static int fdc_configure(void) { /* Turn on FIFO */ output_byte(FD_CONFIGURE); if (need_more_output() != MORE_OUTPUT) return 0; output_byte(0); output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf)); output_byte(0); /* pre-compensation from track 0 upwards */ return 1; } #define NOMINAL_DTR 500 /* Issue a "SPECIFY" command to set the step rate time, head unload time, * head load time, and DMA disable flag to values needed by floppy. * * The value "dtr" is the data transfer rate in Kbps. It is needed * to account for the data rate-based scaling done by the 82072 and 82077 * FDC types. This parameter is ignored for other types of FDCs (i.e. * 8272a). * * Note that changing the data transfer rate has a (probably deleterious) * effect on the parameters subject to scaling for 82072/82077 FDCs, so * fdc_specify is called again after each data transfer rate * change. * * srt: 1000 to 16000 in microseconds * hut: 16 to 240 milliseconds * hlt: 2 to 254 milliseconds * * These values are rounded up to the next highest available delay time. 
*/ static void fdc_specify(void) { unsigned char spec1; unsigned char spec2; unsigned long srt; unsigned long hlt; unsigned long hut; unsigned long dtr = NOMINAL_DTR; unsigned long scale_dtr = NOMINAL_DTR; int hlt_max_code = 0x7f; int hut_max_code = 0xf; if (FDCS->need_configure && FDCS->version >= FDC_82072A) { fdc_configure(); FDCS->need_configure = 0; } switch (raw_cmd->rate & 0x03) { case 3: dtr = 1000; break; case 1: dtr = 300; if (FDCS->version >= FDC_82078) { /* chose the default rate table, not the one * where 1 = 2 Mbps */ output_byte(FD_DRIVESPEC); if (need_more_output() == MORE_OUTPUT) { output_byte(UNIT(current_drive)); output_byte(0xc0); } } break; case 2: dtr = 250; break; } if (FDCS->version >= FDC_82072) { scale_dtr = dtr; hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */ hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */ } /* Convert step rate from microseconds to milliseconds and 4 bits */ srt = 16 - DIV_ROUND_UP(DP->srt * scale_dtr / 1000, NOMINAL_DTR); if (slow_floppy) srt = srt / 4; SUPBOUND(srt, 0xf); INFBOUND(srt, 0); hlt = DIV_ROUND_UP(DP->hlt * scale_dtr / 2, NOMINAL_DTR); if (hlt < 0x01) hlt = 0x01; else if (hlt > 0x7f) hlt = hlt_max_code; hut = DIV_ROUND_UP(DP->hut * scale_dtr / 16, NOMINAL_DTR); if (hut < 0x1) hut = 0x1; else if (hut > 0xf) hut = hut_max_code; spec1 = (srt << 4) | hut; spec2 = (hlt << 1) | (use_virtual_dma & 1); /* If these parameters did not change, just return with success */ if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) { /* Go ahead and set spec1 and spec2 */ output_byte(FD_SPECIFY); output_byte(FDCS->spec1 = spec1); output_byte(FDCS->spec2 = spec2); } } /* fdc_specify */ /* Set the FDC's data transfer rate on behalf of the specified drive. * NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue * of the specify command (i.e. using the fdc_specify function). */ static int fdc_dtr(void) { /* If data rate not already set to desired value, set it. 
*/
	if ((raw_cmd->rate & 3) == FDCS->dtr)
		return 0;

	/* Set dtr */
	fd_outb(raw_cmd->rate & 3, FD_DCR);

	/* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB)
	 * need a stabilization period of several milliseconds to be
	 * enforced after data rate changes before R/W operations.
	 * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
	 */
	FDCS->dtr = raw_cmd->rate & 3;
	return fd_wait_for_completion(jiffies + 2UL * HZ / 100,
				      (timeout_fn)floppy_ready);
}				/* fdc_dtr */

/* Append the current controller-reported sector address to an error line. */
static void tell_sector(void)
{
	pr_cont(": track %d, head %d, sector %d, size %d",
		R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE);
}				/* tell_sector */

/*
 * Decode the FDC status registers (ST0..ST2) from the last operation and
 * print a human-readable diagnostic.  Only the first matching condition
 * is reported.
 */
static void print_errors(void)
{
	DPRINT("");
	if (ST0 & ST0_ECE) {
		pr_cont("Recalibrate failed!");
	} else if (ST2 & ST2_CRC) {
		pr_cont("data CRC error");
		tell_sector();
	} else if (ST1 & ST1_CRC) {
		pr_cont("CRC error");
		tell_sector();
	} else if ((ST1 & (ST1_MAM | ST1_ND)) ||
		   (ST2 & ST2_MAM)) {
		/* missing address mark / no data: expected while probing
		 * unknown formats, so stay quiet in that case */
		if (!probing) {
			pr_cont("sector not found");
			tell_sector();
		} else
			pr_cont("probe failed...");
	} else if (ST2 & ST2_WC) {	/* seek error */
		pr_cont("wrong cylinder");
	} else if (ST2 & ST2_BC) {	/* cylinder marked as bad */
		pr_cont("bad cylinder");
	} else {
		pr_cont("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x",
			ST0, ST1, ST2);
		tell_sector();
	}
	pr_cont("\n");
}

/*
 * OK, this error interpreting routine is called after a
 * DMA read/write has succeeded
 * or failed, so we check the results, and copy any buffers.
 * hhb: Added better error reporting.
 * ak: Made this into a separate routine.
*/
static int interpret_errors(void)
{
	char bad;

	/* a complete result phase returns 7 bytes; anything else means
	 * the controller is confused and needs a reset */
	if (inr != 7) {
		DPRINT("-- FDC reply error\n");
		FDCS->reset = 1;
		return 1;
	}

	/* check IC to find cause of interrupt */
	switch (ST0 & ST0_INTR) {
	case 0x40:		/* error occurred during command execution */
		if (ST1 & ST1_EOC)
			return 0;	/* occurs with pseudo-DMA */
		bad = 1;
		if (ST1 & ST1_WP) {
			DPRINT("Drive is write protected\n");
			clear_bit(FD_DISK_WRITABLE_BIT, &DRS->flags);
			cont->done(0);
			bad = 2;	/* unrecoverable: give up now */
		} else if (ST1 & ST1_ND) {
			set_bit(FD_NEED_TWADDLE_BIT, &DRS->flags);
		} else if (ST1 & ST1_OR) {
			if (DP->flags & FTD_MSG)
				DPRINT("Over/Underrun - retrying\n");
			bad = 0;	/* DMA overrun: retry silently */
		} else if (*errors >= DP->max_errors.reporting) {
			print_errors();
		}
		if (ST2 & ST2_WC || ST2 & ST2_BC)
			/* wrong cylinder => recal */
			DRS->track = NEED_2_RECAL;
		return bad;
	case 0x80:		/* invalid command given */
		DPRINT("Invalid FDC command given!\n");
		cont->done(0);
		return 2;
	case 0xc0:
		DPRINT("Abnormal termination caused by polling\n");
		cont->error();
		return 2;
	default:		/* (0) Normal command termination */
		return 0;
	}
}

/*
 * This routine is called when everything should be correctly set up
 * for the transfer (i.e. floppy motor is on, the correct floppy is
 * selected, and the head is sitting on the right track).
 */
static void setup_rw_floppy(void)
{
	int i;
	int r;
	int flags;
	int dflags;
	unsigned long ready_date;
	timeout_fn function;

	flags = raw_cmd->flags;
	if (flags & (FD_RAW_READ | FD_RAW_WRITE))
		flags |= FD_RAW_INTR;

	if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)) {
		ready_date = DRS->spinup_date + DP->spinup;
		/* If spinup will take a long time, rerun scandrives
		 * again just before spinup completion. Beware that
		 * after scandrives, we must again wait for selection.
		 */
		if (time_after(ready_date, jiffies + DP->select_delay)) {
			ready_date -= DP->select_delay;
			function = (timeout_fn)floppy_start;
		} else
			function = (timeout_fn)setup_rw_floppy;

		/* wait until the floppy is spinning fast enough */
		if (fd_wait_for_completion(ready_date, function))
			return;
	}
	dflags = DRS->flags;

	if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
		setup_DMA();

	if (flags & FD_RAW_INTR)
		do_floppy = main_command_interrupt;

	/* push the whole command block; r accumulates any output failure */
	r = 0;
	for (i = 0; i < raw_cmd->cmd_count; i++)
		r |= output_byte(raw_cmd->cmd[i]);

	debugt(__func__, "rw_command");

	if (r) {
		cont->error();
		reset_fdc();
		return;
	}

	if (!(flags & FD_RAW_INTR)) {
		/* no interrupt expected: read the result phase synchronously */
		inr = result();
		cont->interrupt();
	} else if (flags & FD_RAW_NEED_DISK)
		fd_watchdog();
}

static int blind_seek;

/*
 * This is the routine called after every seek (or recalibrate) interrupt
 * from the floppy controller.
 */
static void seek_interrupt(void)
{
	debugt(__func__, "");
	if (inr != 2 || (ST0 & 0xF8) != 0x20) {
		DPRINT("seek failed\n");
		DRS->track = NEED_2_RECAL;
		cont->error();
		cont->redo();
		return;
	}
	if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek) {
		debug_dcl(DP->flags,
			  "clearing NEWCHANGE flag because of effective seek\n");
		debug_dcl(DP->flags, "jiffies=%lu\n", jiffies);
		clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
					/* effective seek */
		DRS->select_date = jiffies;
	}
	DRS->track = ST1;
	floppy_ready();
}

/*
 * If a verify is pending for this drive, read ST3 via GETSTATUS and
 * update the write-protect / verify / twaddle flag bits accordingly.
 */
static void check_wp(void)
{
	if (test_bit(FD_VERIFY_BIT, &DRS->flags)) {
		/* check write protection */
		output_byte(FD_GETSTATUS);
		output_byte(UNIT(current_drive));
		if (result() != 1) {
			FDCS->reset = 1;
			return;
		}
		clear_bit(FD_VERIFY_BIT, &DRS->flags);
		clear_bit(FD_NEED_TWADDLE_BIT, &DRS->flags);
		debug_dcl(DP->flags,
			  "checking whether disk is write protected\n");
		debug_dcl(DP->flags, "wp=%x\n", ST3 & 0x40);
		if (!(ST3 & 0x40))
			set_bit(FD_DISK_WRITABLE_BIT, &DRS->flags);
		else
			clear_bit(FD_DISK_WRITABLE_BIT, &DRS->flags);
	}
}

/*
 * Decide whether a seek/recalibrate is needed before the pending raw
 * command, issue FD_SEEK if so, or fall through to setup_rw_floppy().
 */
static void seek_floppy(void)
{
	int track;

	blind_seek = 0;

	debug_dcl(DP->flags, "calling disk change from %s\n",
__func__);

	if (!test_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags) &&
	    disk_change(current_drive) && (raw_cmd->flags & FD_RAW_NEED_DISK)) {
		/* the media changed flag should be cleared after the seek.
		 * If it isn't, this means that there is really no disk in
		 * the drive.
		 */
		set_bit(FD_DISK_CHANGED_BIT, &DRS->flags);
		cont->done(0);
		cont->redo();
		return;
	}
	if (DRS->track <= NEED_1_RECAL) {
		/* head position unknown: recalibrate to track 0 first */
		recalibrate_floppy();
		return;
	} else if (test_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags) &&
		   (raw_cmd->flags & FD_RAW_NEED_DISK) &&
		   (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) {
		/* we seek to clear the media-changed condition. Does anybody
		 * know a more elegant way, which works on all drives? */
		if (raw_cmd->track)
			track = raw_cmd->track - 1;
		else {
			if (DP->flags & FD_SILENT_DCL_CLEAR) {
				set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0);
				blind_seek = 1;
				raw_cmd->flags |= FD_RAW_NEED_SEEK;
			}
			track = 1;
		}
	} else {
		check_wp();
		if (raw_cmd->track != DRS->track &&
		    (raw_cmd->flags & FD_RAW_NEED_SEEK))
			track = raw_cmd->track;
		else {
			/* already on the right track: start the transfer */
			setup_rw_floppy();
			return;
		}
	}

	do_floppy = seek_interrupt;
	output_byte(FD_SEEK);
	output_byte(UNIT(current_drive));
	if (output_byte(track) < 0) {
		reset_fdc();
		return;
	}
	debugt(__func__, "");
}

/*
 * Interrupt handler for FD_RECALIBRATE completion: decide whether the
 * drive reached track 0, needs another recalibrate, or has failed.
 */
static void recal_interrupt(void)
{
	debugt(__func__, "");
	if (inr != 2)
		FDCS->reset = 1;
	else if (ST0 & ST0_ECE) {
		switch (DRS->track) {
		case NEED_1_RECAL:
			debugt(__func__, "need 1 recal");
			/* after a second recalibrate, we still haven't
			 * reached track 0. Probably no drive. Raise an
			 * error, as failing immediately might upset
			 * computers possessed by the Devil :-) */
			cont->error();
			cont->redo();
			return;
		case NEED_2_RECAL:
			debugt(__func__, "need 2 recal");
			/* If we already did a recalibrate,
			 * and we are not at track 0, this
			 * means we have moved. (The only way
			 * not to move at recalibration is to
			 * be already at track 0.) Clear the
			 * new change flag */
			debug_dcl(DP->flags,
				  "clearing NEWCHANGE flag because of second recalibrate\n");

			clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
			DRS->select_date = jiffies;
			/* fall through */
		default:
			debugt(__func__, "default");
			/* Recalibrate moves the head by at
			 * most 80 steps. If after one
			 * recalibrate we don't have reached
			 * track 0, this might mean that we
			 * started beyond track 80. Try
			 * again. */
			DRS->track = NEED_1_RECAL;
			break;
		}
	} else
		DRS->track = ST1;
	floppy_ready();
}

/* Dump the FDC reply bytes from the last result phase (debug aid). */
static void print_result(char *message, int inr)
{
	int i;

	DPRINT("%s ", message);
	if (inr >= 0)
		for (i = 0; i < inr; i++)
			pr_cont("repl[%d]=%x ", i, reply_buffer[i]);
	pr_cont("\n");
}

/* interrupt handler. Note that this can be called externally on the Sparc */
irqreturn_t floppy_interrupt(int irq, void *dev_id)
{
	int do_print;
	unsigned long f;
	void (*handler)(void) = do_floppy;

	lasthandler = handler;
	interruptjiffies = jiffies;

	f = claim_dma_lock();
	fd_disable_dma();
	release_dma_lock(f);

	do_floppy = NULL;
	if (fdc >= N_FDC || FDCS->address == -1) {
		/* we don't even know which FDC is the culprit */
		pr_info("DOR0=%x\n", fdc_state[0].dor);
		pr_info("floppy interrupt on bizarre fdc %d\n", fdc);
		pr_info("handler=%pf\n", handler);
		is_alive(__func__, "bizarre fdc");
		return IRQ_NONE;
	}

	FDCS->reset = 0;
	/* We have to clear the reset flag here, because apparently on boxes
	 * with level triggered interrupts (PS/2, Sparc, ...), it is needed to
	 * emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the
	 * emission of the SENSEI's.
	 * It is OK to emit floppy commands because we are in an interrupt
	 * handler here, and thus we have to fear no interference of other
	 * activity.
*/
	do_print = !handler && print_unex && initialized;

	inr = result();
	if (do_print)
		print_result("unexpected interrupt", inr);
	if (inr == 0) {
		/* no result phase: poll with SENSEI (bounded to 4 tries)
		 * until this drive acknowledges, clearing the IRQ line */
		int max_sensei = 4;
		do {
			output_byte(FD_SENSEI);
			inr = result();
			if (do_print)
				print_result("sensei", inr);
			max_sensei--;
		} while ((ST0 & 0x83) != UNIT(current_drive) &&
			 inr == 2 && max_sensei);
	}
	if (!handler) {
		/* nobody was expecting an interrupt: force a reset */
		FDCS->reset = 1;
		return IRQ_NONE;
	}
	schedule_bh(handler);
	is_alive(__func__, "normal interrupt end");

	/* FIXME! Was it really for us? */
	return IRQ_HANDLED;
}

/* Issue FD_RECALIBRATE for the current drive; completion lands in
 * recal_interrupt(). */
static void recalibrate_floppy(void)
{
	debugt(__func__, "");
	do_floppy = recal_interrupt;
	output_byte(FD_RECALIBRATE);
	if (output_byte(UNIT(current_drive)) < 0)
		reset_fdc();
}

/*
 * Must do 4 FD_SENSEIs after reset because of ``drive polling''.
 */
static void reset_interrupt(void)
{
	debugt(__func__, "");
	result();		/* get the status ready for set_fdc */
	if (FDCS->reset) {
		pr_info("reset set in interrupt, calling %pf\n", cont->error);
		cont->error();	/* a reset just after a reset. BAD! */
	}
	cont->redo();
}

/*
 * reset is done by pulling bit 2 of DOR low for a while (old FDCs),
 * or by setting the self clearing bit 7 of STATUS (newer FDCs)
 */
static void reset_fdc(void)
{
	unsigned long flags;

	do_floppy = reset_interrupt;
	FDCS->reset = 0;
	reset_fdc_info(0);

	/* Pseudo-DMA may intercept 'reset finished' interrupt.  */
	/* Irrelevant for systems with true DMA (i386).
	 */
	flags = claim_dma_lock();
	fd_disable_dma();
	release_dma_lock(flags);

	if (FDCS->version >= FDC_82072A)
		fd_outb(0x80 | (FDCS->dtr & 3), FD_STATUS);
	else {
		/* old-style reset: drop the DOR reset bit, wait, restore */
		fd_outb(FDCS->dor & ~0x04, FD_DOR);
		udelay(FD_RESET_DELAY);
		fd_outb(FDCS->dor, FD_DOR);
	}
}

/* Dump the complete driver state to the kernel log (debug aid, used on
 * timeouts). */
static void show_floppy(void)
{
	int i;

	pr_info("\n");
	pr_info("floppy driver state\n");
	pr_info("-------------------\n");
	pr_info("now=%lu last interrupt=%lu diff=%lu last called handler=%pf\n",
		jiffies, interruptjiffies, jiffies - interruptjiffies,
		lasthandler);

	pr_info("timeout_message=%s\n", timeout_message);
	pr_info("last output bytes:\n");
	for (i = 0; i < OLOGSIZE; i++)
		pr_info("%2x %2x %lu\n",
			output_log[(i + output_log_pos) % OLOGSIZE].data,
			output_log[(i + output_log_pos) % OLOGSIZE].status,
			output_log[(i + output_log_pos) % OLOGSIZE].jiffies);
	pr_info("last result at %lu\n", resultjiffies);
	pr_info("last redo_fd_request at %lu\n", lastredo);
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
		       reply_buffer, resultsize, true);

	pr_info("status=%x\n", fd_inb(FD_STATUS));
	pr_info("fdc_busy=%lu\n", fdc_busy);
	if (do_floppy)
		pr_info("do_floppy=%pf\n", do_floppy);
	if (work_pending(&floppy_work))
		pr_info("floppy_work.func=%pf\n", floppy_work.func);
	if (timer_pending(&fd_timer))
		pr_info("fd_timer.function=%pf\n", fd_timer.function);
	if (timer_pending(&fd_timeout)) {
		pr_info("timer_function=%pf\n", fd_timeout.function);
		pr_info("expires=%lu\n", fd_timeout.expires - jiffies);
		pr_info("now=%lu\n", jiffies);
	}
	pr_info("cont=%p\n", cont);
	pr_info("current_req=%p\n", current_req);
	pr_info("command_status=%d\n", command_status);
	pr_info("\n");
}

/* Timeout handler: the current operation took too long.  Logs state,
 * cancels activity and forces an FDC reset.  NOTE(review): presumably
 * registered as the fd_timeout timer function -- confirm at the setup
 * site, which is outside this chunk. */
static void floppy_shutdown(unsigned long data)
{
	unsigned long flags;

	if (initialized)
		show_floppy();
	cancel_activity();

	flags = claim_dma_lock();
	fd_disable_dma();
	release_dma_lock(flags);

	/* avoid dma going to a random drive after shutdown */

	if (initialized)
		DPRINT("floppy timeout called\n");
	FDCS->reset = 1;
	if (cont) {
		cont->done(0);
		cont->redo();	/* this will recall reset when
needed */
	} else {
		pr_info("no cont in shutdown!\n");
		process_fd_request();
	}
	is_alive(__func__, "");
}

/* start motor, check media-changed condition and write protection */
static int start_motor(void (*function)(void))
{
	int mask;
	int data;

	mask = 0xfc;
	data = UNIT(current_drive);
	if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)) {
		if (!(FDCS->dor & (0x10 << UNIT(current_drive)))) {
			set_debugt();
			/* no read since this drive is running */
			DRS->first_read_date = 0;
			/* note motor start time if motor is not yet running */
			DRS->spinup_date = jiffies;
			data |= (0x10 << UNIT(current_drive));
		}
	} else if (FDCS->dor & (0x10 << UNIT(current_drive)))
		mask &= ~(0x10 << UNIT(current_drive));

	/* starts motor and selects floppy */
	del_timer(motor_off_timer + current_drive);
	set_dor(fdc, mask, data);

	/* wait_for_completion also schedules reset if needed. */
	return fd_wait_for_completion(DRS->select_date + DP->select_delay,
				      (timeout_fn)function);
}

/*
 * Bottom-half entry point once the drive is selected: spins up the
 * motor, programs the data rate, then either seeks or starts the
 * transfer directly.  Re-schedules itself (via start_motor/fdc_dtr)
 * until the drive is actually ready.
 */
static void floppy_ready(void)
{
	if (FDCS->reset) {
		reset_fdc();
		return;
	}
	if (start_motor(floppy_ready))
		return;
	if (fdc_dtr())
		return;

	debug_dcl(DP->flags, "calling disk change from floppy_ready\n");
	if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
	    disk_change(current_drive) && !DP->select_delay)
		twaddle();	/* this clears the dcl on certain
				 * drive/controller combinations */

#ifdef fd_chose_dma_mode
	if ((raw_cmd->flags & FD_RAW_READ) || (raw_cmd->flags & FD_RAW_WRITE)) {
		unsigned long flags = claim_dma_lock();
		fd_chose_dma_mode(raw_cmd->kernel_data, raw_cmd->length);
		release_dma_lock(flags);
	}
#endif

	if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)) {
		perpendicular_mode();
		fdc_specify();	/* must be done here because of hut, hlt ...
				 */
		seek_floppy();
	} else {
		if ((raw_cmd->flags & FD_RAW_READ) ||
		    (raw_cmd->flags & FD_RAW_WRITE))
			fdc_specify();
		setup_rw_floppy();
	}
}

/* Kick off processing of the current request: rescan drives, flag a
 * potential media change, and proceed through floppy_ready(). */
static void floppy_start(void)
{
	reschedule_timeout(current_reqD, "floppy start");

	scandrives();
	debug_dcl(DP->flags, "setting NEWCHANGE in floppy_start\n");
	set_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
	floppy_ready();
}

/*
 * ========================================================================
 * here ends the bottom half. Exported routines are:
 * floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc,
 * start_motor, reset_fdc, reset_fdc_info, interpret_errors.
 * Initialization also uses output_byte, result, set_dor, floppy_interrupt
 * and set_dor.
 * ========================================================================
 */
/*
 * General purpose continuations.
 * ==============================
 */

/* Completion continuation: wake up the task sleeping in wait_til_done(). */
static void do_wakeup(void)
{
	reschedule_timeout(MAXTIMEOUT, "do wakeup");
	cont = NULL;
	command_status += 2;
	wake_up(&command_done);
}

static const struct cont_t wakeup_cont = {
	.interrupt	= empty,
	.redo		= do_wakeup,
	.error		= empty,
	.done		= (done_f)empty
};

static const struct cont_t intr_cont = {
	.interrupt	= empty,
	.redo		= process_fd_request,
	.error		= empty,
	.done		= (done_f)empty
};

/*
 * Run @handler in the bottom half and sleep until the continuation
 * machinery reports completion.  Returns 0 on success, -EINTR if the
 * wait was interrupted, -EIO on command failure.
 */
static int wait_til_done(void (*handler)(void), bool interruptible)
{
	int ret;

	schedule_bh(handler);

	if (interruptible)
		wait_event_interruptible(command_done, command_status >= 2);
	else
		wait_event(command_done, command_status >= 2);

	if (command_status < 2) {
		/* interrupted before completion: abort and reset */
		cancel_activity();
		cont = &intr_cont;
		reset_fdc();
		return -EINTR;
	}

	if (FDCS->reset)
		command_status = FD_COMMAND_ERROR;
	if (command_status == FD_COMMAND_OKAY)
		ret = 0;
	else
		ret = -EIO;
	command_status = FD_COMMAND_NONE;
	return ret;
}

/* Record the result and arrange for the waiter to be woken up. */
static void generic_done(int result)
{
	command_status = result;
	cont = &wakeup_cont;
}

static void generic_success(void)
{
	cont->done(1);
}

static void generic_failure(void)
{
	cont->done(0);
}

static void success_and_wakeup(void)
{
	generic_success();
cont->redo();
}

/*
 * formatting and rw support.
 * ==========================
 */

/*
 * Advance to the next format in DP->autodetect[] that has a usable
 * sector count.  Returns 0 with DRS->probed_format updated, or 1 when
 * the probe list is exhausted (probed_format reset to 0).
 */
static int next_valid_format(void)
{
	int probed_format;

	probed_format = DRS->probed_format;
	while (1) {
		if (probed_format >= 8 || !DP->autodetect[probed_format]) {
			DRS->probed_format = 0;
			return 1;
		}
		if (floppy_type[DP->autodetect[probed_format]].sect) {
			DRS->probed_format = probed_format;
			return 0;
		}
		probed_format++;
	}
}

/*
 * Error-path continuation for r/w and format: bump the error counter and
 * escalate (abort / reset / recalibrate) once the per-drive thresholds
 * in DP->max_errors are exceeded.
 */
static void bad_flp_intr(void)
{
	int err_count;

	if (probing) {
		/* while probing, an error just means "try the next format" */
		DRS->probed_format++;
		if (!next_valid_format())
			return;
	}
	err_count = ++(*errors);
	INFBOUND(DRWE->badness, err_count);
	if (err_count > DP->max_errors.abort)
		cont->done(0);
	if (err_count > DP->max_errors.reset)
		FDCS->reset = 1;
	else if (err_count > DP->max_errors.recal)
		DRS->track = NEED_2_RECAL;
}

/* Select the geometry for @drive: explicit type from the device minor,
 * or the previously auto-detected type. */
static void set_floppy(int drive)
{
	int type = ITYPE(UDRS->fd_device);

	if (type)
		_floppy = floppy_type + type;
	else
		_floppy = current_type[drive];
}

/*
 * formatting support.
 * ===================
 */
static void format_interrupt(void)
{
	switch (interpret_errors()) {
	case 1:
		cont->error();
	case 2:
		break;
	case 0:
		cont->done(1);
	}
	cont->redo();
}

#define FM_MODE(x, y) ((y) & ~(((x)->rate & 0x80) >> 1))
#define CT(x) ((x) | 0xc0)

/*
 * Build the FD_FORMAT raw command and the per-sector ID field table
 * (track/head/sector/size) in floppy_track_buffer, including interleave
 * and head/track skew.
 */
static void setup_format_params(int track)
{
	int n;
	int il;
	int count;
	int head_shift;
	int track_shift;
	struct fparm {
		unsigned char track, head, sect, size;
	} *here = (struct fparm *)floppy_track_buffer;

	raw_cmd = &default_raw_cmd;
	raw_cmd->track = track;

	raw_cmd->flags = (FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN |
			  FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK);
	raw_cmd->rate = _floppy->rate & 0x43;
	raw_cmd->cmd_count = NR_F;
	COMMAND = FM_MODE(_floppy, FD_FORMAT);
	DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, format_req.head);
	F_SIZECODE = FD_SIZECODE(_floppy);
	F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE;
	F_GAP = _floppy->fmt_gap;
	F_FILL = FD_FILL_BYTE;
	raw_cmd->kernel_data = floppy_track_buffer;
	raw_cmd->length = 4 * F_SECT_PER_TRACK;

	/* allow for about 30ms for data transport per track
	 */
	head_shift = (F_SECT_PER_TRACK + 5) / 6;

	/* a ``cylinder'' is two tracks plus a little stepping time */
	track_shift = 2 * head_shift + 3;

	/* position of logical sector 1 on this track */
	n = (track_shift * format_req.track + head_shift * format_req.head)
	    % F_SECT_PER_TRACK;

	/* determine interleave */
	il = 1;
	if (_floppy->fmt_gap < 0x22)
		il++;

	/* initialize field */
	for (count = 0; count < F_SECT_PER_TRACK; ++count) {
		here[count].track = format_req.track;
		here[count].head = format_req.head;
		here[count].sect = 0;
		here[count].size = F_SIZECODE;
	}
	/* place logical sectors */
	for (count = 1; count <= F_SECT_PER_TRACK; ++count) {
		here[n].sect = count;
		n = (n + il) % F_SECT_PER_TRACK;
		if (here[n].sect) {	/* sector busy, find next free sector */
			++n;
			if (n >= F_SECT_PER_TRACK) {
				n -= F_SECT_PER_TRACK;
				while (here[n].sect)
					++n;
			}
		}
	}
	if (_floppy->stretch & FD_SECTBASEMASK) {
		for (count = 0; count < F_SECT_PER_TRACK; count++)
			here[count].sect += FD_SECTBASE(_floppy) - 1;
	}
}

/* Continuation: (re)issue the pending format request. */
static void redo_format(void)
{
	buffer_track = -1;
	setup_format_params(format_req.track << STRETCH(_floppy));
	floppy_start();
	debugt(__func__, "queue format request");
}

static const struct cont_t format_cont = {
	.interrupt	= format_interrupt,
	.redo		= redo_format,
	.error		= bad_flp_intr,
	.done		= generic_done
};

/*
 * Format one track, as described by @tmp_format_req.  Validates the
 * request against the drive/media geometry; returns 0, -EINTR or a
 * negative error.
 */
static int do_format(int drive, struct format_descr *tmp_format_req)
{
	int ret;

	if (lock_fdc(drive, true))
		return -EINTR;

	set_floppy(drive);
	if (!_floppy ||
	    _floppy->track > DP->tracks ||
	    tmp_format_req->track >= _floppy->track ||
	    tmp_format_req->head >= _floppy->head ||
	    (_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) ||
	    !_floppy->fmt_gap) {
		process_fd_request();
		return -EINVAL;
	}
	format_req = *tmp_format_req;
	format_errors = 0;
	cont = &format_cont;
	errors = &format_errors;
	ret = wait_til_done(redo_format, true);
	if (ret == -EINTR)
		return -EINTR;
	process_fd_request();
	return ret;
}

/*
 * Buffer read/write and support
 * =============================
 */

static void floppy_end_request(struct
request *req, int error)
{
	unsigned int nr_sectors = current_count_sectors;
	unsigned int drive = (unsigned long)req->rq_disk->private_data;

	/* current_count_sectors can be zero if transfer failed */
	if (error)
		nr_sectors = blk_rq_cur_sectors(req);
	if (__blk_end_request(req, error, nr_sectors << 9))
		return;

	/* We're done with the request */
	floppy_off(drive);
	current_req = NULL;
}

/* new request_done. Can handle physical sectors which are smaller than a
 * logical buffer */
static void request_done(int uptodate)
{
	struct request *req = current_req;
	struct request_queue *q;
	unsigned long flags;
	int block;
	char msg[sizeof("request done ") + sizeof(int) * 3];

	probing = 0;
	snprintf(msg, sizeof(msg), "request done %d", uptodate);
	reschedule_timeout(MAXTIMEOUT, msg);

	if (!req) {
		pr_info("floppy.c: no request in request_done\n");
		return;
	}

	q = req->q;

	if (uptodate) {
		/* maintain values for invalidation on geometry
		 * change */
		block = current_count_sectors + blk_rq_pos(req);
		INFBOUND(DRS->maxblock, block);
		if (block > _floppy->sect)
			DRS->maxtrack = 1;

		/* unlock chained buffers */
		spin_lock_irqsave(q->queue_lock, flags);
		floppy_end_request(req, 0);
		spin_unlock_irqrestore(q->queue_lock, flags);
	} else {
		if (rq_data_dir(req) == WRITE) {
			/* record write error information */
			DRWE->write_errors++;
			if (DRWE->write_errors == 1) {
				DRWE->first_error_sector = blk_rq_pos(req);
				DRWE->first_error_generation = DRS->generation;
			}
			DRWE->last_error_sector = blk_rq_pos(req);
			DRWE->last_error_generation = DRS->generation;
		}
		spin_lock_irqsave(q->queue_lock, flags);
		floppy_end_request(req, -EIO);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/* Interrupt handler evaluating the result of the r/w operation */
static void rw_interrupt(void)
{
	int eoc;
	int ssize;
	int heads;
	int nr_sectors;

	if (R_HEAD >= 2) {
		/* some Toshiba floppy controllers occasionnally seem to
		 * return bogus interrupts after read/write operations, which
		 * can be recognized by a bad head number (>= 2) */
		return;
	}

	if (!DRS->first_read_date)
		DRS->first_read_date = jiffies;

	nr_sectors = 0;
	/* ssize: sector size in units of 128 bytes, rounded up */
	ssize = DIV_ROUND_UP(1 << SIZECODE, 4);

	if (ST1 & ST1_EOC)
		eoc = 1;
	else
		eoc = 0;

	/* multi-track bit set => both heads took part in the transfer */
	if (COMMAND & 0x80)
		heads = 2;
	else
		heads = 1;

	/* sectors actually transferred, derived from the FDC's reported
	 * end position relative to the command's start position */
	nr_sectors = (((R_TRACK - TRACK) * heads +
		       R_HEAD - HEAD) * SECT_PER_TRACK +
		      R_SECTOR - SECTOR + eoc) << SIZECODE >> 2;

	if (nr_sectors / ssize >
	    DIV_ROUND_UP(in_sector_offset + current_count_sectors, ssize)) {
		DPRINT("long rw: %x instead of %lx\n",
		       nr_sectors, current_count_sectors);
		pr_info("rs=%d s=%d\n", R_SECTOR, SECTOR);
		pr_info("rh=%d h=%d\n", R_HEAD, HEAD);
		pr_info("rt=%d t=%d\n", R_TRACK, TRACK);
		pr_info("heads=%d eoc=%d\n", heads, eoc);
		pr_info("spt=%d st=%d ss=%d\n",
			SECT_PER_TRACK, fsector_t, ssize);
		pr_info("in_sector_offset=%d\n", in_sector_offset);
	}

	nr_sectors -= in_sector_offset;
	INFBOUND(nr_sectors, 0);
	SUPBOUND(current_count_sectors, nr_sectors);

	switch (interpret_errors()) {
	case 2:
		cont->redo();
		return;
	case 1:
		if (!current_count_sectors) {
			cont->error();
			cont->redo();
			return;
		}
		break;
	case 0:
		if (!current_count_sectors) {
			cont->redo();
			return;
		}
		current_type[current_drive] = _floppy;
		floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
		break;
	}

	if (probing) {
		if (DP->flags & FTD_MSG)
			DPRINT("Auto-detected floppy type %s in fd%d\n",
			       _floppy->name, current_drive);
		current_type[current_drive] = _floppy;
		floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
		probing = 0;
	}

	if (CT(COMMAND) != FD_READ ||
	    raw_cmd->kernel_data == current_req->buffer) {
		/* transfer directly from buffer */
		cont->done(1);
	} else if (CT(COMMAND) == FD_READ) {
		/* data landed in the track buffer: remember what it holds */
		buffer_track = raw_cmd->track;
		buffer_drive = current_drive;
		INFBOUND(buffer_max, nr_sectors + fsector_t);
	}
	cont->redo();
}

/* Compute maximal contiguous buffer size.
*/
static int buffer_chain_size(void)
{
	struct bio_vec *bv;
	int size;
	struct req_iterator iter;
	char *base;

	base = bio_data(current_req->bio);
	size = 0;

	/* walk the request's segments while they are physically contiguous */
	rq_for_each_segment(bv, current_req, iter) {
		if (page_address(bv->bv_page) + bv->bv_offset != base + size)
			break;

		size += bv->bv_len;
	}

	/* return the contiguous run in 512-byte sectors */
	return size >> 9;
}

/* Compute the maximal transfer size */
static int transfer_size(int ssize, int max_sector, int max_size)
{
	SUPBOUND(max_sector, fsector_t + max_size);

	/* alignment */
	max_sector -= (max_sector % _floppy->sect) % ssize;

	/* transfer size, beginning not aligned */
	current_count_sectors = max_sector - fsector_t;

	return max_sector;
}

/*
 * Move data from/to the track buffer to/from the buffer cache.
 */
static void copy_buffer(int ssize, int max_sector, int max_sector_2)
{
	int remaining;		/* number of transferred 512-byte sectors */
	struct bio_vec *bv;
	char *buffer;
	char *dma_buffer;
	int size;
	struct req_iterator iter;

	max_sector = transfer_size(ssize,
				   min(max_sector, max_sector_2),
				   blk_rq_sectors(current_req));

	if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
	    buffer_max > fsector_t + blk_rq_sectors(current_req))
		current_count_sectors = min_t(int, buffer_max - fsector_t,
					      blk_rq_sectors(current_req));

	remaining = current_count_sectors << 9;
	if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) {
		DPRINT("in copy buffer\n");
		pr_info("current_count_sectors=%ld\n", current_count_sectors);
		pr_info("remaining=%d\n", remaining >> 9);
		pr_info("current_req->nr_sectors=%u\n",
			blk_rq_sectors(current_req));
		pr_info("current_req->current_nr_sectors=%u\n",
			blk_rq_cur_sectors(current_req));
		pr_info("max_sector=%d\n", max_sector);
		pr_info("ssize=%d\n", ssize);
	}

	buffer_max = max(max_sector, buffer_max);

	dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);

	size = blk_rq_cur_bytes(current_req);

	/* copy between the track buffer and each request segment in turn */
	rq_for_each_segment(bv, current_req, iter) {
		if (!remaining)
			break;

		size = bv->bv_len;
		SUPBOUND(size, remaining);

		buffer = page_address(bv->bv_page) + bv->bv_offset;
		if (dma_buffer + size >
		    floppy_track_buffer + (max_buffer_sectors << 10) ||
		    dma_buffer < floppy_track_buffer) {
			DPRINT("buffer overrun in copy buffer %d\n",
			       (int)((floppy_track_buffer - dma_buffer) >> 9));
			pr_info("fsector_t=%d buffer_min=%d\n",
				fsector_t, buffer_min);
			pr_info("current_count_sectors=%ld\n",
				current_count_sectors);
			if (CT(COMMAND) == FD_READ)
				pr_info("read\n");
			if (CT(COMMAND) == FD_WRITE)
				pr_info("write\n");
			break;
		}
		if (((unsigned long)buffer) % 512)
			DPRINT("%p buffer not aligned\n", buffer);

		if (CT(COMMAND) == FD_READ)
			memcpy(buffer, dma_buffer, size);
		else
			memcpy(dma_buffer, buffer, size);

		remaining -= size;
		dma_buffer += size;
	}
	if (remaining) {
		if (remaining > 0)
			max_sector -= remaining >> 9;
		DPRINT("weirdness: remaining %d\n", remaining >> 9);
	}
}

/* work around a bug in pseudo DMA
 * (on some FDCs) pseudo DMA does not stop when the CPU stops
 * sending data.  Hence we need a different way to signal the
 * transfer length:  We use SECT_PER_TRACK.  Unfortunately, this
 * does not work with MT, hence we can only transfer one head at
 * a time
 */
static void virtualdmabug_workaround(void)
{
	int hard_sectors;
	int end_sector;

	if (CT(COMMAND) == FD_WRITE) {
		COMMAND &= ~0x80;	/* switch off multiple track mode */

		/* last hardware sector touched by this transfer */
		hard_sectors = raw_cmd->length >> (7 + SIZECODE);
		end_sector = SECTOR + hard_sectors - 1;
		if (end_sector > SECT_PER_TRACK) {
			pr_info("too many sectors %d > %d\n",
				end_sector, SECT_PER_TRACK);
			return;
		}
		SECT_PER_TRACK = end_sector;
					/* make sure SECT_PER_TRACK
					 * points to end of transfer */
	}
}

/*
 * Formulate a read/write request.
 * this routine decides where to load the data (directly to buffer, or to
 * tmp floppy area), how much data to load (the size of the buffer, the whole
 * track, or a single sector)
 * All floppy_track_buffer handling goes in here. If we ever add track buffer
 * allocation on the fly, it should be done here. No other part should need
 * modification.
*/ static int make_raw_rw_request(void) { int aligned_sector_t; int max_sector; int max_size; int tracksize; int ssize; if (WARN(max_buffer_sectors == 0, "VFS: Block I/O scheduled on unopened device\n")) return 0; set_fdc((long)current_req->rq_disk->private_data); raw_cmd = &default_raw_cmd; raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK; raw_cmd->cmd_count = NR_RW; if (rq_data_dir(current_req) == READ) { raw_cmd->flags |= FD_RAW_READ; COMMAND = FM_MODE(_floppy, FD_READ); } else if (rq_data_dir(current_req) == WRITE) { raw_cmd->flags |= FD_RAW_WRITE; COMMAND = FM_MODE(_floppy, FD_WRITE); } else { DPRINT("%s: unknown command\n", __func__); return 0; } max_sector = _floppy->sect * _floppy->head; TRACK = (int)blk_rq_pos(current_req) / max_sector; fsector_t = (int)blk_rq_pos(current_req) % max_sector; if (_floppy->track && TRACK >= _floppy->track) { if (blk_rq_cur_sectors(current_req) & 1) { current_count_sectors = 1; return 1; } else return 0; } HEAD = fsector_t / _floppy->sect; if (((_floppy->stretch & (FD_SWAPSIDES | FD_SECTBASEMASK)) || test_bit(FD_NEED_TWADDLE_BIT, &DRS->flags)) && fsector_t < _floppy->sect) max_sector = _floppy->sect; /* 2M disks have phantom sectors on the first track */ if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)) { max_sector = 2 * _floppy->sect / 3; if (fsector_t >= max_sector) { current_count_sectors = min_t(int, _floppy->sect - fsector_t, blk_rq_sectors(current_req)); return 1; } SIZECODE = 2; } else SIZECODE = FD_SIZECODE(_floppy); raw_cmd->rate = _floppy->rate & 0x43; if ((_floppy->rate & FD_2M) && (TRACK || HEAD) && raw_cmd->rate == 2) raw_cmd->rate = 1; if (SIZECODE) SIZECODE2 = 0xff; else SIZECODE2 = 0x80; raw_cmd->track = TRACK << STRETCH(_floppy); DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, HEAD); GAP = _floppy->gap; ssize = DIV_ROUND_UP(1 << SIZECODE, 4); SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE; SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) + 
FD_SECTBASE(_floppy); /* tracksize describes the size which can be filled up with sectors * of size ssize. */ tracksize = _floppy->sect - _floppy->sect % ssize; if (tracksize < _floppy->sect) { SECT_PER_TRACK++; if (tracksize <= fsector_t % _floppy->sect) SECTOR--; /* if we are beyond tracksize, fill up using smaller sectors */ while (tracksize <= fsector_t % _floppy->sect) { while (tracksize + ssize > _floppy->sect) { SIZECODE--; ssize >>= 1; } SECTOR++; SECT_PER_TRACK++; tracksize += ssize; } max_sector = HEAD * _floppy->sect + tracksize; } else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing) { max_sector = _floppy->sect; } else if (!HEAD && CT(COMMAND) == FD_WRITE) { /* for virtual DMA bug workaround */ max_sector = _floppy->sect; } in_sector_offset = (fsector_t % _floppy->sect) % ssize; aligned_sector_t = fsector_t - in_sector_offset; max_size = blk_rq_sectors(current_req); if ((raw_cmd->track == buffer_track) && (current_drive == buffer_drive) && (fsector_t >= buffer_min) && (fsector_t < buffer_max)) { /* data already in track buffer */ if (CT(COMMAND) == FD_READ) { copy_buffer(1, max_sector, buffer_max); return 1; } } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) { if (CT(COMMAND) == FD_WRITE) { unsigned int sectors; sectors = fsector_t + blk_rq_sectors(current_req); if (sectors > ssize && sectors < ssize + ssize) max_size = ssize + ssize; else max_size = ssize; } raw_cmd->flags &= ~FD_RAW_WRITE; raw_cmd->flags |= FD_RAW_READ; COMMAND = FM_MODE(_floppy, FD_READ); } else if ((unsigned long)current_req->buffer < MAX_DMA_ADDRESS) { unsigned long dma_limit; int direct, indirect; indirect = transfer_size(ssize, max_sector, max_buffer_sectors * 2) - fsector_t; /* * Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide * on a 64 bit machine! 
*/ max_size = buffer_chain_size(); dma_limit = (MAX_DMA_ADDRESS - ((unsigned long)current_req->buffer)) >> 9; if ((unsigned long)max_size > dma_limit) max_size = dma_limit; /* 64 kb boundaries */ if (CROSS_64KB(current_req->buffer, max_size << 9)) max_size = (K_64 - ((unsigned long)current_req->buffer) % K_64) >> 9; direct = transfer_size(ssize, max_sector, max_size) - fsector_t; /* * We try to read tracks, but if we get too many errors, we * go back to reading just one sector at a time. * * This means we should be able to read a sector even if there * are other bad sectors on this track. */ if (!direct || (indirect * 2 > direct * 3 && *errors < DP->max_errors.read_track && ((!probing || (DP->read_track & (1 << DRS->probed_format)))))) { max_size = blk_rq_sectors(current_req); } else { raw_cmd->kernel_data = current_req->buffer; raw_cmd->length = current_count_sectors << 9; if (raw_cmd->length == 0) { DPRINT("%s: zero dma transfer attempted\n", __func__); DPRINT("indirect=%d direct=%d fsector_t=%d\n", indirect, direct, fsector_t); return 0; } virtualdmabug_workaround(); return 2; } } if (CT(COMMAND) == FD_READ) max_size = max_sector; /* unbounded */ /* claim buffer track if needed */ if (buffer_track != raw_cmd->track || /* bad track */ buffer_drive != current_drive || /* bad drive */ fsector_t > buffer_max || fsector_t < buffer_min || ((CT(COMMAND) == FD_READ || (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) && max_sector > 2 * max_buffer_sectors + buffer_min && max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)) { /* not enough space */ buffer_track = -1; buffer_drive = current_drive; buffer_max = buffer_min = aligned_sector_t; } raw_cmd->kernel_data = floppy_track_buffer + ((aligned_sector_t - buffer_min) << 9); if (CT(COMMAND) == FD_WRITE) { /* copy write buffer to track buffer. 
* if we get here, we know that the write * is either aligned or the data already in the buffer * (buffer will be overwritten) */ if (in_sector_offset && buffer_track == -1) DPRINT("internal error offset !=0 on write\n"); buffer_track = raw_cmd->track; buffer_drive = current_drive; copy_buffer(ssize, max_sector, 2 * max_buffer_sectors + buffer_min); } else transfer_size(ssize, max_sector, 2 * max_buffer_sectors + buffer_min - aligned_sector_t); /* round up current_count_sectors to get dma xfer size */ raw_cmd->length = in_sector_offset + current_count_sectors; raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1; raw_cmd->length <<= 9; if ((raw_cmd->length < current_count_sectors << 9) || (raw_cmd->kernel_data != current_req->buffer && CT(COMMAND) == FD_WRITE && (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max || aligned_sector_t < buffer_min)) || raw_cmd->length % (128 << SIZECODE) || raw_cmd->length <= 0 || current_count_sectors <= 0) { DPRINT("fractionary current count b=%lx s=%lx\n", raw_cmd->length, current_count_sectors); if (raw_cmd->kernel_data != current_req->buffer) pr_info("addr=%d, length=%ld\n", (int)((raw_cmd->kernel_data - floppy_track_buffer) >> 9), current_count_sectors); pr_info("st=%d ast=%d mse=%d msi=%d\n", fsector_t, aligned_sector_t, max_sector, max_size); pr_info("ssize=%x SIZECODE=%d\n", ssize, SIZECODE); pr_info("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n", COMMAND, SECTOR, HEAD, TRACK); pr_info("buffer drive=%d\n", buffer_drive); pr_info("buffer track=%d\n", buffer_track); pr_info("buffer_min=%d\n", buffer_min); pr_info("buffer_max=%d\n", buffer_max); return 0; } if (raw_cmd->kernel_data != current_req->buffer) { if (raw_cmd->kernel_data < floppy_track_buffer || current_count_sectors < 0 || raw_cmd->length < 0 || raw_cmd->kernel_data + raw_cmd->length > floppy_track_buffer + (max_buffer_sectors << 10)) { DPRINT("buffer overrun in schedule dma\n"); pr_info("fsector_t=%d buffer_min=%d current_count=%ld\n", fsector_t, buffer_min, 
			raw_cmd->length >> 9);
			pr_info("current_count_sectors=%ld\n",
				current_count_sectors);
			if (CT(COMMAND) == FD_READ)
				pr_info("read\n");
			if (CT(COMMAND) == FD_WRITE)
				pr_info("write\n");
			return 0;
		}
	} else if (raw_cmd->length > blk_rq_bytes(current_req) ||
		   current_count_sectors > blk_rq_sectors(current_req)) {
		DPRINT("buffer overrun in direct transfer\n");
		return 0;
	} else if (raw_cmd->length < current_count_sectors << 9) {
		DPRINT("more sectors than bytes\n");
		pr_info("bytes=%ld\n", raw_cmd->length >> 9);
		pr_info("sectors=%ld\n", current_count_sectors);
	}
	if (raw_cmd->length == 0) {
		DPRINT("zero dma transfer attempted from make_raw_request\n");
		return 0;
	}

	virtualdmabug_workaround();
	return 2;	/* 2 == raw command fully prepared, ready to issue */
}

/*
 * Round-robin between our available drives, doing one request from each.
 * Scans at most one full cycle of the per-drive queues starting after the
 * last serviced position (fdc_queue).  On success, current_req is set to
 * the fetched request; otherwise current_req is left as it was (NULL when
 * called from redo_fd_request's idle path).  Returns non-zero iff a
 * request was found.  Caller must hold floppy_lock.
 */
static int set_next_request(void)
{
	struct request_queue *q;
	int old_pos = fdc_queue;	/* remember start so we stop after one lap */

	do {
		q = disks[fdc_queue]->queue;
		/* advance with wrap-around before testing, so the same
		 * drive is not immediately re-polled next call */
		if (++fdc_queue == N_DRIVE)
			fdc_queue = 0;
		if (q) {
			current_req = blk_fetch_request(q);
			if (current_req)
				break;
		}
	} while (fdc_queue != old_pos);

	return current_req != NULL;
}

/*
 * Main request-servicing loop: pick the next pending block request,
 * select drive/FDC, handle media change and format autodetection, then
 * build and queue the raw command.  Loops via do_request until either a
 * request is successfully queued to the hardware or no work remains.
 */
static void redo_fd_request(void)
{
	int drive;
	int tmp;

	lastredo = jiffies;
	if (current_drive < N_DRIVE)
		floppy_off(current_drive);

do_request:
	if (!current_req) {
		int pending;

		spin_lock_irq(&floppy_lock);
		pending = set_next_request();
		spin_unlock_irq(&floppy_lock);

		if (!pending) {
			/* nothing queued anywhere: release the controller */
			do_floppy = NULL;
			unlock_fdc();
			return;
		}
	}
	drive = (long)current_req->rq_disk->private_data;
	set_fdc(drive);
	reschedule_timeout(current_reqD, "redo fd request");

	set_floppy(drive);
	raw_cmd = &default_raw_cmd;
	raw_cmd->flags = 0;
	if (start_motor(redo_fd_request))
		return;	/* motor not up yet; we will be called back */

	disk_change(current_drive);
	if (test_bit(current_drive, &fake_change) ||
	    test_bit(FD_DISK_CHANGED_BIT, &DRS->flags)) {
		DPRINT("disk absent or changed during operation\n");
		request_done(0);
		goto do_request;
	}
	if (!_floppy) {	/* Autodetection */
		if (!probing) {
			DRS->probed_format = 0;
			if (next_valid_format()) {
				DPRINT("no autodetectable formats\n");
				_floppy = NULL;
				request_done(0);
				goto do_request;
			}
		}
		probing = 1;
		/* try the next candidate geometry for this drive */
		_floppy = floppy_type + DP->autodetect[DRS->probed_format];
	} else
		probing = 0;
	errors = &(current_req->errors);
	tmp = make_raw_rw_request();
	if (tmp < 2) {
		/* 0 or 1: request finished (error or satisfied from buffer) */
		request_done(tmp);
		goto do_request;
	}

	if (test_bit(FD_NEED_TWADDLE_BIT, &DRS->flags))
		twaddle();
	schedule_bh(floppy_start);
	debugt(__func__, "queue fd request");
	return;
}

/* continuation callbacks used while a normal read/write is in flight */
static const struct cont_t rw_cont = {
	.interrupt	= rw_interrupt,
	.redo		= redo_fd_request,
	.error		= bad_flp_intr,
	.done		= request_done
};

/* Install the read/write continuation and kick the request loop. */
static void process_fd_request(void)
{
	cont = &rw_cont;
	schedule_bh(redo_fd_request);
}

/*
 * Block-layer entry point (request_fn).  Claims the controller if it is
 * idle; if it is busy, the new request will be picked up by the running
 * redo_fd_request loop when the current one completes.
 */
static void do_fd_request(struct request_queue *q)
{
	if (WARN(max_buffer_sectors == 0,
		 "VFS: %s called on non-open device\n", __func__))
		return;

	if (WARN(atomic_read(&usage_count) == 0,
		 "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%x\n",
		 current_req, (long)blk_rq_pos(current_req),
		 current_req->cmd_type, current_req->cmd_flags))
		return;

	if (test_bit(0, &fdc_busy)) {
		/* fdc busy, this new request will be treated when the
		   current one is done */
		is_alive(__func__, "old request running");
		return;
	}
	lock_fdc(MAXTIMEOUT, false);
	process_fd_request();
	is_alive(__func__, "");
}

/* continuation callbacks for a status poll (no data transfer) */
static const struct cont_t poll_cont = {
	.interrupt	= success_and_wakeup,
	.redo		= floppy_ready,
	.error		= generic_failure,
	.done		= generic_done
};

/*
 * Poll the drive to refresh its status bits (disk change line etc.).
 * flag selects FD_RAW_* behavior for the probe.  Blocks until done;
 * returns wait_til_done()'s result (-EINTR if interrupted).
 */
static int poll_drive(bool interruptible, int flag)
{
	/* no auto-sense, just clear dcl */
	raw_cmd = &default_raw_cmd;
	raw_cmd->flags = flag;
	raw_cmd->track = 0;
	raw_cmd->cmd_count = 0;
	cont = &poll_cont;
	debug_dcl(DP->flags, "setting NEWCHANGE in poll_drive\n");
	set_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);

	return wait_til_done(floppy_ready, interruptible);
}

/*
 * User triggered reset
 * ====================
 */

/* A reset sequence should never produce its own interrupt; log if it does. */
static void reset_intr(void)
{
	pr_info("weird, reset interrupt called\n");
}

static const struct cont_t reset_cont = {
	.interrupt	= reset_intr,
	.redo		= success_and_wakeup,
	.error		= generic_failure,
	.done		= generic_done
};

static int
/*
 * Reset the FDC on user request.  arg == FD_RESET_ALWAYS forces the
 * reset even if the controller does not flag one as needed.  Returns 0
 * on success, -EINTR if the lock or the reset wait was interrupted.
 * NOTE(review): on the -EINTR path after wait_til_done() the function
 * returns without process_fd_request() — verify intended against callers.
 */
user_reset_fdc(int drive, int arg, bool interruptible)
{
	int ret;

	if (lock_fdc(drive, interruptible))
		return -EINTR;

	if (arg == FD_RESET_ALWAYS)
		FDCS->reset = 1;
	if (FDCS->reset) {
		cont = &reset_cont;
		ret = wait_til_done(reset_fdc, interruptible);
		if (ret == -EINTR)
			return -EINTR;
	}
	process_fd_request();
	return 0;
}

/*
 * Misc Ioctl's and support
 * ========================
 */

/* copy_to_user wrapper normalizing the result to 0 / -EFAULT */
static inline int fd_copyout(void __user *param, const void *address,
			     unsigned long size)
{
	return copy_to_user(param, address, size) ? -EFAULT : 0;
}

/* copy_from_user wrapper normalizing the result to 0 / -EFAULT */
static inline int fd_copyin(void __user *param, void *address,
			    unsigned long size)
{
	return copy_from_user(address, param, size) ? -EFAULT : 0;
}

/*
 * Human-readable name of the format in use: the explicit type if given,
 * else the drive's native format; "(null)" when neither is known.
 */
static const char *drive_name(int type, int drive)
{
	struct floppy_struct *floppy;

	if (type)
		floppy = floppy_type + type;
	else {
		if (UDP->native_format)
			floppy = floppy_type + UDP->native_format;
		else
			return "(null)";
	}
	if (floppy->name)
		return floppy->name;
	else
		return "(null)";
}

/* raw commands */

/*
 * Completion callback for FDRAWCMD processing.  Collects the FDC reply
 * bytes and DMA residue into the current raw_cmd, updates its status
 * flags, and either chains to raw_cmd->next (per the STOP_IF_* flags)
 * or finishes via generic_done().
 */
static void raw_cmd_done(int flag)
{
	int i;

	if (!flag) {
		raw_cmd->flags |= FD_RAW_FAILURE;
		raw_cmd->flags |= FD_RAW_HARDFAILURE;
	} else {
		raw_cmd->reply_count = inr;
		if (raw_cmd->reply_count > MAX_REPLIES)
			raw_cmd->reply_count = 0;
		for (i = 0; i < raw_cmd->reply_count; i++)
			raw_cmd->reply[i] = reply_buffer[i];

		if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
			unsigned long flags;

			/* length becomes the untransferred remainder */
			flags = claim_dma_lock();
			raw_cmd->length = fd_get_dma_residue();
			release_dma_lock(flags);
		}

		if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) &&
		    (!raw_cmd->reply_count ||
		     (raw_cmd->reply[0] & 0xc0)))
			raw_cmd->flags |= FD_RAW_FAILURE;

		if (disk_change(current_drive))
			raw_cmd->flags |= FD_RAW_DISK_CHANGE;
		else
			raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
		if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
			motor_off_callback(current_drive);

		/* chain to the next command unless a STOP_IF_* flag says
		 * to halt on this command's outcome */
		if (raw_cmd->next &&
		    (!(raw_cmd->flags & FD_RAW_FAILURE) ||
		     !(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) &&
		    ((raw_cmd->flags & FD_RAW_FAILURE) ||
		     !(raw_cmd->flags & FD_RAW_STOP_IF_SUCCESS))) {
			raw_cmd = raw_cmd->next;
			return;	/* re-issue: next command in the chain runs */
		}
	}
	generic_done(flag);
}

/* continuation callbacks used while an FDRAWCMD chain is in flight */
static const struct cont_t raw_cmd_cont = {
	.interrupt	= success_and_wakeup,
	.redo		= floppy_start,
	.error		= generic_failure,
	.done		= raw_cmd_done
};

/*
 * Copy a completed raw-command chain (headers plus any FD_RAW_READ data)
 * back to the user buffer at param.  Returns 0 or -EFAULT.
 */
static int raw_cmd_copyout(int cmd, void __user *param,
			   struct floppy_raw_cmd *ptr)
{
	int ret;

	while (ptr) {
		ret = copy_to_user(param, ptr, sizeof(*ptr));
		if (ret)
			return -EFAULT;
		param += sizeof(struct floppy_raw_cmd);
		if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length) {
			/* length holds the DMA residue here, so the
			 * transferred amount is buffer_length - length */
			if (ptr->length >= 0 &&
			    ptr->length <= ptr->buffer_length) {
				long length = ptr->buffer_length - ptr->length;
				ret = fd_copyout(ptr->data, ptr->kernel_data,
						 length);
				if (ret)
					return ret;
			}
		}
		ptr = ptr->next;
	}

	return 0;
}

/*
 * Free a raw-command chain and its DMA buffers; *ptr is NULLed first so
 * a partially-built chain can be freed safely after a copyin failure.
 */
static void raw_cmd_free(struct floppy_raw_cmd **ptr)
{
	struct floppy_raw_cmd *next;
	struct floppy_raw_cmd *this;

	this = *ptr;
	*ptr = NULL;
	while (this) {
		if (this->buffer_length) {
			fd_dma_mem_free((unsigned long)this->kernel_data,
					this->buffer_length);
			this->buffer_length = 0;
		}
		next = this->next;
		kfree(this);
		this = next;
	}
}

/*
 * Build a kernel-side raw-command chain from the user's FDRAWCMD input.
 * Allocates each link and its DMA buffer; on any error the partial chain
 * is left in *rcmd for the caller to release with raw_cmd_free().
 */
static int raw_cmd_copyin(int cmd, void __user *param,
			  struct floppy_raw_cmd **rcmd)
{
	struct floppy_raw_cmd *ptr;
	int ret;
	int i;

	*rcmd = NULL;

loop:
	ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
	if (!ptr)
		return -ENOMEM;
	*rcmd = ptr;
	ret = copy_from_user(ptr, param, sizeof(*ptr));
	if (ret)
		return -EFAULT;
	ptr->next = NULL;
	ptr->buffer_length = 0;
	param += sizeof(struct floppy_raw_cmd);
	if (ptr->cmd_count > 33)
			/* the command may now also take up the space
			 * initially intended for the reply & the
			 * reply count. Needed for long 82078 commands
			 * such as RESTORE, which takes ... 17 command
			 * bytes. Murphy's law #137: When you reserve
			 * 16 bytes for a structure, you'll one day
			 * discover that you really need 17...
			 */
		return -EINVAL;

	for (i = 0; i < 16; i++)
		ptr->reply[i] = 0;
	ptr->resultcode = 0;
	ptr->kernel_data = NULL;

	if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
		if (ptr->length <= 0)
			return -EINVAL;
		ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length);
		fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
		if (!ptr->kernel_data)
			return -ENOMEM;
		ptr->buffer_length = ptr->length;
	}
	if (ptr->flags & FD_RAW_WRITE) {
		ret = fd_copyin(ptr->data, ptr->kernel_data, ptr->length);
		if (ret)
			return ret;
	}

	if (ptr->flags & FD_RAW_MORE) {
		/* chain another command; mask rate to the bits the FDC
		 * accepts for chained commands */
		rcmd = &(ptr->next);
		ptr->rate &= 0x43;
		goto loop;
	}

	return 0;
}

/*
 * FDRAWCMD ioctl: copy in the command chain, run it through the
 * continuation machinery, copy results back.  Marks rawcmd state so a
 * later close can force a revalidate if another fd has the drive open.
 * Caller must already hold the fdc lock (taken in fd_locked_ioctl).
 */
static int raw_cmd_ioctl(int cmd, void __user *param)
{
	struct floppy_raw_cmd *my_raw_cmd;
	int drive;
	int ret2;
	int ret;

	if (FDCS->rawcmd <= 1)
		FDCS->rawcmd = 1;
	for (drive = 0; drive < N_DRIVE; drive++) {
		if (FDC(drive) != fdc)
			continue;
		if (drive == current_drive) {
			if (UDRS->fd_ref > 1) {
				FDCS->rawcmd = 2;
				break;
			}
		} else if (UDRS->fd_ref) {
			FDCS->rawcmd = 2;
			break;
		}
	}

	if (FDCS->reset)
		return -EIO;

	ret = raw_cmd_copyin(cmd, param, &my_raw_cmd);
	if (ret) {
		raw_cmd_free(&my_raw_cmd);
		return ret;
	}

	raw_cmd = my_raw_cmd;
	cont = &raw_cmd_cont;
	ret = wait_til_done(floppy_start, true);
	debug_dcl(DP->flags, "calling disk change from raw_cmd ioctl\n");

	if (ret != -EINTR && FDCS->reset)
		ret = -EIO;

	DRS->track = NO_TRACK;

	/* copy results out even on error; first error wins */
	ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd);
	if (!ret)
		ret = ret2;

	raw_cmd_free(&my_raw_cmd);
	return ret;
}

/* Force a reread of the drive's track buffer and media state. */
static int invalidate_drive(struct block_device *bdev)
{
	/* invalidate the buffer track to force a reread */
	set_bit((long)bdev->bd_disk->private_data, &fake_change);
	process_fd_request();
	check_disk_change(bdev);
	return 0;
}

/*
 * FDSETPRM/FDDEFPRM: install a user-supplied geometry, either as a
 * global user format slot (type != 0, needs CAP_SYS_ADMIN) or for this
 * drive only.  Invalidates cached data whose sector numbering changes.
 */
static int set_geometry(unsigned int cmd, struct floppy_struct *g,
			int drive, int type, struct block_device *bdev)
{
	int cnt;

	/* sanity checking for parameters.
	 */
	if (g->sect <= 0 ||
	    g->head <= 0 ||
	    g->track <= 0 ||
	    g->track > UDP->tracks >> STRETCH(g) ||
	    /* check if reserved bits are set */
	    (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
		return -EINVAL;
	if (type) {
		/* update a global format slot: privileged operation */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		mutex_lock(&open_lock);
		if (lock_fdc(drive, true)) {
			mutex_unlock(&open_lock);
			return -EINTR;
		}
		floppy_type[type] = *g;
		floppy_type[type].name = "user format";
		/* refresh the size table for all minors of this type */
		for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
			floppy_sizes[cnt] = floppy_sizes[cnt + 0x80] =
			    floppy_type[type].size + 1;
		process_fd_request();
		for (cnt = 0; cnt < N_DRIVE; cnt++) {
			struct block_device *bdev = opened_bdev[cnt];
			if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
				continue;
			__invalidate_device(bdev, true);
		}
		mutex_unlock(&open_lock);
	} else {
		int oldStretch;

		if (lock_fdc(drive, true))
			return -EINTR;
		if (cmd != FDDEFPRM) {
			/* notice a disk change immediately, else
			 * we lose our settings immediately*/
			if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
				return -EINTR;
		}
		oldStretch = g->stretch;
		user_params[drive] = *g;
		if (buffer_drive == drive)
			SUPBOUND(buffer_max, user_params[drive].sect);
		current_type[drive] = &user_params[drive];
		floppy_sizes[drive] = user_params[drive].size;
		/* FDDEFPRM settings survive disk changes; FDSETPRM don't */
		if (cmd == FDDEFPRM)
			DRS->keep_data = -1;
		else
			DRS->keep_data = 1;
		/* invalidation. Invalidate only when needed, i.e.
		 * when there are already sectors in the buffer cache
		 * whose number will change.
		   This is useful, because
		 * mtools often changes the geometry of the disk after
		 * looking at the boot block */
		if (DRS->maxblock > user_params[drive].sect ||
		    DRS->maxtrack ||
		    ((user_params[drive].sect ^ oldStretch) &
		     (FD_SWAPSIDES | FD_SECTBASEMASK)))
			invalidate_drive(bdev);
		else
			process_fd_request();
	}
	return 0;
}

/* handle obsolete ioctl's */
static unsigned int ioctl_table[] = {
	FDCLRPRM,
	FDSETPRM,
	FDDEFPRM,
	FDGETPRM,
	FDMSGON,
	FDMSGOFF,
	FDFMTBEG,
	FDFMTTRK,
	FDFMTEND,
	FDSETEMSGTRESH,
	FDFLUSH,
	FDSETMAXERRS,
	FDGETMAXERRS,
	FDGETDRVTYP,
	FDSETDRVPRM,
	FDGETDRVPRM,
	FDGETDRVSTAT,
	FDPOLLDRVSTAT,
	FDRESET,
	FDGETFDCSTAT,
	FDWERRORCLR,
	FDWERRORGET,
	FDRAWCMD,
	FDEJECT,
	FDTWADDLE
};

/*
 * Map an old-style ioctl number (matched on the low 16 bits) onto its
 * current equivalent, and extract the user-supplied payload size.
 * Rejects requests whose payload would be larger than the current one.
 */
static int normalize_ioctl(unsigned int *cmd, int *size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ioctl_table); i++) {
		if ((*cmd & 0xffff) == (ioctl_table[i] & 0xffff)) {
			*size = _IOC_SIZE(*cmd);
			*cmd = ioctl_table[i];
			if (*size > _IOC_SIZE(*cmd)) {
				pr_info("ioctl not yet supported\n");
				return -EFAULT;
			}
			return 0;
		}
	}
	return -EINVAL;
}

/*
 * Resolve the geometry in effect for drive/type into *g, polling the
 * drive first when no explicit type is given.  Returns 0, -EINTR or
 * -ENODEV (no geometry known).
 */
static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
{
	if (type)
		*g = &floppy_type[type];
	else {
		if (lock_fdc(drive, false))
			return -EINTR;
		if (poll_drive(false, 0) == -EINTR)
			return -EINTR;
		process_fd_request();
		*g = current_type[drive];
	}
	if (!*g)
		return -ENODEV;
	return 0;
}

/* block_device_operations .getgeo: report heads/sectors/cylinders */
static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	int drive = (long)bdev->bd_disk->private_data;
	int type = ITYPE(drive_state[drive].fd_device);
	struct floppy_struct *g;
	int ret;

	ret = get_floppy_geometry(drive, type, &g);
	if (ret)
		return ret;

	geo->heads = g->head;
	geo->sectors = g->sect;
	geo->cylinders = g->track;
	return 0;
}

/*
 * The floppy ioctl dispatcher, called with floppy_mutex held by
 * fd_ioctl().  Normalizes legacy commands, checks permissions, copies
 * the input payload and dispatches on the (normalized) command.
 */
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long param)
{
	int drive = (long)bdev->bd_disk->private_data;
	int type = ITYPE(UDRS->fd_device);
	int i;
	int ret;
	int size;
	union inparam {
		struct floppy_struct g;	/* geometry */
		struct format_descr f;
		struct floppy_max_errors max_errors;
		struct floppy_drive_params dp;
	} inparam;		/* parameters coming from user space */
	const void *outparam;	/* parameters passed back to user space */

	/* convert compatibility eject ioctls into floppy eject ioctl.
	 * We do this in order to provide a means to eject floppy disks before
	 * installing the new fdutils package */
	if (cmd == CDROMEJECT ||	/* CD-ROM eject */
	    cmd == 0x6470) {		/* SunOS floppy eject */
		DPRINT("obsolete eject ioctl\n");
		DPRINT("please use floppycontrol --eject\n");
		cmd = FDEJECT;
	}

	/* all floppy ioctls live in the 0x02xx range */
	if (!((cmd & 0xff00) == 0x0200))
		return -EINVAL;

	/* convert the old style command into a new style command */
	ret = normalize_ioctl(&cmd, &size);
	if (ret)
		return ret;

	/* permission checks: 0x40 = writes to the drive, 0x80 = admin-only */
	if (((cmd & 0x40) &&
	     !(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL))) ||
	    ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	if (WARN_ON(size < 0 || size > sizeof(inparam)))
		return -EINVAL;

	/* copyin */
	memset(&inparam, 0, sizeof(inparam));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		ret = fd_copyin((void __user *)param, &inparam, size);
		if (ret)
			return ret;
	}

	switch (cmd) {
	case FDEJECT:
		if (UDRS->fd_ref != 1)
			/* somebody else has this drive open */
			return -EBUSY;
		if (lock_fdc(drive, true))
			return -EINTR;

		/* do the actual eject.
		   Fails on
		 * non-Sparc architectures */
		ret = fd_eject(UNIT(drive));

		set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
		set_bit(FD_VERIFY_BIT, &UDRS->flags);
		process_fd_request();
		return ret;
	case FDCLRPRM:
		/* drop any user-set geometry; revert to autodetection */
		if (lock_fdc(drive, true))
			return -EINTR;
		current_type[drive] = NULL;
		floppy_sizes[drive] = MAX_DISK_SIZE << 1;
		UDRS->keep_data = 0;
		return invalidate_drive(bdev);
	case FDSETPRM:
	case FDDEFPRM:
		return set_geometry(cmd, &inparam.g, drive, type, bdev);
	case FDGETPRM:
		ret = get_floppy_geometry(drive, type,
					  (struct floppy_struct **)&outparam);
		if (ret)
			return ret;
		break;	/* copied out below */
	case FDMSGON:
		UDP->flags |= FTD_MSG;
		return 0;
	case FDMSGOFF:
		UDP->flags &= ~FTD_MSG;
		return 0;
	case FDFMTBEG:
		/* formatting may start only on a present, writable disk */
		if (lock_fdc(drive, true))
			return -EINTR;
		if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
			return -EINTR;
		ret = UDRS->flags;
		process_fd_request();
		if (ret & FD_VERIFY)
			return -ENODEV;
		if (!(ret & FD_DISK_WRITABLE))
			return -EROFS;
		return 0;
	case FDFMTTRK:
		if (UDRS->fd_ref != 1)
			return -EBUSY;
		return do_format(drive, &inparam.f);
	case FDFMTEND:
	case FDFLUSH:
		if (lock_fdc(drive, true))
			return -EINTR;
		return invalidate_drive(bdev);
	case FDSETEMSGTRESH:
		UDP->max_errors.reporting = (unsigned short)(param & 0x0f);
		return 0;
	case FDGETMAXERRS:
		outparam = &UDP->max_errors;
		break;
	case FDSETMAXERRS:
		UDP->max_errors = inparam.max_errors;
		break;
	case FDGETDRVTYP:
		outparam = drive_name(type, drive);
		SUPBOUND(size, strlen((const char *)outparam) + 1);
		break;
	case FDSETDRVPRM:
		*UDP = inparam.dp;
		break;
	case FDGETDRVPRM:
		outparam = UDP;
		break;
	case FDPOLLDRVSTAT:
		/* refresh status before reporting it */
		if (lock_fdc(drive, true))
			return -EINTR;
		if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
			return -EINTR;
		process_fd_request();
		/* fall through */
	case FDGETDRVSTAT:
		outparam = UDRS;
		break;
	case FDRESET:
		return user_reset_fdc(drive, (int)param, true);
	case FDGETFDCSTAT:
		outparam = UFDCS;
		break;
	case FDWERRORCLR:
		memset(UDRWE, 0, sizeof(*UDRWE));
		return 0;
	case FDWERRORGET:
		outparam = UDRWE;
		break;
	case FDRAWCMD:
		if (type)
			return -EINVAL;
		if
		    (lock_fdc(drive, true))
			return -EINTR;
		set_floppy(drive);
		i = raw_cmd_ioctl(cmd, (void __user *)param);
		if (i == -EINTR)
			return -EINTR;
		process_fd_request();
		return i;
	case FDTWADDLE:
		if (lock_fdc(drive, true))
			return -EINTR;
		twaddle();
		process_fd_request();
		return 0;
	default:
		return -EINVAL;
	}

	/* cases that break out of the switch return data via outparam */
	if (_IOC_DIR(cmd) & _IOC_READ)
		return fd_copyout((void __user *)param, outparam, size);

	return 0;
}

/* block_device_operations .ioctl: serialize everything on floppy_mutex */
static int fd_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long param)
{
	int ret;

	mutex_lock(&floppy_mutex);
	ret = fd_locked_ioctl(bdev, mode, cmd, param);
	mutex_unlock(&floppy_mutex);

	return ret;
}

/*
 * Boot-time drive configuration: read CMOS types for the first two
 * drives, fill in per-drive parameters from the defaults table, and
 * print a summary line of detected drives.
 */
static void __init config_types(void)
{
	bool has_drive = false;
	int drive;

	/* read drive info out of physical CMOS */
	drive = 0;
	if (!UDP->cmos)
		UDP->cmos = FLOPPY0_TYPE;
	drive = 1;
	if (!UDP->cmos && FLOPPY1_TYPE)
		UDP->cmos = FLOPPY1_TYPE;
	/* FIXME: additional physical CMOS drive detection should go here */

	for (drive = 0; drive < N_DRIVE; drive++) {
		unsigned int type = UDP->cmos;
		struct floppy_drive_params *params;
		const char *name = NULL;
		static char temparea[32];

		if (type < ARRAY_SIZE(default_drive_params)) {
			params = &default_drive_params[type].params;
			if (type) {
				name = default_drive_params[type].name;
				allowed_drive_mask |= 1 << drive;
			} else
				allowed_drive_mask &= ~(1 << drive);
		} else {
			params = &default_drive_params[0].params;
			sprintf(temparea, "unknown type %d (usb?)", type);
			name = temparea;
		}
		if (name) {
			const char *prepend;
			if (!has_drive) {
				prepend = "";
				has_drive = true;
				pr_info("Floppy drive(s):");
			} else {
				prepend = ",";
			}
			pr_cont("%s fd%d is %s", prepend, drive, name);
		}
		*UDP = *params;
	}

	if (has_drive)
		pr_cont("\n");
}

/*
 * block_device_operations .release: drop one reference (or clear an
 * exclusive -1 reference) and forget the bdev when the last user goes.
 */
static int floppy_release(struct gendisk *disk, fmode_t mode)
{
	int drive = (long)disk->private_data;

	mutex_lock(&floppy_mutex);
	mutex_lock(&open_lock);
	if (UDRS->fd_ref < 0)
		UDRS->fd_ref = 0;
	else if (!UDRS->fd_ref--) {
		DPRINT("floppy_release with fd_ref == 0");
		UDRS->fd_ref = 0;
	}
	if (!UDRS->fd_ref)
		opened_bdev[drive] =
		    NULL;
	mutex_unlock(&open_lock);
	mutex_unlock(&floppy_mutex);

	return 0;
}

/*
 * floppy_open check for aliasing (/dev/fd0 can be the same as
 * /dev/PS0 etc), and disallows simultaneous access to the same
 * drive with different device numbers.
 */
static int floppy_open(struct block_device *bdev, fmode_t mode)
{
	int drive = (long)bdev->bd_disk->private_data;
	int old_dev, new_dev;
	int try;
	int res = -EBUSY;
	char *tmp;

	mutex_lock(&floppy_mutex);
	mutex_lock(&open_lock);
	old_dev = UDRS->fd_device;
	if (opened_bdev[drive] && opened_bdev[drive] != bdev)
		goto out2;	/* same drive already open via another bdev */

	if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)) {
		set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
		set_bit(FD_VERIFY_BIT, &UDRS->flags);
	}

	/* fd_ref == -1 encodes an exclusive open */
	if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (mode & FMODE_EXCL)))
		goto out2;

	if (mode & FMODE_EXCL)
		UDRS->fd_ref = -1;
	else
		UDRS->fd_ref++;

	opened_bdev[drive] = bdev;

	res = -ENXIO;

	if (!floppy_track_buffer) {
		/* if opening an ED drive, reserve a big buffer,
		 * else reserve a small one */
		if ((UDP->cmos == 6) || (UDP->cmos == 5))
			try = 64;	/* Only 48 actually useful */
		else
			try = 32;	/* Only 24 actually useful */
		tmp = (char *)fd_dma_mem_alloc(1024 * try);
		if (!tmp && !floppy_track_buffer) {
			try >>= 1;	/* buffer only one side */
			INFBOUND(try, 16);
			tmp = (char *)fd_dma_mem_alloc(1024 * try);
		}
		if (!tmp && !floppy_track_buffer)
			fallback_on_nodma_alloc(&tmp, 2048 * try);
		if (!tmp && !floppy_track_buffer) {
			DPRINT("Unable to allocate DMA memory\n");
			goto out;
		}
		if (floppy_track_buffer) {
			/* raced with another opener that got there first */
			if (tmp)
				fd_dma_mem_free((unsigned long)tmp,
						try * 1024);
		} else {
			buffer_min = buffer_max = -1;
			floppy_track_buffer = tmp;
			max_buffer_sectors = try;
		}
	}

	new_dev = MINOR(bdev->bd_dev);
	UDRS->fd_device = new_dev;
	set_capacity(disks[drive], floppy_sizes[new_dev]);
	if (old_dev != -1 && old_dev != new_dev) {
		/* format changed under us: drop the cached track */
		if (buffer_drive == drive)
			buffer_track = -1;
	}

	if (UFDCS->rawcmd == 1)
		UFDCS->rawcmd = 2;

	if (!(mode & FMODE_NDELAY)) {
		if (mode & (FMODE_READ|FMODE_WRITE)) {
			UDRS->last_checked = 0;
			check_disk_change(bdev);
			if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
				goto out;
		}
		res = -EROFS;
		if ((mode & FMODE_WRITE) &&
		    !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
			goto out;
	}
	mutex_unlock(&open_lock);
	mutex_unlock(&floppy_mutex);
	return 0;
out:
	/* undo the reference taken above */
	if (UDRS->fd_ref < 0)
		UDRS->fd_ref = 0;
	else
		UDRS->fd_ref--;
	if (!UDRS->fd_ref)
		opened_bdev[drive] = NULL;
out2:
	mutex_unlock(&open_lock);
	mutex_unlock(&floppy_mutex);
	return res;
}

/*
 * Check if the disk has been changed or if a change has been faked.
 */
static unsigned int floppy_check_events(struct gendisk *disk,
					unsigned int clearing)
{
	int drive = (long)disk->private_data;

	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
	    test_bit(FD_VERIFY_BIT, &UDRS->flags))
		return DISK_EVENT_MEDIA_CHANGE;

	/* rate-limit actual hardware polls via checkfreq */
	if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
		lock_fdc(drive, false);
		poll_drive(false, 0);
		process_fd_request();
	}

	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
	    test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
	    test_bit(drive, &fake_change) ||
	    drive_no_geom(drive))
		return DISK_EVENT_MEDIA_CHANGE;
	return 0;
}

/*
 * This implements "read block 0" for floppy_revalidate().
 * Needed for format autodetection, checking whether there is
 * a disk in the drive, and whether that disk is writable.
 */

/* bio completion callback: just wake the waiter in __floppy_read_block_0 */
static void floppy_rb0_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

/*
 * Synchronously read block 0 of bdev using a stack-allocated single-page
 * bio.  The read result itself is discarded — issuing it is enough to
 * drive format autodetection through the normal request path.
 * NOTE(review): the bio's err status is ignored; returns 0 even if the
 * read failed — confirm intended.
 */
static int __floppy_read_block_0(struct block_device *bdev)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	size_t size;

	page = alloc_page(GFP_NOIO);
	if (!page) {
		process_fd_request();
		return -ENOMEM;
	}

	size = bdev->bd_block_size;
	if (!size)
		size = 1024;

	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = size;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = size;
	bio.bi_bdev = bdev;
	bio.bi_sector = 0;
	bio.bi_flags = (1 << BIO_QUIET);
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = floppy_rb0_complete;

	submit_bio(READ, &bio);
	process_fd_request();
	wait_for_completion(&complete);

	__free_page(page);

	return 0;
}

/* revalidate the floppy disk, i.e. trigger format autodetection by reading
 * the bootblock (block 0). "Autodetection" is also needed to check whether
 * there is a disk in the drive at all...
   Thus we also do it for fixed
 * geometry formats */
static int floppy_revalidate(struct gendisk *disk)
{
	int drive = (long)disk->private_data;
	int cf;
	int res = 0;

	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
	    test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
	    test_bit(drive, &fake_change) ||
	    drive_no_geom(drive)) {
		if (WARN(atomic_read(&usage_count) == 0,
			 "VFS: revalidate called on non-open device.\n"))
			return -EFAULT;

		lock_fdc(drive, false);
		/* recheck under the lock: another thread may have
		 * revalidated while we waited */
		cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
		      test_bit(FD_VERIFY_BIT, &UDRS->flags));
		if (!(cf || test_bit(drive, &fake_change) ||
		      drive_no_geom(drive))) {
			process_fd_request();	/*already done by another thread */
			return 0;
		}
		UDRS->maxblock = 0;
		UDRS->maxtrack = 0;
		if (buffer_drive == drive)
			buffer_track = -1;
		clear_bit(drive, &fake_change);
		clear_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
		if (cf)
			UDRS->generation++;
		if (drive_no_geom(drive)) {
			/* auto-sensing */
			res = __floppy_read_block_0(opened_bdev[drive]);
		} else {
			if (cf)
				poll_drive(false, FD_RAW_NEED_DISK);
			process_fd_request();
		}
	}
	set_capacity(disk, floppy_sizes[UDRS->fd_device]);
	return res;
}

static const struct block_device_operations floppy_fops = {
	.owner			= THIS_MODULE,
	.open			= floppy_open,
	.release		= floppy_release,
	.ioctl			= fd_ioctl,
	.getgeo			= fd_getgeo,
	.check_events		= floppy_check_events,
	.revalidate_disk	= floppy_revalidate,
};

/*
 * Floppy Driver initialization
 * =============================
 */

/* Determine the floppy disk controller type */
/* This routine was written by David C. Niemi */
/*
 * Identify the FDC chip by probing commands that successive chip
 * generations added (DUMPREGS, CONFIGURE, PERPENDICULAR, UNLOCK, PARTID)
 * and examining the reply bytes.  Returns one of the FDC_* constants.
 */
static char __init get_fdc_version(void)
{
	int r;

	output_byte(FD_DUMPREGS);	/* 82072 and better know DUMPREGS */
	if (FDCS->reset)
		return FDC_NONE;
	r = result();
	if (r <= 0x00)
		return FDC_NONE;	/* No FDC present ???
					 */
	if ((r == 1) && (reply_buffer[0] == 0x80)) {
		pr_info("FDC %d is an 8272A\n", fdc);
		return FDC_8272A;	/* 8272a/765 don't know DUMPREGS */
	}
	if (r != 10) {
		pr_info("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
			fdc, r);
		return FDC_UNKNOWN;
	}

	if (!fdc_configure()) {
		pr_info("FDC %d is an 82072\n", fdc);
		return FDC_82072;	/* 82072 doesn't know CONFIGURE */
	}

	output_byte(FD_PERPENDICULAR);
	if (need_more_output() == MORE_OUTPUT) {
		output_byte(0);
	} else {
		pr_info("FDC %d is an 82072A\n", fdc);
		return FDC_82072A;	/* 82072A as found on Sparcs. */
	}

	output_byte(FD_UNLOCK);
	r = result();
	if ((r == 1) && (reply_buffer[0] == 0x80)) {
		pr_info("FDC %d is a pre-1991 82077\n", fdc);
		return FDC_82077_ORIG;	/* Pre-1991 82077, doesn't know
					 * LOCK/UNLOCK */
	}
	if ((r != 1) || (reply_buffer[0] != 0x00)) {
		pr_info("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
			fdc, r);
		return FDC_UNKNOWN;
	}
	output_byte(FD_PARTID);
	r = result();
	if (r != 1) {
		pr_info("FDC %d init: PARTID: unexpected return of %d bytes.\n",
			fdc, r);
		return FDC_UNKNOWN;
	}
	if (reply_buffer[0] == 0x80) {
		pr_info("FDC %d is a post-1991 82077\n", fdc);
		return FDC_82077;	/* Revised 82077AA passes all the tests */
	}
	/* bits 7..5 of the PARTID reply select the 82078 variant */
	switch (reply_buffer[0] >> 5) {
	case 0x0:
		/* Either a 82078-1 or a 82078SL running at 5Volt */
		pr_info("FDC %d is an 82078.\n", fdc);
		return FDC_82078;
	case 0x1:
		pr_info("FDC %d is a 44pin 82078\n", fdc);
		return FDC_82078;
	case 0x2:
		pr_info("FDC %d is a S82078B\n", fdc);
		return FDC_S82078B;
	case 0x3:
		pr_info("FDC %d is a National Semiconductor PC87306\n", fdc);
		return FDC_87306;
	default:
		pr_info("FDC %d init: 82078 variant with unknown PARTID=%d.\n",
			fdc, reply_buffer[0] >> 5);
		return FDC_82078_UNKN;
	}
}				/* get_fdc_version */

/* lilo configuration */

/* Set or clear a flag bit (param2) in every default drive's params. */
static void __init floppy_set_flags(int *ints, int param, int param2)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
		if (param)
			default_drive_params[i].params.flags |= param2;
		else
			default_drive_params[i].params.flags &=
			    ~param2;
	}
	DPRINT("%s flag 0x%x\n", param2 ? "Setting" : "Clearing", param);
}

/*
 * "daring"/"not daring" boot option: toggle aggressive timing
 * (no select delay, silent DCL clear) on every default drive profile.
 */
static void __init daring(int *ints, int param, int param2)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
		if (param) {
			default_drive_params[i].params.select_delay = 0;
			default_drive_params[i].params.flags |=
			    FD_SILENT_DCL_CLEAR;
		} else {
			default_drive_params[i].params.select_delay =
			    2 * HZ / 100;
			default_drive_params[i].params.flags &=
			    ~FD_SILENT_DCL_CLEAR;
		}
	}
	DPRINT("Assuming %s floppy hardware\n", param ? "standard" : "broken");
}

/*
 * "cmos" boot option: ints[1] is the drive number, ints[2] the CMOS
 * type code.  The local current_drive deliberately shadows the global
 * so the DP macro resolves to the requested drive's parameters.
 */
static void __init set_cmos(int *ints, int dummy, int dummy2)
{
	int current_drive = 0;

	if (ints[0] != 2) {
		DPRINT("wrong number of parameters for CMOS\n");
		return;
	}
	current_drive = ints[1];
	if (current_drive < 0 || current_drive >= 8) {
		DPRINT("bad drive for set_cmos\n");
		return;
	}
#if N_FDC > 1
	if (current_drive >= 4 && !FDC2)
		FDC2 = 0x370;
#endif
	DP->cmos = ints[2];
	DPRINT("setting CMOS code to %d\n", ints[2]);
}

/*
 * Boot/module option table: each entry either stores a value into *var
 * or invokes fn(ints, param, param2).  Consumed by floppy_setup().
 */
static struct param_table {
	const char *name;
	void (*fn) (int *ints, int param, int param2);
	int *var;
	int def_param;
	int param2;
} config_params[] __initdata = {
	{"allowed_drive_mask", NULL, &allowed_drive_mask, 0xff, 0}, /* obsolete */
	{"all_drives", NULL, &allowed_drive_mask, 0xff, 0},	/* obsolete */
	{"asus_pci", NULL, &allowed_drive_mask, 0x33, 0},
	{"irq", NULL, &FLOPPY_IRQ, 6, 0},
	{"dma", NULL, &FLOPPY_DMA, 2, 0},
	{"daring", daring, NULL, 1, 0},
#if N_FDC > 1
	{"two_fdc", NULL, &FDC2, 0x370, 0},
	{"one_fdc", NULL, &FDC2, 0, 0},
#endif
	{"thinkpad", floppy_set_flags, NULL, 1, FD_INVERTED_DCL},
	{"broken_dcl", floppy_set_flags, NULL, 1, FD_BROKEN_DCL},
	{"messages", floppy_set_flags, NULL, 1, FTD_MSG},
	{"silent_dcl_clear", floppy_set_flags, NULL, 1, FD_SILENT_DCL_CLEAR},
	{"debug", floppy_set_flags, NULL, 1, FD_DEBUG},
	{"nodma", NULL, &can_use_virtual_dma, 1, 0},
	{"omnibook", NULL, &can_use_virtual_dma, 1, 0},
	{"yesdma", NULL, &can_use_virtual_dma, 0, 0},
	{"fifo_depth", NULL, &fifo_depth, 0xa, 0},
	{"nofifo", NULL, &no_fifo, 0x20,
	 0},
	{"usefifo", NULL, &no_fifo, 0, 0},
	{"cmos", set_cmos, NULL, 0, 0},
	{"slow", NULL, &slow_floppy, 1, 0},
	{"unexpected_interrupts", NULL, &print_unex, 1, 0},
	{"no_unexpected_interrupts", NULL, &print_unex, 0, 0},
	{"L40SX", NULL, &print_unex, 0, 0}

	EXTRA_FLOPPY_PARAMS
};

/*
 * Parse one "floppy=" boot option: look the keyword up in
 * config_params and apply it (store into *var and/or call fn).
 * Returns 1 when the option was recognized, 0 otherwise.
 */
static int __init floppy_setup(char *str)
{
	int i;
	int param;
	int ints[11];

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (str) {
		for (i = 0; i < ARRAY_SIZE(config_params); i++) {
			if (strcmp(str, config_params[i].name) == 0) {
				/* use the table default when no value given */
				if (ints[0])
					param = ints[1];
				else
					param = config_params[i].def_param;
				if (config_params[i].fn)
					config_params[i].fn(ints, param,
							    config_params[i].param2);
				if (config_params[i].var) {
					DPRINT("%s=%d\n", str, param);
					*config_params[i].var = param;
				}
				return 1;
			}
		}
	}
	if (str) {
		DPRINT("unknown floppy option [%s]\n", str);

		DPRINT("allowed options are:");
		for (i = 0; i < ARRAY_SIZE(config_params); i++)
			pr_cont(" %s", config_params[i].name);
		pr_cont("\n");
	} else
		DPRINT("botched floppy option\n");
	DPRINT("Read Documentation/blockdev/floppy.txt\n");
	return 0;
}

static int have_no_fdc = -ENODEV;

/* sysfs "cmos" attribute: show this drive's CMOS type code in hex */
static ssize_t floppy_cmos_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct platform_device *p = to_platform_device(dev);
	int drive;

	drive = p->id;	/* drive is read by the UDP macro below */
	return sprintf(buf, "%X\n", UDP->cmos);
}

static DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL);

/* platform device release: nothing to free (devices are static) */
static void floppy_device_release(struct device *dev)
{
}

/* PM resume/restore: reset every present controller */
static int floppy_resume(struct device *dev)
{
	int fdc;	/* shadows the global so the FDCS macro tracks the loop */

	for (fdc = 0; fdc < N_FDC; fdc++)
		if (FDCS->address != -1)
			user_reset_fdc(-1, FD_RESET_ALWAYS, false);

	return 0;
}

static const struct dev_pm_ops floppy_pm_ops = {
	.resume = floppy_resume,
	.restore = floppy_resume,
};

static struct platform_driver floppy_driver = {
	.driver = {
		   .name = "floppy",
		   .pm = &floppy_pm_ops,
	},
};

static struct platform_device floppy_device[N_DRIVE];

/*
 * blk_register_region probe callback: decode the minor number
 * (bits 0-1 plus bit 7 select the drive, bits 2-6 the format) and
 * return the matching gendisk, or NULL for disallowed/absent drives.
 */
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
{
	int drive = (*part & 3) | ((*part & 0x80) >> 5);
	if (drive >=
N_DRIVE || !(allowed_drive_mask & (1 << drive)) || fdc_state[FDC(drive)].version == FDC_NONE) return NULL; if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type)) return NULL; *part = 0; return get_disk(disks[drive]); } static int __init floppy_init(void) { int i, unit, drive; int err, dr; set_debugt(); interruptjiffies = resultjiffies = jiffies; #if defined(CONFIG_PPC) if (check_legacy_ioport(FDC1)) return -ENODEV; #endif raw_cmd = NULL; for (dr = 0; dr < N_DRIVE; dr++) { disks[dr] = alloc_disk(1); if (!disks[dr]) { err = -ENOMEM; goto out_put_disk; } disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock); if (!disks[dr]->queue) { err = -ENOMEM; goto out_put_disk; } blk_queue_max_hw_sectors(disks[dr]->queue, 64); disks[dr]->major = FLOPPY_MAJOR; disks[dr]->first_minor = TOMINOR(dr); disks[dr]->fops = &floppy_fops; sprintf(disks[dr]->disk_name, "fd%d", dr); init_timer(&motor_off_timer[dr]); motor_off_timer[dr].data = dr; motor_off_timer[dr].function = motor_off_callback; } err = register_blkdev(FLOPPY_MAJOR, "fd"); if (err) goto out_put_disk; err = platform_driver_register(&floppy_driver); if (err) goto out_unreg_blkdev; blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE, floppy_find, NULL, NULL); for (i = 0; i < 256; i++) if (ITYPE(i)) floppy_sizes[i] = floppy_type[ITYPE(i)].size; else floppy_sizes[i] = MAX_DISK_SIZE << 1; reschedule_timeout(MAXTIMEOUT, "floppy init"); config_types(); for (i = 0; i < N_FDC; i++) { fdc = i; memset(FDCS, 0, sizeof(*FDCS)); FDCS->dtr = -1; FDCS->dor = 0x4; #if defined(__sparc__) || defined(__mc68000__) /*sparcs/sun3x don't have a DOR reset which we can fall back on to */ #ifdef __mc68000__ if (MACH_IS_SUN3X) #endif FDCS->version = FDC_82072A; #endif } use_virtual_dma = can_use_virtual_dma & 1; fdc_state[0].address = FDC1; if (fdc_state[0].address == -1) { del_timer_sync(&fd_timeout); err = -ENODEV; goto out_unreg_region; } #if N_FDC > 1 fdc_state[1].address = FDC2; #endif fdc = 0; /* reset fdc in case of unexpected 
interrupt */ err = floppy_grab_irq_and_dma(); if (err) { del_timer_sync(&fd_timeout); err = -EBUSY; goto out_unreg_region; } /* initialise drive state */ for (drive = 0; drive < N_DRIVE; drive++) { memset(UDRS, 0, sizeof(*UDRS)); memset(UDRWE, 0, sizeof(*UDRWE)); set_bit(FD_DISK_NEWCHANGE_BIT, &UDRS->flags); set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); set_bit(FD_VERIFY_BIT, &UDRS->flags); UDRS->fd_device = -1; floppy_track_buffer = NULL; max_buffer_sectors = 0; } /* * Small 10 msec delay to let through any interrupt that * initialization might have triggered, to not * confuse detection: */ msleep(10); for (i = 0; i < N_FDC; i++) { fdc = i; FDCS->driver_version = FD_DRIVER_VERSION; for (unit = 0; unit < 4; unit++) FDCS->track[unit] = 0; if (FDCS->address == -1) continue; FDCS->rawcmd = 2; if (user_reset_fdc(-1, FD_RESET_ALWAYS, false)) { /* free ioports reserved by floppy_grab_irq_and_dma() */ floppy_release_regions(fdc); FDCS->address = -1; FDCS->version = FDC_NONE; continue; } /* Try to determine the floppy controller type */ FDCS->version = get_fdc_version(); if (FDCS->version == FDC_NONE) { /* free ioports reserved by floppy_grab_irq_and_dma() */ floppy_release_regions(fdc); FDCS->address = -1; continue; } if (can_use_virtual_dma == 2 && FDCS->version < FDC_82072A) can_use_virtual_dma = 0; have_no_fdc = 0; /* Not all FDCs seem to be able to handle the version command * properly, so force a reset for the standard FDC clones, * to avoid interrupt garbage. 
*/ user_reset_fdc(-1, FD_RESET_ALWAYS, false); } fdc = 0; del_timer_sync(&fd_timeout); current_drive = 0; initialized = true; if (have_no_fdc) { DPRINT("no floppy controllers found\n"); err = have_no_fdc; goto out_flush_work; } for (drive = 0; drive < N_DRIVE; drive++) { if (!(allowed_drive_mask & (1 << drive))) continue; if (fdc_state[FDC(drive)].version == FDC_NONE) continue; floppy_device[drive].name = floppy_device_name; floppy_device[drive].id = drive; floppy_device[drive].dev.release = floppy_device_release; err = platform_device_register(&floppy_device[drive]); if (err) goto out_flush_work; err = device_create_file(&floppy_device[drive].dev, &dev_attr_cmos); if (err) goto out_unreg_platform_dev; /* to be cleaned up... */ disks[drive]->private_data = (void *)(long)drive; disks[drive]->flags |= GENHD_FL_REMOVABLE; disks[drive]->driverfs_dev = &floppy_device[drive].dev; add_disk(disks[drive]); } return 0; out_unreg_platform_dev: platform_device_unregister(&floppy_device[drive]); out_flush_work: flush_work_sync(&floppy_work); if (atomic_read(&usage_count)) floppy_release_irq_and_dma(); out_unreg_region: blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); platform_driver_unregister(&floppy_driver); out_unreg_blkdev: unregister_blkdev(FLOPPY_MAJOR, "fd"); out_put_disk: while (dr--) { del_timer_sync(&motor_off_timer[dr]); if (disks[dr]->queue) { blk_cleanup_queue(disks[dr]->queue); /* * put_disk() is not paired with add_disk() and * will put queue reference one extra time. fix it. */ disks[dr]->queue = NULL; } put_disk(disks[dr]); } return err; } static const struct io_region { int offset; int size; } io_regions[] = { { 2, 1 }, /* address + 3 is sometimes reserved by pnp bios for motherboard */ { 4, 2 }, /* address + 6 is reserved, and may be taken by IDE. 
* Unfortunately, Adaptec doesn't know this :-(, */ { 7, 1 }, }; static void floppy_release_allocated_regions(int fdc, const struct io_region *p) { while (p != io_regions) { p--; release_region(FDCS->address + p->offset, p->size); } } #define ARRAY_END(X) (&((X)[ARRAY_SIZE(X)])) static int floppy_request_regions(int fdc) { const struct io_region *p; for (p = io_regions; p < ARRAY_END(io_regions); p++) { if (!request_region(FDCS->address + p->offset, p->size, "floppy")) { DPRINT("Floppy io-port 0x%04lx in use\n", FDCS->address + p->offset); floppy_release_allocated_regions(fdc, p); return -EBUSY; } } return 0; } static void floppy_release_regions(int fdc) { floppy_release_allocated_regions(fdc, ARRAY_END(io_regions)); } static int floppy_grab_irq_and_dma(void) { if (atomic_inc_return(&usage_count) > 1) return 0; /* * We might have scheduled a free_irq(), wait it to * drain first: */ flush_work_sync(&floppy_work); if (fd_request_irq()) { DPRINT("Unable to grab IRQ%d for the floppy driver\n", FLOPPY_IRQ); atomic_dec(&usage_count); return -1; } if (fd_request_dma()) { DPRINT("Unable to grab DMA%d for the floppy driver\n", FLOPPY_DMA); if (can_use_virtual_dma & 2) use_virtual_dma = can_use_virtual_dma = 1; if (!(can_use_virtual_dma & 1)) { fd_free_irq(); atomic_dec(&usage_count); return -1; } } for (fdc = 0; fdc < N_FDC; fdc++) { if (FDCS->address != -1) { if (floppy_request_regions(fdc)) goto cleanup; } } for (fdc = 0; fdc < N_FDC; fdc++) { if (FDCS->address != -1) { reset_fdc_info(1); fd_outb(FDCS->dor, FD_DOR); } } fdc = 0; set_dor(0, ~0, 8); /* avoid immediate interrupt */ for (fdc = 0; fdc < N_FDC; fdc++) if (FDCS->address != -1) fd_outb(FDCS->dor, FD_DOR); /* * The driver will try and free resources and relies on us * to know if they were allocated or not. 
*/ fdc = 0; irqdma_allocated = 1; return 0; cleanup: fd_free_irq(); fd_free_dma(); while (--fdc >= 0) floppy_release_regions(fdc); atomic_dec(&usage_count); return -1; } static void floppy_release_irq_and_dma(void) { int old_fdc; #ifndef __sparc__ int drive; #endif long tmpsize; unsigned long tmpaddr; if (!atomic_dec_and_test(&usage_count)) return; if (irqdma_allocated) { fd_disable_dma(); fd_free_dma(); fd_free_irq(); irqdma_allocated = 0; } set_dor(0, ~0, 8); #if N_FDC > 1 set_dor(1, ~8, 0); #endif if (floppy_track_buffer && max_buffer_sectors) { tmpsize = max_buffer_sectors * 1024; tmpaddr = (unsigned long)floppy_track_buffer; floppy_track_buffer = NULL; max_buffer_sectors = 0; buffer_min = buffer_max = -1; fd_dma_mem_free(tmpaddr, tmpsize); } #ifndef __sparc__ for (drive = 0; drive < N_FDC * 4; drive++) if (timer_pending(motor_off_timer + drive)) pr_info("motor off timer %d still active\n", drive); #endif if (timer_pending(&fd_timeout)) pr_info("floppy timer still active:%s\n", timeout_message); if (timer_pending(&fd_timer)) pr_info("auxiliary floppy timer still active\n"); if (work_pending(&floppy_work)) pr_info("work still pending\n"); old_fdc = fdc; for (fdc = 0; fdc < N_FDC; fdc++) if (FDCS->address != -1) floppy_release_regions(fdc); fdc = old_fdc; } #ifdef MODULE static char *floppy; static void __init parse_floppy_cfg_string(char *cfg) { char *ptr; while (*cfg) { ptr = cfg; while (*cfg && *cfg != ' ' && *cfg != '\t') cfg++; if (*cfg) { *cfg = '\0'; cfg++; } if (*ptr) floppy_setup(ptr); } } static int __init floppy_module_init(void) { if (floppy) parse_floppy_cfg_string(floppy); return floppy_init(); } module_init(floppy_module_init); static void __exit floppy_module_exit(void) { int drive; blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); unregister_blkdev(FLOPPY_MAJOR, "fd"); platform_driver_unregister(&floppy_driver); for (drive = 0; drive < N_DRIVE; drive++) { del_timer_sync(&motor_off_timer[drive]); if ((allowed_drive_mask & (1 << drive)) && 
fdc_state[FDC(drive)].version != FDC_NONE) { del_gendisk(disks[drive]); device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos); platform_device_unregister(&floppy_device[drive]); } blk_cleanup_queue(disks[drive]->queue); /* * These disks have not called add_disk(). Don't put down * queue reference in put_disk(). */ if (!(allowed_drive_mask & (1 << drive)) || fdc_state[FDC(drive)].version == FDC_NONE) disks[drive]->queue = NULL; put_disk(disks[drive]); } del_timer_sync(&fd_timeout); del_timer_sync(&fd_timer); if (atomic_read(&usage_count)) floppy_release_irq_and_dma(); /* eject disk, if any */ fd_eject(0); } module_exit(floppy_module_exit); module_param(floppy, charp, 0); module_param(FLOPPY_IRQ, int, 0); module_param(FLOPPY_DMA, int, 0); MODULE_AUTHOR("Alain L. Knaff"); MODULE_SUPPORTED_DEVICE("fd"); MODULE_LICENSE("GPL"); /* This doesn't actually get used other than for module information */ static const struct pnp_device_id floppy_pnpids[] = { {"PNP0700", 0}, {} }; MODULE_DEVICE_TABLE(pnp, floppy_pnpids); #else __setup("floppy=", floppy_setup); module_init(floppy_init) #endif MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
gpl-2.0
chadouming/honami-stock
net/sched/sch_atm.c
4262
19370
/* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */ /* Written 1998-2000 by Werner Almesberger, EPFL ICA */ #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/atmdev.h> #include <linux/atmclip.h> #include <linux/rtnetlink.h> #include <linux/file.h> /* for fput */ #include <net/netlink.h> #include <net/pkt_sched.h> extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */ /* * The ATM queuing discipline provides a framework for invoking classifiers * (aka "filters"), which in turn select classes of this queuing discipline. * Each class maps the flow(s) it is handling to a given VC. Multiple classes * may share the same VC. * * When creating a class, VCs are specified by passing the number of the open * socket descriptor by which the calling process references the VC. The kernel * keeps the VC open at least until all classes using it are removed. * * In this file, most functions are named atm_tc_* to avoid confusion with all * the atm_* in net/atm. This naming convention differs from what's used in the * rest of net/sched. * * Known bugs: * - sometimes messes up the IP stack * - any manipulations besides the few operations described in the README, are * untested and likely to crash the system * - should lock the flow while there is data in the queue (?) */ #define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back)) struct atm_flow_data { struct Qdisc *q; /* FIFO, TBF, etc. 
*/ struct tcf_proto *filter_list; struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */ void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); /* chaining */ struct atm_qdisc_data *parent; /* parent qdisc */ struct socket *sock; /* for closing */ u32 classid; /* x:y type ID */ int ref; /* reference count */ struct gnet_stats_basic_packed bstats; struct gnet_stats_queue qstats; struct list_head list; struct atm_flow_data *excess; /* flow for excess traffic; NULL to set CLP instead */ int hdr_len; unsigned char hdr[0]; /* header data; MUST BE LAST */ }; struct atm_qdisc_data { struct atm_flow_data link; /* unclassified skbs go here */ struct list_head flows; /* NB: "link" is also on this list */ struct tasklet_struct task; /* dequeue tasklet */ }; /* ------------------------- Class/flow operations ------------------------- */ static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow; list_for_each_entry(flow, &p->flows, list) { if (flow->classid == classid) return flow; } return NULL; } static int atm_tc_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow = (struct atm_flow_data *)arg; pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n", sch, p, flow, new, old); if (list_empty(&flow->list)) return -EINVAL; if (!new) new = &noop_qdisc; *old = flow->q; flow->q = new; if (*old) qdisc_reset(*old); return 0; } static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl) { struct atm_flow_data *flow = (struct atm_flow_data *)cl; pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow); return flow ? 
flow->q : NULL; } static unsigned long atm_tc_get(struct Qdisc *sch, u32 classid) { struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch); struct atm_flow_data *flow; pr_debug("atm_tc_get(sch %p,[qdisc %p],classid %x)\n", sch, p, classid); flow = lookup_flow(sch, classid); if (flow) flow->ref++; pr_debug("atm_tc_get: flow %p\n", flow); return (unsigned long)flow; } static unsigned long atm_tc_bind_filter(struct Qdisc *sch, unsigned long parent, u32 classid) { return atm_tc_get(sch, classid); } /* * atm_tc_put handles all destructions, including the ones that are explicitly * requested (atm_tc_destroy, etc.). The assumption here is that we never drop * anything that still seems to be in use. */ static void atm_tc_put(struct Qdisc *sch, unsigned long cl) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow = (struct atm_flow_data *)cl; pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); if (--flow->ref) return; pr_debug("atm_tc_put: destroying\n"); list_del_init(&flow->list); pr_debug("atm_tc_put: qdisc %p\n", flow->q); qdisc_destroy(flow->q); tcf_destroy_chain(&flow->filter_list); if (flow->sock) { pr_debug("atm_tc_put: f_count %ld\n", file_count(flow->sock->file)); flow->vcc->pop = flow->old_pop; sockfd_put(flow->sock); } if (flow->excess) atm_tc_put(sch, (unsigned long)flow->excess); if (flow != &p->link) kfree(flow); /* * If flow == &p->link, the qdisc no longer works at this point and * needs to be removed. (By the caller of atm_tc_put.) 
*/ } static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb) { struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent; pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p); VCC2FLOW(vcc)->old_pop(vcc, skb); tasklet_schedule(&p->task); } static const u8 llc_oui_ip[] = { 0xaa, /* DSAP: non-ISO */ 0xaa, /* SSAP: non-ISO */ 0x03, /* Ctrl: Unnumbered Information Command PDU */ 0x00, /* OUI: EtherType */ 0x00, 0x00, 0x08, 0x00 }; /* Ethertype IP (0800) */ static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = { [TCA_ATM_FD] = { .type = NLA_U32 }, [TCA_ATM_EXCESS] = { .type = NLA_U32 }, }; static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, struct nlattr **tca, unsigned long *arg) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow = (struct atm_flow_data *)*arg; struct atm_flow_data *excess = NULL; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_ATM_MAX + 1]; struct socket *sock; int fd, error, hdr_len; void *hdr; pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x," "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt); /* * The concept of parents doesn't apply for this qdisc. */ if (parent && parent != TC_H_ROOT && parent != sch->handle) return -EINVAL; /* * ATM classes cannot be changed. In order to change properties of the * ATM connection, that socket needs to be modified directly (via the * native ATM API. In order to send a flow to a different VC, the old * class needs to be removed and a new one added. (This may be changed * later.) 
*/ if (flow) return -EBUSY; if (opt == NULL) return -EINVAL; error = nla_parse_nested(tb, TCA_ATM_MAX, opt, atm_policy); if (error < 0) return error; if (!tb[TCA_ATM_FD]) return -EINVAL; fd = nla_get_u32(tb[TCA_ATM_FD]); pr_debug("atm_tc_change: fd %d\n", fd); if (tb[TCA_ATM_HDR]) { hdr_len = nla_len(tb[TCA_ATM_HDR]); hdr = nla_data(tb[TCA_ATM_HDR]); } else { hdr_len = RFC1483LLC_LEN; hdr = NULL; /* default LLC/SNAP for IP */ } if (!tb[TCA_ATM_EXCESS]) excess = NULL; else { excess = (struct atm_flow_data *) atm_tc_get(sch, nla_get_u32(tb[TCA_ATM_EXCESS])); if (!excess) return -ENOENT; } pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n", opt->nla_type, nla_len(opt), hdr_len); sock = sockfd_lookup(fd, &error); if (!sock) return error; /* f_count++ */ pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file)); if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) { error = -EPROTOTYPE; goto err_out; } /* @@@ should check if the socket is really operational or we'll crash on vcc->send */ if (classid) { if (TC_H_MAJ(classid ^ sch->handle)) { pr_debug("atm_tc_change: classid mismatch\n"); error = -EINVAL; goto err_out; } } else { int i; unsigned long cl; for (i = 1; i < 0x8000; i++) { classid = TC_H_MAKE(sch->handle, 0x8000 | i); cl = atm_tc_get(sch, classid); if (!cl) break; atm_tc_put(sch, cl); } } pr_debug("atm_tc_change: new id %x\n", classid); flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL); pr_debug("atm_tc_change: flow %p\n", flow); if (!flow) { error = -ENOBUFS; goto err_out; } flow->filter_list = NULL; flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); if (!flow->q) flow->q = &noop_qdisc; pr_debug("atm_tc_change: qdisc %p\n", flow->q); flow->sock = sock; flow->vcc = ATM_SD(sock); /* speedup */ flow->vcc->user_back = flow; pr_debug("atm_tc_change: vcc %p\n", flow->vcc); flow->old_pop = flow->vcc->pop; flow->parent = p; flow->vcc->pop = sch_atm_pop; flow->classid = classid; flow->ref = 1; 
flow->excess = excess; list_add(&flow->list, &p->link.list); flow->hdr_len = hdr_len; if (hdr) memcpy(flow->hdr, hdr, hdr_len); else memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip)); *arg = (unsigned long)flow; return 0; err_out: if (excess) atm_tc_put(sch, (unsigned long)excess); sockfd_put(sock); return error; } static int atm_tc_delete(struct Qdisc *sch, unsigned long arg) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow = (struct atm_flow_data *)arg; pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); if (list_empty(&flow->list)) return -EINVAL; if (flow->filter_list || flow == &p->link) return -EBUSY; /* * Reference count must be 2: one for "keepalive" (set at class * creation), and one for the reference held when calling delete. */ if (flow->ref < 2) { pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref); return -EINVAL; } if (flow->ref > 2) return -EBUSY; /* catch references via excess, etc. */ atm_tc_put(sch, arg); return 0; } static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow; pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker); if (walker->stop) return; list_for_each_entry(flow, &p->flows, list) { if (walker->count >= walker->skip && walker->fn(sch, (unsigned long)flow, walker) < 0) { walker->stop = 1; break; } walker->count++; } } static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow = (struct atm_flow_data *)cl; pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); return flow ? 
&flow->filter_list : &p->link.filter_list; } /* --------------------------- Qdisc operations ---------------------------- */ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow; struct tcf_result res; int result; int ret = NET_XMIT_POLICED; pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p); result = TC_POLICE_OK; /* be nice to gcc */ flow = NULL; if (TC_H_MAJ(skb->priority) != sch->handle || !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) { list_for_each_entry(flow, &p->flows, list) { if (flow->filter_list) { result = tc_classify_compat(skb, flow->filter_list, &res); if (result < 0) continue; flow = (struct atm_flow_data *)res.class; if (!flow) flow = lookup_flow(sch, res.classid); goto done; } } flow = NULL; done: ; } if (!flow) { flow = &p->link; } else { if (flow->vcc) ATM_SKB(skb)->atm_options = flow->vcc->atm_options; /*@@@ looks good ... but it's not supposed to work :-) */ #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_QUEUED: case TC_ACT_STOLEN: kfree_skb(skb); return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: kfree_skb(skb); goto drop; case TC_POLICE_RECLASSIFY: if (flow->excess) flow = flow->excess; else ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP; break; } #endif } ret = qdisc_enqueue(skb, flow->q); if (ret != NET_XMIT_SUCCESS) { drop: __maybe_unused if (net_xmit_drop_count(ret)) { sch->qstats.drops++; if (flow) flow->qstats.drops++; } return ret; } qdisc_bstats_update(sch, skb); bstats_update(&flow->bstats, skb); /* * Okay, this may seem weird. We pretend we've dropped the packet if * it goes via ATM. The reason for this is that the outer qdisc * expects to be able to q->dequeue the packet later on if we return * success at this place. Also, sch->q.qdisc needs to reflect whether * there is a packet egligible for dequeuing or not. 
Note that the * statistics of the outer qdisc are necessarily wrong because of all * this. There's currently no correct solution for this. */ if (flow == &p->link) { sch->q.qlen++; return NET_XMIT_SUCCESS; } tasklet_schedule(&p->task); return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; } /* * Dequeue packets and send them over ATM. Note that we quite deliberately * avoid checking net_device's flow control here, simply because sch_atm * uses its own channels, which have nothing to do with any CLIP/LANE/or * non-ATM interfaces. */ static void sch_atm_dequeue(unsigned long data) { struct Qdisc *sch = (struct Qdisc *)data; struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow; struct sk_buff *skb; pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p); list_for_each_entry(flow, &p->flows, list) { if (flow == &p->link) continue; /* * If traffic is properly shaped, this won't generate nasty * little bursts. Otherwise, it may ... (but that's okay) */ while ((skb = flow->q->ops->peek(flow->q))) { if (!atm_may_send(flow->vcc, skb->truesize)) break; skb = qdisc_dequeue_peeked(flow->q); if (unlikely(!skb)) break; pr_debug("atm_tc_dequeue: sending on class %p\n", flow); /* remove any LL header somebody else has attached */ skb_pull(skb, skb_network_offset(skb)); if (skb_headroom(skb) < flow->hdr_len) { struct sk_buff *new; new = skb_realloc_headroom(skb, flow->hdr_len); dev_kfree_skb(skb); if (!new) continue; skb = new; } pr_debug("sch_atm_dequeue: ip %p, data %p\n", skb_network_header(skb), skb->data); ATM_SKB(skb)->vcc = flow->vcc; memcpy(skb_push(skb, flow->hdr_len), flow->hdr, flow->hdr_len); atomic_add(skb->truesize, &sk_atm(flow->vcc)->sk_wmem_alloc); /* atm.atm_options are already set by atm_tc_enqueue */ flow->vcc->send(flow->vcc, skb); } } } static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch) { struct atm_qdisc_data *p = qdisc_priv(sch); struct sk_buff *skb; pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p); tasklet_schedule(&p->task); skb = 
qdisc_dequeue_peeked(p->link.q); if (skb) sch->q.qlen--; return skb; } static struct sk_buff *atm_tc_peek(struct Qdisc *sch) { struct atm_qdisc_data *p = qdisc_priv(sch); pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p); return p->link.q->ops->peek(p->link.q); } static unsigned int atm_tc_drop(struct Qdisc *sch) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow; unsigned int len; pr_debug("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p); list_for_each_entry(flow, &p->flows, list) { if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q))) return len; } return 0; } static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) { struct atm_qdisc_data *p = qdisc_priv(sch); pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt); INIT_LIST_HEAD(&p->flows); INIT_LIST_HEAD(&p->link.list); list_add(&p->link.list, &p->flows); p->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle); if (!p->link.q) p->link.q = &noop_qdisc; pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); p->link.filter_list = NULL; p->link.vcc = NULL; p->link.sock = NULL; p->link.classid = sch->handle; p->link.ref = 1; tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch); return 0; } static void atm_tc_reset(struct Qdisc *sch) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow; pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p); list_for_each_entry(flow, &p->flows, list) qdisc_reset(flow->q); sch->q.qlen = 0; } static void atm_tc_destroy(struct Qdisc *sch) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow, *tmp; pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p); list_for_each_entry(flow, &p->flows, list) tcf_destroy_chain(&flow->filter_list); list_for_each_entry_safe(flow, tmp, &p->flows, list) { if (flow->ref > 1) pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref); atm_tc_put(sch, (unsigned long)flow); } tasklet_kill(&p->task); } static int atm_tc_dump_class(struct 
Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow = (struct atm_flow_data *)cl; struct nlattr *nest; pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n", sch, p, flow, skb, tcm); if (list_empty(&flow->list)) return -EINVAL; tcm->tcm_handle = flow->classid; tcm->tcm_info = flow->q->handle; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr); if (flow->vcc) { struct sockaddr_atmpvc pvc; int state; pvc.sap_family = AF_ATMPVC; pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1; pvc.sap_addr.vpi = flow->vcc->vpi; pvc.sap_addr.vci = flow->vcc->vci; NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc); state = ATM_VF2VS(flow->vcc->flags); NLA_PUT_U32(skb, TCA_ATM_STATE, state); } if (flow->excess) NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid); else NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0); nla_nest_end(skb, nest); return skb->len; nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static int atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) { struct atm_flow_data *flow = (struct atm_flow_data *)arg; flow->qstats.qlen = flow->q->q.qlen; if (gnet_stats_copy_basic(d, &flow->bstats) < 0 || gnet_stats_copy_queue(d, &flow->qstats) < 0) return -1; return 0; } static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb) { return 0; } static const struct Qdisc_class_ops atm_class_ops = { .graft = atm_tc_graft, .leaf = atm_tc_leaf, .get = atm_tc_get, .put = atm_tc_put, .change = atm_tc_change, .delete = atm_tc_delete, .walk = atm_tc_walk, .tcf_chain = atm_tc_find_tcf, .bind_tcf = atm_tc_bind_filter, .unbind_tcf = atm_tc_put, .dump = atm_tc_dump_class, .dump_stats = atm_tc_dump_class_stats, }; static struct Qdisc_ops atm_qdisc_ops __read_mostly = { .cl_ops = &atm_class_ops, .id = "atm", .priv_size = sizeof(struct atm_qdisc_data), .enqueue = 
atm_tc_enqueue, .dequeue = atm_tc_dequeue, .peek = atm_tc_peek, .drop = atm_tc_drop, .init = atm_tc_init, .reset = atm_tc_reset, .destroy = atm_tc_destroy, .dump = atm_tc_dump, .owner = THIS_MODULE, }; static int __init atm_init(void) { return register_qdisc(&atm_qdisc_ops); } static void __exit atm_exit(void) { unregister_qdisc(&atm_qdisc_ops); } module_init(atm_init) module_exit(atm_exit) MODULE_LICENSE("GPL");
gpl-2.0
mathkid95/linux_lg_lollipop
arch/arm/plat-spear/time.c
4774
5675
/* * arch/arm/plat-spear/time.c * * Copyright (C) 2010 ST Microelectronics * Shiraz Hashim<shiraz.hashim@st.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/clk.h> #include <linux/clockchips.h> #include <linux/clocksource.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/time.h> #include <linux/irq.h> #include <asm/mach/time.h> #include <mach/generic.h> #include <mach/hardware.h> #include <mach/irqs.h> /* * We would use TIMER0 and TIMER1 as clockevent and clocksource. * Timer0 and Timer1 both belong to same gpt block in cpu subbsystem. Further * they share same functional clock. Any change in one's functional clock will * also affect other timer. */ #define CLKEVT 0 /* gpt0, channel0 as clockevent */ #define CLKSRC 1 /* gpt0, channel1 as clocksource */ /* Register offsets, x is channel number */ #define CR(x) ((x) * 0x80 + 0x80) #define IR(x) ((x) * 0x80 + 0x84) #define LOAD(x) ((x) * 0x80 + 0x88) #define COUNT(x) ((x) * 0x80 + 0x8C) /* Reg bit definitions */ #define CTRL_INT_ENABLE 0x0100 #define CTRL_ENABLE 0x0020 #define CTRL_ONE_SHOT 0x0010 #define CTRL_PRESCALER1 0x0 #define CTRL_PRESCALER2 0x1 #define CTRL_PRESCALER4 0x2 #define CTRL_PRESCALER8 0x3 #define CTRL_PRESCALER16 0x4 #define CTRL_PRESCALER32 0x5 #define CTRL_PRESCALER64 0x6 #define CTRL_PRESCALER128 0x7 #define CTRL_PRESCALER256 0x8 #define INT_STATUS 0x1 /* * Minimum clocksource/clockevent timer range in seconds */ #define SPEAR_MIN_RANGE 4 static __iomem void *gpt_base; static struct clk *gpt_clk; static void clockevent_set_mode(enum clock_event_mode mode, struct clock_event_device *clk_event_dev); static int clockevent_next_event(unsigned long evt, struct clock_event_device *clk_event_dev); static void spear_clocksource_init(void) { u32 tick_rate; 
u16 val; /* program the prescaler (/256)*/ writew(CTRL_PRESCALER256, gpt_base + CR(CLKSRC)); /* find out actual clock driving Timer */ tick_rate = clk_get_rate(gpt_clk); tick_rate >>= CTRL_PRESCALER256; writew(0xFFFF, gpt_base + LOAD(CLKSRC)); val = readw(gpt_base + CR(CLKSRC)); val &= ~CTRL_ONE_SHOT; /* autoreload mode */ val |= CTRL_ENABLE ; writew(val, gpt_base + CR(CLKSRC)); /* register the clocksource */ clocksource_mmio_init(gpt_base + COUNT(CLKSRC), "tmr1", tick_rate, 200, 16, clocksource_mmio_readw_up); } static struct clock_event_device clkevt = { .name = "tmr0", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_mode = clockevent_set_mode, .set_next_event = clockevent_next_event, .shift = 0, /* to be computed */ }; static void clockevent_set_mode(enum clock_event_mode mode, struct clock_event_device *clk_event_dev) { u32 period; u16 val; /* stop the timer */ val = readw(gpt_base + CR(CLKEVT)); val &= ~CTRL_ENABLE; writew(val, gpt_base + CR(CLKEVT)); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: period = clk_get_rate(gpt_clk) / HZ; period >>= CTRL_PRESCALER16; writew(period, gpt_base + LOAD(CLKEVT)); val = readw(gpt_base + CR(CLKEVT)); val &= ~CTRL_ONE_SHOT; val |= CTRL_ENABLE | CTRL_INT_ENABLE; writew(val, gpt_base + CR(CLKEVT)); break; case CLOCK_EVT_MODE_ONESHOT: val = readw(gpt_base + CR(CLKEVT)); val |= CTRL_ONE_SHOT; writew(val, gpt_base + CR(CLKEVT)); break; case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_RESUME: break; default: pr_err("Invalid mode requested\n"); break; } } static int clockevent_next_event(unsigned long cycles, struct clock_event_device *clk_event_dev) { u16 val = readw(gpt_base + CR(CLKEVT)); if (val & CTRL_ENABLE) writew(val & ~CTRL_ENABLE, gpt_base + CR(CLKEVT)); writew(cycles, gpt_base + LOAD(CLKEVT)); val |= CTRL_ENABLE | CTRL_INT_ENABLE; writew(val, gpt_base + CR(CLKEVT)); return 0; } static irqreturn_t spear_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt 
= &clkevt; writew(INT_STATUS, gpt_base + IR(CLKEVT)); evt->event_handler(evt); return IRQ_HANDLED; } static struct irqaction spear_timer_irq = { .name = "timer", .flags = IRQF_DISABLED | IRQF_TIMER, .handler = spear_timer_interrupt }; static void __init spear_clockevent_init(void) { u32 tick_rate; /* program the prescaler */ writew(CTRL_PRESCALER16, gpt_base + CR(CLKEVT)); tick_rate = clk_get_rate(gpt_clk); tick_rate >>= CTRL_PRESCALER16; clockevents_calc_mult_shift(&clkevt, tick_rate, SPEAR_MIN_RANGE); clkevt.max_delta_ns = clockevent_delta2ns(0xfff0, &clkevt); clkevt.min_delta_ns = clockevent_delta2ns(3, &clkevt); clkevt.cpumask = cpumask_of(0); clockevents_register_device(&clkevt); setup_irq(SPEAR_GPT0_CHAN0_IRQ, &spear_timer_irq); } void __init spear_setup_timer(void) { int ret; if (!request_mem_region(SPEAR_GPT0_BASE, SZ_1K, "gpt0")) { pr_err("%s:cannot get IO addr\n", __func__); return; } gpt_base = (void __iomem *)ioremap(SPEAR_GPT0_BASE, SZ_1K); if (!gpt_base) { pr_err("%s:ioremap failed for gpt\n", __func__); goto err_mem; } gpt_clk = clk_get_sys("gpt0", NULL); if (!gpt_clk) { pr_err("%s:couldn't get clk for gpt\n", __func__); goto err_iomap; } ret = clk_enable(gpt_clk); if (ret < 0) { pr_err("%s:couldn't enable gpt clock\n", __func__); goto err_clk; } spear_clockevent_init(); spear_clocksource_init(); return; err_clk: clk_put(gpt_clk); err_iomap: iounmap(gpt_base); err_mem: release_mem_region(SPEAR_GPT0_BASE, SZ_1K); }
gpl-2.0
neomanu/NeoKernel-MT6589-A116
arch/m68k/mac/via.c
4774
16503
/* * 6522 Versatile Interface Adapter (VIA) * * There are two of these on the Mac II. Some IRQs are vectored * via them as are assorted bits and bobs - eg RTC, ADB. * * CSA: Motorola seems to have removed documentation on the 6522 from * their web site; try * http://nerini.drf.com/vectrex/other/text/chips/6522/ * http://www.zymurgy.net/classic/vic20/vicdet1.htm * and * http://193.23.168.87/mikro_laborversuche/via_iobaustein/via6522_1.html * for info. A full-text web search on 6522 AND VIA will probably also * net some usefulness. <cananian@alumni.princeton.edu> 20apr1999 * * Additional data is here (the SY6522 was used in the Mac II etc): * http://www.6502.org/documents/datasheets/synertek/synertek_sy6522.pdf * http://www.6502.org/documents/datasheets/synertek/synertek_sy6522_programming_reference.pdf * * PRAM/RTC access algorithms are from the NetBSD RTC toolkit version 1.08b * by Erik Vogan and adapted to Linux by Joshua M. Thompson (funaho@jurai.org) * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/module.h> #include <linux/irq.h> #include <asm/bootinfo.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_via.h> #include <asm/mac_psc.h> #include <asm/mac_oss.h> volatile __u8 *via1, *via2; int rbv_present; int via_alt_mapping; EXPORT_SYMBOL(via_alt_mapping); static __u8 rbv_clear; /* * Globals for accessing the VIA chip registers without having to * check if we're hitting a real VIA or an RBV. Normally you could * just hit the combined register (ie, vIER|rIER) but that seems to * break on AV Macs...probably because they actually decode more than * eight address bits. Why can't Apple engineers at least be * _consistently_ lazy? - 1999-05-21 (jmt) */ static int gIER,gIFR,gBufA,gBufB; /* * Timer defs. 
*/ #define TICK_SIZE 10000 #define MAC_CLOCK_TICK (783300/HZ) /* ticks per HZ */ #define MAC_CLOCK_LOW (MAC_CLOCK_TICK&0xFF) #define MAC_CLOCK_HIGH (MAC_CLOCK_TICK>>8) /* * On Macs with a genuine VIA chip there is no way to mask an individual slot * interrupt. This limitation also seems to apply to VIA clone logic cores in * Quadra-like ASICs. (RBV and OSS machines don't have this limitation.) * * We used to fake it by configuring the relevent VIA pin as an output * (to mask the interrupt) or input (to unmask). That scheme did not work on * (at least) the Quadra 700. A NuBus card's /NMRQ signal is an open-collector * circuit (see Designing Cards and Drivers for Macintosh II and Macintosh SE, * p. 10-11 etc) but VIA outputs are not (see datasheet). * * Driving these outputs high must cause the VIA to source current and the * card to sink current when it asserts /NMRQ. Current will flow but the pin * voltage is uncertain and so the /NMRQ condition may still cause a transition * at the VIA2 CA1 input (which explains the lost interrupts). A side effect * is that a disabled slot IRQ can never be tested as pending or not. * * Driving these outputs low doesn't work either. All the slot /NMRQ lines are * (active low) OR'd together to generate the CA1 (aka "SLOTS") interrupt (see * The Guide To Macintosh Family Hardware, 2nd edition p. 167). If we drive a * disabled /NMRQ line low, the falling edge immediately triggers a CA1 * interrupt and all slot interrupts after that will generate no transition * and therefore no interrupt, even after being re-enabled. * * So we make the VIA port A I/O lines inputs and use nubus_disabled to keep * track of their states. When any slot IRQ becomes disabled we mask the CA1 * umbrella interrupt. Only when all slot IRQs become enabled do we unmask * the CA1 interrupt. It must remain enabled even when cards have no interrupt * handler registered. 
Drivers must therefore disable a slot interrupt at the * device before they call free_irq (like shared and autovector interrupts). * * There is also a related problem when MacOS is used to boot Linux. A network * card brought up by a MacOS driver may raise an interrupt while Linux boots. * This can be fatal since it can't be handled until the right driver loads * (if such a driver exists at all). Apparently related to this hardware * limitation, "Designing Cards and Drivers", p. 9-8, says that a slot * interrupt with no driver would crash MacOS (the book was written before * the appearance of Macs with RBV or OSS). */ static u8 nubus_disabled; void via_debug_dump(void); /* * Initialize the VIAs * * First we figure out where they actually _are_ as well as what type of * VIA we have for VIA2 (it could be a real VIA or an RBV or even an OSS.) * Then we pretty much clear them out and disable all IRQ sources. * * Note: the OSS is actually "detected" here and not in oss_init(). It just * seems more logical to do it here since via_init() needs to know * these things anyways. */ void __init via_init(void) { switch(macintosh_config->via_type) { /* IIci, IIsi, IIvx, IIvi (P6xx), LC series */ case MAC_VIA_IICI: via1 = (void *) VIA1_BASE; if (macintosh_config->ident == MAC_MODEL_IIFX) { via2 = NULL; rbv_present = 0; oss_present = 1; } else { via2 = (void *) RBV_BASE; rbv_present = 1; oss_present = 0; } if (macintosh_config->ident == MAC_MODEL_LCIII) { rbv_clear = 0x00; } else { /* on most RBVs (& unlike the VIAs), you */ /* need to set bit 7 when you write to IFR */ /* in order for your clear to occur. 
*/ rbv_clear = 0x80; } gIER = rIER; gIFR = rIFR; gBufA = rSIFR; gBufB = rBufB; break; /* Quadra and early MacIIs agree on the VIA locations */ case MAC_VIA_QUADRA: case MAC_VIA_II: via1 = (void *) VIA1_BASE; via2 = (void *) VIA2_BASE; rbv_present = 0; oss_present = 0; rbv_clear = 0x00; gIER = vIER; gIFR = vIFR; gBufA = vBufA; gBufB = vBufB; break; default: panic("UNKNOWN VIA TYPE"); } printk(KERN_INFO "VIA1 at %p is a 6522 or clone\n", via1); printk(KERN_INFO "VIA2 at %p is ", via2); if (rbv_present) { printk("an RBV\n"); } else if (oss_present) { printk("an OSS\n"); } else { printk("a 6522 or clone\n"); } #ifdef DEBUG_VIA via_debug_dump(); #endif /* * Shut down all IRQ sources, reset the timers, and * kill the timer latch on VIA1. */ via1[vIER] = 0x7F; via1[vIFR] = 0x7F; via1[vT1LL] = 0; via1[vT1LH] = 0; via1[vT1CL] = 0; via1[vT1CH] = 0; via1[vT2CL] = 0; via1[vT2CH] = 0; via1[vACR] &= ~0xC0; /* setup T1 timer with no PB7 output */ via1[vACR] &= ~0x03; /* disable port A & B latches */ /* * SE/30: disable video IRQ * XXX: testing for SE/30 VBL */ if (macintosh_config->ident == MAC_MODEL_SE30) { via1[vDirB] |= 0x40; via1[vBufB] |= 0x40; } /* * Set the RTC bits to a known state: all lines to outputs and * RTC disabled (yes that's 0 to enable and 1 to disable). */ via1[vDirB] |= (VIA1B_vRTCEnb | VIA1B_vRTCClk | VIA1B_vRTCData); via1[vBufB] |= (VIA1B_vRTCEnb | VIA1B_vRTCClk); /* Everything below this point is VIA2/RBV only... */ if (oss_present) return; if ((macintosh_config->via_type == MAC_VIA_QUADRA) && (macintosh_config->adb_type != MAC_ADB_PB1) && (macintosh_config->adb_type != MAC_ADB_PB2) && (macintosh_config->ident != MAC_MODEL_C660) && (macintosh_config->ident != MAC_MODEL_Q840)) { via_alt_mapping = 1; via1[vDirB] |= 0x40; via1[vBufB] &= ~0x40; } else { via_alt_mapping = 0; } /* * Now initialize VIA2. For RBV we just kill all interrupts; * for a regular VIA we also reset the timers and stuff. 
*/ via2[gIER] = 0x7F; via2[gIFR] = 0x7F | rbv_clear; if (!rbv_present) { via2[vT1LL] = 0; via2[vT1LH] = 0; via2[vT1CL] = 0; via2[vT1CH] = 0; via2[vT2CL] = 0; via2[vT2CH] = 0; via2[vACR] &= ~0xC0; /* setup T1 timer with no PB7 output */ via2[vACR] &= ~0x03; /* disable port A & B latches */ } /* Everything below this point is VIA2 only... */ if (rbv_present) return; /* * Set vPCR for control line interrupts. * * CA1 (SLOTS IRQ), CB1 (ASC IRQ): negative edge trigger. * * Macs with ESP SCSI have a negative edge triggered SCSI interrupt. * Testing reveals that PowerBooks do too. However, the SE/30 * schematic diagram shows an active high NCR5380 IRQ line. */ pr_debug("VIA2 vPCR is 0x%02X\n", via2[vPCR]); if (macintosh_config->via_type == MAC_VIA_II) { /* CA2 (SCSI DRQ), CB2 (SCSI IRQ): indep. input, pos. edge */ via2[vPCR] = 0x66; } else { /* CA2 (SCSI DRQ), CB2 (SCSI IRQ): indep. input, neg. edge */ via2[vPCR] = 0x22; } } /* * Start the 100 Hz clock */ void __init via_init_clock(irq_handler_t func) { via1[vACR] |= 0x40; via1[vT1LL] = MAC_CLOCK_LOW; via1[vT1LH] = MAC_CLOCK_HIGH; via1[vT1CL] = MAC_CLOCK_LOW; via1[vT1CH] = MAC_CLOCK_HIGH; if (request_irq(IRQ_MAC_TIMER_1, func, 0, "timer", func)) pr_err("Couldn't register %s interrupt\n", "timer"); } /* * Debugging dump, used in various places to see what's going on. 
*/ void via_debug_dump(void) { printk(KERN_DEBUG "VIA1: DDRA = 0x%02X DDRB = 0x%02X ACR = 0x%02X\n", (uint) via1[vDirA], (uint) via1[vDirB], (uint) via1[vACR]); printk(KERN_DEBUG " PCR = 0x%02X IFR = 0x%02X IER = 0x%02X\n", (uint) via1[vPCR], (uint) via1[vIFR], (uint) via1[vIER]); if (oss_present) { printk(KERN_DEBUG "VIA2: <OSS>\n"); } else if (rbv_present) { printk(KERN_DEBUG "VIA2: IFR = 0x%02X IER = 0x%02X\n", (uint) via2[rIFR], (uint) via2[rIER]); printk(KERN_DEBUG " SIFR = 0x%02X SIER = 0x%02X\n", (uint) via2[rSIFR], (uint) via2[rSIER]); } else { printk(KERN_DEBUG "VIA2: DDRA = 0x%02X DDRB = 0x%02X ACR = 0x%02X\n", (uint) via2[vDirA], (uint) via2[vDirB], (uint) via2[vACR]); printk(KERN_DEBUG " PCR = 0x%02X IFR = 0x%02X IER = 0x%02X\n", (uint) via2[vPCR], (uint) via2[vIFR], (uint) via2[vIER]); } } /* * This is always executed with interrupts disabled. * * TBI: get time offset between scheduling timer ticks */ unsigned long mac_gettimeoffset (void) { unsigned long ticks, offset = 0; /* read VIA1 timer 2 current value */ ticks = via1[vT1CL] | (via1[vT1CH] << 8); /* The probability of underflow is less than 2% */ if (ticks > MAC_CLOCK_TICK - MAC_CLOCK_TICK / 50) /* Check for pending timer interrupt in VIA1 IFR */ if (via1[vIFR] & 0x40) offset = TICK_SIZE; ticks = MAC_CLOCK_TICK - ticks; ticks = ticks * 10000L / MAC_CLOCK_TICK; return ticks + offset; } /* * Flush the L2 cache on Macs that have it by flipping * the system into 24-bit mode for an instant. 
*/ void via_flush_cache(void) { via2[gBufB] &= ~VIA2B_vMode32; via2[gBufB] |= VIA2B_vMode32; } /* * Return the status of the L2 cache on a IIci */ int via_get_cache_disable(void) { /* Safeguard against being called accidentally */ if (!via2) { printk(KERN_ERR "via_get_cache_disable called on a non-VIA machine!\n"); return 1; } return (int) via2[gBufB] & VIA2B_vCDis; } /* * Initialize VIA2 for Nubus access */ void __init via_nubus_init(void) { /* unlock nubus transactions */ if ((macintosh_config->adb_type != MAC_ADB_PB1) && (macintosh_config->adb_type != MAC_ADB_PB2)) { /* set the line to be an output on non-RBV machines */ if (!rbv_present) via2[vDirB] |= 0x02; /* this seems to be an ADB bit on PMU machines */ /* according to MkLinux. -- jmt */ via2[gBufB] |= 0x02; } /* * Disable the slot interrupts. On some hardware that's not possible. * On some hardware it's unclear what all of these I/O lines do. */ switch (macintosh_config->via_type) { case MAC_VIA_II: case MAC_VIA_QUADRA: pr_debug("VIA2 vDirA is 0x%02X\n", via2[vDirA]); break; case MAC_VIA_IICI: /* RBV. Disable all the slot interrupts. SIER works like IER. */ via2[rSIER] = 0x7F; break; } } void via_nubus_irq_startup(int irq) { int irq_idx = IRQ_IDX(irq); switch (macintosh_config->via_type) { case MAC_VIA_II: case MAC_VIA_QUADRA: /* Make the port A line an input. Probably redundant. */ if (macintosh_config->via_type == MAC_VIA_II) { /* The top two bits are RAM size outputs. */ via2[vDirA] &= 0xC0 | ~(1 << irq_idx); } else { /* Allow NuBus slots 9 through F. */ via2[vDirA] &= 0x80 | ~(1 << irq_idx); } /* fall through */ case MAC_VIA_IICI: via_irq_enable(irq); break; } } void via_nubus_irq_shutdown(int irq) { switch (macintosh_config->via_type) { case MAC_VIA_II: case MAC_VIA_QUADRA: /* Ensure that the umbrella CA1 interrupt remains enabled. 
*/ via_irq_enable(irq); break; case MAC_VIA_IICI: via_irq_disable(irq); break; } } /* * The generic VIA interrupt routines (shamelessly stolen from Alan Cox's * via6522.c :-), disable/pending masks added. */ void via1_irq(unsigned int irq, struct irq_desc *desc) { int irq_num; unsigned char irq_bit, events; events = via1[vIFR] & via1[vIER] & 0x7F; if (!events) return; irq_num = VIA1_SOURCE_BASE; irq_bit = 1; do { if (events & irq_bit) { via1[vIFR] = irq_bit; generic_handle_irq(irq_num); } ++irq_num; irq_bit <<= 1; } while (events >= irq_bit); } static void via2_irq(unsigned int irq, struct irq_desc *desc) { int irq_num; unsigned char irq_bit, events; events = via2[gIFR] & via2[gIER] & 0x7F; if (!events) return; irq_num = VIA2_SOURCE_BASE; irq_bit = 1; do { if (events & irq_bit) { via2[gIFR] = irq_bit | rbv_clear; generic_handle_irq(irq_num); } ++irq_num; irq_bit <<= 1; } while (events >= irq_bit); } /* * Dispatch Nubus interrupts. We are called as a secondary dispatch by the * VIA2 dispatcher as a fast interrupt handler. */ void via_nubus_irq(unsigned int irq, struct irq_desc *desc) { int slot_irq; unsigned char slot_bit, events; events = ~via2[gBufA] & 0x7F; if (rbv_present) events &= via2[rSIER]; else events &= ~via2[vDirA]; if (!events) return; do { slot_irq = IRQ_NUBUS_F; slot_bit = 0x40; do { if (events & slot_bit) { events &= ~slot_bit; generic_handle_irq(slot_irq); } --slot_irq; slot_bit >>= 1; } while (events); /* clear the CA1 interrupt and make certain there's no more. */ via2[gIFR] = 0x02 | rbv_clear; events = ~via2[gBufA] & 0x7F; if (rbv_present) events &= via2[rSIER]; else events &= ~via2[vDirA]; } while (events); } /* * Register the interrupt dispatchers for VIA or RBV machines only. 
*/ void __init via_register_interrupts(void) { if (via_alt_mapping) { /* software interrupt */ irq_set_chained_handler(IRQ_AUTO_1, via1_irq); /* via1 interrupt */ irq_set_chained_handler(IRQ_AUTO_6, via1_irq); } else { irq_set_chained_handler(IRQ_AUTO_1, via1_irq); } irq_set_chained_handler(IRQ_AUTO_2, via2_irq); irq_set_chained_handler(IRQ_MAC_NUBUS, via_nubus_irq); } void via_irq_enable(int irq) { int irq_src = IRQ_SRC(irq); int irq_idx = IRQ_IDX(irq); #ifdef DEBUG_IRQUSE printk(KERN_DEBUG "via_irq_enable(%d)\n", irq); #endif if (irq_src == 1) { via1[vIER] = IER_SET_BIT(irq_idx); } else if (irq_src == 2) { if (irq != IRQ_MAC_NUBUS || nubus_disabled == 0) via2[gIER] = IER_SET_BIT(irq_idx); } else if (irq_src == 7) { switch (macintosh_config->via_type) { case MAC_VIA_II: case MAC_VIA_QUADRA: nubus_disabled &= ~(1 << irq_idx); /* Enable the CA1 interrupt when no slot is disabled. */ if (!nubus_disabled) via2[gIER] = IER_SET_BIT(1); break; case MAC_VIA_IICI: /* On RBV, enable the slot interrupt. * SIER works like IER. */ via2[rSIER] = IER_SET_BIT(irq_idx); break; } } } void via_irq_disable(int irq) { int irq_src = IRQ_SRC(irq); int irq_idx = IRQ_IDX(irq); #ifdef DEBUG_IRQUSE printk(KERN_DEBUG "via_irq_disable(%d)\n", irq); #endif if (irq_src == 1) { via1[vIER] = IER_CLR_BIT(irq_idx); } else if (irq_src == 2) { via2[gIER] = IER_CLR_BIT(irq_idx); } else if (irq_src == 7) { switch (macintosh_config->via_type) { case MAC_VIA_II: case MAC_VIA_QUADRA: nubus_disabled |= 1 << irq_idx; if (nubus_disabled) via2[gIER] = IER_CLR_BIT(1); break; case MAC_VIA_IICI: via2[rSIER] = IER_CLR_BIT(irq_idx); break; } } } void via1_set_head(int head) { if (head == 0) via1[vBufA] &= ~VIA1A_vHeadSel; else via1[vBufA] |= VIA1A_vHeadSel; } EXPORT_SYMBOL(via1_set_head); int via2_scsi_drq_pending(void) { return via2[gIFR] & (1 << IRQ_IDX(IRQ_MAC_SCSIDRQ)); } EXPORT_SYMBOL(via2_scsi_drq_pending);
gpl-2.0
wan5xp/android_kernel_xiaomi_armani
drivers/media/dvb/frontends/tda8083.c
5030
12067
/* Driver for Philips TDA8083 based QPSK Demodulator Copyright (C) 2001 Convergence Integrated Media GmbH written by Ralph Metzler <ralph@convergence.de> adoption to the new DVB frontend API and diagnostic ioctl's by Holger Waechtler <holger@convergence.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/jiffies.h> #include "dvb_frontend.h" #include "tda8083.h" struct tda8083_state { struct i2c_adapter* i2c; /* configuration settings */ const struct tda8083_config* config; struct dvb_frontend frontend; }; static int debug; #define dprintk(args...) 
\ do { \ if (debug) printk(KERN_DEBUG "tda8083: " args); \ } while (0) static u8 tda8083_init_tab [] = { 0x04, 0x00, 0x4a, 0x79, 0x04, 0x00, 0xff, 0xea, 0x48, 0x42, 0x79, 0x60, 0x70, 0x52, 0x9a, 0x10, 0x0e, 0x10, 0xf2, 0xa7, 0x93, 0x0b, 0x05, 0xc8, 0x9d, 0x00, 0x42, 0x80, 0x00, 0x60, 0x40, 0x00, 0x00, 0x75, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static int tda8083_writereg (struct tda8083_state* state, u8 reg, u8 data) { int ret; u8 buf [] = { reg, data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) dprintk ("%s: writereg error (reg %02x, ret == %i)\n", __func__, reg, ret); return (ret != 1) ? -1 : 0; } static int tda8083_readregs (struct tda8083_state* state, u8 reg1, u8 *b, u8 len) { int ret; struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = &reg1, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b, .len = len } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) dprintk ("%s: readreg error (reg %02x, ret == %i)\n", __func__, reg1, ret); return ret == 2 ? 
0 : -1; } static inline u8 tda8083_readreg (struct tda8083_state* state, u8 reg) { u8 val; tda8083_readregs (state, reg, &val, 1); return val; } static int tda8083_set_inversion (struct tda8083_state* state, fe_spectral_inversion_t inversion) { /* XXX FIXME: implement other modes than FEC_AUTO */ if (inversion == INVERSION_AUTO) return 0; return -EINVAL; } static int tda8083_set_fec (struct tda8083_state* state, fe_code_rate_t fec) { if (fec == FEC_AUTO) return tda8083_writereg (state, 0x07, 0xff); if (fec >= FEC_1_2 && fec <= FEC_8_9) return tda8083_writereg (state, 0x07, 1 << (FEC_8_9 - fec)); return -EINVAL; } static fe_code_rate_t tda8083_get_fec (struct tda8083_state* state) { u8 index; static fe_code_rate_t fec_tab [] = { FEC_8_9, FEC_1_2, FEC_2_3, FEC_3_4, FEC_4_5, FEC_5_6, FEC_6_7, FEC_7_8 }; index = tda8083_readreg(state, 0x0e) & 0x07; return fec_tab [index]; } static int tda8083_set_symbolrate (struct tda8083_state* state, u32 srate) { u32 ratio; u32 tmp; u8 filter; if (srate > 32000000) srate = 32000000; if (srate < 500000) srate = 500000; filter = 0; if (srate < 24000000) filter = 2; if (srate < 16000000) filter = 3; tmp = 31250 << 16; ratio = tmp / srate; tmp = (tmp % srate) << 8; ratio = (ratio << 8) + tmp / srate; tmp = (tmp % srate) << 8; ratio = (ratio << 8) + tmp / srate; dprintk("tda8083: ratio == %08x\n", (unsigned int) ratio); tda8083_writereg (state, 0x05, filter); tda8083_writereg (state, 0x02, (ratio >> 16) & 0xff); tda8083_writereg (state, 0x03, (ratio >> 8) & 0xff); tda8083_writereg (state, 0x04, (ratio ) & 0xff); tda8083_writereg (state, 0x00, 0x3c); tda8083_writereg (state, 0x00, 0x04); return 1; } static void tda8083_wait_diseqc_fifo (struct tda8083_state* state, int timeout) { unsigned long start = jiffies; while (jiffies - start < timeout && !(tda8083_readreg(state, 0x02) & 0x80)) { msleep(50); }; } static int tda8083_set_tone (struct tda8083_state* state, fe_sec_tone_mode_t tone) { tda8083_writereg (state, 0x26, 0xf1); switch (tone) 
{ case SEC_TONE_OFF: return tda8083_writereg (state, 0x29, 0x00); case SEC_TONE_ON: return tda8083_writereg (state, 0x29, 0x80); default: return -EINVAL; }; } static int tda8083_set_voltage (struct tda8083_state* state, fe_sec_voltage_t voltage) { switch (voltage) { case SEC_VOLTAGE_13: return tda8083_writereg (state, 0x20, 0x00); case SEC_VOLTAGE_18: return tda8083_writereg (state, 0x20, 0x11); default: return -EINVAL; }; } static int tda8083_send_diseqc_burst (struct tda8083_state* state, fe_sec_mini_cmd_t burst) { switch (burst) { case SEC_MINI_A: tda8083_writereg (state, 0x29, (5 << 2)); /* send burst A */ break; case SEC_MINI_B: tda8083_writereg (state, 0x29, (7 << 2)); /* send B */ break; default: return -EINVAL; }; tda8083_wait_diseqc_fifo (state, 100); return 0; } static int tda8083_send_diseqc_msg (struct dvb_frontend* fe, struct dvb_diseqc_master_cmd *m) { struct tda8083_state* state = fe->demodulator_priv; int i; tda8083_writereg (state, 0x29, (m->msg_len - 3) | (1 << 2)); /* enable */ for (i=0; i<m->msg_len; i++) tda8083_writereg (state, 0x23 + i, m->msg[i]); tda8083_writereg (state, 0x29, (m->msg_len - 3) | (3 << 2)); /* send!! 
*/ tda8083_wait_diseqc_fifo (state, 100); return 0; } static int tda8083_read_status(struct dvb_frontend* fe, fe_status_t* status) { struct tda8083_state* state = fe->demodulator_priv; u8 signal = ~tda8083_readreg (state, 0x01); u8 sync = tda8083_readreg (state, 0x02); *status = 0; if (signal > 10) *status |= FE_HAS_SIGNAL; if (sync & 0x01) *status |= FE_HAS_CARRIER; if (sync & 0x02) *status |= FE_HAS_VITERBI; if (sync & 0x10) *status |= FE_HAS_SYNC; if (sync & 0x20) /* frontend can not lock */ *status |= FE_TIMEDOUT; if ((sync & 0x1f) == 0x1f) *status |= FE_HAS_LOCK; return 0; } static int tda8083_read_ber(struct dvb_frontend* fe, u32* ber) { struct tda8083_state* state = fe->demodulator_priv; int ret; u8 buf[3]; if ((ret = tda8083_readregs(state, 0x0b, buf, sizeof(buf)))) return ret; *ber = ((buf[0] & 0x1f) << 16) | (buf[1] << 8) | buf[2]; return 0; } static int tda8083_read_signal_strength(struct dvb_frontend* fe, u16* strength) { struct tda8083_state* state = fe->demodulator_priv; u8 signal = ~tda8083_readreg (state, 0x01); *strength = (signal << 8) | signal; return 0; } static int tda8083_read_snr(struct dvb_frontend* fe, u16* snr) { struct tda8083_state* state = fe->demodulator_priv; u8 _snr = tda8083_readreg (state, 0x08); *snr = (_snr << 8) | _snr; return 0; } static int tda8083_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) { struct tda8083_state* state = fe->demodulator_priv; *ucblocks = tda8083_readreg(state, 0x0f); if (*ucblocks == 0xff) *ucblocks = 0xffffffff; return 0; } static int tda8083_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct tda8083_state* state = fe->demodulator_priv; if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } tda8083_set_inversion (state, p->inversion); tda8083_set_fec(state, p->fec_inner); tda8083_set_symbolrate(state, p->symbol_rate); tda8083_writereg (state, 0x00, 0x3c); 
tda8083_writereg (state, 0x00, 0x04); return 0; } static int tda8083_get_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct tda8083_state* state = fe->demodulator_priv; /* FIXME: get symbolrate & frequency offset...*/ /*p->frequency = ???;*/ p->inversion = (tda8083_readreg (state, 0x0e) & 0x80) ? INVERSION_ON : INVERSION_OFF; p->fec_inner = tda8083_get_fec(state); /*p->symbol_rate = tda8083_get_symbolrate (state);*/ return 0; } static int tda8083_sleep(struct dvb_frontend* fe) { struct tda8083_state* state = fe->demodulator_priv; tda8083_writereg (state, 0x00, 0x02); return 0; } static int tda8083_init(struct dvb_frontend* fe) { struct tda8083_state* state = fe->demodulator_priv; int i; for (i=0; i<44; i++) tda8083_writereg (state, i, tda8083_init_tab[i]); tda8083_writereg (state, 0x00, 0x3c); tda8083_writereg (state, 0x00, 0x04); return 0; } static int tda8083_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t burst) { struct tda8083_state* state = fe->demodulator_priv; tda8083_send_diseqc_burst (state, burst); tda8083_writereg (state, 0x00, 0x3c); tda8083_writereg (state, 0x00, 0x04); return 0; } static int tda8083_diseqc_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone) { struct tda8083_state* state = fe->demodulator_priv; tda8083_set_tone (state, tone); tda8083_writereg (state, 0x00, 0x3c); tda8083_writereg (state, 0x00, 0x04); return 0; } static int tda8083_diseqc_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage) { struct tda8083_state* state = fe->demodulator_priv; tda8083_set_voltage (state, voltage); tda8083_writereg (state, 0x00, 0x3c); tda8083_writereg (state, 0x00, 0x04); return 0; } static void tda8083_release(struct dvb_frontend* fe) { struct tda8083_state* state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops tda8083_ops; struct dvb_frontend* tda8083_attach(const struct tda8083_config* config, struct i2c_adapter* i2c) { struct tda8083_state* 
state = NULL; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct tda8083_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->config = config; state->i2c = i2c; /* check if the demod is there */ if ((tda8083_readreg(state, 0x00)) != 0x05) goto error; /* create dvb_frontend */ memcpy(&state->frontend.ops, &tda8083_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); return NULL; } static struct dvb_frontend_ops tda8083_ops = { .delsys = { SYS_DVBS }, .info = { .name = "Philips TDA8083 DVB-S", .frequency_min = 920000, /* TDA8060 */ .frequency_max = 2200000, /* TDA8060 */ .frequency_stepsize = 125, /* kHz for QPSK frontends */ /* .frequency_tolerance = ???,*/ .symbol_rate_min = 12000000, .symbol_rate_max = 30000000, /* .symbol_rate_tolerance = ???,*/ .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 | FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_MUTE_TS }, .release = tda8083_release, .init = tda8083_init, .sleep = tda8083_sleep, .set_frontend = tda8083_set_frontend, .get_frontend = tda8083_get_frontend, .read_status = tda8083_read_status, .read_signal_strength = tda8083_read_signal_strength, .read_snr = tda8083_read_snr, .read_ber = tda8083_read_ber, .read_ucblocks = tda8083_read_ucblocks, .diseqc_send_master_cmd = tda8083_send_diseqc_msg, .diseqc_send_burst = tda8083_diseqc_send_burst, .set_tone = tda8083_diseqc_set_tone, .set_voltage = tda8083_diseqc_set_voltage, }; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("Philips TDA8083 DVB-S Demodulator"); MODULE_AUTHOR("Ralph Metzler, Holger Waechtler"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(tda8083_attach);
gpl-2.0
MoKee/android_kernel_zte_x9180
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
5030
47371
/* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2010 QLogic Corporation * * See LICENSE.qlcnic for copyright and licensing details. */ #include "qlcnic.h" #include <linux/slab.h> #include <net/ip.h> #include <linux/bitops.h> #define MASK(n) ((1ULL<<(n))-1) #define OCM_WIN_P3P(addr) (addr & 0xffc0000) #define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) #define CRB_BLK(off) ((off >> 20) & 0x3f) #define CRB_SUBBLK(off) ((off >> 16) & 0xf) #define CRB_WINDOW_2M (0x130060) #define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) #define CRB_INDIRECT_2M (0x1e0000UL) #ifndef readq static inline u64 readq(void __iomem *addr) { return readl(addr) | (((u64) readl(addr + 4)) << 32LL); } #endif #ifndef writeq static inline void writeq(u64 val, void __iomem *addr) { writel(((u32) (val)), (addr)); writel(((u32) (val >> 32)), (addr + 4)); } #endif static const struct crb_128M_2M_block_map crb_128M_2M_map[64] __cacheline_aligned_in_smp = { {{{0, 0, 0, 0} } }, /* 0: PCI */ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ {1, 0x0110000, 0x0120000, 0x130000}, {1, 0x0120000, 0x0122000, 0x124000}, {1, 0x0130000, 0x0132000, 0x126000}, {1, 0x0140000, 0x0142000, 0x128000}, {1, 0x0150000, 0x0152000, 0x12a000}, {1, 0x0160000, 0x0170000, 0x110000}, {1, 0x0170000, 0x0172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x01e0000, 0x01e0800, 0x122000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ {{{0, 0, 0, 0} } }, /* 3: */ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ {0, 0x0000000, 0x0000000, 0x000000}, {0, 
0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x08f0000, 0x08f2000, 0x172000} } }, {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x09f0000, 0x09f2000, 0x176000} } }, {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0af0000, 0x0af2000, 0x17a000} } }, {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 
0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ {{{0, 0, 0, 0} } }, /* 23: */ {{{0, 0, 0, 0} } }, /* 24: */ {{{0, 0, 0, 0} } }, /* 25: */ {{{0, 0, 0, 0} } }, /* 26: */ {{{0, 0, 0, 0} } }, /* 27: */ {{{0, 0, 0, 0} } }, /* 28: */ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ {{{0} } }, /* 32: PCI */ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ {1, 0x2110000, 0x2120000, 0x130000}, {1, 0x2120000, 0x2122000, 0x124000}, {1, 0x2130000, 0x2132000, 0x126000}, {1, 0x2140000, 0x2142000, 0x128000}, {1, 0x2150000, 0x2152000, 0x12a000}, {1, 0x2160000, 0x2170000, 0x110000}, {1, 0x2170000, 0x2172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x2200000, 
0x2204000, 0x1b0000} } },/* 34: CAM */ {{{0} } }, /* 35: */ {{{0} } }, /* 36: */ {{{0} } }, /* 37: */ {{{0} } }, /* 38: */ {{{0} } }, /* 39: */ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ {{{0} } }, /* 52: */ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ {{{0} } }, /* 59: I2C0 */ {{{0} } }, /* 60: I2C1 */ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ }; /* * top 12 bits of crb internal address (hub, agent) */ static const unsigned crb_hub_agt[64] = { 0, QLCNIC_HW_CRB_HUB_AGT_ADR_PS, QLCNIC_HW_CRB_HUB_AGT_ADR_MN, QLCNIC_HW_CRB_HUB_AGT_ADR_MS, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_SRE, QLCNIC_HW_CRB_HUB_AGT_ADR_NIU, QLCNIC_HW_CRB_HUB_AGT_ADR_QMN, QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0, QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1, QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2, QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3, QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4, 
QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0, QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1, QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2, QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3, QLCNIC_HW_CRB_HUB_AGT_ADR_PGND, QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI, QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0, QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1, QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2, QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI, QLCNIC_HW_CRB_HUB_AGT_ADR_SN, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_EG, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_PS, QLCNIC_HW_CRB_HUB_AGT_ADR_CAM, 0, 0, 0, 0, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7, QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9, QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_SMB, QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0, QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC, 0, }; /* PCI Windowing for DDR regions. */ #define QLCNIC_PCIE_SEM_TIMEOUT 10000 int qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) { int done = 0, timeout = 0; while (!done) { done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem))); if (done == 1) break; if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { dev_err(&adapter->pdev->dev, "Failed to acquire sem=%d lock; holdby=%d\n", sem, id_reg ? 
QLCRD32(adapter, id_reg) : -1); return -EIO; } msleep(1); } if (id_reg) QLCWR32(adapter, id_reg, adapter->portnum); return 0; } void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem) { QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem))); } static int qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter, struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) { u32 i, producer, consumer; struct qlcnic_cmd_buffer *pbuf; struct cmd_desc_type0 *cmd_desc; struct qlcnic_host_tx_ring *tx_ring; i = 0; if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return -EIO; tx_ring = adapter->tx_ring; __netif_tx_lock_bh(tx_ring->txq); producer = tx_ring->producer; consumer = tx_ring->sw_consumer; if (nr_desc >= qlcnic_tx_avail(tx_ring)) { netif_tx_stop_queue(tx_ring->txq); smp_mb(); if (qlcnic_tx_avail(tx_ring) > nr_desc) { if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) netif_tx_wake_queue(tx_ring->txq); } else { adapter->stats.xmit_off++; __netif_tx_unlock_bh(tx_ring->txq); return -EBUSY; } } do { cmd_desc = &cmd_desc_arr[i]; pbuf = &tx_ring->cmd_buf_arr[producer]; pbuf->skb = NULL; pbuf->frag_count = 0; memcpy(&tx_ring->desc_head[producer], &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); producer = get_next_index(producer, tx_ring->num_desc); i++; } while (i != nr_desc); tx_ring->producer = producer; qlcnic_update_cmd_producer(adapter, tx_ring); __netif_tx_unlock_bh(tx_ring->txq); return 0; } static int qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, __le16 vlan_id, unsigned op) { struct qlcnic_nic_req req; struct qlcnic_mac_req *mac_req; struct qlcnic_vlan_req *vlan_req; u64 word; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23); word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); mac_req = (struct qlcnic_mac_req *)&req.words[0]; mac_req->op = op; memcpy(mac_req->mac_addr, addr, 6); vlan_req = (struct qlcnic_vlan_req *)&req.words[1]; vlan_req->vlan_id = vlan_id; 
return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); } static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr) { struct list_head *head; struct qlcnic_mac_list_s *cur; /* look up if already exists */ list_for_each(head, &adapter->mac_list) { cur = list_entry(head, struct qlcnic_mac_list_s, list); if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) return 0; } cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC); if (cur == NULL) { dev_err(&adapter->netdev->dev, "failed to add mac address filter\n"); return -ENOMEM; } memcpy(cur->mac_addr, addr, ETH_ALEN); if (qlcnic_sre_macaddr_change(adapter, cur->mac_addr, 0, QLCNIC_MAC_ADD)) { kfree(cur); return -EIO; } list_add_tail(&cur->list, &adapter->mac_list); return 0; } void qlcnic_set_multi(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct netdev_hw_addr *ha; static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; u32 mode = VPORT_MISS_MODE_DROP; if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return; qlcnic_nic_add_mac(adapter, adapter->mac_addr); qlcnic_nic_add_mac(adapter, bcast_addr); if (netdev->flags & IFF_PROMISC) { if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) mode = VPORT_MISS_MODE_ACCEPT_ALL; goto send_fw_cmd; } if ((netdev->flags & IFF_ALLMULTI) || (netdev_mc_count(netdev) > adapter->max_mc_count)) { mode = VPORT_MISS_MODE_ACCEPT_MULTI; goto send_fw_cmd; } if (!netdev_mc_empty(netdev)) { netdev_for_each_mc_addr(ha, netdev) { qlcnic_nic_add_mac(adapter, ha->addr); } } send_fw_cmd: if (mode == VPORT_MISS_MODE_ACCEPT_ALL) { qlcnic_alloc_lb_filters_mem(adapter); adapter->mac_learn = 1; } else { adapter->mac_learn = 0; } qlcnic_nic_set_promisc(adapter, mode); } int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) { struct qlcnic_nic_req req; u64 word; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = 
QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(mode); return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); } void qlcnic_free_mac_list(struct qlcnic_adapter *adapter) { struct qlcnic_mac_list_s *cur; struct list_head *head = &adapter->mac_list; while (!list_empty(head)) { cur = list_entry(head->next, struct qlcnic_mac_list_s, list); qlcnic_sre_macaddr_change(adapter, cur->mac_addr, 0, QLCNIC_MAC_DEL); list_del(&cur->list); kfree(cur); } } void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter) { struct qlcnic_filter *tmp_fil; struct hlist_node *tmp_hnode, *n; struct hlist_head *head; int i; for (i = 0; i < adapter->fhash.fmax; i++) { head = &(adapter->fhash.fhead[i]); hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { if (jiffies > (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) { qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr, tmp_fil->vlan_id, tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL); spin_lock_bh(&adapter->mac_learn_lock); adapter->fhash.fnum--; hlist_del(&tmp_fil->fnode); spin_unlock_bh(&adapter->mac_learn_lock); kfree(tmp_fil); } } } } void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter) { struct qlcnic_filter *tmp_fil; struct hlist_node *tmp_hnode, *n; struct hlist_head *head; int i; for (i = 0; i < adapter->fhash.fmax; i++) { head = &(adapter->fhash.fhead[i]); hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr, tmp_fil->vlan_id, tmp_fil->vlan_id ? 
QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL); spin_lock_bh(&adapter->mac_learn_lock); adapter->fhash.fnum--; hlist_del(&tmp_fil->fnode); spin_unlock_bh(&adapter->mac_learn_lock); kfree(tmp_fil); } } } int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag) { struct qlcnic_nic_req req; int rv; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK | ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32)); req.words[0] = cpu_to_le64(flag); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n", flag ? "Set" : "Reset"); return rv; } int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { if (qlcnic_set_fw_loopback(adapter, mode)) return -EIO; if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) { qlcnic_set_fw_loopback(adapter, 0); return -EIO; } msleep(1000); return 0; } void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter) { int mode = VPORT_MISS_MODE_DROP; struct net_device *netdev = adapter->netdev; qlcnic_set_fw_loopback(adapter, 0); if (netdev->flags & IFF_PROMISC) mode = VPORT_MISS_MODE_ACCEPT_ALL; else if (netdev->flags & IFF_ALLMULTI) mode = VPORT_MISS_MODE_ACCEPT_MULTI; qlcnic_nic_set_promisc(adapter, mode); msleep(1000); } /* * Send the interrupt coalescing parameter set by ethtool to the card. 
*/ int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter) { struct qlcnic_nic_req req; int rv; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE | ((u64) adapter->portnum << 16)); req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32); req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets | ((u64) adapter->ahw->coal.rx_time_us) << 16); req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out | ((u64) adapter->ahw->coal.type) << 32 | ((u64) adapter->ahw->coal.sts_ring_mask) << 40); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "Could not send interrupt coalescing parameters\n"); return rv; } int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable) { struct qlcnic_nic_req req; u64 word; int rv; if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return 0; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(enable); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "Could not send configure hw lro request\n"); return rv; } int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) { struct qlcnic_nic_req req; u64 word; int rv; if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable) return 0; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(enable); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "Could not send configure bridge mode request\n"); adapter->flags 
^= QLCNIC_BRIDGE_ENABLED; return rv; } #define RSS_HASHTYPE_IP_TCP 0x3 int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable) { struct qlcnic_nic_req req; u64 word; int i, rv; static const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 0x255b0ec26d5a56daULL }; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); /* * RSS request: * bits 3-0: hash_method * 5-4: hash_type_ipv4 * 7-6: hash_type_ipv6 * 8: enable * 9: use indirection table * 47-10: reserved * 63-48: indirection table mask */ word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | ((u64)(enable & 0x1) << 8) | ((0x7ULL) << 48); req.words[0] = cpu_to_le64(word); for (i = 0; i < 5; i++) req.words[i+1] = cpu_to_le64(key[i]); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "could not configure RSS\n"); return rv; } int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd) { struct qlcnic_nic_req req; struct qlcnic_ipaddr *ipa; u64 word; int rv; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(cmd); ipa = (struct qlcnic_ipaddr *)&req.words[1]; ipa->ipv4 = ip; rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "could not notify %s IP 0x%x reuqest\n", (cmd == QLCNIC_IP_UP) ? 
"Add" : "Remove", ip); return rv; } int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable) { struct qlcnic_nic_req req; u64 word; int rv; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(enable | (enable << 8)); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "could not configure link notification\n"); return rv; } int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter) { struct qlcnic_nic_req req; u64 word; int rv; if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return 0; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_LRO_REQUEST | ((u64)adapter->portnum << 16) | ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56) ; req.req_hdr = cpu_to_le64(word); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "could not cleanup lro flows\n"); return rv; } /* * qlcnic_change_mtu - Change the Maximum Transfer Unit * @returns 0 on success, negative on failure */ int qlcnic_change_mtu(struct net_device *netdev, int mtu) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int rc = 0; if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) { dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes" " not supported\n", P3P_MAX_MTU, P3P_MIN_MTU); return -EINVAL; } rc = qlcnic_fw_cmd_set_mtu(adapter, mtu); if (!rc) netdev->mtu = mtu; return rc; } netdev_features_t qlcnic_fix_features(struct net_device *netdev, netdev_features_t features) { struct qlcnic_adapter *adapter = netdev_priv(netdev); if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) { netdev_features_t changed = features ^ netdev->features; features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); } if (!(features & NETIF_F_RXCSUM)) 
features &= ~NETIF_F_LRO;
	return features;
}

int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	/* bits that differ between current and requested feature sets */
	netdev_features_t changed = netdev->features ^ features;
	int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;

	/* only the LRO bit is handled here; nothing else to program */
	if (!(changed & NETIF_F_LRO))
		return 0;

	/* NOTE(review): records the requested set with LRO toggled back;
	 * presumably the core flips it again on success - confirm against
	 * netdev_update_features() semantics for this kernel version. */
	netdev->features = features ^ NETIF_F_LRO;

	if (qlcnic_config_hw_lro(adapter, hw_lro))
		return -EIO;

	/* when turning LRO off, also flush any in-flight LRO flows */
	if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
		return -EIO;

	return 0;
}

/*
 * Changes the CRB window to the specified window.
 */
/* Returns < 0 if off is not valid,
 *	 1 if window access is needed. 'off' is set to offset from
 *	   CRB space in 128M pci map
 *	 0 if no window access is needed. 'off' is set to 2M addr
 * In: 'off' is offset from base in 128M pci map
 */
static int
qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
		ulong off, void __iomem **addr)
{
	const struct crb_128M_2M_sub_block_map *m;

	/* reject offsets outside the CRB aperture of the 128M map */
	if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
		return -EINVAL;

	off -= QLCNIC_PCI_CRBSPACE;

	/*
	 * Try direct map: look up the sub-block covering this offset and,
	 * if valid, translate straight into the 2M BAR.
	 */
	m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];

	if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
		*addr = adapter->ahw->pci_base0 + m->start_2M +
			(off - m->start_128M);
		return 0;
	}

	/*
	 * Not in direct map, use crb window: caller must program the
	 * window register before touching *addr (return value 1).
	 */
	*addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
	return 1;
}

/*
 * In: 'off' is offset from CRB space in 128M pci map
 * Out: 'off' is 2M pci map addr
 * side effect: lock crb window
 */
static int
qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
{
	u32 window;
	void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;

	off -= QLCNIC_PCI_CRBSPACE;

	window = CRB_HI(off);
	if (window == 0) {
		dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
		return -EIO;
	}

	writel(window, addr);
	/* read back to verify the window actually latched */
	if (readl(addr) != window) {
		if (printk_ratelimit())
			dev_warn(&adapter->pdev->dev,
				"failed to set CRB window to %d off 0x%lx\n",
window, off); return -EIO; } return 0; } int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data) { unsigned long flags; int rv; void __iomem *addr = NULL; rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); if (rv == 0) { writel(data, addr); return 0; } if (rv > 0) { /* indirect access */ write_lock_irqsave(&adapter->ahw->crb_lock, flags); crb_win_lock(adapter); rv = qlcnic_pci_set_crbwindow_2M(adapter, off); if (!rv) writel(data, addr); crb_win_unlock(adapter); write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); return rv; } dev_err(&adapter->pdev->dev, "%s: invalid offset: 0x%016lx\n", __func__, off); dump_stack(); return -EIO; } u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off) { unsigned long flags; int rv; u32 data = -1; void __iomem *addr = NULL; rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); if (rv == 0) return readl(addr); if (rv > 0) { /* indirect access */ write_lock_irqsave(&adapter->ahw->crb_lock, flags); crb_win_lock(adapter); if (!qlcnic_pci_set_crbwindow_2M(adapter, off)) data = readl(addr); crb_win_unlock(adapter); write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); return data; } dev_err(&adapter->pdev->dev, "%s: invalid offset: 0x%016lx\n", __func__, off); dump_stack(); return -1; } void __iomem * qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset) { void __iomem *addr = NULL; WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr)); return addr; } static int qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter, u64 addr, u32 *start) { u32 window; window = OCM_WIN_P3P(addr); writel(window, adapter->ahw->ocm_win_crb); /* read back to flush */ readl(adapter->ahw->ocm_win_crb); *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); return 0; } static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off, u64 *data, int op) { void __iomem *addr; int ret; u32 start; mutex_lock(&adapter->ahw->mem_lock); ret = qlcnic_pci_set_window_2M(adapter, off, &start); if 
(ret != 0) goto unlock; addr = adapter->ahw->pci_base0 + start; if (op == 0) /* read */ *data = readq(addr); else /* write */ writeq(*data, addr); unlock: mutex_unlock(&adapter->ahw->mem_lock); return ret; } void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) { void __iomem *addr = adapter->ahw->pci_base0 + QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); mutex_lock(&adapter->ahw->mem_lock); *data = readq(addr); mutex_unlock(&adapter->ahw->mem_lock); } void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data) { void __iomem *addr = adapter->ahw->pci_base0 + QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); mutex_lock(&adapter->ahw->mem_lock); writeq(data, addr); mutex_unlock(&adapter->ahw->mem_lock); } #define MAX_CTL_CHECK 1000 int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data) { int i, j, ret; u32 temp, off8; void __iomem *mem_crb; /* Only 64-bit aligned access */ if (off & 7) return -EIO; /* P3 onward, test agent base for MIU and SIU is same */ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, QLCNIC_ADDR_QDR_NET_MAX)) { mem_crb = qlcnic_get_ioaddr(adapter, QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) { mem_crb = qlcnic_get_ioaddr(adapter, QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) return qlcnic_pci_mem_access_direct(adapter, off, &data, 1); return -EIO; correct: off8 = off & ~0xf; mutex_lock(&adapter->ahw->mem_lock); writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); i = 0; writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = readl(mem_crb + TEST_AGT_CTRL); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { ret = -EIO; goto done; } i = (off & 0xf) ? 
0 : 2; writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)), mem_crb + MIU_TEST_AGT_WRDATA(i)); writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)), mem_crb + MIU_TEST_AGT_WRDATA(i+1)); i = (off & 0xf) ? 2 : 0; writel(data & 0xffffffff, mem_crb + MIU_TEST_AGT_WRDATA(i)); writel((data >> 32) & 0xffffffff, mem_crb + MIU_TEST_AGT_WRDATA(i+1)); writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = readl(mem_crb + TEST_AGT_CTRL); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&adapter->pdev->dev, "failed to write through agent\n"); ret = -EIO; } else ret = 0; done: mutex_unlock(&adapter->ahw->mem_lock); return ret; } int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) { int j, ret; u32 temp, off8; u64 val; void __iomem *mem_crb; /* Only 64-bit aligned access */ if (off & 7) return -EIO; /* P3 onward, test agent base for MIU and SIU is same */ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, QLCNIC_ADDR_QDR_NET_MAX)) { mem_crb = qlcnic_get_ioaddr(adapter, QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) { mem_crb = qlcnic_get_ioaddr(adapter, QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) { return qlcnic_pci_mem_access_direct(adapter, off, data, 0); } return -EIO; correct: off8 = off & ~0xf; mutex_lock(&adapter->ahw->mem_lock); writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = readl(mem_crb + TEST_AGT_CTRL); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&adapter->pdev->dev, 
"failed to read through agent\n"); ret = -EIO; } else { off8 = MIU_TEST_AGT_RDDATA_LO; if (off & 0xf) off8 = MIU_TEST_AGT_RDDATA_UPPER_LO; temp = readl(mem_crb + off8 + 4); val = (u64)temp << 32; val |= readl(mem_crb + off8); *data = val; ret = 0; } mutex_unlock(&adapter->ahw->mem_lock); return ret; } int qlcnic_get_board_info(struct qlcnic_adapter *adapter) { int offset, board_type, magic; struct pci_dev *pdev = adapter->pdev; offset = QLCNIC_FW_MAGIC_OFFSET; if (qlcnic_rom_fast_read(adapter, offset, &magic)) return -EIO; if (magic != QLCNIC_BDINFO_MAGIC) { dev_err(&pdev->dev, "invalid board config, magic=%08x\n", magic); return -EIO; } offset = QLCNIC_BRDTYPE_OFFSET; if (qlcnic_rom_fast_read(adapter, offset, &board_type)) return -EIO; adapter->ahw->board_type = board_type; if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) { u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I); if ((gpio & 0x8000) == 0) board_type = QLCNIC_BRDTYPE_P3P_10G_TP; } switch (board_type) { case QLCNIC_BRDTYPE_P3P_HMEZ: case QLCNIC_BRDTYPE_P3P_XG_LOM: case QLCNIC_BRDTYPE_P3P_10G_CX4: case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: case QLCNIC_BRDTYPE_P3P_IMEZ: case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: case QLCNIC_BRDTYPE_P3P_10G_XFP: case QLCNIC_BRDTYPE_P3P_10000_BASE_T: adapter->ahw->port_type = QLCNIC_XGBE; break; case QLCNIC_BRDTYPE_P3P_REF_QG: case QLCNIC_BRDTYPE_P3P_4_GB: case QLCNIC_BRDTYPE_P3P_4_GB_MM: adapter->ahw->port_type = QLCNIC_GBE; break; case QLCNIC_BRDTYPE_P3P_10G_TP: adapter->ahw->port_type = (adapter->portnum < 2) ? 
QLCNIC_XGBE : QLCNIC_GBE; break; default: dev_err(&pdev->dev, "unknown board type %x\n", board_type); adapter->ahw->port_type = QLCNIC_XGBE; break; } return 0; } int qlcnic_wol_supported(struct qlcnic_adapter *adapter) { u32 wol_cfg; wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); if (wol_cfg & (1UL << adapter->portnum)) { wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); if (wol_cfg & (1 << adapter->portnum)) return 1; } return 0; } int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) { struct qlcnic_nic_req req; int rv; u64 word; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64((u64)rate << 32); req.words[1] = cpu_to_le64(state); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv) dev_err(&adapter->pdev->dev, "LED configuration failed.\n"); return rv; } /* FW dump related functions */ static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, u32 *buffer) { int i; u32 addr, data; struct __crb *crb = &entry->region.crb; void __iomem *base = adapter->ahw->pci_base0; addr = crb->addr; for (i = 0; i < crb->no_ops; i++) { QLCNIC_RD_DUMP_REG(addr, base, &data); *buffer++ = cpu_to_le32(addr); *buffer++ = cpu_to_le32(data); addr += crb->stride; } return crb->no_ops * 2 * sizeof(u32); } static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, u32 *buffer) { int i, k, timeout = 0; void __iomem *base = adapter->ahw->pci_base0; u32 addr, data; u8 opcode, no_ops; struct __ctrl *ctr = &entry->region.ctrl; struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr; addr = ctr->addr; no_ops = ctr->no_ops; for (i = 0; i < no_ops; i++) { k = 0; opcode = 0; for (k = 0; k < 8; k++) { if (!(ctr->opcode & (1 << k))) continue; switch (1 << k) { case QLCNIC_DUMP_WCRB: 
QLCNIC_WR_DUMP_REG(addr, base, ctr->val1); break; case QLCNIC_DUMP_RWCRB: QLCNIC_RD_DUMP_REG(addr, base, &data); QLCNIC_WR_DUMP_REG(addr, base, data); break; case QLCNIC_DUMP_ANDCRB: QLCNIC_RD_DUMP_REG(addr, base, &data); QLCNIC_WR_DUMP_REG(addr, base, (data & ctr->val2)); break; case QLCNIC_DUMP_ORCRB: QLCNIC_RD_DUMP_REG(addr, base, &data); QLCNIC_WR_DUMP_REG(addr, base, (data | ctr->val3)); break; case QLCNIC_DUMP_POLLCRB: while (timeout <= ctr->timeout) { QLCNIC_RD_DUMP_REG(addr, base, &data); if ((data & ctr->val2) == ctr->val1) break; msleep(1); timeout++; } if (timeout > ctr->timeout) { dev_info(&adapter->pdev->dev, "Timed out, aborting poll CRB\n"); return -EINVAL; } break; case QLCNIC_DUMP_RD_SAVE: if (ctr->index_a) addr = t_hdr->saved_state[ctr->index_a]; QLCNIC_RD_DUMP_REG(addr, base, &data); t_hdr->saved_state[ctr->index_v] = data; break; case QLCNIC_DUMP_WRT_SAVED: if (ctr->index_v) data = t_hdr->saved_state[ctr->index_v]; else data = ctr->val1; if (ctr->index_a) addr = t_hdr->saved_state[ctr->index_a]; QLCNIC_WR_DUMP_REG(addr, base, data); break; case QLCNIC_DUMP_MOD_SAVE_ST: data = t_hdr->saved_state[ctr->index_v]; data <<= ctr->shl_val; data >>= ctr->shr_val; if (ctr->val2) data &= ctr->val2; data |= ctr->val3; data += ctr->val1; t_hdr->saved_state[ctr->index_v] = data; break; default: dev_info(&adapter->pdev->dev, "Unknown opcode\n"); break; } } addr += ctr->stride; } return 0; } static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, u32 *buffer) { int loop; u32 val, data = 0; struct __mux *mux = &entry->region.mux; void __iomem *base = adapter->ahw->pci_base0; val = mux->val; for (loop = 0; loop < mux->no_ops; loop++) { QLCNIC_WR_DUMP_REG(mux->addr, base, val); QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data); *buffer++ = cpu_to_le32(val); *buffer++ = cpu_to_le32(data); val += mux->val_stride; } return 2 * mux->no_ops * sizeof(u32); } static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter, struct 
qlcnic_dump_entry *entry, u32 *buffer) { int i, loop; u32 cnt, addr, data, que_id = 0; void __iomem *base = adapter->ahw->pci_base0; struct __queue *que = &entry->region.que; addr = que->read_addr; cnt = que->read_addr_cnt; for (loop = 0; loop < que->no_ops; loop++) { QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id); addr = que->read_addr; for (i = 0; i < cnt; i++) { QLCNIC_RD_DUMP_REG(addr, base, &data); *buffer++ = cpu_to_le32(data); addr += que->read_addr_stride; } que_id += que->stride; } return que->no_ops * cnt * sizeof(u32); } static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, u32 *buffer) { int i; u32 data; void __iomem *addr; struct __ocm *ocm = &entry->region.ocm; addr = adapter->ahw->pci_base0 + ocm->read_addr; for (i = 0; i < ocm->no_ops; i++) { data = readl(addr); *buffer++ = cpu_to_le32(data); addr += ocm->read_addr_stride; } return ocm->no_ops * sizeof(u32); } static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, u32 *buffer) { int i, count = 0; u32 fl_addr, size, val, lck_val, addr; struct __mem *rom = &entry->region.mem; void __iomem *base = adapter->ahw->pci_base0; fl_addr = rom->addr; size = rom->size/4; lock_try: lck_val = readl(base + QLCNIC_FLASH_SEM2_LK); if (!lck_val && count < MAX_CTL_CHECK) { msleep(10); count++; goto lock_try; } writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID)); for (i = 0; i < size; i++) { addr = fl_addr & 0xFFFF0000; QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr); addr = LSW(fl_addr) + FLASH_ROM_DATA; QLCNIC_RD_DUMP_REG(addr, base, &val); fl_addr += 4; *buffer++ = cpu_to_le32(val); } readl(base + QLCNIC_FLASH_SEM2_ULK); return rom->size; } static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, u32 *buffer) { int i; u32 cnt, val, data, addr; void __iomem *base = adapter->ahw->pci_base0; struct __cache *l1 = &entry->region.cache; val = l1->init_tag_val; for (i = 0; i < l1->no_ops; i++) { 
QLCNIC_WR_DUMP_REG(l1->addr, base, val); QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val)); addr = l1->read_addr; cnt = l1->read_addr_num; while (cnt) { QLCNIC_RD_DUMP_REG(addr, base, &data); *buffer++ = cpu_to_le32(data); addr += l1->read_addr_stride; cnt--; } val += l1->stride; } return l1->no_ops * l1->read_addr_num * sizeof(u32); } static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, u32 *buffer) { int i; u32 cnt, val, data, addr; u8 poll_mask, poll_to, time_out = 0; void __iomem *base = adapter->ahw->pci_base0; struct __cache *l2 = &entry->region.cache; val = l2->init_tag_val; poll_mask = LSB(MSW(l2->ctrl_val)); poll_to = MSB(MSW(l2->ctrl_val)); for (i = 0; i < l2->no_ops; i++) { QLCNIC_WR_DUMP_REG(l2->addr, base, val); if (LSW(l2->ctrl_val)) QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base, LSW(l2->ctrl_val)); if (!poll_mask) goto skip_poll; do { QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data); if (!(data & poll_mask)) break; msleep(1); time_out++; } while (time_out <= poll_to); if (time_out > poll_to) { dev_err(&adapter->pdev->dev, "Timeout exceeded in %s, aborting dump\n", __func__); return -EINVAL; } skip_poll: addr = l2->read_addr; cnt = l2->read_addr_num; while (cnt) { QLCNIC_RD_DUMP_REG(addr, base, &data); *buffer++ = cpu_to_le32(data); addr += l2->read_addr_stride; cnt--; } val += l2->stride; } return l2->no_ops * l2->read_addr_num * sizeof(u32); } static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, u32 *buffer) { u32 addr, data, test, ret = 0; int i, reg_read; struct __mem *mem = &entry->region.mem; void __iomem *base = adapter->ahw->pci_base0; reg_read = mem->size; addr = mem->addr; /* check for data size of multiple of 16 and 16 byte alignment */ if ((addr & 0xf) || (reg_read%16)) { dev_info(&adapter->pdev->dev, "Unaligned memory addr:0x%x size:0x%x\n", addr, reg_read); return -EINVAL; } mutex_lock(&adapter->ahw->mem_lock); while (reg_read != 0) { 
QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr); QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0); QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base, TA_CTL_ENABLE | TA_CTL_START); for (i = 0; i < MAX_CTL_CHECK; i++) { QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test); if (!(test & TA_CTL_BUSY)) break; } if (i == MAX_CTL_CHECK) { if (printk_ratelimit()) { dev_err(&adapter->pdev->dev, "failed to read through agent\n"); ret = -EINVAL; goto out; } } for (i = 0; i < 4; i++) { QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data); *buffer++ = cpu_to_le32(data); } addr += 16; reg_read -= 16; ret += 16; } out: mutex_unlock(&adapter->ahw->mem_lock); return mem->size; } static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, u32 *buffer) { entry->hdr.flags |= QLCNIC_DUMP_SKIP; return 0; } struct qlcnic_dump_operations fw_dump_ops[] = { { QLCNIC_DUMP_NOP, qlcnic_dump_nop }, { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb }, { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux }, { QLCNIC_DUMP_QUEUE, qlcnic_dump_que }, { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom }, { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm }, { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl }, { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache }, { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache }, { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache }, { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache }, { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache }, { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache }, { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache }, { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache }, { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom }, { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory }, { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl }, { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop }, { QLCNIC_DUMP_RDEND, qlcnic_dump_nop }, }; /* Walk the template and collect dump for each entry in the dump template */ static int qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry, u32 size) { int ret = 1; if (size != entry->hdr.cap_size) { dev_info(dev, "Invalidate 
dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n", entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size); dev_info(dev, "Aborting further dump capture\n"); ret = 0; } return ret; } int qlcnic_dump_fw(struct qlcnic_adapter *adapter) { u32 *buffer; char mesg[64]; char *msg[] = {mesg, NULL}; int i, k, ops_cnt, ops_index, dump_size = 0; u32 entry_offset, dump, no_entries, buf_offset = 0; struct qlcnic_dump_entry *entry; struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr; if (fw_dump->clr) { dev_info(&adapter->pdev->dev, "Previous dump not cleared, not capturing dump\n"); return -EIO; } /* Calculate the size for dump data area only */ for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++) if (i & tmpl_hdr->drv_cap_mask) dump_size += tmpl_hdr->cap_sizes[k]; if (!dump_size) return -EIO; fw_dump->data = vzalloc(dump_size); if (!fw_dump->data) { dev_info(&adapter->pdev->dev, "Unable to allocate (%d KB) for fw dump\n", dump_size/1024); return -ENOMEM; } buffer = fw_dump->data; fw_dump->size = dump_size; no_entries = tmpl_hdr->num_entries; ops_cnt = ARRAY_SIZE(fw_dump_ops); entry_offset = tmpl_hdr->offset; tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION; tmpl_hdr->sys_info[1] = adapter->fw_version; for (i = 0; i < no_entries; i++) { entry = (void *)tmpl_hdr + entry_offset; if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) { entry->hdr.flags |= QLCNIC_DUMP_SKIP; entry_offset += entry->hdr.offset; continue; } /* Find the handler for this entry */ ops_index = 0; while (ops_index < ops_cnt) { if (entry->hdr.type == fw_dump_ops[ops_index].opcode) break; ops_index++; } if (ops_index == ops_cnt) { dev_info(&adapter->pdev->dev, "Invalid entry type %d, exiting dump\n", entry->hdr.type); goto error; } /* Collect dump for this entry */ dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer); if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump)) entry->hdr.flags |= QLCNIC_DUMP_SKIP; 
buf_offset += entry->hdr.cap_size; entry_offset += entry->hdr.offset; buffer = fw_dump->data + buf_offset; } if (dump_size != buf_offset) { dev_info(&adapter->pdev->dev, "Captured(%d) and expected size(%d) do not match\n", buf_offset, dump_size); goto error; } else { fw_dump->clr = 1; snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name); dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n", fw_dump->size); /* Send a udev event to notify availability of FW dump */ kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg); return 0; } error: vfree(fw_dump->data); return -EINVAL; }
gpl-2.0
CaptainThrowback/kernel_htc_m8whl
drivers/media/dvb/frontends/bcm3510.c
5030
21943
/* * Support for the Broadcom BCM3510 ATSC demodulator (1st generation Air2PC) * * Copyright (C) 2001-5, B2C2 inc. * * GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@desy.de> * * This driver is "hard-coded" to be used with the 1st generation of * Technisat/B2C2's Air2PC ATSC PCI/USB cards/boxes. The pll-programming * (Panasonic CT10S) is located here, which is actually wrong. Unless there is * another device with a BCM3510, this is no problem. * * The driver works also with QAM64 DVB-C, but had an unreasonable high * UNC. (Tested with the Air2PC ATSC 1st generation) * * You'll need a firmware for this driver in order to get it running. It is * called "dvb-fe-bcm3510-01.fw". * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 675 Mass * Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/jiffies.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/mutex.h> #include "dvb_frontend.h" #include "bcm3510.h" #include "bcm3510_priv.h" struct bcm3510_state { struct i2c_adapter* i2c; const struct bcm3510_config* config; struct dvb_frontend frontend; /* demodulator private data */ struct mutex hab_mutex; u8 firmware_loaded:1; unsigned long next_status_check; unsigned long status_check_interval; struct bcm3510_hab_cmd_status1 status1; struct bcm3510_hab_cmd_status2 status2; }; static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,2=i2c (|-able))."); #define dprintk(level,x...) if (level & debug) printk(x) #define dbufout(b,l,m) {\ int i; \ for (i = 0; i < l; i++) \ m("%02x ",b[i]); \ } #define deb_info(args...) dprintk(0x01,args) #define deb_i2c(args...) dprintk(0x02,args) #define deb_hab(args...) 
dprintk(0x04,args) /* transfer functions */ static int bcm3510_writebytes (struct bcm3510_state *state, u8 reg, u8 *buf, u8 len) { u8 b[256]; int err; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = b, .len = len + 1 }; b[0] = reg; memcpy(&b[1],buf,len); deb_i2c("i2c wr %02x: ",reg); dbufout(buf,len,deb_i2c); deb_i2c("\n"); if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) { deb_info("%s: i2c write error (addr %02x, reg %02x, err == %i)\n", __func__, state->config->demod_address, reg, err); return -EREMOTEIO; } return 0; } static int bcm3510_readbytes (struct bcm3510_state *state, u8 reg, u8 *buf, u8 len) { struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = &reg, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = buf, .len = len } }; int err; memset(buf,0,len); if ((err = i2c_transfer (state->i2c, msg, 2)) != 2) { deb_info("%s: i2c read error (addr %02x, reg %02x, err == %i)\n", __func__, state->config->demod_address, reg, err); return -EREMOTEIO; } deb_i2c("i2c rd %02x: ",reg); dbufout(buf,len,deb_i2c); deb_i2c("\n"); return 0; } static int bcm3510_writeB(struct bcm3510_state *state, u8 reg, bcm3510_register_value v) { return bcm3510_writebytes(state,reg,&v.raw,1); } static int bcm3510_readB(struct bcm3510_state *state, u8 reg, bcm3510_register_value *v) { return bcm3510_readbytes(state,reg,&v->raw,1); } /* Host Access Buffer transfers */ static int bcm3510_hab_get_response(struct bcm3510_state *st, u8 *buf, int len) { bcm3510_register_value v; int ret,i; v.HABADR_a6.HABADR = 0; if ((ret = bcm3510_writeB(st,0xa6,v)) < 0) return ret; for (i = 0; i < len; i++) { if ((ret = bcm3510_readB(st,0xa7,&v)) < 0) return ret; buf[i] = v.HABDATA_a7; } return 0; } static int bcm3510_hab_send_request(struct bcm3510_state *st, u8 *buf, int len) { bcm3510_register_value v,hab; int ret,i; unsigned long t; /* Check if any previous HAB request still needs to be serviced by the * Acquisition 
Processor before sending new request */ if ((ret = bcm3510_readB(st,0xa8,&v)) < 0) return ret; if (v.HABSTAT_a8.HABR) { deb_info("HAB is running already - clearing it.\n"); v.HABSTAT_a8.HABR = 0; bcm3510_writeB(st,0xa8,v); // return -EBUSY; } /* Send the start HAB Address (automatically incremented after write of * HABDATA) and write the HAB Data */ hab.HABADR_a6.HABADR = 0; if ((ret = bcm3510_writeB(st,0xa6,hab)) < 0) return ret; for (i = 0; i < len; i++) { hab.HABDATA_a7 = buf[i]; if ((ret = bcm3510_writeB(st,0xa7,hab)) < 0) return ret; } /* Set the HABR bit to indicate AP request in progress (LBHABR allows HABR to * be written) */ v.raw = 0; v.HABSTAT_a8.HABR = 1; v.HABSTAT_a8.LDHABR = 1; if ((ret = bcm3510_writeB(st,0xa8,v)) < 0) return ret; /* Polling method: Wait until the AP finishes processing the HAB request */ t = jiffies + 1*HZ; while (time_before(jiffies, t)) { deb_info("waiting for HAB to complete\n"); msleep(10); if ((ret = bcm3510_readB(st,0xa8,&v)) < 0) return ret; if (!v.HABSTAT_a8.HABR) return 0; } deb_info("send_request execution timed out.\n"); return -ETIMEDOUT; } static int bcm3510_do_hab_cmd(struct bcm3510_state *st, u8 cmd, u8 msgid, u8 *obuf, u8 olen, u8 *ibuf, u8 ilen) { u8 ob[olen+2],ib[ilen+2]; int ret = 0; ob[0] = cmd; ob[1] = msgid; memcpy(&ob[2],obuf,olen); deb_hab("hab snd: "); dbufout(ob,olen+2,deb_hab); deb_hab("\n"); if (mutex_lock_interruptible(&st->hab_mutex) < 0) return -EAGAIN; if ((ret = bcm3510_hab_send_request(st, ob, olen+2)) < 0 || (ret = bcm3510_hab_get_response(st, ib, ilen+2)) < 0) goto error; deb_hab("hab get: "); dbufout(ib,ilen+2,deb_hab); deb_hab("\n"); memcpy(ibuf,&ib[2],ilen); error: mutex_unlock(&st->hab_mutex); return ret; } #if 0 /* not needed, we use a semaphore to prevent HAB races */ static int bcm3510_is_ap_ready(struct bcm3510_state *st) { bcm3510_register_value ap,hab; int ret; if ((ret = bcm3510_readB(st,0xa8,&hab)) < 0 || (ret = bcm3510_readB(st,0xa2,&ap) < 0)) return ret; if (ap.APSTAT1_a2.RESET || 
ap.APSTAT1_a2.IDLE || ap.APSTAT1_a2.STOP || hab.HABSTAT_a8.HABR) { deb_info("AP is busy\n"); return -EBUSY; } return 0; } #endif static int bcm3510_bert_reset(struct bcm3510_state *st) { bcm3510_register_value b; int ret; if ((ret = bcm3510_readB(st,0xfa,&b)) < 0) return ret; b.BERCTL_fa.RESYNC = 0; bcm3510_writeB(st,0xfa,b); b.BERCTL_fa.RESYNC = 1; bcm3510_writeB(st,0xfa,b); b.BERCTL_fa.RESYNC = 0; bcm3510_writeB(st,0xfa,b); b.BERCTL_fa.CNTCTL = 1; b.BERCTL_fa.BITCNT = 1; bcm3510_writeB(st,0xfa,b); /* clear residual bit counter TODO */ return 0; } static int bcm3510_refresh_state(struct bcm3510_state *st) { if (time_after(jiffies,st->next_status_check)) { bcm3510_do_hab_cmd(st, CMD_STATUS, MSGID_STATUS1, NULL,0, (u8 *)&st->status1, sizeof(st->status1)); bcm3510_do_hab_cmd(st, CMD_STATUS, MSGID_STATUS2, NULL,0, (u8 *)&st->status2, sizeof(st->status2)); st->next_status_check = jiffies + (st->status_check_interval*HZ)/1000; } return 0; } static int bcm3510_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct bcm3510_state* st = fe->demodulator_priv; bcm3510_refresh_state(st); *status = 0; if (st->status1.STATUS1.RECEIVER_LOCK) *status |= FE_HAS_LOCK | FE_HAS_SYNC; if (st->status1.STATUS1.FEC_LOCK) *status |= FE_HAS_VITERBI; if (st->status1.STATUS1.OUT_PLL_LOCK) *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER; if (*status & FE_HAS_LOCK) st->status_check_interval = 1500; else /* more frequently checks if no lock has been achieved yet */ st->status_check_interval = 500; deb_info("real_status: %02x\n",*status); return 0; } static int bcm3510_read_ber(struct dvb_frontend* fe, u32* ber) { struct bcm3510_state* st = fe->demodulator_priv; bcm3510_refresh_state(st); *ber = (st->status2.LDBER0 << 16) | (st->status2.LDBER1 << 8) | st->status2.LDBER2; return 0; } static int bcm3510_read_unc(struct dvb_frontend* fe, u32* unc) { struct bcm3510_state* st = fe->demodulator_priv; bcm3510_refresh_state(st); *unc = (st->status2.LDUERC0 << 8) | st->status2.LDUERC1; return 0; } 
static int bcm3510_read_signal_strength(struct dvb_frontend* fe, u16* strength) { struct bcm3510_state* st = fe->demodulator_priv; s32 t; bcm3510_refresh_state(st); t = st->status2.SIGNAL; if (t > 190) t = 190; if (t < 90) t = 90; t -= 90; t = t * 0xff / 100; /* normalize if necessary */ *strength = (t << 8) | t; return 0; } static int bcm3510_read_snr(struct dvb_frontend* fe, u16* snr) { struct bcm3510_state* st = fe->demodulator_priv; bcm3510_refresh_state(st); *snr = st->status1.SNR_EST0*1000 + ((st->status1.SNR_EST1*1000) >> 8); return 0; } /* tuner frontend programming */ static int bcm3510_tuner_cmd(struct bcm3510_state* st,u8 bc, u16 n, u8 a) { struct bcm3510_hab_cmd_tune c; memset(&c,0,sizeof(struct bcm3510_hab_cmd_tune)); /* I2C Mode disabled, set 16 control / Data pairs */ c.length = 0x10; c.clock_width = 0; /* CS1, CS0, DATA, CLK bits control the tuner RF_AGC_SEL pin is set to * logic high (as Configuration) */ c.misc = 0x10; /* Set duration of the initial state of TUNCTL = 3.34 micro Sec */ c.TUNCTL_state = 0x40; /* PRESCALER DIVIDE RATIO | BC1_2_3_4; (band switch), 1stosc REFERENCE COUNTER REF_S12 and REF_S11 */ c.ctl_dat[0].ctrl.size = BITS_8; c.ctl_dat[0].data = 0x80 | bc; /* Control DATA pin, 1stosc REFERENCE COUNTER REF_S10 to REF_S3 */ c.ctl_dat[1].ctrl.size = BITS_8; c.ctl_dat[1].data = 4; /* set CONTROL BIT 1 to 1, 1stosc REFERENCE COUNTER REF_S2 to REF_S1 */ c.ctl_dat[2].ctrl.size = BITS_3; c.ctl_dat[2].data = 0x20; /* control CS0 pin, pulse byte ? */ c.ctl_dat[3].ctrl.size = BITS_3; c.ctl_dat[3].ctrl.clk_off = 1; c.ctl_dat[3].ctrl.cs0 = 1; c.ctl_dat[3].data = 0x40; /* PGM_S18 to PGM_S11 */ c.ctl_dat[4].ctrl.size = BITS_8; c.ctl_dat[4].data = n >> 3; /* PGM_S10 to PGM_S8, SWL_S7 to SWL_S3 */ c.ctl_dat[5].ctrl.size = BITS_8; c.ctl_dat[5].data = ((n & 0x7) << 5) | (a >> 2); /* SWL_S2 and SWL_S1, set CONTROL BIT 2 to 0 */ c.ctl_dat[6].ctrl.size = BITS_3; c.ctl_dat[6].data = (a << 6) & 0xdf; /* control CS0 pin, pulse byte ? 
*/ c.ctl_dat[7].ctrl.size = BITS_3; c.ctl_dat[7].ctrl.clk_off = 1; c.ctl_dat[7].ctrl.cs0 = 1; c.ctl_dat[7].data = 0x40; /* PRESCALER DIVIDE RATIO, 2ndosc REFERENCE COUNTER REF_S12 and REF_S11 */ c.ctl_dat[8].ctrl.size = BITS_8; c.ctl_dat[8].data = 0x80; /* 2ndosc REFERENCE COUNTER REF_S10 to REF_S3 */ c.ctl_dat[9].ctrl.size = BITS_8; c.ctl_dat[9].data = 0x10; /* set CONTROL BIT 1 to 1, 2ndosc REFERENCE COUNTER REF_S2 to REF_S1 */ c.ctl_dat[10].ctrl.size = BITS_3; c.ctl_dat[10].data = 0x20; /* pulse byte */ c.ctl_dat[11].ctrl.size = BITS_3; c.ctl_dat[11].ctrl.clk_off = 1; c.ctl_dat[11].ctrl.cs1 = 1; c.ctl_dat[11].data = 0x40; /* PGM_S18 to PGM_S11 */ c.ctl_dat[12].ctrl.size = BITS_8; c.ctl_dat[12].data = 0x2a; /* PGM_S10 to PGM_S8 and SWL_S7 to SWL_S3 */ c.ctl_dat[13].ctrl.size = BITS_8; c.ctl_dat[13].data = 0x8e; /* SWL_S2 and SWL_S1 and set CONTROL BIT 2 to 0 */ c.ctl_dat[14].ctrl.size = BITS_3; c.ctl_dat[14].data = 0; /* Pulse Byte */ c.ctl_dat[15].ctrl.size = BITS_3; c.ctl_dat[15].ctrl.clk_off = 1; c.ctl_dat[15].ctrl.cs1 = 1; c.ctl_dat[15].data = 0x40; return bcm3510_do_hab_cmd(st,CMD_TUNE, MSGID_TUNE,(u8 *) &c,sizeof(c), NULL, 0); } static int bcm3510_set_freq(struct bcm3510_state* st,u32 freq) { u8 bc,a; u16 n; s32 YIntercept,Tfvco1; freq /= 1000; deb_info("%dkHz:",freq); /* set Band Switch */ if (freq <= 168000) bc = 0x1c; else if (freq <= 378000) bc = 0x2c; else bc = 0x30; if (freq >= 470000) { freq -= 470001; YIntercept = 18805; } else if (freq >= 90000) { freq -= 90001; YIntercept = 15005; } else if (freq >= 76000){ freq -= 76001; YIntercept = 14865; } else { freq -= 54001; YIntercept = 14645; } Tfvco1 = (((freq/6000)*60 + YIntercept)*4)/10; n = Tfvco1 >> 6; a = Tfvco1 & 0x3f; deb_info(" BC1_2_3_4: %x, N: %x A: %x\n", bc, n, a); if (n >= 16 && n <= 2047) return bcm3510_tuner_cmd(st,bc,n,a); return -EINVAL; } static int bcm3510_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct bcm3510_state* st = 
fe->demodulator_priv; struct bcm3510_hab_cmd_ext_acquire cmd; struct bcm3510_hab_cmd_bert_control bert; int ret; memset(&cmd,0,sizeof(cmd)); switch (c->modulation) { case QAM_256: cmd.ACQUIRE0.MODE = 0x1; cmd.ACQUIRE1.SYM_RATE = 0x1; cmd.ACQUIRE1.IF_FREQ = 0x1; break; case QAM_64: cmd.ACQUIRE0.MODE = 0x2; cmd.ACQUIRE1.SYM_RATE = 0x2; cmd.ACQUIRE1.IF_FREQ = 0x1; break; #if 0 case QAM_256: cmd.ACQUIRE0.MODE = 0x3; break; case QAM_128: cmd.ACQUIRE0.MODE = 0x4; break; case QAM_64: cmd.ACQUIRE0.MODE = 0x5; break; case QAM_32: cmd.ACQUIRE0.MODE = 0x6; break; case QAM_16: cmd.ACQUIRE0.MODE = 0x7; break; #endif case VSB_8: cmd.ACQUIRE0.MODE = 0x8; cmd.ACQUIRE1.SYM_RATE = 0x0; cmd.ACQUIRE1.IF_FREQ = 0x0; break; case VSB_16: cmd.ACQUIRE0.MODE = 0x9; cmd.ACQUIRE1.SYM_RATE = 0x0; cmd.ACQUIRE1.IF_FREQ = 0x0; default: return -EINVAL; }; cmd.ACQUIRE0.OFFSET = 0; cmd.ACQUIRE0.NTSCSWEEP = 1; cmd.ACQUIRE0.FA = 1; cmd.ACQUIRE0.BW = 0; /* if (enableOffset) { cmd.IF_OFFSET0 = xx; cmd.IF_OFFSET1 = xx; cmd.SYM_OFFSET0 = xx; cmd.SYM_OFFSET1 = xx; if (enableNtscSweep) { cmd.NTSC_OFFSET0; cmd.NTSC_OFFSET1; } } */ bcm3510_do_hab_cmd(st, CMD_ACQUIRE, MSGID_EXT_TUNER_ACQUIRE, (u8 *) &cmd, sizeof(cmd), NULL, 0); /* doing it with different MSGIDs, data book and source differs */ bert.BE = 0; bert.unused = 0; bcm3510_do_hab_cmd(st, CMD_STATE_CONTROL, MSGID_BERT_CONTROL, (u8 *) &bert, sizeof(bert), NULL, 0); bcm3510_do_hab_cmd(st, CMD_STATE_CONTROL, MSGID_BERT_SET, (u8 *) &bert, sizeof(bert), NULL, 0); bcm3510_bert_reset(st); ret = bcm3510_set_freq(st, c->frequency); if (ret < 0) return ret; memset(&st->status1,0,sizeof(st->status1)); memset(&st->status2,0,sizeof(st->status2)); st->status_check_interval = 500; /* Give the AP some time */ msleep(200); return 0; } static int bcm3510_sleep(struct dvb_frontend* fe) { return 0; } static int bcm3510_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *s) { s->min_delay_ms = 1000; s->step_size = 0; s->max_drift = 0; return 0; } 
static void bcm3510_release(struct dvb_frontend* fe) { struct bcm3510_state* state = fe->demodulator_priv; kfree(state); } /* firmware download: * firmware file is build up like this: * 16bit addr, 16bit length, 8byte of length */ #define BCM3510_DEFAULT_FIRMWARE "dvb-fe-bcm3510-01.fw" static int bcm3510_write_ram(struct bcm3510_state *st, u16 addr, const u8 *b, u16 len) { int ret = 0,i; bcm3510_register_value vH, vL,vD; vH.MADRH_a9 = addr >> 8; vL.MADRL_aa = addr; if ((ret = bcm3510_writeB(st,0xa9,vH)) < 0) return ret; if ((ret = bcm3510_writeB(st,0xaa,vL)) < 0) return ret; for (i = 0; i < len; i++) { vD.MDATA_ab = b[i]; if ((ret = bcm3510_writeB(st,0xab,vD)) < 0) return ret; } return 0; } static int bcm3510_download_firmware(struct dvb_frontend* fe) { struct bcm3510_state* st = fe->demodulator_priv; const struct firmware *fw; u16 addr,len; const u8 *b; int ret,i; deb_info("requesting firmware\n"); if ((ret = st->config->request_firmware(fe, &fw, BCM3510_DEFAULT_FIRMWARE)) < 0) { err("could not load firmware (%s): %d",BCM3510_DEFAULT_FIRMWARE,ret); return ret; } deb_info("got firmware: %zd\n",fw->size); b = fw->data; for (i = 0; i < fw->size;) { addr = le16_to_cpu( *( (u16 *)&b[i] ) ); len = le16_to_cpu( *( (u16 *)&b[i+2] ) ); deb_info("firmware chunk, addr: 0x%04x, len: 0x%04x, total length: 0x%04zx\n",addr,len,fw->size); if ((ret = bcm3510_write_ram(st,addr,&b[i+4],len)) < 0) { err("firmware download failed: %d\n",ret); return ret; } i += 4 + len; } release_firmware(fw); deb_info("firmware download successfully completed\n"); return 0; } static int bcm3510_check_firmware_version(struct bcm3510_state *st) { struct bcm3510_hab_cmd_get_version_info ver; bcm3510_do_hab_cmd(st,CMD_GET_VERSION_INFO,MSGID_GET_VERSION_INFO,NULL,0,(u8*)&ver,sizeof(ver)); deb_info("Version information: 0x%02x 0x%02x 0x%02x 0x%02x\n", ver.microcode_version, ver.script_version, ver.config_version, ver.demod_version); if (ver.script_version == BCM3510_DEF_SCRIPT_VERSION && ver.config_version 
== BCM3510_DEF_CONFIG_VERSION && ver.demod_version == BCM3510_DEF_DEMOD_VERSION) return 0; deb_info("version check failed\n"); return -ENODEV; } /* (un)resetting the AP */ static int bcm3510_reset(struct bcm3510_state *st) { int ret; unsigned long t; bcm3510_register_value v; bcm3510_readB(st,0xa0,&v); v.HCTL1_a0.RESET = 1; if ((ret = bcm3510_writeB(st,0xa0,v)) < 0) return ret; t = jiffies + 3*HZ; while (time_before(jiffies, t)) { msleep(10); if ((ret = bcm3510_readB(st,0xa2,&v)) < 0) return ret; if (v.APSTAT1_a2.RESET) return 0; } deb_info("reset timed out\n"); return -ETIMEDOUT; } static int bcm3510_clear_reset(struct bcm3510_state *st) { bcm3510_register_value v; int ret; unsigned long t; v.raw = 0; if ((ret = bcm3510_writeB(st,0xa0,v)) < 0) return ret; t = jiffies + 3*HZ; while (time_before(jiffies, t)) { msleep(10); if ((ret = bcm3510_readB(st,0xa2,&v)) < 0) return ret; /* verify that reset is cleared */ if (!v.APSTAT1_a2.RESET) return 0; } deb_info("reset clear timed out\n"); return -ETIMEDOUT; } static int bcm3510_init_cold(struct bcm3510_state *st) { int ret; bcm3510_register_value v; /* read Acquisation Processor status register and check it is not in RUN mode */ if ((ret = bcm3510_readB(st,0xa2,&v)) < 0) return ret; if (v.APSTAT1_a2.RUN) { deb_info("AP is already running - firmware already loaded.\n"); return 0; } deb_info("reset?\n"); if ((ret = bcm3510_reset(st)) < 0) return ret; deb_info("tristate?\n"); /* tri-state */ v.TSTCTL_2e.CTL = 0; if ((ret = bcm3510_writeB(st,0x2e,v)) < 0) return ret; deb_info("firmware?\n"); if ((ret = bcm3510_download_firmware(&st->frontend)) < 0 || (ret = bcm3510_clear_reset(st)) < 0) return ret; /* anything left here to Let the acquisition processor begin execution at program counter 0000 ??? 
*/ return 0; } static int bcm3510_init(struct dvb_frontend* fe) { struct bcm3510_state* st = fe->demodulator_priv; bcm3510_register_value j; struct bcm3510_hab_cmd_set_agc c; int ret; if ((ret = bcm3510_readB(st,0xca,&j)) < 0) return ret; deb_info("JDEC: %02x\n",j.raw); switch (j.JDEC_ca.JDEC) { case JDEC_WAIT_AT_RAM: deb_info("attempting to download firmware\n"); if ((ret = bcm3510_init_cold(st)) < 0) return ret; case JDEC_EEPROM_LOAD_WAIT: /* fall-through is wanted */ deb_info("firmware is loaded\n"); bcm3510_check_firmware_version(st); break; default: return -ENODEV; } memset(&c,0,1); c.SEL = 1; bcm3510_do_hab_cmd(st,CMD_AUTO_PARAM,MSGID_SET_RF_AGC_SEL,(u8 *)&c,sizeof(c),NULL,0); return 0; } static struct dvb_frontend_ops bcm3510_ops; struct dvb_frontend* bcm3510_attach(const struct bcm3510_config *config, struct i2c_adapter *i2c) { struct bcm3510_state* state = NULL; int ret; bcm3510_register_value v; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct bcm3510_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->config = config; state->i2c = i2c; /* create dvb_frontend */ memcpy(&state->frontend.ops, &bcm3510_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; mutex_init(&state->hab_mutex); if ((ret = bcm3510_readB(state,0xe0,&v)) < 0) goto error; deb_info("Revision: 0x%1x, Layer: 0x%1x.\n",v.REVID_e0.REV,v.REVID_e0.LAYER); if ((v.REVID_e0.REV != 0x1 && v.REVID_e0.LAYER != 0xb) && /* cold */ (v.REVID_e0.REV != 0x8 && v.REVID_e0.LAYER != 0x0)) /* warm */ goto error; info("Revision: 0x%1x, Layer: 0x%1x.",v.REVID_e0.REV,v.REVID_e0.LAYER); bcm3510_reset(state); return &state->frontend; error: kfree(state); return NULL; } EXPORT_SYMBOL(bcm3510_attach); static struct dvb_frontend_ops bcm3510_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, .info = { .name = "Broadcom BCM3510 VSB/QAM frontend", .frequency_min = 54000000, .frequency_max = 803000000, /* stepsize is just a guess */ 
.frequency_stepsize = 0, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_8VSB | FE_CAN_16VSB | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_128 | FE_CAN_QAM_256 }, .release = bcm3510_release, .init = bcm3510_init, .sleep = bcm3510_sleep, .set_frontend = bcm3510_set_frontend, .get_tune_settings = bcm3510_get_tune_settings, .read_status = bcm3510_read_status, .read_ber = bcm3510_read_ber, .read_signal_strength = bcm3510_read_signal_strength, .read_snr = bcm3510_read_snr, .read_ucblocks = bcm3510_read_unc, }; MODULE_DESCRIPTION("Broadcom BCM3510 ATSC (8VSB/16VSB & ITU J83 AnnexB FEC QAM64/256) demodulator driver"); MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); MODULE_LICENSE("GPL");
gpl-2.0
tsiktsiris/falcon
drivers/ide/ide-pci-generic.c
5030
6195
/* * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org> * Portions (C) Copyright 2002 Red Hat Inc * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * For the avoidance of doubt the "preferred form" of this code is one which * is in an open non patent encumbered format. Where cryptographic key signing * forms part of the process of creating an executable the information * including keys needed to generate an equivalently functional executable * are deemed to be part of the source code. */ #include <linux/types.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/ide.h> #include <linux/init.h> #define DRV_NAME "ide_pci_generic" static bool ide_generic_all; /* Set to claim all devices */ module_param_named(all_generic_ide, ide_generic_all, bool, 0444); MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers."); static void netcell_quirkproc(ide_drive_t *drive) { /* mark words 85-87 as valid */ drive->id[ATA_ID_CSF_DEFAULT] |= 0x4000; } static const struct ide_port_ops netcell_port_ops = { .quirkproc = netcell_quirkproc, }; #define DECLARE_GENERIC_PCI_DEV(extra_flags) \ { \ .name = DRV_NAME, \ .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | \ extra_flags, \ .swdma_mask = ATA_SWDMA2, \ .mwdma_mask = ATA_MWDMA2, \ .udma_mask = ATA_UDMA6, \ } static const struct ide_port_info generic_chipsets[] __devinitdata = { /* 0: Unknown */ DECLARE_GENERIC_PCI_DEV(0), { /* 1: NS87410 */ .name = DRV_NAME, .enablebits = { {0x43, 0x08, 0x08}, {0x47, 0x08, 0x08} }, .host_flags = 
IDE_HFLAG_TRUST_BIOS_FOR_DMA, .swdma_mask = ATA_SWDMA2, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, }, /* 2: SAMURAI / HT6565 / HINT_IDE */ DECLARE_GENERIC_PCI_DEV(0), /* 3: UM8673F / UM8886A / UM8886BF */ DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_DMA), /* 4: VIA_IDE / OPTI621V / Piccolo010{2,3,5} */ DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_AUTODMA), { /* 5: VIA8237SATA */ .name = DRV_NAME, .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | IDE_HFLAG_OFF_BOARD, .swdma_mask = ATA_SWDMA2, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, }, { /* 6: Revolution */ .name = DRV_NAME, .port_ops = &netcell_port_ops, .host_flags = IDE_HFLAG_CLEAR_SIMPLEX | IDE_HFLAG_TRUST_BIOS_FOR_DMA | IDE_HFLAG_OFF_BOARD, .swdma_mask = ATA_SWDMA2, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, } }; /** * generic_init_one - called when a PIIX is found * @dev: the generic device * @id: the matching pci id * * Called when the PCI registration layer (or the IDE initialization) * finds a device matching our IDE device tables. 
*/ static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_device_id *id) { const struct ide_port_info *d = &generic_chipsets[id->driver_data]; int ret = -ENODEV; /* Don't use the generic entry unless instructed to do so */ if (id->driver_data == 0 && ide_generic_all == 0) goto out; switch (dev->vendor) { case PCI_VENDOR_ID_UMC: if (dev->device == PCI_DEVICE_ID_UMC_UM8886A && !(PCI_FUNC(dev->devfn) & 1)) goto out; /* UM8886A/BF pair */ break; case PCI_VENDOR_ID_OPTI: if (dev->device == PCI_DEVICE_ID_OPTI_82C558 && !(PCI_FUNC(dev->devfn) & 1)) goto out; break; case PCI_VENDOR_ID_JMICRON: if (dev->device != PCI_DEVICE_ID_JMICRON_JMB368 && PCI_FUNC(dev->devfn) != 1) goto out; break; case PCI_VENDOR_ID_NS: if (dev->device == PCI_DEVICE_ID_NS_87410 && (dev->class >> 8) != PCI_CLASS_STORAGE_IDE) goto out; break; } if (dev->vendor != PCI_VENDOR_ID_JMICRON) { u16 command; pci_read_config_word(dev, PCI_COMMAND, &command); if (!(command & PCI_COMMAND_IO)) { printk(KERN_INFO "%s %s: skipping disabled " "controller\n", d->name, pci_name(dev)); goto out; } } ret = ide_pci_init_one(dev, d, NULL); out: return ret; } static const struct pci_device_id generic_pci_tbl[] = { { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), 1 }, { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), 2 }, { PCI_VDEVICE(HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), 2 }, { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8673F), 3 }, { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886A), 3 }, { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886BF), 3 }, { PCI_VDEVICE(HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), 2 }, { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C561), 4 }, { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C558), 4 }, #ifdef CONFIG_BLK_DEV_IDE_SATA { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8237_SATA), 5 }, #endif { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), 4 }, { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), 4 }, { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), 4 }, { PCI_VDEVICE(TOSHIBA, 
PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), 4 }, { PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), 6 }, /* * Must come last. If you add entries adjust * this table and generic_chipsets[] appropriately. */ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 0 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, generic_pci_tbl); static struct pci_driver generic_pci_driver = { .name = "PCI_IDE", .id_table = generic_pci_tbl, .probe = generic_init_one, .remove = ide_pci_remove, .suspend = ide_pci_suspend, .resume = ide_pci_resume, }; static int __init generic_ide_init(void) { return ide_pci_register_driver(&generic_pci_driver); } static void __exit generic_ide_exit(void) { pci_unregister_driver(&generic_pci_driver); } module_init(generic_ide_init); module_exit(generic_ide_exit); MODULE_AUTHOR("Andre Hedrick"); MODULE_DESCRIPTION("PCI driver module for generic PCI IDE"); MODULE_LICENSE("GPL");
gpl-2.0
drowningchild/lgog_old
fs/omfs/file.c
7846
9325
/* * OMFS (as used by RIO Karma) file operations. * Copyright (C) 2005 Bob Copeland <me@bobcopeland.com> * Released under GPL v2. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/mpage.h> #include "omfs.h" static u32 omfs_max_extents(struct omfs_sb_info *sbi, int offset) { return (sbi->s_sys_blocksize - offset - sizeof(struct omfs_extent)) / sizeof(struct omfs_extent_entry) + 1; } void omfs_make_empty_table(struct buffer_head *bh, int offset) { struct omfs_extent *oe = (struct omfs_extent *) &bh->b_data[offset]; oe->e_next = ~cpu_to_be64(0ULL); oe->e_extent_count = cpu_to_be32(1), oe->e_fill = cpu_to_be32(0x22), oe->e_entry.e_cluster = ~cpu_to_be64(0ULL); oe->e_entry.e_blocks = ~cpu_to_be64(0ULL); } int omfs_shrink_inode(struct inode *inode) { struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); struct omfs_extent *oe; struct omfs_extent_entry *entry; struct buffer_head *bh; u64 next, last; u32 extent_count; u32 max_extents; int ret; /* traverse extent table, freeing each entry that is greater * than inode->i_size; */ next = inode->i_ino; /* only support truncate -> 0 for now */ ret = -EIO; if (inode->i_size != 0) goto out; bh = omfs_bread(inode->i_sb, next); if (!bh) goto out; oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]); max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START); for (;;) { if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next)) goto out_brelse; extent_count = be32_to_cpu(oe->e_extent_count); if (extent_count > max_extents) goto out_brelse; last = next; next = be64_to_cpu(oe->e_next); entry = &oe->e_entry; /* ignore last entry as it is the terminator */ for (; extent_count > 1; extent_count--) { u64 start, count; start = be64_to_cpu(entry->e_cluster); count = be64_to_cpu(entry->e_blocks); omfs_clear_range(inode->i_sb, start, (int) count); entry++; } omfs_make_empty_table(bh, (char *) oe - bh->b_data); mark_buffer_dirty(bh); brelse(bh); if (last != inode->i_ino) 
omfs_clear_range(inode->i_sb, last, sbi->s_mirrors); if (next == ~0) break; bh = omfs_bread(inode->i_sb, next); if (!bh) goto out; oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]); max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT); } ret = 0; out: return ret; out_brelse: brelse(bh); return ret; } static void omfs_truncate(struct inode *inode) { omfs_shrink_inode(inode); mark_inode_dirty(inode); } /* * Add new blocks to the current extent, or create new entries/continuations * as necessary. */ static int omfs_grow_extent(struct inode *inode, struct omfs_extent *oe, u64 *ret_block) { struct omfs_extent_entry *terminator; struct omfs_extent_entry *entry = &oe->e_entry; struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); u32 extent_count = be32_to_cpu(oe->e_extent_count); u64 new_block = 0; u32 max_count; int new_count; int ret = 0; /* reached the end of the extent table with no blocks mapped. * there are three possibilities for adding: grow last extent, * add a new extent to the current extent table, and add a * continuation inode. 
in last two cases need an allocator for * sbi->s_cluster_size */ /* TODO: handle holes */ /* should always have a terminator */ if (extent_count < 1) return -EIO; /* trivially grow current extent, if next block is not taken */ terminator = entry + extent_count - 1; if (extent_count > 1) { entry = terminator-1; new_block = be64_to_cpu(entry->e_cluster) + be64_to_cpu(entry->e_blocks); if (omfs_allocate_block(inode->i_sb, new_block)) { entry->e_blocks = cpu_to_be64(be64_to_cpu(entry->e_blocks) + 1); terminator->e_blocks = ~(cpu_to_be64( be64_to_cpu(~terminator->e_blocks) + 1)); goto out; } } max_count = omfs_max_extents(sbi, OMFS_EXTENT_START); /* TODO: add a continuation block here */ if (be32_to_cpu(oe->e_extent_count) > max_count-1) return -EIO; /* try to allocate a new cluster */ ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize, &new_block, &new_count); if (ret) goto out_fail; /* copy terminator down an entry */ entry = terminator; terminator++; memcpy(terminator, entry, sizeof(struct omfs_extent_entry)); entry->e_cluster = cpu_to_be64(new_block); entry->e_blocks = cpu_to_be64((u64) new_count); terminator->e_blocks = ~(cpu_to_be64( be64_to_cpu(~terminator->e_blocks) + (u64) new_count)); /* write in new entry */ oe->e_extent_count = cpu_to_be32(1 + be32_to_cpu(oe->e_extent_count)); out: *ret_block = new_block; out_fail: return ret; } /* * Scans across the directory table for a given file block number. * If block not found, return 0. 
*/ static sector_t find_block(struct inode *inode, struct omfs_extent_entry *ent, sector_t block, int count, int *left) { /* count > 1 because of terminator */ sector_t searched = 0; for (; count > 1; count--) { int numblocks = clus_to_blk(OMFS_SB(inode->i_sb), be64_to_cpu(ent->e_blocks)); if (block >= searched && block < searched + numblocks) { /* * found it at cluster + (block - searched) * numblocks - (block - searched) is remainder */ *left = numblocks - (block - searched); return clus_to_blk(OMFS_SB(inode->i_sb), be64_to_cpu(ent->e_cluster)) + block - searched; } searched += numblocks; ent++; } return 0; } static int omfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { struct buffer_head *bh; sector_t next, offset; int ret; u64 uninitialized_var(new_block); u32 max_extents; int extent_count; struct omfs_extent *oe; struct omfs_extent_entry *entry; struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); int max_blocks = bh_result->b_size >> inode->i_blkbits; int remain; ret = -EIO; bh = omfs_bread(inode->i_sb, inode->i_ino); if (!bh) goto out; oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]); max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START); next = inode->i_ino; for (;;) { if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next)) goto out_brelse; extent_count = be32_to_cpu(oe->e_extent_count); next = be64_to_cpu(oe->e_next); entry = &oe->e_entry; if (extent_count > max_extents) goto out_brelse; offset = find_block(inode, entry, block, extent_count, &remain); if (offset > 0) { ret = 0; map_bh(bh_result, inode->i_sb, offset); if (remain > max_blocks) remain = max_blocks; bh_result->b_size = (remain << inode->i_blkbits); goto out_brelse; } if (next == ~0) break; brelse(bh); bh = omfs_bread(inode->i_sb, next); if (!bh) goto out; oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]); max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT); } if (create) { ret = omfs_grow_extent(inode, oe, &new_block); if 
(ret == 0) { mark_buffer_dirty(bh); mark_inode_dirty(inode); map_bh(bh_result, inode->i_sb, clus_to_blk(sbi, new_block)); } } out_brelse: brelse(bh); out: return ret; } static int omfs_readpage(struct file *file, struct page *page) { return block_read_full_page(page, omfs_get_block); } static int omfs_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { return mpage_readpages(mapping, pages, nr_pages, omfs_get_block); } static int omfs_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, omfs_get_block, wbc); } static int omfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { return mpage_writepages(mapping, wbc, omfs_get_block); } static int omfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, flags, pagep, omfs_get_block); if (unlikely(ret)) { loff_t isize = mapping->host->i_size; if (pos + len > isize) vmtruncate(mapping->host, isize); } return ret; } static sector_t omfs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, omfs_get_block); } const struct file_operations omfs_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, .aio_read = generic_file_aio_read, .aio_write = generic_file_aio_write, .mmap = generic_file_mmap, .fsync = generic_file_fsync, .splice_read = generic_file_splice_read, }; static int omfs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; int error; error = inode_change_ok(inode, attr); if (error) return error; if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { error = vmtruncate(inode, attr->ia_size); if (error) return error; } setattr_copy(inode, attr); mark_inode_dirty(inode); return 0; } const struct inode_operations 
omfs_file_inops = { .setattr = omfs_setattr, .truncate = omfs_truncate }; const struct address_space_operations omfs_aops = { .readpage = omfs_readpage, .readpages = omfs_readpages, .writepage = omfs_writepage, .writepages = omfs_writepages, .write_begin = omfs_write_begin, .write_end = generic_write_end, .bmap = omfs_bmap, };
gpl-2.0
edoko/android_samsung_galaxy_pop
drivers/block/mg_disk.c
8358
26577
/* * drivers/block/mg_disk.c * * Support for the mGine m[g]flash IO mode. * Based on legacy hd.c * * (c) 2008 mGine Co.,LTD * (c) 2008 unsik Kim <donari75@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/hdreg.h> #include <linux/ata.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/mg_disk.h> #include <linux/slab.h> #define MG_RES_SEC (CONFIG_MG_DISK_RES << 1) /* name for block device */ #define MG_DISK_NAME "mgd" #define MG_DISK_MAJ 0 #define MG_DISK_MAX_PART 16 #define MG_SECTOR_SIZE 512 #define MG_MAX_SECTS 256 /* Register offsets */ #define MG_BUFF_OFFSET 0x8000 #define MG_REG_OFFSET 0xC000 #define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */ #define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */ #define MG_REG_SECT_CNT (MG_REG_OFFSET + 4) #define MG_REG_SECT_NUM (MG_REG_OFFSET + 6) #define MG_REG_CYL_LOW (MG_REG_OFFSET + 8) #define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA) #define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC) #define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */ #define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */ #define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10) #define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12) /* handy status */ #define MG_STAT_READY (ATA_DRDY | ATA_DSC) #define MG_READY_OK(s) (((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \ ATA_ERR))) == MG_STAT_READY) /* error code for others */ #define MG_ERR_NONE 0 #define MG_ERR_TIMEOUT 0x100 #define MG_ERR_INIT_STAT 0x101 #define MG_ERR_TRANSLATION 0x102 #define MG_ERR_CTRL_RST 0x103 #define MG_ERR_INV_STAT 0x104 #define MG_ERR_RSTOUT 0x105 #define MG_MAX_ERRORS 6 /* Max read/write errors */ /* command */ #define MG_CMD_RD 0x20 #define 
MG_CMD_WR 0x30 #define MG_CMD_SLEEP 0x99 #define MG_CMD_WAKEUP 0xC3 #define MG_CMD_ID 0xEC #define MG_CMD_WR_CONF 0x3C #define MG_CMD_RD_CONF 0x40 /* operation mode */ #define MG_OP_CASCADE (1 << 0) #define MG_OP_CASCADE_SYNC_RD (1 << 1) #define MG_OP_CASCADE_SYNC_WR (1 << 2) #define MG_OP_INTERLEAVE (1 << 3) /* synchronous */ #define MG_BURST_LAT_4 (3 << 4) #define MG_BURST_LAT_5 (4 << 4) #define MG_BURST_LAT_6 (5 << 4) #define MG_BURST_LAT_7 (6 << 4) #define MG_BURST_LAT_8 (7 << 4) #define MG_BURST_LEN_4 (1 << 1) #define MG_BURST_LEN_8 (2 << 1) #define MG_BURST_LEN_16 (3 << 1) #define MG_BURST_LEN_32 (4 << 1) #define MG_BURST_LEN_CONT (0 << 1) /* timeout value (unit: ms) */ #define MG_TMAX_CONF_TO_CMD 1 #define MG_TMAX_WAIT_RD_DRQ 10 #define MG_TMAX_WAIT_WR_DRQ 500 #define MG_TMAX_RST_TO_BUSY 10 #define MG_TMAX_HDRST_TO_RDY 500 #define MG_TMAX_SWRST_TO_RDY 500 #define MG_TMAX_RSTOUT 3000 #define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST) /* main structure for mflash driver */ struct mg_host { struct device *dev; struct request_queue *breq; struct request *req; spinlock_t lock; struct gendisk *gd; struct timer_list timer; void (*mg_do_intr) (struct mg_host *); u16 id[ATA_ID_WORDS]; u16 cyls; u16 heads; u16 sectors; u32 n_sectors; u32 nres_sectors; void __iomem *dev_base; unsigned int irq; unsigned int rst; unsigned int rstout; u32 major; u32 error; }; /* * Debugging macro and defines */ #undef DO_MG_DEBUG #ifdef DO_MG_DEBUG # define MG_DBG(fmt, args...) \ printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args) #else /* CONFIG_MG_DEBUG */ # define MG_DBG(fmt, args...) 
do { } while (0) #endif /* CONFIG_MG_DEBUG */ static void mg_request(struct request_queue *); static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes) { if (__blk_end_request(host->req, err, nr_bytes)) return true; host->req = NULL; return false; } static bool mg_end_request_cur(struct mg_host *host, int err) { return mg_end_request(host, err, blk_rq_cur_bytes(host->req)); } static void mg_dump_status(const char *msg, unsigned int stat, struct mg_host *host) { char *name = MG_DISK_NAME; if (host->req) name = host->req->rq_disk->disk_name; printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff); if (stat & ATA_BUSY) printk("Busy "); if (stat & ATA_DRDY) printk("DriveReady "); if (stat & ATA_DF) printk("WriteFault "); if (stat & ATA_DSC) printk("SeekComplete "); if (stat & ATA_DRQ) printk("DataRequest "); if (stat & ATA_CORR) printk("CorrectedError "); if (stat & ATA_ERR) printk("Error "); printk("}\n"); if ((stat & ATA_ERR) == 0) { host->error = 0; } else { host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR); printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg, host->error & 0xff); if (host->error & ATA_BBK) printk("BadSector "); if (host->error & ATA_UNC) printk("UncorrectableError "); if (host->error & ATA_IDNF) printk("SectorIdNotFound "); if (host->error & ATA_ABORTED) printk("DriveStatusError "); if (host->error & ATA_AMNF) printk("AddrMarkNotFound "); printk("}"); if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) { if (host->req) printk(", sector=%u", (unsigned int)blk_rq_pos(host->req)); } printk("\n"); } } static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec) { u8 status; unsigned long expire, cur_jiffies; struct mg_drv_data *prv_data = host->dev->platform_data; host->error = MG_ERR_NONE; expire = jiffies + msecs_to_jiffies(msec); /* These 2 times dummy status read prevents reading invalid * status. 
A very little time (3 times of mflash operating clk) * is required for busy bit is set. Use dummy read instead of * busy wait, because mflash's PLL is machine dependent. */ if (prv_data->use_polling) { status = inb((unsigned long)host->dev_base + MG_REG_STATUS); status = inb((unsigned long)host->dev_base + MG_REG_STATUS); } status = inb((unsigned long)host->dev_base + MG_REG_STATUS); do { cur_jiffies = jiffies; if (status & ATA_BUSY) { if (expect == ATA_BUSY) break; } else { /* Check the error condition! */ if (status & ATA_ERR) { mg_dump_status("mg_wait", status, host); break; } if (expect == MG_STAT_READY) if (MG_READY_OK(status)) break; if (expect == ATA_DRQ) if (status & ATA_DRQ) break; } if (!msec) { mg_dump_status("not ready", status, host); return MG_ERR_INV_STAT; } status = inb((unsigned long)host->dev_base + MG_REG_STATUS); } while (time_before(cur_jiffies, expire)); if (time_after_eq(cur_jiffies, expire) && msec) host->error = MG_ERR_TIMEOUT; return host->error; } static unsigned int mg_wait_rstout(u32 rstout, u32 msec) { unsigned long expire; expire = jiffies + msecs_to_jiffies(msec); while (time_before(jiffies, expire)) { if (gpio_get_value(rstout) == 1) return MG_ERR_NONE; msleep(10); } return MG_ERR_RSTOUT; } static void mg_unexpected_intr(struct mg_host *host) { u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS); mg_dump_status("mg_unexpected_intr", status, host); } static irqreturn_t mg_irq(int irq, void *dev_id) { struct mg_host *host = dev_id; void (*handler)(struct mg_host *) = host->mg_do_intr; spin_lock(&host->lock); host->mg_do_intr = NULL; del_timer(&host->timer); if (!handler) handler = mg_unexpected_intr; handler(host); spin_unlock(&host->lock); return IRQ_HANDLED; } /* local copy of ata_id_string() */ static void mg_id_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len) { unsigned int c; BUG_ON(len & 1); while (len > 0) { c = id[ofs] >> 8; *s = c; s++; c = id[ofs] & 0xff; *s = c; s++; ofs++; len -= 2; } 
} /* local copy of ata_id_c_string() */ static void mg_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len) { unsigned char *p; mg_id_string(id, s, ofs, len - 1); p = s + strnlen(s, len - 1); while (p > s && p[-1] == ' ') p--; *p = '\0'; } static int mg_get_disk_id(struct mg_host *host) { u32 i; s32 err; const u16 *id = host->id; struct mg_drv_data *prv_data = host->dev->platform_data; char fwrev[ATA_ID_FW_REV_LEN + 1]; char model[ATA_ID_PROD_LEN + 1]; char serial[ATA_ID_SERNO_LEN + 1]; if (!prv_data->use_polling) outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND); err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ); if (err) return err; for (i = 0; i < (MG_SECTOR_SIZE >> 1); i++) host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + i * 2)); outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD); if (err) return err; if ((id[ATA_ID_FIELD_VALID] & 1) == 0) return MG_ERR_TRANSLATION; host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY); host->cyls = id[ATA_ID_CYLS]; host->heads = id[ATA_ID_HEADS]; host->sectors = id[ATA_ID_SECTORS]; if (MG_RES_SEC && host->heads && host->sectors) { /* modify cyls, n_sectors */ host->cyls = (host->n_sectors - MG_RES_SEC) / host->heads / host->sectors; host->nres_sectors = host->n_sectors - host->cyls * host->heads * host->sectors; host->n_sectors -= host->nres_sectors; } mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev)); mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model)); mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial)); printk(KERN_INFO "mg_disk: model: %s\n", model); printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev); printk(KERN_INFO "mg_disk: serial: %s\n", serial); printk(KERN_INFO "mg_disk: %d + reserved %d sectors\n", host->n_sectors, host->nres_sectors); if (!prv_data->use_polling) outb(0, (unsigned 
long)host->dev_base + MG_REG_DRV_CTRL); return err; } static int mg_disk_init(struct mg_host *host) { struct mg_drv_data *prv_data = host->dev->platform_data; s32 err; u8 init_status; /* hdd rst low */ gpio_set_value(host->rst, 0); err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY); if (err) return err; /* hdd rst high */ gpio_set_value(host->rst, 1); err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY); if (err) return err; /* soft reset on */ outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0), (unsigned long)host->dev_base + MG_REG_DRV_CTRL); err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY); if (err) return err; /* soft reset off */ outb(prv_data->use_polling ? ATA_NIEN : 0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY); if (err) return err; init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf; if (init_status == 0xf) return MG_ERR_INIT_STAT; return err; } static void mg_bad_rw_intr(struct mg_host *host) { if (host->req) if (++host->req->errors >= MG_MAX_ERRORS || host->error == MG_ERR_TIMEOUT) mg_end_request_cur(host, -EIO); } static unsigned int mg_out(struct mg_host *host, unsigned int sect_num, unsigned int sect_cnt, unsigned int cmd, void (*intr_addr)(struct mg_host *)) { struct mg_drv_data *prv_data = host->dev->platform_data; if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) return host->error; if (!prv_data->use_polling) { host->mg_do_intr = intr_addr; mod_timer(&host->timer, jiffies + 3 * HZ); } if (MG_RES_SEC) sect_num += MG_RES_SEC; outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT); outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM); outb((u8)(sect_num >> 8), (unsigned long)host->dev_base + MG_REG_CYL_LOW); outb((u8)(sect_num >> 16), (unsigned long)host->dev_base + MG_REG_CYL_HIGH); outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS), (unsigned long)host->dev_base + MG_REG_DRV_HEAD); outb(cmd, (unsigned long)host->dev_base 
+ MG_REG_COMMAND); return MG_ERR_NONE; } static void mg_read_one(struct mg_host *host, struct request *req) { u16 *buff = (u16 *)req->buffer; u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + (i << 1)); } static void mg_read(struct request *req) { struct mg_host *host = req->rq_disk->private_data; if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req), MG_CMD_RD, NULL) != MG_ERR_NONE) mg_bad_rw_intr(host); MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", blk_rq_sectors(req), blk_rq_pos(req), req->buffer); do { if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } mg_read_one(host, req); outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); } static void mg_write_one(struct mg_host *host, struct request *req) { u16 *buff = (u16 *)req->buffer; u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) outw(*buff++, (unsigned long)host->dev_base + MG_BUFF_OFFSET + (i << 1)); } static void mg_write(struct request *req) { struct mg_host *host = req->rq_disk->private_data; unsigned int rem = blk_rq_sectors(req); if (mg_out(host, blk_rq_pos(req), rem, MG_CMD_WR, NULL) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", rem, blk_rq_pos(req), req->buffer); if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } do { mg_write_one(host, req); outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); rem--; if (rem > 1 && mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } else if (mg_wait(host, MG_STAT_READY, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); } static void mg_read_intr(struct mg_host *host) { struct request *req = host->req; u32 i; /* check status */ do { i = inb((unsigned 
long)host->dev_base + MG_REG_STATUS); if (i & ATA_BUSY) break; if (!MG_READY_OK(i)) break; if (i & ATA_DRQ) goto ok_to_read; } while (0); mg_dump_status("mg_read_intr", i, host); mg_bad_rw_intr(host); mg_request(host->breq); return; ok_to_read: mg_read_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer); /* send read confirm */ outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); if (mg_end_request(host, 0, MG_SECTOR_SIZE)) { /* set handler if read remains */ host->mg_do_intr = mg_read_intr; mod_timer(&host->timer, jiffies + 3 * HZ); } else /* goto next request */ mg_request(host->breq); } static void mg_write_intr(struct mg_host *host) { struct request *req = host->req; u32 i; bool rem; /* check status */ do { i = inb((unsigned long)host->dev_base + MG_REG_STATUS); if (i & ATA_BUSY) break; if (!MG_READY_OK(i)) break; if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ)) goto ok_to_write; } while (0); mg_dump_status("mg_write_intr", i, host); mg_bad_rw_intr(host); mg_request(host->breq); return; ok_to_write: if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) { /* write 1 sector and set handler if remains */ mg_write_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", blk_rq_pos(req), blk_rq_sectors(req), req->buffer); host->mg_do_intr = mg_write_intr; mod_timer(&host->timer, jiffies + 3 * HZ); } /* send write confirm */ outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); if (!rem) mg_request(host->breq); } void mg_times_out(unsigned long data) { struct mg_host *host = (struct mg_host *)data; char *name; spin_lock_irq(&host->lock); if (!host->req) goto out_unlock; host->mg_do_intr = NULL; name = host->req->rq_disk->disk_name; printk(KERN_DEBUG "%s: timeout\n", name); host->error = MG_ERR_TIMEOUT; mg_bad_rw_intr(host); out_unlock: mg_request(host->breq); spin_unlock_irq(&host->lock); } static void mg_request_poll(struct request_queue *q) { struct 
mg_host *host = q->queuedata; while (1) { if (!host->req) { host->req = blk_fetch_request(q); if (!host->req) break; } if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) { mg_end_request_cur(host, -EIO); continue; } if (rq_data_dir(host->req) == READ) mg_read(host->req); else mg_write(host->req); } } static unsigned int mg_issue_req(struct request *req, struct mg_host *host, unsigned int sect_num, unsigned int sect_cnt) { switch (rq_data_dir(req)) { case READ: if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr) != MG_ERR_NONE) { mg_bad_rw_intr(host); return host->error; } break; case WRITE: /* TODO : handler */ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr) != MG_ERR_NONE) { mg_bad_rw_intr(host); return host->error; } del_timer(&host->timer); mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ); outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); if (host->error) { mg_bad_rw_intr(host); return host->error; } mg_write_one(host, req); mod_timer(&host->timer, jiffies + 3 * HZ); outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); break; } return MG_ERR_NONE; } /* This function also called from IRQ context */ static void mg_request(struct request_queue *q) { struct mg_host *host = q->queuedata; struct request *req; u32 sect_num, sect_cnt; while (1) { if (!host->req) { host->req = blk_fetch_request(q); if (!host->req) break; } req = host->req; /* check unwanted request call */ if (host->mg_do_intr) return; del_timer(&host->timer); sect_num = blk_rq_pos(req); /* deal whole segments */ sect_cnt = blk_rq_sectors(req); /* sanity check */ if (sect_num >= get_capacity(req->rq_disk) || ((sect_num + sect_cnt) > get_capacity(req->rq_disk))) { printk(KERN_WARNING "%s: bad access: sector=%d, count=%d\n", req->rq_disk->disk_name, sect_num, sect_cnt); mg_end_request_cur(host, -EIO); continue; } if (unlikely(req->cmd_type != REQ_TYPE_FS)) { mg_end_request_cur(host, -EIO); 
continue; } if (!mg_issue_req(req, host, sect_num, sect_cnt)) return; } } static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct mg_host *host = bdev->bd_disk->private_data; geo->cylinders = (unsigned short)host->cyls; geo->heads = (unsigned char)host->heads; geo->sectors = (unsigned char)host->sectors; return 0; } static const struct block_device_operations mg_disk_ops = { .getgeo = mg_getgeo }; static int mg_suspend(struct platform_device *plat_dev, pm_message_t state) { struct mg_drv_data *prv_data = plat_dev->dev.platform_data; struct mg_host *host = prv_data->host; if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) return -EIO; if (!prv_data->use_polling) outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND); /* wait until mflash deep sleep */ msleep(1); if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) { if (!prv_data->use_polling) outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); return -EIO; } return 0; } static int mg_resume(struct platform_device *plat_dev) { struct mg_drv_data *prv_data = plat_dev->dev.platform_data; struct mg_host *host = prv_data->host; if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) return -EIO; outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND); /* wait until mflash wakeup */ msleep(1); if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) return -EIO; if (!prv_data->use_polling) outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); return 0; } static int mg_probe(struct platform_device *plat_dev) { struct mg_host *host; struct resource *rsc; struct mg_drv_data *prv_data = plat_dev->dev.platform_data; int err = 0; if (!prv_data) { printk(KERN_ERR "%s:%d fail (no driver_data)\n", __func__, __LINE__); err = -EINVAL; goto probe_err; } /* alloc mg_host */ host = kzalloc(sizeof(struct mg_host), GFP_KERNEL); if (!host) { printk(KERN_ERR "%s:%d fail (no memory for mg_host)\n", __func__, 
__LINE__);
		err = -ENOMEM;
		goto probe_err;
	}
	host->major = MG_DISK_MAJ;

	/* link each other */
	prv_data->host = host;
	host->dev = &plat_dev->dev;

	/* io remap */
	rsc = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (!rsc) {
		printk(KERN_ERR "%s:%d platform_get_resource fail\n",
				__func__, __LINE__);
		err = -EINVAL;
		goto probe_err_2;
	}
	host->dev_base = ioremap(rsc->start, resource_size(rsc));
	if (!host->dev_base) {
		printk(KERN_ERR "%s:%d ioremap fail\n",
				__func__, __LINE__);
		err = -EIO;
		goto probe_err_2;
	}
	MG_DBG("dev_base = 0x%x\n", (u32)host->dev_base);

	/* get reset pin */
	rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
			MG_RST_PIN);
	if (!rsc) {
		printk(KERN_ERR "%s:%d get reset pin fail\n",
				__func__, __LINE__);
		err = -EIO;
		goto probe_err_3;
	}
	host->rst = rsc->start;

	/* init rst pin: drive the reset line high (inactive) */
	err = gpio_request(host->rst, MG_RST_PIN);
	if (err)
		goto probe_err_3;
	gpio_direction_output(host->rst, 1);

	/* reset out pin */
	if (!(prv_data->dev_attr & MG_DEV_MASK))
		goto probe_err_3a;

	/* rstout is only acquired for non-boot devices */
	if (prv_data->dev_attr != MG_BOOT_DEV) {
		rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
				MG_RSTOUT_PIN);
		if (!rsc) {
			printk(KERN_ERR "%s:%d get reset-out pin fail\n",
					__func__, __LINE__);
			err = -EIO;
			goto probe_err_3a;
		}
		host->rstout = rsc->start;
		err = gpio_request(host->rstout, MG_RSTOUT_PIN);
		if (err)
			goto probe_err_3a;
		gpio_direction_input(host->rstout);
	}

	/* disk reset */
	if (prv_data->dev_attr == MG_STORAGE_DEV) {
		/* If the power-on-reset sequence has not finished yet, wait */
		err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
		if (err)
			goto probe_err_3b;
		err = mg_disk_init(host);
		if (err) {
			printk(KERN_ERR "%s:%d fail (err code : %d)\n",
					__func__, __LINE__, err);
			err = -EIO;
			goto probe_err_3b;
		}
	}

	/* get irq resource */
	if (!prv_data->use_polling) {
		host->irq = platform_get_irq(plat_dev, 0);
		/* NOTE(review): only -ENXIO is treated as failure here; any
		 * other negative return from platform_get_irq() falls through
		 * into request_irq() below - confirm this is intended. */
		if (host->irq == -ENXIO) {
			err = host->irq;
			goto probe_err_3b;
		}
		err = request_irq(host->irq, mg_irq,
				IRQF_DISABLED | IRQF_TRIGGER_RISING,
				MG_DEV_NAME, host);
		if (err) {
			printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
					__func__, __LINE__, err);
			goto probe_err_3b;
		}
	}

	/* get disk id */
	err = mg_get_disk_id(host);
	if (err) {
		printk(KERN_ERR "%s:%d fail (err code : %d)\n",
				__func__, __LINE__, err);
		err = -EIO;
		goto probe_err_4;
	}

	/* register_blkdev() returns the dynamically assigned major when
	 * host->major is 0 */
	err = register_blkdev(host->major, MG_DISK_NAME);
	if (err < 0) {
		printk(KERN_ERR "%s:%d register_blkdev fail (err code : %d)\n",
				__func__, __LINE__, err);
		goto probe_err_4;
	}
	if (!host->major)
		host->major = err;

	spin_lock_init(&host->lock);

	if (prv_data->use_polling)
		host->breq = blk_init_queue(mg_request_poll, &host->lock);
	else
		host->breq = blk_init_queue(mg_request, &host->lock);

	if (!host->breq) {
		err = -ENOMEM;
		printk(KERN_ERR "%s:%d (blk_init_queue) fail\n",
				__func__, __LINE__);
		goto probe_err_5;
	}
	host->breq->queuedata = host;

	/* mflash is random device, thanx for the noop */
	err = elevator_change(host->breq, "noop");
	if (err) {
		printk(KERN_ERR "%s:%d (elevator_init) fail\n",
				__func__, __LINE__);
		goto probe_err_6;
	}
	blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS);
	blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);

	/* command timeout handling */
	init_timer(&host->timer);
	host->timer.function = mg_times_out;
	host->timer.data = (unsigned long)host;

	host->gd = alloc_disk(MG_DISK_MAX_PART);
	if (!host->gd) {
		printk(KERN_ERR "%s:%d (alloc_disk) fail\n",
				__func__, __LINE__);
		err = -ENOMEM;
		goto probe_err_7;
	}
	host->gd->major = host->major;
	host->gd->first_minor = 0;
	host->gd->fops = &mg_disk_ops;
	host->gd->queue = host->breq;
	host->gd->private_data = host;
	sprintf(host->gd->disk_name, MG_DISK_NAME"a");

	set_capacity(host->gd, host->n_sectors);

	add_disk(host->gd);

	return err;

	/*
	 * Error unwind: each label releases what was acquired after the
	 * corresponding acquisition point, in reverse order.
	 */
probe_err_7:
	del_timer_sync(&host->timer);
probe_err_6:
	blk_cleanup_queue(host->breq);
probe_err_5:
	/* NOTE(review): unregisters MG_DISK_MAJ unconditionally; when
	 * MG_DISK_MAJ is 0 the device was registered under the dynamic
	 * host->major instead - verify this matches register_blkdev above. */
	unregister_blkdev(MG_DISK_MAJ, MG_DISK_NAME);
probe_err_4:
	if (!prv_data->use_polling)
		free_irq(host->irq, host);
probe_err_3b:
	/* NOTE(review): this label is reachable from the irq path even when
	 * dev_attr == MG_BOOT_DEV, in which case host->rstout was never
	 * gpio_request()ed - gpio_free() would then hit an unrequested GPIO. */
	gpio_free(host->rstout);
probe_err_3a:
	gpio_free(host->rst);
probe_err_3:
	iounmap(host->dev_base);
probe_err_2:
	kfree(host);
probe_err:
	return err;
}

/*
 * mg_remove - platform driver remove callback.
 *
 * Tears down everything mg_probe() set up; each release is guarded with a
 * validity check so a partially initialised host is handled too.
 * Always returns 0.
 */
static int mg_remove(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;
	int err = 0;

	/* delete timer */
	del_timer_sync(&host->timer);

	/* remove disk */
	if (host->gd) {
		del_gendisk(host->gd);
		put_disk(host->gd);
	}
	/* remove queue */
	if (host->breq)
		blk_cleanup_queue(host->breq);

	/* unregister blk device */
	unregister_blkdev(host->major, MG_DISK_NAME);

	/* free irq */
	if (!prv_data->use_polling)
		free_irq(host->irq, host);

	/* free reset-out pin (only requested for non-boot devices) */
	if (prv_data->dev_attr != MG_BOOT_DEV)
		gpio_free(host->rstout);

	/* free rst pin */
	if (host->rst)
		gpio_free(host->rst);

	/* unmap io */
	if (host->dev_base)
		iounmap(host->dev_base);

	/* free mg_host */
	kfree(host);

	return err;
}

/* Platform driver glue: probe/remove plus PM suspend/resume hooks. */
static struct platform_driver mg_disk_driver = {
	.probe = mg_probe,
	.remove = mg_remove,
	.suspend = mg_suspend,
	.resume = mg_resume,
	.driver = {
		.name = MG_DEV_NAME,
		.owner = THIS_MODULE,
	}
};

/****************************************************************************
 *
 * Module stuff
 *
 ****************************************************************************/

/* Module entry point: register the platform driver with the driver core. */
static int __init mg_init(void)
{
	printk(KERN_INFO "mGine mflash driver, (c) 2008 mGine Co.\n");
	return platform_driver_register(&mg_disk_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit mg_exit(void)
{
	printk(KERN_INFO "mflash driver : bye bye\n");
	platform_driver_unregister(&mg_disk_driver);
}

module_init(mg_init);
module_exit(mg_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("unsik Kim <donari75@gmail.com>");
MODULE_DESCRIPTION("mGine m[g]flash device driver");
gpl-2.0
utkanos/android_htc_mecha_kernel_oc
fs/sysv/balloc.c
10150
5765
/*
 *  linux/fs/sysv/balloc.c
 *
 *  minix/bitmap.c
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext/freelists.c
 *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
 *
 *  xenix/alloc.c
 *  Copyright (C) 1992  Doug Evans
 *
 *  coh/alloc.c
 *  Copyright (C) 1993  Pascal Haible, Bruno Haible
 *
 *  sysv/balloc.c
 *  Copyright (C) 1993  Bruno Haible
 *
 *  This file contains code for allocating/freeing blocks.
 */

#include <linux/buffer_head.h>
#include <linux/string.h>
#include "sysv.h"

/* We don't trust the value of
   sb->sv_sbd2->s_tfree = *sb->sv_free_blocks
   but we nevertheless keep it up to date. */

/*
 * Return a pointer to the free-list chunk inside a free-list block:
 * SYSV4 stores the zone array at offset 4, the other variants at offset 2
 * (the leading bytes hold the on-disk entry count).
 */
static inline sysv_zone_t *get_chunk(struct super_block *sb,
				     struct buffer_head *bh)
{
	char *bh_data = bh->b_data;

	if (SYSV_SB(sb)->s_type == FSTYPE_SYSV4)
		return (sysv_zone_t*)(bh_data+4);
	else
		return (sysv_zone_t*)(bh_data+2);
}

/* NOTE NOTE NOTE: nr is a block number _as_ _stored_ _on_ _disk_ */

/*
 * Free block @nr back to the filesystem's free list.  If the cached
 * free-list head is full (or empty on Coherent), the current cache is
 * spilled into the block being freed, which then becomes the new tail
 * of the on-disk chained free list.
 */
void sysv_free_block(struct super_block * sb, sysv_zone_t nr)
{
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	sysv_zone_t *blocks = sbi->s_bcache;
	unsigned count;
	unsigned block = fs32_to_cpu(sbi, nr);

	/*
	 * This code does not work at all for AFS (it has a bitmap
	 * free list).  As AFS is supposed to be read-only no one
	 * should call this for an AFS filesystem anyway...
	 */
	if (sbi->s_type == FSTYPE_AFS)
		return;

	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
		printk("sysv_free_block: trying to free block not in datazone\n");
		return;
	}

	lock_super(sb);
	count = fs16_to_cpu(sbi, *sbi->s_bcache_count);

	if (count > sbi->s_flc_size) {
		printk("sysv_free_block: flc_count > flc_size\n");
		unlock_super(sb);
		return;
	}
	/* If the free list head in super-block is full, it is copied
	 * into this block being freed, ditto if it's completely empty
	 * (applies only on Coherent).
	 */
	if (count == sbi->s_flc_size || count == 0) {
		block += sbi->s_block_base;
		bh = sb_getblk(sb, block);
		if (!bh) {
			printk("sysv_free_block: getblk() failed\n");
			unlock_super(sb);
			return;
		}
		/* write count + cached zones into the freed block itself */
		memset(bh->b_data, 0, sb->s_blocksize);
		*(__fs16*)bh->b_data = cpu_to_fs16(sbi, count);
		memcpy(get_chunk(sb,bh), blocks, count * sizeof(sysv_zone_t));
		mark_buffer_dirty(bh);
		set_buffer_uptodate(bh);
		brelse(bh);
		count = 0;
	}
	/* the freed block (in on-disk byte order) becomes the new list head */
	sbi->s_bcache[count++] = nr;

	*sbi->s_bcache_count = cpu_to_fs16(sbi, count);
	fs32_add(sbi, sbi->s_free_blocks, 1);
	dirty_sb(sb);
	unlock_super(sb);
}

/*
 * Allocate one block from the free list.  Returns the block number in
 * on-disk byte order, or 0 when the filesystem is full (or the free
 * list is corrupt).
 */
sysv_zone_t sysv_new_block(struct super_block * sb)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	unsigned int block;
	sysv_zone_t nr;
	struct buffer_head * bh;
	unsigned count;

	lock_super(sb);
	count = fs16_to_cpu(sbi, *sbi->s_bcache_count);

	if (count == 0) /* Applies only to Coherent FS */
		goto Enospc;
	nr = sbi->s_bcache[--count];
	if (nr == 0)  /* Applies only to Xenix FS, SystemV FS */
		goto Enospc;

	block = fs32_to_cpu(sbi, nr);

	*sbi->s_bcache_count = cpu_to_fs16(sbi, count);

	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
		printk("sysv_new_block: new block %d is not in data zone\n",
			block);
		goto Enospc;
	}

	if (count == 0) { /* the last block continues the free list */
		unsigned count;

		block += sbi->s_block_base;
		if (!(bh = sb_bread(sb, block))) {
			printk("sysv_new_block: cannot read free-list block\n");
			/* retry this same block next time */
			*sbi->s_bcache_count = cpu_to_fs16(sbi, 1);
			goto Enospc;
		}
		count = fs16_to_cpu(sbi, *(__fs16*)bh->b_data);
		if (count > sbi->s_flc_size) {
			printk("sysv_new_block: free-list block with >flc_size entries\n");
			brelse(bh);
			goto Enospc;
		}
		/* refill the cache from the chained free-list block */
		*sbi->s_bcache_count = cpu_to_fs16(sbi, count);
		memcpy(sbi->s_bcache, get_chunk(sb, bh),
			count * sizeof(sysv_zone_t));
		brelse(bh);
	}
	/* Now the free list head in the superblock is valid again. */
	fs32_add(sbi, sbi->s_free_blocks, -1);
	dirty_sb(sb);
	unlock_super(sb);
	return nr;

Enospc:
	unlock_super(sb);
	return 0;
}

/*
 * Count the free blocks by walking the entire chained free list, and
 * cross-check the result against the superblock counter.  On any
 * inconsistency in the list the superblock value is trusted instead;
 * on a plain count mismatch the superblock is corrected (rw mounts).
 */
unsigned long sysv_count_free_blocks(struct super_block * sb)
{
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	int sb_count;
	int count;
	struct buffer_head * bh = NULL;
	sysv_zone_t *blocks;
	unsigned block;
	int n;

	/*
	 * This code does not work at all for AFS (it has a bitmap
	 * free list).  As AFS is supposed to be read-only we just
	 * lie and say it has no free block at all.
	 */
	if (sbi->s_type == FSTYPE_AFS)
		return 0;

	lock_super(sb);
	sb_count = fs32_to_cpu(sbi, *sbi->s_free_blocks);

	/* flip to 1 to short-circuit and trust the superblock counter */
	if (0)
		goto trust_sb;

	/* this causes a lot of disk traffic ... */
	count = 0;
	n = fs16_to_cpu(sbi, *sbi->s_bcache_count);
	blocks = sbi->s_bcache;
	while (1) {
		sysv_zone_t zone;
		if (n > sbi->s_flc_size)
			goto E2big;
		zone = 0;
		/* count entries in the current chunk; a zero zone ends the chain */
		while (n && (zone = blocks[--n]) != 0)
			count++;
		if (zone == 0)
			break;
		block = fs32_to_cpu(sbi, zone);
		if (bh)
			brelse(bh);
		if (block < sbi->s_firstdatazone || block >= sbi->s_nzones)
			goto Einval;
		block += sbi->s_block_base;
		bh = sb_bread(sb, block);
		if (!bh)
			goto Eio;
		n = fs16_to_cpu(sbi, *(__fs16*)bh->b_data);
		blocks = get_chunk(sb, bh);
	}
	if (bh)
		brelse(bh);
	if (count != sb_count)
		goto Ecount;
done:
	unlock_super(sb);
	return count;

Einval:
	printk("sysv_count_free_blocks: new block %d is not in data zone\n",
		block);
	goto trust_sb;
Eio:
	printk("sysv_count_free_blocks: cannot read free-list block\n");
	goto trust_sb;
E2big:
	printk("sysv_count_free_blocks: >flc_size entries in free-list block\n");
	if (bh)
		brelse(bh);
trust_sb:
	count = sb_count;
	goto done;
Ecount:
	printk("sysv_count_free_blocks: free block count was %d, "
		"correcting to %d\n", sb_count, count);
	if (!(sb->s_flags & MS_RDONLY)) {
		*sbi->s_free_blocks = cpu_to_fs32(sbi, count);
		dirty_sb(sb);
	}
	goto done;
}
gpl-2.0
onealtom/MYD-J335X-Linux-Kernel
fs/sysv/balloc.c
10150
5765
/*
 *  linux/fs/sysv/balloc.c
 *
 *  minix/bitmap.c
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext/freelists.c
 *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
 *
 *  xenix/alloc.c
 *  Copyright (C) 1992  Doug Evans
 *
 *  coh/alloc.c
 *  Copyright (C) 1993  Pascal Haible, Bruno Haible
 *
 *  sysv/balloc.c
 *  Copyright (C) 1993  Bruno Haible
 *
 *  This file contains code for allocating/freeing blocks.
 */

#include <linux/buffer_head.h>
#include <linux/string.h>
#include "sysv.h"

/* We don't trust the value of
   sb->sv_sbd2->s_tfree = *sb->sv_free_blocks
   but we nevertheless keep it up to date. */

/*
 * Return a pointer to the free-list chunk inside a free-list block:
 * SYSV4 stores the zone array at offset 4, the other variants at offset 2
 * (the leading bytes hold the on-disk entry count).
 */
static inline sysv_zone_t *get_chunk(struct super_block *sb,
				     struct buffer_head *bh)
{
	char *bh_data = bh->b_data;

	if (SYSV_SB(sb)->s_type == FSTYPE_SYSV4)
		return (sysv_zone_t*)(bh_data+4);
	else
		return (sysv_zone_t*)(bh_data+2);
}

/* NOTE NOTE NOTE: nr is a block number _as_ _stored_ _on_ _disk_ */

/*
 * Free block @nr back to the filesystem's free list.  If the cached
 * free-list head is full (or empty on Coherent), the current cache is
 * spilled into the block being freed, which then becomes the new tail
 * of the on-disk chained free list.
 */
void sysv_free_block(struct super_block * sb, sysv_zone_t nr)
{
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	sysv_zone_t *blocks = sbi->s_bcache;
	unsigned count;
	unsigned block = fs32_to_cpu(sbi, nr);

	/*
	 * This code does not work at all for AFS (it has a bitmap
	 * free list).  As AFS is supposed to be read-only no one
	 * should call this for an AFS filesystem anyway...
	 */
	if (sbi->s_type == FSTYPE_AFS)
		return;

	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
		printk("sysv_free_block: trying to free block not in datazone\n");
		return;
	}

	lock_super(sb);
	count = fs16_to_cpu(sbi, *sbi->s_bcache_count);

	if (count > sbi->s_flc_size) {
		printk("sysv_free_block: flc_count > flc_size\n");
		unlock_super(sb);
		return;
	}
	/* If the free list head in super-block is full, it is copied
	 * into this block being freed, ditto if it's completely empty
	 * (applies only on Coherent).
	 */
	if (count == sbi->s_flc_size || count == 0) {
		block += sbi->s_block_base;
		bh = sb_getblk(sb, block);
		if (!bh) {
			printk("sysv_free_block: getblk() failed\n");
			unlock_super(sb);
			return;
		}
		/* write count + cached zones into the freed block itself */
		memset(bh->b_data, 0, sb->s_blocksize);
		*(__fs16*)bh->b_data = cpu_to_fs16(sbi, count);
		memcpy(get_chunk(sb,bh), blocks, count * sizeof(sysv_zone_t));
		mark_buffer_dirty(bh);
		set_buffer_uptodate(bh);
		brelse(bh);
		count = 0;
	}
	/* the freed block (in on-disk byte order) becomes the new list head */
	sbi->s_bcache[count++] = nr;

	*sbi->s_bcache_count = cpu_to_fs16(sbi, count);
	fs32_add(sbi, sbi->s_free_blocks, 1);
	dirty_sb(sb);
	unlock_super(sb);
}

/*
 * Allocate one block from the free list.  Returns the block number in
 * on-disk byte order, or 0 when the filesystem is full (or the free
 * list is corrupt).
 */
sysv_zone_t sysv_new_block(struct super_block * sb)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	unsigned int block;
	sysv_zone_t nr;
	struct buffer_head * bh;
	unsigned count;

	lock_super(sb);
	count = fs16_to_cpu(sbi, *sbi->s_bcache_count);

	if (count == 0) /* Applies only to Coherent FS */
		goto Enospc;
	nr = sbi->s_bcache[--count];
	if (nr == 0)  /* Applies only to Xenix FS, SystemV FS */
		goto Enospc;

	block = fs32_to_cpu(sbi, nr);

	*sbi->s_bcache_count = cpu_to_fs16(sbi, count);

	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
		printk("sysv_new_block: new block %d is not in data zone\n",
			block);
		goto Enospc;
	}

	if (count == 0) { /* the last block continues the free list */
		unsigned count;

		block += sbi->s_block_base;
		if (!(bh = sb_bread(sb, block))) {
			printk("sysv_new_block: cannot read free-list block\n");
			/* retry this same block next time */
			*sbi->s_bcache_count = cpu_to_fs16(sbi, 1);
			goto Enospc;
		}
		count = fs16_to_cpu(sbi, *(__fs16*)bh->b_data);
		if (count > sbi->s_flc_size) {
			printk("sysv_new_block: free-list block with >flc_size entries\n");
			brelse(bh);
			goto Enospc;
		}
		/* refill the cache from the chained free-list block */
		*sbi->s_bcache_count = cpu_to_fs16(sbi, count);
		memcpy(sbi->s_bcache, get_chunk(sb, bh),
			count * sizeof(sysv_zone_t));
		brelse(bh);
	}
	/* Now the free list head in the superblock is valid again. */
	fs32_add(sbi, sbi->s_free_blocks, -1);
	dirty_sb(sb);
	unlock_super(sb);
	return nr;

Enospc:
	unlock_super(sb);
	return 0;
}

/*
 * Count the free blocks by walking the entire chained free list, and
 * cross-check the result against the superblock counter.  On any
 * inconsistency in the list the superblock value is trusted instead;
 * on a plain count mismatch the superblock is corrected (rw mounts).
 */
unsigned long sysv_count_free_blocks(struct super_block * sb)
{
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	int sb_count;
	int count;
	struct buffer_head * bh = NULL;
	sysv_zone_t *blocks;
	unsigned block;
	int n;

	/*
	 * This code does not work at all for AFS (it has a bitmap
	 * free list).  As AFS is supposed to be read-only we just
	 * lie and say it has no free block at all.
	 */
	if (sbi->s_type == FSTYPE_AFS)
		return 0;

	lock_super(sb);
	sb_count = fs32_to_cpu(sbi, *sbi->s_free_blocks);

	/* flip to 1 to short-circuit and trust the superblock counter */
	if (0)
		goto trust_sb;

	/* this causes a lot of disk traffic ... */
	count = 0;
	n = fs16_to_cpu(sbi, *sbi->s_bcache_count);
	blocks = sbi->s_bcache;
	while (1) {
		sysv_zone_t zone;
		if (n > sbi->s_flc_size)
			goto E2big;
		zone = 0;
		/* count entries in the current chunk; a zero zone ends the chain */
		while (n && (zone = blocks[--n]) != 0)
			count++;
		if (zone == 0)
			break;
		block = fs32_to_cpu(sbi, zone);
		if (bh)
			brelse(bh);
		if (block < sbi->s_firstdatazone || block >= sbi->s_nzones)
			goto Einval;
		block += sbi->s_block_base;
		bh = sb_bread(sb, block);
		if (!bh)
			goto Eio;
		n = fs16_to_cpu(sbi, *(__fs16*)bh->b_data);
		blocks = get_chunk(sb, bh);
	}
	if (bh)
		brelse(bh);
	if (count != sb_count)
		goto Ecount;
done:
	unlock_super(sb);
	return count;

Einval:
	printk("sysv_count_free_blocks: new block %d is not in data zone\n",
		block);
	goto trust_sb;
Eio:
	printk("sysv_count_free_blocks: cannot read free-list block\n");
	goto trust_sb;
E2big:
	printk("sysv_count_free_blocks: >flc_size entries in free-list block\n");
	if (bh)
		brelse(bh);
trust_sb:
	count = sb_count;
	goto done;
Ecount:
	printk("sysv_count_free_blocks: free block count was %d, "
		"correcting to %d\n", sb_count, count);
	if (!(sb->s_flags & MS_RDONLY)) {
		*sbi->s_free_blocks = cpu_to_fs32(sbi, count);
		dirty_sb(sb);
	}
	goto done;
}
gpl-2.0
kasperhettinga/p4wifi_stock
drivers/message/i2o/bus-osm.c
13478
4125
/* * Bus Adapter OSM * * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Fixes/additions: * Markus Lidel <Markus.Lidel@shadowconnect.com> * initial version. */ #include <linux/module.h> #include <linux/i2o.h> #define OSM_NAME "bus-osm" #define OSM_VERSION "1.317" #define OSM_DESCRIPTION "I2O Bus Adapter OSM" static struct i2o_driver i2o_bus_driver; /* Bus OSM class handling definition */ static struct i2o_class_id i2o_bus_class_id[] = { {I2O_CLASS_BUS_ADAPTER}, {I2O_CLASS_END} }; /** * i2o_bus_scan - Scan the bus for new devices * @dev: I2O device of the bus, which should be scanned * * Scans the bus dev for new / removed devices. After the scan a new LCT * will be fetched automatically. * * Returns 0 on success or negative error code on failure. */ static int i2o_bus_scan(struct i2o_device *dev) { struct i2o_message *msg; msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return -ETIMEDOUT; msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data. tid); return i2o_msg_post_wait(dev->iop, msg, 60); }; /** * i2o_bus_store_scan - Scan the I2O Bus Adapter * @d: device which should be scanned * @attr: device_attribute * @buf: output buffer * @count: buffer size * * Returns count. 
*/ static ssize_t i2o_bus_store_scan(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct i2o_device *i2o_dev = to_i2o_device(d); int rc; if ((rc = i2o_bus_scan(i2o_dev))) osm_warn("bus scan failed %d\n", rc); return count; } /* Bus Adapter OSM device attributes */ static DEVICE_ATTR(scan, S_IWUSR, NULL, i2o_bus_store_scan); /** * i2o_bus_probe - verify if dev is a I2O Bus Adapter device and install it * @dev: device to verify if it is a I2O Bus Adapter device * * Because we want all Bus Adapters always return 0. * Except when we fail. Then we are sad. * * Returns 0, except when we fail to excel. */ static int i2o_bus_probe(struct device *dev) { struct i2o_device *i2o_dev = to_i2o_device(get_device(dev)); int rc; rc = device_create_file(dev, &dev_attr_scan); if (rc) goto err_out; osm_info("device added (TID: %03x)\n", i2o_dev->lct_data.tid); return 0; err_out: put_device(dev); return rc; }; /** * i2o_bus_remove - remove the I2O Bus Adapter device from the system again * @dev: I2O Bus Adapter device which should be removed * * Always returns 0. */ static int i2o_bus_remove(struct device *dev) { struct i2o_device *i2o_dev = to_i2o_device(dev); device_remove_file(dev, &dev_attr_scan); put_device(dev); osm_info("device removed (TID: %03x)\n", i2o_dev->lct_data.tid); return 0; }; /* Bus Adapter OSM driver struct */ static struct i2o_driver i2o_bus_driver = { .name = OSM_NAME, .classes = i2o_bus_class_id, .driver = { .probe = i2o_bus_probe, .remove = i2o_bus_remove, }, }; /** * i2o_bus_init - Bus Adapter OSM initialization function * * Only register the Bus Adapter OSM in the I2O core. * * Returns 0 on success or negative error code on failure. 
*/ static int __init i2o_bus_init(void) { int rc; printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); /* Register Bus Adapter OSM into I2O core */ rc = i2o_driver_register(&i2o_bus_driver); if (rc) { osm_err("Could not register Bus Adapter OSM\n"); return rc; } return 0; }; /** * i2o_bus_exit - Bus Adapter OSM exit function * * Unregisters Bus Adapter OSM from I2O core. */ static void __exit i2o_bus_exit(void) { i2o_driver_unregister(&i2o_bus_driver); }; MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(OSM_DESCRIPTION); MODULE_VERSION(OSM_VERSION); module_init(i2o_bus_init); module_exit(i2o_bus_exit);
gpl-2.0
myfluxi/android_kernel_lge_hammerhead
sound/isa/gus/gus_mixer.c
13734
6149
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Routines for control of ICS 2101 chip and "mixer" in GF1 chip
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/time.h>
#include <linux/wait.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/gus.h>

/*
 * GF1 single-switch controls: private_value packs the bit position in the
 * low byte and an invert flag in bit 8.
 */

#define GF1_SINGLE(xname, xindex, shift, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_gf1_info_single, \
  .get = snd_gf1_get_single, .put = snd_gf1_put_single, \
  .private_value = shift | (invert << 8) }

#define snd_gf1_info_single snd_ctl_boolean_mono_info

/* Read one bit from the cached GF1 mixer control register. */
static int snd_gf1_get_single(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol);
	int shift = kcontrol->private_value & 0xff;
	int invert = (kcontrol->private_value >> 8) & 1;

	ucontrol->value.integer.value[0] = (gus->mix_cntrl_reg >> shift) & 1;
	if (invert)
		ucontrol->value.integer.value[0] ^= 1;
	return 0;
}

/*
 * Update one bit of the GF1 mixer control register and write the new
 * value out to the hardware.  Returns 1 if the register changed.
 */
static int snd_gf1_put_single(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	int shift = kcontrol->private_value & 0xff;
	int invert = (kcontrol->private_value >> 8) & 1;
	int change;
	unsigned char oval, nval;

	nval = ucontrol->value.integer.value[0] & 1;
	if (invert)
		nval ^= 1;
	nval <<= shift;
	spin_lock_irqsave(&gus->reg_lock, flags);
	oval = gus->mix_cntrl_reg;
	nval = (oval & ~(1 << shift)) | nval;
	change = nval != oval;
	/* write new mixer bits, then reset the active GF1 page to 0 */
	outb(gus->mix_cntrl_reg = nval, GUSP(gus, MIXCNTRLREG));
	outb(gus->gf1.active_voice = 0, GUSP(gus, GF1PAGE));
	spin_unlock_irqrestore(&gus->reg_lock, flags);
	return change;
}

/*
 * ICS2101 stereo volume controls: private_value holds the ICS register
 * address.
 */

#define ICS_DOUBLE(xname, xindex, addr) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_ics_info_double, \
  .get = snd_ics_get_double, .put = snd_ics_put_double, \
  .private_value = addr }

/* Stereo integer control, 0..127 per channel. */
static int snd_ics_info_double(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 127;
	return 0;
}

/* Read the cached left/right values for an ICS register. */
static int snd_ics_get_double(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	int addr = kcontrol->private_value & 0xff;
	unsigned char left, right;

	spin_lock_irqsave(&gus->reg_lock, flags);
	left = gus->gf1.ics_regs[addr][0];
	right = gus->gf1.ics_regs[addr][1];
	spin_unlock_irqrestore(&gus->reg_lock, flags);
	ucontrol->value.integer.value[0] = left & 127;
	ucontrol->value.integer.value[1] = right & 127;
	return 0;
}

/*
 * Update the cached values and program both channels of an ICS register
 * via the GUS mixer control/data ports.  Returns 1 if either channel
 * changed.
 */
static int snd_ics_put_double(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	int addr = kcontrol->private_value & 0xff;
	int change;
	unsigned char val1, val2, oval1, oval2, tmp;

	val1 = ucontrol->value.integer.value[0] & 127;
	val2 = ucontrol->value.integer.value[1] & 127;
	spin_lock_irqsave(&gus->reg_lock, flags);
	oval1 = gus->gf1.ics_regs[addr][0];
	oval2 = gus->gf1.ics_regs[addr][1];
	change = val1 != oval1 || val2 != oval2;
	gus->gf1.ics_regs[addr][0] = val1;
	gus->gf1.ics_regs[addr][1] = val2;
	/* NOTE(review): swaps channels when ics_flipped is set for the GF1
	 * and master registers - presumably some boards wire left/right
	 * reversed for these controls; confirm against the gus core. */
	if (gus->ics_flag && gus->ics_flipped &&
	    (addr == SNDRV_ICS_GF1_DEV || addr == SNDRV_ICS_MASTER_DEV)) {
		tmp = val1;
		val1 = val2;
		val2 = tmp;
	}
	/* select register/channel on the control port, write on the data port */
	addr <<= 3;
	outb(addr | 0, GUSP(gus, MIXCNTRLPORT));
	outb(1, GUSP(gus, MIXDATAPORT));
	outb(addr | 2, GUSP(gus, MIXCNTRLPORT));
	outb((unsigned char) val1, GUSP(gus, MIXDATAPORT));
	outb(addr | 1, GUSP(gus, MIXCNTRLPORT));
	outb(2, GUSP(gus, MIXDATAPORT));
	outb(addr | 3, GUSP(gus, MIXCNTRLPORT));
	outb((unsigned char) val2, GUSP(gus, MIXDATAPORT));
	spin_unlock_irqrestore(&gus->reg_lock, flags);
	return change;
}

/* Controls available on plain GF1 cards (no ICS2101 mixer chip). */
static struct snd_kcontrol_new snd_gf1_controls[] = {
	GF1_SINGLE("Master Playback Switch", 0, 1, 1),
	GF1_SINGLE("Line Switch", 0, 0, 1),
	GF1_SINGLE("Mic Switch", 0, 2, 0)
};

/* Controls available when an ICS2101 mixer chip is present. */
static struct snd_kcontrol_new snd_ics_controls[] = {
	GF1_SINGLE("Master Playback Switch", 0, 1, 1),
	ICS_DOUBLE("Master Playback Volume", 0, SNDRV_ICS_MASTER_DEV),
	ICS_DOUBLE("Synth Playback Volume", 0, SNDRV_ICS_GF1_DEV),
	GF1_SINGLE("Line Switch", 0, 0, 1),
	ICS_DOUBLE("Line Playback Volume", 0, SNDRV_ICS_LINE_DEV),
	GF1_SINGLE("Mic Switch", 0, 2, 0),
	ICS_DOUBLE("Mic Playback Volume", 0, SNDRV_ICS_MIC_DEV),
	ICS_DOUBLE("CD Playback Volume", 0, SNDRV_ICS_CD_DEV)
};

/*
 * Build the mixer for a GUS card: updates the card's mixer name and adds
 * either the plain GF1 controls or the ICS2101 control set depending on
 * gus->ics_flag.  Returns 0 or a negative error code.
 */
int snd_gf1_new_mixer(struct snd_gus_card * gus)
{
	struct snd_card *card;
	unsigned int idx, max;
	int err;

	if (snd_BUG_ON(!gus))
		return -EINVAL;
	card = gus->card;
	if (snd_BUG_ON(!card))
		return -EINVAL;

	if (gus->ics_flag)
		snd_component_add(card, "ICS2101");
	if (card->mixername[0] == '\0') {
		strcpy(card->mixername, gus->ics_flag ? "GF1,ICS2101" : "GF1");
	} else {
		if (gus->ics_flag)
			strcat(card->mixername, ",ICS2101");
		strcat(card->mixername, ",GF1");
	}

	if (!gus->ics_flag) {
		/* ESS-equipped cards only get the first (master) switch */
		max = gus->ess_flag ? 1 : ARRAY_SIZE(snd_gf1_controls);
		for (idx = 0; idx < max; idx++) {
			if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_gf1_controls[idx], gus))) < 0)
				return err;
		}
	} else {
		for (idx = 0; idx < ARRAY_SIZE(snd_ics_controls); idx++) {
			if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_ics_controls[idx], gus))) < 0)
				return err;
		}
	}
	return 0;
}
gpl-2.0
Team-Hydra/android_kernel_htc_msm8660-caf
sound/isa/gus/gus_mixer.c
13734
6149
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Routines for control of ICS 2101 chip and "mixer" in GF1 chip
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/time.h>
#include <linux/wait.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/gus.h>

/*
 * GF1 single-switch controls: private_value packs the bit position in the
 * low byte and an invert flag in bit 8.
 */

#define GF1_SINGLE(xname, xindex, shift, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_gf1_info_single, \
  .get = snd_gf1_get_single, .put = snd_gf1_put_single, \
  .private_value = shift | (invert << 8) }

#define snd_gf1_info_single snd_ctl_boolean_mono_info

/* Read one bit from the cached GF1 mixer control register. */
static int snd_gf1_get_single(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol);
	int shift = kcontrol->private_value & 0xff;
	int invert = (kcontrol->private_value >> 8) & 1;

	ucontrol->value.integer.value[0] = (gus->mix_cntrl_reg >> shift) & 1;
	if (invert)
		ucontrol->value.integer.value[0] ^= 1;
	return 0;
}

/*
 * Update one bit of the GF1 mixer control register and write the new
 * value out to the hardware.  Returns 1 if the register changed.
 */
static int snd_gf1_put_single(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	int shift = kcontrol->private_value & 0xff;
	int invert = (kcontrol->private_value >> 8) & 1;
	int change;
	unsigned char oval, nval;

	nval = ucontrol->value.integer.value[0] & 1;
	if (invert)
		nval ^= 1;
	nval <<= shift;
	spin_lock_irqsave(&gus->reg_lock, flags);
	oval = gus->mix_cntrl_reg;
	nval = (oval & ~(1 << shift)) | nval;
	change = nval != oval;
	/* write new mixer bits, then reset the active GF1 page to 0 */
	outb(gus->mix_cntrl_reg = nval, GUSP(gus, MIXCNTRLREG));
	outb(gus->gf1.active_voice = 0, GUSP(gus, GF1PAGE));
	spin_unlock_irqrestore(&gus->reg_lock, flags);
	return change;
}

/*
 * ICS2101 stereo volume controls: private_value holds the ICS register
 * address.
 */

#define ICS_DOUBLE(xname, xindex, addr) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_ics_info_double, \
  .get = snd_ics_get_double, .put = snd_ics_put_double, \
  .private_value = addr }

/* Stereo integer control, 0..127 per channel. */
static int snd_ics_info_double(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 127;
	return 0;
}

/* Read the cached left/right values for an ICS register. */
static int snd_ics_get_double(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	int addr = kcontrol->private_value & 0xff;
	unsigned char left, right;

	spin_lock_irqsave(&gus->reg_lock, flags);
	left = gus->gf1.ics_regs[addr][0];
	right = gus->gf1.ics_regs[addr][1];
	spin_unlock_irqrestore(&gus->reg_lock, flags);
	ucontrol->value.integer.value[0] = left & 127;
	ucontrol->value.integer.value[1] = right & 127;
	return 0;
}

/*
 * Update the cached values and program both channels of an ICS register
 * via the GUS mixer control/data ports.  Returns 1 if either channel
 * changed.
 */
static int snd_ics_put_double(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	int addr = kcontrol->private_value & 0xff;
	int change;
	unsigned char val1, val2, oval1, oval2, tmp;

	val1 = ucontrol->value.integer.value[0] & 127;
	val2 = ucontrol->value.integer.value[1] & 127;
	spin_lock_irqsave(&gus->reg_lock, flags);
	oval1 = gus->gf1.ics_regs[addr][0];
	oval2 = gus->gf1.ics_regs[addr][1];
	change = val1 != oval1 || val2 != oval2;
	gus->gf1.ics_regs[addr][0] = val1;
	gus->gf1.ics_regs[addr][1] = val2;
	/* NOTE(review): swaps channels when ics_flipped is set for the GF1
	 * and master registers - presumably some boards wire left/right
	 * reversed for these controls; confirm against the gus core. */
	if (gus->ics_flag && gus->ics_flipped &&
	    (addr == SNDRV_ICS_GF1_DEV || addr == SNDRV_ICS_MASTER_DEV)) {
		tmp = val1;
		val1 = val2;
		val2 = tmp;
	}
	/* select register/channel on the control port, write on the data port */
	addr <<= 3;
	outb(addr | 0, GUSP(gus, MIXCNTRLPORT));
	outb(1, GUSP(gus, MIXDATAPORT));
	outb(addr | 2, GUSP(gus, MIXCNTRLPORT));
	outb((unsigned char) val1, GUSP(gus, MIXDATAPORT));
	outb(addr | 1, GUSP(gus, MIXCNTRLPORT));
	outb(2, GUSP(gus, MIXDATAPORT));
	outb(addr | 3, GUSP(gus, MIXCNTRLPORT));
	outb((unsigned char) val2, GUSP(gus, MIXDATAPORT));
	spin_unlock_irqrestore(&gus->reg_lock, flags);
	return change;
}

/* Controls available on plain GF1 cards (no ICS2101 mixer chip). */
static struct snd_kcontrol_new snd_gf1_controls[] = {
	GF1_SINGLE("Master Playback Switch", 0, 1, 1),
	GF1_SINGLE("Line Switch", 0, 0, 1),
	GF1_SINGLE("Mic Switch", 0, 2, 0)
};

/* Controls available when an ICS2101 mixer chip is present. */
static struct snd_kcontrol_new snd_ics_controls[] = {
	GF1_SINGLE("Master Playback Switch", 0, 1, 1),
	ICS_DOUBLE("Master Playback Volume", 0, SNDRV_ICS_MASTER_DEV),
	ICS_DOUBLE("Synth Playback Volume", 0, SNDRV_ICS_GF1_DEV),
	GF1_SINGLE("Line Switch", 0, 0, 1),
	ICS_DOUBLE("Line Playback Volume", 0, SNDRV_ICS_LINE_DEV),
	GF1_SINGLE("Mic Switch", 0, 2, 0),
	ICS_DOUBLE("Mic Playback Volume", 0, SNDRV_ICS_MIC_DEV),
	ICS_DOUBLE("CD Playback Volume", 0, SNDRV_ICS_CD_DEV)
};

/*
 * Build the mixer for a GUS card: updates the card's mixer name and adds
 * either the plain GF1 controls or the ICS2101 control set depending on
 * gus->ics_flag.  Returns 0 or a negative error code.
 */
int snd_gf1_new_mixer(struct snd_gus_card * gus)
{
	struct snd_card *card;
	unsigned int idx, max;
	int err;

	if (snd_BUG_ON(!gus))
		return -EINVAL;
	card = gus->card;
	if (snd_BUG_ON(!card))
		return -EINVAL;

	if (gus->ics_flag)
		snd_component_add(card, "ICS2101");
	if (card->mixername[0] == '\0') {
		strcpy(card->mixername, gus->ics_flag ? "GF1,ICS2101" : "GF1");
	} else {
		if (gus->ics_flag)
			strcat(card->mixername, ",ICS2101");
		strcat(card->mixername, ",GF1");
	}

	if (!gus->ics_flag) {
		/* ESS-equipped cards only get the first (master) switch */
		max = gus->ess_flag ? 1 : ARRAY_SIZE(snd_gf1_controls);
		for (idx = 0; idx < max; idx++) {
			if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_gf1_controls[idx], gus))) < 0)
				return err;
		}
	} else {
		for (idx = 0; idx < ARRAY_SIZE(snd_ics_controls); idx++) {
			if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_ics_controls[idx], gus))) < 0)
				return err;
		}
	}
	return 0;
}
gpl-2.0
neobuddy89/falcon_kernel
sound/usb/mixer.c
167
64638
/* * (Tentative) USB Audio Driver for ALSA * * Mixer control part * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * Many codes borrowed from audio.c by * Alan Cox (alan@lxorguk.ukuu.org.uk) * Thomas Sailer (sailer@ife.ee.ethz.ch) * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* * TODOs, for both the mixer and the streaming interfaces: * * - support for UAC2 effect units * - support for graphical equalizers * - RANGE and MEM set commands (UAC2) * - RANGE and MEM interrupt dispatchers (UAC2) * - audio channel clustering (UAC2) * - audio sample rate converter units (UAC2) * - proper handling of clock multipliers (UAC2) * - dispatch clock change notifications (UAC2) * - stop PCM streams which use a clock that became invalid * - stop PCM streams which use a clock selector that has changed * - parse available sample rates again when clock sources changed */ #include <linux/bitops.h> #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <linux/usb/audio-v2.h> #include <sound/core.h> #include <sound/control.h> #include <sound/hwdep.h> #include <sound/info.h> #include <sound/tlv.h> #include "usbaudio.h" #include "mixer.h" #include "helper.h" #include "mixer_quirks.h" #include "power.h" #define MAX_ID_ELEMS 256 
/* parsed info about a terminal/unit reached while walking the unit topology */
struct usb_audio_term {
	int id;			/* unit/terminal ID */
	int type;		/* wTerminalType, or (bDescriptorSubtype << 16) for virtual units */
	int channels;		/* number of channels */
	unsigned int chconfig;	/* channel configuration bitmap */
	int name;		/* string descriptor index of the name */
};

struct usbmix_name_map;

/* per-interface state used while parsing the mixer topology */
struct mixer_build {
	struct snd_usb_audio *chip;
	struct usb_mixer_interface *mixer;
	unsigned char *buffer;		/* raw class-specific descriptor data */
	unsigned int buflen;
	DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);	/* units already parsed */
	struct usb_audio_term oterm;	/* output terminal being parsed */
	const struct usbmix_name_map *map;		/* quirk name mapping */
	const struct usbmix_selector_map *selector_map;
};

/* E-mu 0202/0404/0204 eXtension Unit (XU) control IDs */
enum {
	USB_XU_CLOCK_RATE		= 0xe301,
	USB_XU_CLOCK_SOURCE		= 0xe302,
	USB_XU_DIGITAL_IO_STATUS	= 0xe303,
	USB_XU_DEVICE_OPTIONS		= 0xe304,
	USB_XU_DIRECT_MONITORING	= 0xe305,
	USB_XU_METERING			= 0xe306
};

/* selector indices within the E-mu extension units */
enum {
	USB_XU_CLOCK_SOURCE_SELECTOR	= 0x02,	/* clock source */
	USB_XU_CLOCK_RATE_SELECTOR	= 0x03,	/* clock rate */
	USB_XU_DIGITAL_FORMAT_SELECTOR	= 0x01,	/* the spdif format */
	USB_XU_SOFT_LIMIT_SELECTOR	= 0x03	/* soft limiter */
};

/*
 * manual mapping of mixer names
 * if the mixer topology is too complicated and the parsed names are
 * ambiguous, add the entries in usbmixer_maps.c.
 */
#include "mixer_maps.c"

/*
 * look up the quirk name-map entry for the given unit (and optionally a
 * specific control selector); returns NULL when no mapping applies
 */
static const struct usbmix_name_map *
find_map(struct mixer_build *state, int unitid, int control)
{
	const struct usbmix_name_map *p = state->map;

	/* no quirk table attached for this device */
	if (!p)
		return NULL;

	for (p = state->map; p->id; p++) {
		if (p->id == unitid &&
		    (!control || !p->control || control == p->control))
			return p;
	}
	return NULL;
}

/* get the mapped name if the unit matches */
static int
check_mapped_name(const struct usbmix_name_map *p, char *buf, int buflen)
{
	if (!p || !p->name)
		return 0;

	/* NOTE(review): strlcpy() already reserves room for the trailing NUL,
	 * so this decrement looks one byte over-cautious — kept as-is. */
	buflen--;
	return strlcpy(buf, p->name, buflen);
}

/* check whether the control should be ignored */
static inline int
check_ignored_ctl(const struct usbmix_name_map *p)
{
	/* an entry with neither a name nor a dB mapping means "ignore" */
	if (!p || p->name || p->dB)
		return 0;
	return 1;
}

/* dB mapping: take min/max straight from the quirk table when present */
static inline void check_mapped_dB(const struct usbmix_name_map *p,
				   struct usb_mixer_elem_info *cval)
{
	if (p && p->dB) {
		cval->dBmin = p->dB->min;
		cval->dBmax = p->dB->max;
		cval->initialized = 1;
	}
}

/* get the mapped selector source name */
static int check_mapped_selector_name(struct mixer_build *state, int unitid,
				      int index, char *buf, int buflen)
{
	const struct usbmix_selector_map *p;

	if (!
state->selector_map) return 0; for (p = state->selector_map; p->id; p++) { if (p->id == unitid && index < p->count) return strlcpy(buf, p->names[index], buflen); } return 0; } /* * find an audio control unit with the given unit id */ static void *find_audio_control_unit(struct mixer_build *state, unsigned char unit) { /* we just parse the header */ struct uac_feature_unit_descriptor *hdr = NULL; while ((hdr = snd_usb_find_desc(state->buffer, state->buflen, hdr, USB_DT_CS_INTERFACE)) != NULL) { if (hdr->bLength >= 4 && hdr->bDescriptorSubtype >= UAC_INPUT_TERMINAL && hdr->bDescriptorSubtype <= UAC2_SAMPLE_RATE_CONVERTER && hdr->bUnitID == unit) return hdr; } return NULL; } /* * copy a string with the given id */ static int snd_usb_copy_string_desc(struct mixer_build *state, int index, char *buf, int maxlen) { int len = usb_string(state->chip->dev, index, buf, maxlen - 1); buf[len] = 0; return len; } /* * convert from the byte/word on usb descriptor to the zero-based integer */ static int convert_signed_value(struct usb_mixer_elem_info *cval, int val) { switch (cval->val_type) { case USB_MIXER_BOOLEAN: return !!val; case USB_MIXER_INV_BOOLEAN: return !val; case USB_MIXER_U8: val &= 0xff; break; case USB_MIXER_S8: val &= 0xff; if (val >= 0x80) val -= 0x100; break; case USB_MIXER_U16: val &= 0xffff; break; case USB_MIXER_S16: val &= 0xffff; if (val >= 0x8000) val -= 0x10000; break; } return val; } /* * convert from the zero-based int to the byte/word for usb descriptor */ static int convert_bytes_value(struct usb_mixer_elem_info *cval, int val) { switch (cval->val_type) { case USB_MIXER_BOOLEAN: return !!val; case USB_MIXER_INV_BOOLEAN: return !val; case USB_MIXER_S8: case USB_MIXER_U8: return val & 0xff; case USB_MIXER_S16: case USB_MIXER_U16: return val & 0xffff; } return 0; /* not reached */ } static int get_relative_value(struct usb_mixer_elem_info *cval, int val) { if (! 
cval->res)
		cval->res = 1;
	if (val < cval->min)
		return 0;
	else if (val >= cval->max)
		return (cval->max - cval->min + cval->res - 1) / cval->res;
	else
		return (val - cval->min) / cval->res;
}

/* map a zero-based control position back to a raw device value */
static int get_abs_value(struct usb_mixer_elem_info *cval, int val)
{
	if (val < 0)
		return cval->min;
	if (!cval->res)
		cval->res = 1;
	val *= cval->res;
	val += cval->min;
	if (val > cval->max)
		return cval->max;
	return val;
}

/*
 * retrieve a mixer value
 */

/* UAC v1: issue a class-specific GET request, retrying up to 10 times */
static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request,
			    int validx, int *value_ret)
{
	struct snd_usb_audio *chip = cval->mixer->chip;
	unsigned char buf[2];
	int val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
	int timeout = 10;
	int idx = 0, err;

	err = snd_usb_autoresume(cval->mixer->chip);
	if (err < 0)
		return -EIO;
	/* hold the shutdown lock so the device can't vanish mid-transfer */
	down_read(&chip->shutdown_rwsem);
	while (timeout-- > 0) {
		if (chip->shutdown)
			break;
		idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
		if (snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0),
				    request,
				    USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
				    validx, idx, buf, val_len) >= val_len) {
			*value_ret = convert_signed_value(cval,
					snd_usb_combine_bytes(buf, val_len));
			err = 0;
			goto out;
		}
	}
	snd_printdd(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
		    request, validx, idx, cval->val_type);
	err = -EINVAL;

 out:
	up_read(&chip->shutdown_rwsem);
	snd_usb_autosuspend(cval->mixer->chip);
	return err;
}

/* UAC v2: CUR is read directly; MIN/MAX/RES come out of one RANGE triplet */
static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
			    int validx, int *value_ret)
{
	struct snd_usb_audio *chip = cval->mixer->chip;
	unsigned char buf[2 + 3*sizeof(__u16)]; /* enough space for one range */
	unsigned char *val;
	int idx = 0, ret, size;
	__u8 bRequest;

	if (request == UAC_GET_CUR) {
		bRequest = UAC2_CS_CUR;
		size = sizeof(__u16);
	} else {
		bRequest = UAC2_CS_RANGE;
		size = sizeof(buf);
	}

	memset(buf, 0, sizeof(buf));

	ret = snd_usb_autoresume(chip) ? -EIO : 0;
	if (ret)
		goto error;

	down_read(&chip->shutdown_rwsem);
	if (chip->shutdown)
		ret = -ENODEV;
	else {
		idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
		ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0),
				      bRequest,
				      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
				      validx, idx, buf, size);
	}
	up_read(&chip->shutdown_rwsem);
	snd_usb_autosuspend(chip);

	if (ret < 0) {
error:
		snd_printk(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
			   request, validx, idx, cval->val_type);
		return ret;
	}

	/* FIXME: how should we handle multiple triplets here? */

	/* pick the requested field out of the RANGE layout
	 * [wNumSubRanges][MIN][MAX][RES] (each __u16) */
	switch (request) {
	case UAC_GET_CUR:
		val = buf;
		break;
	case UAC_GET_MIN:
		val = buf + sizeof(__u16);
		break;
	case UAC_GET_MAX:
		val = buf + sizeof(__u16) * 2;
		break;
	case UAC_GET_RES:
		val = buf + sizeof(__u16) * 3;
		break;
	default:
		return -EINVAL;
	}

	*value_ret = convert_signed_value(cval,
					  snd_usb_combine_bytes(val, sizeof(__u16)));

	return 0;
}

/* protocol-dispatching wrapper around the v1/v2 getters */
static int get_ctl_value(struct usb_mixer_elem_info *cval, int request,
			 int validx, int *value_ret)
{
	return (cval->mixer->protocol == UAC_VERSION_1) ?
		get_ctl_value_v1(cval, request, validx, value_ret) :
		get_ctl_value_v2(cval, request, validx, value_ret);
}

static int get_cur_ctl_value(struct usb_mixer_elem_info *cval,
			     int validx, int *value)
{
	return get_ctl_value(cval, UAC_GET_CUR, validx, value);
}

/* channel = 0: master, 1 = first channel */
static inline int get_cur_mix_raw(struct usb_mixer_elem_info *cval,
				  int channel, int *value)
{
	return get_ctl_value(cval, UAC_GET_CUR,
			     (cval->control << 8) | channel, value);
}

/* cached read of a per-channel value; fills the cache on first access */
static int get_cur_mix_value(struct usb_mixer_elem_info *cval,
			     int channel, int index, int *value)
{
	int err;

	if (cval->cached & (1 << channel)) {
		*value = cval->cache_val[index];
		return 0;
	}
	err = get_cur_mix_raw(cval, channel, value);
	if (err < 0) {
		if (!cval->mixer->ignore_ctl_error)
			snd_printd(KERN_ERR "cannot get current value for control %d ch %d: err = %d\n",
				   cval->control, channel, err);
		return err;
	}
	cval->cached |= 1 << channel;
	cval->cache_val[index] = *value;
	return 0;
}

/*
 * set a mixer value
 */

/* issue a class-specific SET request (v1 and v2), retrying up to 10 times */
int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
				int request, int validx, int value_set)
{
	struct snd_usb_audio *chip = cval->mixer->chip;
	unsigned char buf[2];
	int idx = 0, val_len, err, timeout = 10;

	if (cval->mixer->protocol == UAC_VERSION_1) {
		val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
	} else { /* UAC_VERSION_2 */
		/* audio class v2 controls are always 2 bytes in size */
		val_len = sizeof(__u16);

		/* FIXME */
		if (request != UAC_SET_CUR) {
			snd_printdd(KERN_WARNING "RANGE setting not yet supported\n");
			return -EINVAL;
		}

		request = UAC2_CS_CUR;
	}

	value_set = convert_bytes_value(cval, value_set);
	buf[0] = value_set & 0xff;
	buf[1] = (value_set >> 8) & 0xff;

	err = snd_usb_autoresume(chip);
	if (err < 0)
		return -EIO;
	down_read(&chip->shutdown_rwsem);
	while (timeout-- > 0) {
		if (chip->shutdown)
			break;
		idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
		if (snd_usb_ctl_msg(chip->dev,
				    usb_sndctrlpipe(chip->dev, 0), request,
				    USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
				    validx, idx, buf, val_len) >= 0) {
			err = 0;
			goto out;
		}
	}
	snd_printdd(KERN_ERR "cannot set ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d, data = %#x/%#x\n",
		    request, validx, idx, cval->val_type, buf[0], buf[1]);
	err = -EINVAL;

 out:
	up_read(&chip->shutdown_rwsem);
	snd_usb_autosuspend(chip);
	return err;
}

static int set_cur_ctl_value(struct usb_mixer_elem_info *cval,
			     int validx, int value)
{
	return snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, validx, value);
}

/* cached write of a per-channel value; read-only channels are skipped */
static int set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
			     int index, int value)
{
	int err;
	unsigned int read_only = (channel == 0) ?
cval->master_readonly :
		cval->ch_readonly & (1 << (channel - 1));

	if (read_only) {
		snd_printdd(KERN_INFO "%s(): channel %d of control %d is read_only\n",
			    __func__, channel, cval->control);
		return 0;
	}

	err = snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
					  (cval->control << 8) | channel,
					  value);
	if (err < 0)
		return err;
	cval->cached |= 1 << channel;
	cval->cache_val[index] = value;
	return 0;
}

/*
 * TLV callback for mixer volume controls
 */
static int mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
			 unsigned int size, unsigned int __user *_tlv)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	DECLARE_TLV_DB_MINMAX(scale, 0, 0);

	if (size < sizeof(scale))
		return -ENOMEM;
	/* fill in the dB min/max computed at probe time */
	scale[2] = cval->dBmin;
	scale[3] = cval->dBmax;
	if (copy_to_user(_tlv, scale, sizeof(scale)))
		return -EFAULT;
	return 0;
}

/*
 * parser routines begin here...
 */

static int parse_audio_unit(struct mixer_build *state, int unitid);

/*
 * check if the input/output channel routing is enabled on the given bitmap.
 * used for mixer unit parser
 */
static int check_matrix_bitmap(unsigned char *bmap, int ich, int och, int num_outs)
{
	int idx = ich * num_outs + och;
	return bmap[idx >> 3] & (0x80 >> (idx & 7));
}

/*
 * add an alsa control element
 * search and increment the index until an empty slot is found.
 *
 * if failed, give up and free the control instance.
 */
int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
			      struct snd_kcontrol *kctl)
{
	struct usb_mixer_elem_info *cval = kctl->private_data;
	int err;

	/* bump the index until the id is unique on this card */
	while (snd_ctl_find_id(mixer->chip->card, &kctl->id))
		kctl->id.index++;
	if ((err = snd_ctl_add(mixer->chip->card, kctl)) < 0) {
		snd_printd(KERN_ERR "cannot add control (err = %d)\n", err);
		return err;
	}
	/* link into the per-unit-id list for notification handling */
	cval->elem_id = &kctl->id;
	cval->next_id_elem = mixer->id_elems[cval->id];
	mixer->id_elems[cval->id] = cval;
	return 0;
}

/*
 * get a terminal name string
 */

/* table mapping USB audio terminal types to human-readable names */
static struct iterm_name_combo {
	int type;
	char *name;
} iterm_names[] = {
	{ 0x0300, "Output" },
	{ 0x0301, "Speaker" },
	{ 0x0302, "Headphone" },
	{ 0x0303, "HMD Audio" },
	{ 0x0304, "Desktop Speaker" },
	{ 0x0305, "Room Speaker" },
	{ 0x0306, "Com Speaker" },
	{ 0x0307, "LFE" },
	{ 0x0600, "External In" },
	{ 0x0601, "Analog In" },
	{ 0x0602, "Digital In" },
	{ 0x0603, "Line" },
	{ 0x0604, "Legacy In" },
	{ 0x0605, "IEC958 In" },
	{ 0x0606, "1394 DA Stream" },
	{ 0x0607, "1394 DV Stream" },
	{ 0x0700, "Embedded" },
	{ 0x0701, "Noise Source" },
	{ 0x0702, "Equalization Noise" },
	{ 0x0703, "CD" },
	{ 0x0704, "DAT" },
	{ 0x0705, "DCC" },
	{ 0x0706, "MiniDisk" },
	{ 0x0707, "Analog Tape" },
	{ 0x0708, "Phonograph" },
	{ 0x0709, "VCR Audio" },
	{ 0x070a, "Video Disk Audio" },
	{ 0x070b, "DVD Audio" },
	{ 0x070c, "TV Tuner Audio" },
	{ 0x070d, "Satellite Rec Audio" },
	{ 0x070e, "Cable Tuner Audio" },
	{ 0x070f, "DSS Audio" },
	{ 0x0710, "Radio Receiver" },
	{ 0x0711, "Radio Transmitter" },
	{ 0x0712, "Multi-Track Recorder" },
	{ 0x0713, "Synthesizer" },
	{ 0 },
};

/* build a name for the given terminal; returns the string length or 0 */
static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm,
			 unsigned char *name, int maxlen, int term_only)
{
	struct iterm_name_combo *names;

	if (iterm->name)
		return snd_usb_copy_string_desc(state, iterm->name,
						name, maxlen);

	/* virtual type - not a real terminal */
	if (iterm->type >> 16) {
		if (term_only)
			return 0;
		switch (iterm->type >> 16) {
		case UAC_SELECTOR_UNIT:
			strcpy(name, "Selector");
			return 8;
		case
UAC1_PROCESSING_UNIT:
			strcpy(name, "Process Unit");
			return 12;
		case UAC1_EXTENSION_UNIT:
			strcpy(name, "Ext Unit");
			return 8;
		case UAC_MIXER_UNIT:
			strcpy(name, "Mixer");
			return 5;
		default:
			return sprintf(name, "Unit %d", iterm->id);
		}
	}

	/* coarse match on the terminal type class first */
	switch (iterm->type & 0xff00) {
	case 0x0100:
		strcpy(name, "PCM");
		return 3;
	case 0x0200:
		strcpy(name, "Mic");
		return 3;
	case 0x0400:
		strcpy(name, "Headset");
		return 7;
	case 0x0500:
		strcpy(name, "Phone");
		return 5;
	}

	for (names = iterm_names; names->type; names++)
		if (names->type == iterm->type) {
			strcpy(name, names->name);
			return strlen(names->name);
		}
	return 0;
}

/*
 * parse the source unit recursively until it reaches to a terminal
 * or a branched unit.
 */
static int check_input_term(struct mixer_build *state, int id, struct usb_audio_term *term)
{
	int err;
	void *p1;

	memset(term, 0, sizeof(*term));
	while ((p1 = find_audio_control_unit(state, id)) != NULL) {
		unsigned char *hdr = p1;
		term->id = id;
		/* hdr[2] is bDescriptorSubtype for all CS interface descriptors */
		switch (hdr[2]) {
		case UAC_INPUT_TERMINAL:
			if (state->mixer->protocol == UAC_VERSION_1) {
				struct uac_input_terminal_descriptor *d = p1;
				term->type = le16_to_cpu(d->wTerminalType);
				term->channels = d->bNrChannels;
				term->chconfig = le16_to_cpu(d->wChannelConfig);
				term->name = d->iTerminal;
			} else { /* UAC_VERSION_2 */
				struct uac2_input_terminal_descriptor *d = p1;
				term->type = le16_to_cpu(d->wTerminalType);
				term->channels = d->bNrChannels;
				term->chconfig = le32_to_cpu(d->bmChannelConfig);
				term->name = d->iTerminal;

				/* call recursively to get the clock selectors */
				err = check_input_term(state, d->bCSourceID, term);
				if (err < 0)
					return err;
			}
			return 0;
		case UAC_FEATURE_UNIT: {
			/* the header is the same for v1 and v2 */
			struct uac_feature_unit_descriptor *d = p1;
			id = d->bSourceID;
			break; /* continue to parse */
		}
		case UAC_MIXER_UNIT: {
			struct uac_mixer_unit_descriptor *d = p1;
			term->type = d->bDescriptorSubtype << 16; /* virtual type */
			term->channels = uac_mixer_unit_bNrChannels(d);
			term->chconfig = uac_mixer_unit_wChannelConfig(d, state->mixer->protocol);
			term->name = uac_mixer_unit_iMixer(d);
			return 0;
		}
		case UAC_SELECTOR_UNIT:
		case UAC2_CLOCK_SELECTOR: {
			struct uac_selector_unit_descriptor *d = p1;
			/* call recursively to retrieve the channel info */
			err = check_input_term(state, d->baSourceID[0], term);
			if (err < 0)
				return err;
			term->type = d->bDescriptorSubtype << 16; /* virtual type */
			term->id = id;
			term->name = uac_selector_unit_iSelector(d);
			return 0;
		}
		case UAC1_PROCESSING_UNIT:
		case UAC1_EXTENSION_UNIT:
		/* UAC2_PROCESSING_UNIT_V2 */
		/* UAC2_EFFECT_UNIT */
		case UAC2_EXTENSION_UNIT_V2: {
			struct uac_processing_unit_descriptor *d = p1;

			if (state->mixer->protocol == UAC_VERSION_2 &&
			    hdr[2] == UAC2_EFFECT_UNIT) {
				/* UAC2/UAC1 unit IDs overlap here in an
				 * uncompatible way. Ignore this unit for now.
				 */
				return 0;
			}

			if (d->bNrInPins) {
				id = d->baSourceID[0];
				break; /* continue to parse */
			}
			term->type = d->bDescriptorSubtype << 16; /* virtual type */
			term->channels = uac_processing_unit_bNrChannels(d);
			term->chconfig = uac_processing_unit_wChannelConfig(d, state->mixer->protocol);
			term->name = uac_processing_unit_iProcessing(d, state->mixer->protocol);
			return 0;
		}
		case UAC2_CLOCK_SOURCE: {
			struct uac_clock_source_descriptor *d = p1;
			term->type = d->bDescriptorSubtype << 16; /* virtual type */
			term->id = id;
			term->name = d->iClockSource;
			return 0;
		}
		default:
			return -ENODEV;
		}
	}
	return -ENODEV;
}

/*
 * Feature Unit
 */

/* feature unit control information */
struct usb_feature_control_info {
	const char *name;
	unsigned int type;	/* control type (mute, volume, etc.)
 */
};

/* control info indexed by (UAC control selector - 1) */
static struct usb_feature_control_info audio_feature_info[] = {
	{ "Mute",			USB_MIXER_INV_BOOLEAN },
	{ "Volume",			USB_MIXER_S16 },
	{ "Tone Control - Bass",	USB_MIXER_S8 },
	{ "Tone Control - Mid",		USB_MIXER_S8 },
	{ "Tone Control - Treble",	USB_MIXER_S8 },
	{ "Graphic Equalizer",		USB_MIXER_S8 }, /* FIXME: not implemented yet */
	{ "Auto Gain Control",		USB_MIXER_BOOLEAN },
	{ "Delay Control",		USB_MIXER_U16 },
	{ "Bass Boost",			USB_MIXER_BOOLEAN },
	{ "Loudness",			USB_MIXER_BOOLEAN },
	/* UAC2 specific */
	{ "Input Gain Control",		USB_MIXER_U16 },
	{ "Input Gain Pad Control",	USB_MIXER_BOOLEAN },
	{ "Phase Inverter Control",	USB_MIXER_BOOLEAN },
};

/* private_free callback */
static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
{
	kfree(kctl->private_data);
	kctl->private_data = NULL;
}

/*
 * interface to ALSA control for feature/mixer units
 */

/* volume control quirks */
static void volume_control_quirks(struct usb_mixer_elem_info *cval,
				  struct snd_kcontrol *kctl)
{
	switch (cval->mixer->chip->usb_id) {
	case USB_ID(0x0471, 0x0101):
	case USB_ID(0x0471, 0x0104):
	case USB_ID(0x0471, 0x0105):
	case USB_ID(0x0672, 0x1041):
	/* quirk for UDA1321/N101.
	 * note that detection between firmware 2.1.1.7 (N101)
	 * and later 2.1.1.21 is not very clear from datasheets.
	 * I hope that the min value is -15360 for newer firmware --jk
	 */
		if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
		    cval->min == -15616) {
			snd_printk(KERN_INFO
				 "set volume quirk for UDA1321/N101 chip\n");
			cval->max = -256;
		}
		break;

	case USB_ID(0x046d, 0x09a4):
		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
			snd_printk(KERN_INFO
				"set volume quirk for QuickCam E3500\n");
			cval->min = 6080;
			cval->max = 8768;
			cval->res = 192;
		}
		break;

	case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
	case USB_ID(0x046d, 0x0808):
	case USB_ID(0x046d, 0x0809):
	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
	case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
	case USB_ID(0x046d, 0x0991):
	/* Most audio usb devices lie about volume resolution.
	 * Most Logitech webcams have res = 384.
	 * Probably there is some logitech magic behind this number --fishor
	 */
		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
			snd_printk(KERN_INFO
				"set resolution quirk: cval->res = 384\n");
			cval->res = 384;
		}
		break;
	}
}

/*
 * retrieve the minimum and maximum values for the specified control
 */
static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
				   int default_min, struct snd_kcontrol *kctl)
{
	/* for failsafe */
	cval->min = default_min;
	cval->max = cval->min + 1;
	cval->res = 1;
	cval->dBmin = cval->dBmax = 0;

	if (cval->val_type == USB_MIXER_BOOLEAN ||
	    cval->val_type == USB_MIXER_INV_BOOLEAN) {
		cval->initialized = 1;
	} else {
		/* query min/max on the first channel present in the mask */
		int minchn = 0;
		if (cval->cmask) {
			int i;
			for (i = 0; i < MAX_CHANNELS; i++)
				if (cval->cmask & (1 << i)) {
					minchn = i + 1;
					break;
				}
		}
		if (get_ctl_value(cval, UAC_GET_MAX, (cval->control << 8) | minchn, &cval->max) < 0 ||
		    get_ctl_value(cval, UAC_GET_MIN, (cval->control << 8) | minchn, &cval->min) < 0) {
			snd_printd(KERN_ERR "%d:%d: cannot get min/max values for control %d (id %d)\n",
				   cval->id, snd_usb_ctrl_intf(cval->mixer->chip),
				   cval->control, cval->id);
			return -EINVAL;
		}
		if (get_ctl_value(cval, UAC_GET_RES,
				  (cval->control << 8) | minchn,
				  &cval->res) < 0) {
			cval->res = 1;
		} else {
			int last_valid_res = cval->res;

			/* probe how small a resolution the device accepts */
			while (cval->res > 1) {
				if (snd_usb_mixer_set_ctl_value(cval, UAC_SET_RES,
								(cval->control << 8) | minchn,
								cval->res / 2) < 0)
					break;
				cval->res /= 2;
			}
			if (get_ctl_value(cval, UAC_GET_RES,
					  (cval->control << 8) | minchn, &cval->res) < 0)
				cval->res = last_valid_res;
		}
		if (cval->res == 0)
			cval->res = 1;

		/* Additional checks for the proper resolution
		 *
		 * Some devices report smaller resolutions than actually
		 * reacting.  They don't return errors but simply clip
		 * to the lower aligned value.
		 */
		if (cval->min + cval->res < cval->max) {
			int last_valid_res = cval->res;
			int saved, test, check;
			get_cur_mix_raw(cval, minchn, &saved);
			for (;;) {
				test = saved;
				if (test < cval->max)
					test += cval->res;
				else
					test -= cval->res;
				if (test < cval->min || test > cval->max ||
				    set_cur_mix_value(cval, minchn, 0, test) ||
				    get_cur_mix_raw(cval, minchn, &check)) {
					cval->res = last_valid_res;
					break;
				}
				if (test == check)
					break;
				cval->res *= 2;
			}
			/* restore the original value */
			set_cur_mix_value(cval, minchn, 0, saved);
		}

		cval->initialized = 1;
	}

	if (kctl)
		volume_control_quirks(cval, kctl);

	/* USB descriptions contain the dB scale in 1/256 dB unit
	 * while ALSA TLV contains in 1/100 dB unit
	 */
	cval->dBmin = (convert_signed_value(cval, cval->min) * 100) / 256;
	cval->dBmax = (convert_signed_value(cval, cval->max) * 100) / 256;
	if (cval->dBmin > cval->dBmax) {
		/* something is wrong; assume it's either from/to 0dB */
		if (cval->dBmin < 0)
			cval->dBmax = 0;
		else if (cval->dBmin > 0)
			cval->dBmin = 0;
		if (cval->dBmin > cval->dBmax) {
			/* totally crap, return an error */
			return -EINVAL;
		}
	}

	return 0;
}

#define get_min_max(cval, def)	get_min_max_with_quirks(cval, def, NULL)

/* get a feature/mixer unit info */
static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;

	if (cval->val_type == USB_MIXER_BOOLEAN ||
	    cval->val_type == USB_MIXER_INV_BOOLEAN)
		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	else
		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = cval->channels;
	if (cval->val_type == USB_MIXER_BOOLEAN ||
	    cval->val_type == USB_MIXER_INV_BOOLEAN) {
		uinfo->value.integer.min = 0;
		uinfo->value.integer.max = 1;
	} else {
		if (!cval->initialized) {
			/* lazy initialization of min/max/res on first query */
			get_min_max_with_quirks(cval, 0, kcontrol);
			if (cval->initialized && cval->dBmin >= cval->dBmax) {
				/* drop the TLV access since the dB info is bogus */
				kcontrol->vd[0].access &=
					~(SNDRV_CTL_ELEM_ACCESS_TLV_READ |
					  SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK);
				snd_ctl_notify(cval->mixer->chip->card,
					       SNDRV_CTL_EVENT_MASK_INFO,
					       &kcontrol->id);
			}
		}
		uinfo->value.integer.min = 0;
		uinfo->value.integer.max =
			(cval->max - cval->min + cval->res - 1) / cval->res;
	}
	return 0;
}

/* get the current value from feature/mixer unit */
static int mixer_ctl_feature_get(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	int c, cnt, val, err;

	ucontrol->value.integer.value[0] = cval->min;
	if (cval->cmask) {
		cnt = 0;
		for (c = 0; c < MAX_CHANNELS; c++) {
			if (!(cval->cmask & (1 << c)))
				continue;
			err = get_cur_mix_value(cval, c + 1, cnt, &val);
			if (err < 0)
				return cval->mixer->ignore_ctl_error ? 0 : err;
			val = get_relative_value(cval, val);
			ucontrol->value.integer.value[cnt] = val;
			cnt++;
		}
		return 0;
	} else {
		/* master channel */
		err = get_cur_mix_value(cval, 0, 0, &val);
		if (err < 0)
			return cval->mixer->ignore_ctl_error ? 0 : err;
		val = get_relative_value(cval, val);
		ucontrol->value.integer.value[0] = val;
	}
	return 0;
}

/* put the current value to feature/mixer unit */
static int mixer_ctl_feature_put(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	int c, cnt, val, oval, err;
	int changed = 0;

	if (cval->cmask) {
		cnt = 0;
		for (c = 0; c < MAX_CHANNELS; c++) {
			if (!(cval->cmask & (1 << c)))
				continue;
			err = get_cur_mix_value(cval, c + 1, cnt, &oval);
			if (err < 0)
				return cval->mixer->ignore_ctl_error ? 0 : err;
			val = ucontrol->value.integer.value[cnt];
			val = get_abs_value(cval, val);
			if (oval != val) {
				set_cur_mix_value(cval, c + 1, cnt, val);
				changed = 1;
			}
			cnt++;
		}
	} else {
		/* master channel */
		err = get_cur_mix_value(cval, 0, 0, &oval);
		if (err < 0)
			return cval->mixer->ignore_ctl_error ? 0 : err;
		val = ucontrol->value.integer.value[0];
		val = get_abs_value(cval, val);
		if (val != oval) {
			set_cur_mix_value(cval, 0, 0, val);
			changed = 1;
		}
	}
	return changed;
}

static struct snd_kcontrol_new usb_feature_unit_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "", /* will be filled later manually */
	.info = mixer_ctl_feature_info,
	.get = mixer_ctl_feature_get,
	.put = mixer_ctl_feature_put,
};

/* the read-only variant */
static struct snd_kcontrol_new usb_feature_unit_ctl_ro = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "", /* will be filled later manually */
	.info = mixer_ctl_feature_info,
	.get = mixer_ctl_feature_get,
	.put = NULL,
};

/* This symbol is exported in order to allow the mixer quirks to
 * hook up to the standard feature unit control mechanism */
struct snd_kcontrol_new *snd_usb_feature_unit_ctl = &usb_feature_unit_ctl;

/*
 * build a feature control
 */

static size_t append_ctl_name(struct snd_kcontrol *kctl, const char *str)
{
	return strlcat(kctl->id.name, str, sizeof(kctl->id.name));
}

static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
			      unsigned int ctl_mask, int control,
			      struct usb_audio_term *iterm, int unitid,
			      int readonly_mask)
{
	struct uac_feature_unit_descriptor *desc = raw_desc;
	unsigned int len = 0;
	int mapped_name = 0;
	int nameid = uac_feature_unit_iFeature(desc);
	struct snd_kcontrol *kctl;
	struct usb_mixer_elem_info *cval;
	const struct usbmix_name_map *map;
	unsigned int range;

	control++; /* change from zero-based to 1-based value */

	if (control == UAC_FU_GRAPHIC_EQUALIZER) {
		/* FIXME: not supported yet */
		return;
	}

	map = find_map(state, unitid, control);
	if (check_ignored_ctl(map))
		return;

	cval = kzalloc(sizeof(*cval),
GFP_KERNEL);
	if (!cval) {
		snd_printk(KERN_ERR "cannot malloc kcontrol\n");
		return;
	}
	cval->mixer = state->mixer;
	cval->id = unitid;
	cval->control = control;
	cval->cmask = ctl_mask;
	cval->val_type = audio_feature_info[control-1].type;
	if (ctl_mask == 0) {
		cval->channels = 1;	/* master channel */
		cval->master_readonly = readonly_mask;
	} else {
		/* count the channels selected by the mask */
		int i, c = 0;
		for (i = 0; i < 16; i++)
			if (ctl_mask & (1 << i))
				c++;
		cval->channels = c;
		cval->ch_readonly = readonly_mask;
	}

	/* if all channels in the mask are marked read-only, make the control
	 * read-only. set_cur_mix_value() will check the mask again and won't
	 * issue write commands to read-only channels. */
	if (cval->channels == readonly_mask)
		kctl = snd_ctl_new1(&usb_feature_unit_ctl_ro, cval);
	else
		kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);

	if (!kctl) {
		snd_printk(KERN_ERR "cannot malloc kcontrol\n");
		kfree(cval);
		return;
	}
	kctl->private_free = usb_mixer_elem_free;

	len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
	mapped_name = len != 0;
	if (!len && nameid)
		len = snd_usb_copy_string_desc(state, nameid,
					       kctl->id.name, sizeof(kctl->id.name));

	/* get min/max values */
	get_min_max_with_quirks(cval, 0, kctl);

	switch (control) {
	case UAC_FU_MUTE:
	case UAC_FU_VOLUME:
		/* determine the control name. the rule is:
		 * - if a name id is given in descriptor, use it.
		 * - if the connected input can be determined, then use the name
		 *   of terminal type.
		 * - if the connected output can be determined, use it.
		 * - otherwise, anonymous name.
		 */
		if (!len) {
			len = get_term_name(state, iterm, kctl->id.name,
					    sizeof(kctl->id.name), 1);
			if (!len)
				len = get_term_name(state, &state->oterm,
						    kctl->id.name,
						    sizeof(kctl->id.name), 1);
			if (!len)
				len = snprintf(kctl->id.name,
					       sizeof(kctl->id.name),
					       "Feature %d", unitid);
		}
		/* determine the stream direction:
		 * if the connected output is USB stream, then it's likely a
		 * capture stream. otherwise it should be playback (hopefully :)
		 */
		if (!mapped_name && !(state->oterm.type >> 16)) {
			if ((state->oterm.type & 0xff00) == 0x0100) {
				len = append_ctl_name(kctl, " Capture");
			} else {
				len = append_ctl_name(kctl, " Playback");
			}
		}
		append_ctl_name(kctl, control == UAC_FU_MUTE ?
				" Switch" : " Volume");
		if (control == UAC_FU_VOLUME) {
			check_mapped_dB(map, cval);
			if (cval->dBmin < cval->dBmax || !cval->initialized) {
				kctl->tlv.c = mixer_vol_tlv;
				kctl->vd[0].access |=
					SNDRV_CTL_ELEM_ACCESS_TLV_READ |
					SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
			}
		}
		break;
	default:
		if (!len)
			strlcpy(kctl->id.name, audio_feature_info[control-1].name,
				sizeof(kctl->id.name));
		break;
	}

	range = (cval->max - cval->min) / cval->res;
	/* Are there devices with volume range more than 255? I use a bit more
	 * to be sure. 384 is a resolution magic number found on Logitech
	 * devices. It will definitively catch all buggy Logitech devices.
	 */
	if (range > 384) {
		snd_printk(KERN_WARNING "usb_audio: Warning! Unlikely big "
			   "volume range (=%u), cval->res is probably wrong.",
			   range);
		snd_printk(KERN_WARNING "usb_audio: [%d] FU [%s] ch = %d, "
			   "val = %d/%d/%d", cval->id,
			   kctl->id.name, cval->channels,
			   cval->min, cval->max, cval->res);
	}

	snd_printdd(KERN_INFO "[%d] FU [%s] ch = %d, val = %d/%d/%d\n",
		    cval->id, kctl->id.name, cval->channels,
		    cval->min, cval->max, cval->res);
	snd_usb_mixer_add_control(state->mixer, kctl);
}

/*
 * parse a feature unit
 *
 * most of controls are defined here.
 */
static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void *_ftr)
{
	int channels, i, j;
	struct usb_audio_term iterm;
	unsigned int master_bits, first_ch_bits;
	int err, csize;
	struct uac_feature_unit_descriptor *hdr = _ftr;
	__u8 *bmaControls;

	if (state->mixer->protocol == UAC_VERSION_1) {
		csize = hdr->bControlSize;
		if (!csize) {
			snd_printdd(KERN_ERR "usbaudio: unit %u: "
				    "invalid bControlSize == 0\n", unitid);
			return -EINVAL;
		}
		channels = (hdr->bLength - 7) / csize - 1;
		bmaControls = hdr->bmaControls;
		if (hdr->bLength < 7 + csize) {
			snd_printk(KERN_ERR "usbaudio: unit %u: "
				   "invalid UAC_FEATURE_UNIT descriptor\n", unitid);
			return -EINVAL;
		}
	} else {
		struct uac2_feature_unit_descriptor *ftr = _ftr;
		/* v2 always uses 4-byte control bitmaps */
		csize = 4;
		channels = (hdr->bLength - 6) / 4 - 1;
		bmaControls = ftr->bmaControls;
		if (hdr->bLength < 6 + csize) {
			snd_printk(KERN_ERR "usbaudio: unit %u: "
				   "invalid UAC_FEATURE_UNIT descriptor\n", unitid);
			return -EINVAL;
		}
	}

	/* parse the source unit */
	if ((err = parse_audio_unit(state, hdr->bSourceID)) < 0)
		return err;

	/* determine the input source type and name */
	err = check_input_term(state, hdr->bSourceID, &iterm);
	if (err < 0)
		return err;

	master_bits = snd_usb_combine_bytes(bmaControls, csize);
	/* master configuration quirks */
	switch (state->chip->usb_id) {
	case USB_ID(0x08bb, 0x2702):
		snd_printk(KERN_INFO
			   "usbmixer: master volume quirk for PCM2702 chip\n");
		/* disable non-functional volume control */
		master_bits &= ~UAC_CONTROL_BIT(UAC_FU_VOLUME);
		break;
	case USB_ID(0x1130, 0xf211):
		snd_printk(KERN_INFO
			   "usbmixer: volume control quirk for Tenx TP6911 Audio Headset\n");
		/* disable non-functional volume control */
		channels = 0;
		break;
	}
	if (channels > 0)
		first_ch_bits = snd_usb_combine_bytes(bmaControls + csize, csize);
	else
		first_ch_bits = 0;

	if (state->mixer->protocol == UAC_VERSION_1) {
		/* check all control types */
		for (i = 0; i < 10; i++) {
			unsigned int ch_bits = 0;
			for (j = 0; j < channels; j++) {
				unsigned int mask = snd_usb_combine_bytes(bmaControls + csize * (j+1), csize);
				if (mask & (1 << i))
					ch_bits |= (1 << j);
			}
			/* audio class v1 controls are never read-only */
			if (ch_bits & 1) /* the first channel must be set (for ease of programming) */
				build_feature_ctl(state, _ftr, ch_bits, i, &iterm, unitid, 0);
			if (master_bits & (1 << i))
				build_feature_ctl(state, _ftr, 0, i, &iterm, unitid, 0);
		}
	} else { /* UAC_VERSION_2 */
		for (i = 0; i < ARRAY_SIZE(audio_feature_info); i++) {
			unsigned int ch_bits = 0;
			unsigned int ch_read_only = 0;

			for (j = 0; j < channels; j++) {
				unsigned int mask = snd_usb_combine_bytes(bmaControls + csize * (j+1), csize);
				if (uac2_control_is_readable(mask, i)) {
					ch_bits |= (1 << j);
					if (!uac2_control_is_writeable(mask, i))
						ch_read_only |= (1 << j);
				}
			}

			/* NOTE: build_feature_ctl() will mark the control read-only if all channels
			 * are marked read-only in the descriptors. Otherwise, the control will be
			 * reported as writeable, but the driver will not actually issue a write
			 * command for read-only channels */
			if (ch_bits & 1) /* the first channel must be set (for ease of programming) */
				build_feature_ctl(state, _ftr, ch_bits, i, &iterm, unitid, ch_read_only);
			if (uac2_control_is_readable(master_bits, i))
				build_feature_ctl(state, _ftr, 0, i, &iterm, unitid,
						  !uac2_control_is_writeable(master_bits, i));
		}
	}

	return 0;
}

/*
 * Mixer Unit
 */

/*
 * build a mixer unit control
 *
 * the callbacks are identical with feature unit.
 * input channel number (zero based) is given in control field instead.
 */
static void build_mixer_unit_ctl(struct mixer_build *state,
				 struct uac_mixer_unit_descriptor *desc,
				 int in_pin, int in_ch, int unitid,
				 struct usb_audio_term *iterm)
{
	struct usb_mixer_elem_info *cval;
	unsigned int num_outs = uac_mixer_unit_bNrChannels(desc);
	unsigned int i, len;
	struct snd_kcontrol *kctl;
	const struct usbmix_name_map *map;

	map = find_map(state, unitid, 0);
	if (check_ignored_ctl(map))
		return;

	cval = kzalloc(sizeof(*cval), GFP_KERNEL);
	if (!
cval) return; cval->mixer = state->mixer; cval->id = unitid; cval->control = in_ch + 1; /* based on 1 */ cval->val_type = USB_MIXER_S16; for (i = 0; i < num_outs; i++) { if (check_matrix_bitmap(uac_mixer_unit_bmControls(desc, state->mixer->protocol), in_ch, i, num_outs)) { cval->cmask |= (1 << i); cval->channels++; } } /* get min/max values */ get_min_max(cval, 0); kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval); if (! kctl) { snd_printk(KERN_ERR "cannot malloc kcontrol\n"); kfree(cval); return; } kctl->private_free = usb_mixer_elem_free; len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); if (! len) len = get_term_name(state, iterm, kctl->id.name, sizeof(kctl->id.name), 0); if (! len) len = sprintf(kctl->id.name, "Mixer Source %d", in_ch + 1); append_ctl_name(kctl, " Volume"); snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n", cval->id, kctl->id.name, cval->channels, cval->min, cval->max); snd_usb_mixer_add_control(state->mixer, kctl); } /* * parse a mixer unit */ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, void *raw_desc) { struct uac_mixer_unit_descriptor *desc = raw_desc; struct usb_audio_term iterm; int input_pins, num_ins, num_outs; int pin, ich, err; if (desc->bLength < 11 || ! (input_pins = desc->bNrInPins) || ! (num_outs = uac_mixer_unit_bNrChannels(desc))) { snd_printk(KERN_ERR "invalid MIXER UNIT descriptor %d\n", unitid); return -EINVAL; } /* no bmControls field (e.g. 
Maya44) -> ignore */ if (desc->bLength <= 10 + input_pins) { snd_printdd(KERN_INFO "MU %d has no bmControls field\n", unitid); return 0; } num_ins = 0; ich = 0; for (pin = 0; pin < input_pins; pin++) { err = parse_audio_unit(state, desc->baSourceID[pin]); if (err < 0) return err; err = check_input_term(state, desc->baSourceID[pin], &iterm); if (err < 0) return err; num_ins += iterm.channels; for (; ich < num_ins; ++ich) { int och, ich_has_controls = 0; for (och = 0; och < num_outs; ++och) { if (check_matrix_bitmap(uac_mixer_unit_bmControls(desc, state->mixer->protocol), ich, och, num_outs)) { ich_has_controls = 1; break; } } if (ich_has_controls) build_mixer_unit_ctl(state, desc, pin, ich, unitid, &iterm); } } return 0; } /* * Processing Unit / Extension Unit */ /* get callback for processing/extension unit */ static int mixer_ctl_procunit_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = kcontrol->private_data; int err, val; err = get_cur_ctl_value(cval, cval->control << 8, &val); if (err < 0 && cval->mixer->ignore_ctl_error) { ucontrol->value.integer.value[0] = cval->min; return 0; } if (err < 0) return err; val = get_relative_value(cval, val); ucontrol->value.integer.value[0] = val; return 0; } /* put callback for processing/extension unit */ static int mixer_ctl_procunit_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = kcontrol->private_data; int val, oval, err; err = get_cur_ctl_value(cval, cval->control << 8, &oval); if (err < 0) { if (cval->mixer->ignore_ctl_error) return 0; return err; } val = ucontrol->value.integer.value[0]; val = get_abs_value(cval, val); if (val != oval) { set_cur_ctl_value(cval, cval->control << 8, val); return 1; } return 0; } /* alsa control interface for processing/extension unit */ static struct snd_kcontrol_new mixer_procunit_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later */ .info 
= mixer_ctl_feature_info, .get = mixer_ctl_procunit_get, .put = mixer_ctl_procunit_put, }; /* * predefined data for processing units */ struct procunit_value_info { int control; char *suffix; int val_type; int min_value; }; struct procunit_info { int type; char *name; struct procunit_value_info *values; }; static struct procunit_value_info updown_proc_info[] = { { UAC_UD_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; static struct procunit_value_info prologic_proc_info[] = { { UAC_DP_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_DP_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; static struct procunit_value_info threed_enh_proc_info[] = { { UAC_3D_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_3D_SPACE, "Spaciousness", USB_MIXER_U8 }, { 0 } }; static struct procunit_value_info reverb_proc_info[] = { { UAC_REVERB_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_REVERB_LEVEL, "Level", USB_MIXER_U8 }, { UAC_REVERB_TIME, "Time", USB_MIXER_U16 }, { UAC_REVERB_FEEDBACK, "Feedback", USB_MIXER_U8 }, { 0 } }; static struct procunit_value_info chorus_proc_info[] = { { UAC_CHORUS_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_CHORUS_LEVEL, "Level", USB_MIXER_U8 }, { UAC_CHORUS_RATE, "Rate", USB_MIXER_U16 }, { UAC_CHORUS_DEPTH, "Depth", USB_MIXER_U16 }, { 0 } }; static struct procunit_value_info dcr_proc_info[] = { { UAC_DCR_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_DCR_RATE, "Ratio", USB_MIXER_U16 }, { UAC_DCR_MAXAMPL, "Max Amp", USB_MIXER_S16 }, { UAC_DCR_THRESHOLD, "Threshold", USB_MIXER_S16 }, { UAC_DCR_ATTACK_TIME, "Attack Time", USB_MIXER_U16 }, { UAC_DCR_RELEASE_TIME, "Release Time", USB_MIXER_U16 }, { 0 } }; static struct procunit_info procunits[] = { { UAC_PROCESS_UP_DOWNMIX, "Up Down", updown_proc_info }, { UAC_PROCESS_DOLBY_PROLOGIC, "Dolby Prologic", prologic_proc_info }, { UAC_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", threed_enh_proc_info }, { UAC_PROCESS_REVERB, "Reverb", reverb_proc_info }, { 
UAC_PROCESS_CHORUS, "Chorus", chorus_proc_info }, { UAC_PROCESS_DYN_RANGE_COMP, "DCR", dcr_proc_info }, { 0 }, }; /* * predefined data for extension units */ static struct procunit_value_info clock_rate_xu_info[] = { { USB_XU_CLOCK_RATE_SELECTOR, "Selector", USB_MIXER_U8, 0 }, { 0 } }; static struct procunit_value_info clock_source_xu_info[] = { { USB_XU_CLOCK_SOURCE_SELECTOR, "External", USB_MIXER_BOOLEAN }, { 0 } }; static struct procunit_value_info spdif_format_xu_info[] = { { USB_XU_DIGITAL_FORMAT_SELECTOR, "SPDIF/AC3", USB_MIXER_BOOLEAN }, { 0 } }; static struct procunit_value_info soft_limit_xu_info[] = { { USB_XU_SOFT_LIMIT_SELECTOR, " ", USB_MIXER_BOOLEAN }, { 0 } }; static struct procunit_info extunits[] = { { USB_XU_CLOCK_RATE, "Clock rate", clock_rate_xu_info }, { USB_XU_CLOCK_SOURCE, "DigitalIn CLK source", clock_source_xu_info }, { USB_XU_DIGITAL_IO_STATUS, "DigitalOut format:", spdif_format_xu_info }, { USB_XU_DEVICE_OPTIONS, "AnalogueIn Soft Limit", soft_limit_xu_info }, { 0 } }; /* * build a processing/extension unit */ static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw_desc, struct procunit_info *list, char *name) { struct uac_processing_unit_descriptor *desc = raw_desc; int num_ins = desc->bNrInPins; struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; int i, err, nameid, type, len; struct procunit_info *info; struct procunit_value_info *valinfo; const struct usbmix_name_map *map; static struct procunit_value_info default_value_info[] = { { 0x01, "Switch", USB_MIXER_BOOLEAN }, { 0 } }; static struct procunit_info default_info = { 0, NULL, default_value_info }; if (desc->bLength < 13 || desc->bLength < 13 + num_ins || desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) { snd_printk(KERN_ERR "invalid %s descriptor (id %d)\n", name, unitid); return -EINVAL; } for (i = 0; i < num_ins; i++) { if ((err = parse_audio_unit(state, desc->baSourceID[i])) < 0) return err; } type = 
le16_to_cpu(desc->wProcessType); for (info = list; info && info->type; info++) if (info->type == type) break; if (! info || ! info->type) info = &default_info; for (valinfo = info->values; valinfo->control; valinfo++) { __u8 *controls = uac_processing_unit_bmControls(desc, state->mixer->protocol); if (! (controls[valinfo->control / 8] & (1 << ((valinfo->control % 8) - 1)))) continue; map = find_map(state, unitid, valinfo->control); if (check_ignored_ctl(map)) continue; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (! cval) { snd_printk(KERN_ERR "cannot malloc kcontrol\n"); return -ENOMEM; } cval->mixer = state->mixer; cval->id = unitid; cval->control = valinfo->control; cval->val_type = valinfo->val_type; cval->channels = 1; /* get min/max values */ if (type == UAC_PROCESS_UP_DOWNMIX && cval->control == UAC_UD_MODE_SELECT) { __u8 *control_spec = uac_processing_unit_specific(desc, state->mixer->protocol); /* FIXME: hard-coded */ cval->min = 1; cval->max = control_spec[0]; cval->res = 1; cval->initialized = 1; } else { if (type == USB_XU_CLOCK_RATE) { /* E-Mu USB 0404/0202/TrackerPre/0204 * samplerate control quirk */ cval->min = 0; cval->max = 5; cval->res = 1; cval->initialized = 1; } else get_min_max(cval, valinfo->min_value); } kctl = snd_ctl_new1(&mixer_procunit_ctl, cval); if (! kctl) { snd_printk(KERN_ERR "cannot malloc kcontrol\n"); kfree(cval); return -ENOMEM; } kctl->private_free = usb_mixer_elem_free; if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name))) /* nothing */ ; else if (info->name) strlcpy(kctl->id.name, info->name, sizeof(kctl->id.name)); else { nameid = uac_processing_unit_iProcessing(desc, state->mixer->protocol); len = 0; if (nameid) len = snd_usb_copy_string_desc(state, nameid, kctl->id.name, sizeof(kctl->id.name)); if (! 
len) strlcpy(kctl->id.name, name, sizeof(kctl->id.name)); } append_ctl_name(kctl, " "); append_ctl_name(kctl, valinfo->suffix); snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n", cval->id, kctl->id.name, cval->channels, cval->min, cval->max); if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0) return err; } return 0; } static int parse_audio_processing_unit(struct mixer_build *state, int unitid, void *raw_desc) { return build_audio_procunit(state, unitid, raw_desc, procunits, "Processing Unit"); } static int parse_audio_extension_unit(struct mixer_build *state, int unitid, void *raw_desc) { /* Note that we parse extension units with processing unit descriptors. * That's ok as the layout is the same */ return build_audio_procunit(state, unitid, raw_desc, extunits, "Extension Unit"); } /* * Selector Unit */ /* info callback for selector unit * use an enumerator type for routing */ static int mixer_ctl_selector_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct usb_mixer_elem_info *cval = kcontrol->private_data; const char **itemlist = (const char **)kcontrol->private_value; if (snd_BUG_ON(!itemlist)) return -EINVAL; return snd_ctl_enum_info(uinfo, 1, cval->max, itemlist); } /* get callback for selector unit */ static int mixer_ctl_selector_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = kcontrol->private_data; int val, err; err = get_cur_ctl_value(cval, cval->control << 8, &val); if (err < 0) { if (cval->mixer->ignore_ctl_error) { ucontrol->value.enumerated.item[0] = 0; return 0; } return err; } val = get_relative_value(cval, val); ucontrol->value.enumerated.item[0] = val; return 0; } /* put callback for selector unit */ static int mixer_ctl_selector_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = kcontrol->private_data; int val, oval, err; err = get_cur_ctl_value(cval, cval->control << 8, &oval); if 
(err < 0) { if (cval->mixer->ignore_ctl_error) return 0; return err; } val = ucontrol->value.enumerated.item[0]; val = get_abs_value(cval, val); if (val != oval) { set_cur_ctl_value(cval, cval->control << 8, val); return 1; } return 0; } /* alsa control interface for selector unit */ static struct snd_kcontrol_new mixer_selectunit_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later */ .info = mixer_ctl_selector_info, .get = mixer_ctl_selector_get, .put = mixer_ctl_selector_put, }; /* private free callback. * free both private_data and private_value */ static void usb_mixer_selector_elem_free(struct snd_kcontrol *kctl) { int i, num_ins = 0; if (kctl->private_data) { struct usb_mixer_elem_info *cval = kctl->private_data; num_ins = cval->max; kfree(cval); kctl->private_data = NULL; } if (kctl->private_value) { char **itemlist = (char **)kctl->private_value; for (i = 0; i < num_ins; i++) kfree(itemlist[i]); kfree(itemlist); kctl->private_value = 0; } } /* * parse a selector unit */ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, void *raw_desc) { struct uac_selector_unit_descriptor *desc = raw_desc; unsigned int i, nameid, len; int err; struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; const struct usbmix_name_map *map; char **namelist; if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) { snd_printk(KERN_ERR "invalid SELECTOR UNIT descriptor %d\n", unitid); return -EINVAL; } for (i = 0; i < desc->bNrInPins; i++) { if ((err = parse_audio_unit(state, desc->baSourceID[i])) < 0) return err; } if (desc->bNrInPins == 1) /* only one ? nonsense! */ return 0; map = find_map(state, unitid, 0); if (check_ignored_ctl(map)) return 0; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (! 
cval) { snd_printk(KERN_ERR "cannot malloc kcontrol\n"); return -ENOMEM; } cval->mixer = state->mixer; cval->id = unitid; cval->val_type = USB_MIXER_U8; cval->channels = 1; cval->min = 1; cval->max = desc->bNrInPins; cval->res = 1; cval->initialized = 1; if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) cval->control = UAC2_CX_CLOCK_SELECTOR; else cval->control = 0; namelist = kmalloc(sizeof(char *) * desc->bNrInPins, GFP_KERNEL); if (! namelist) { snd_printk(KERN_ERR "cannot malloc\n"); kfree(cval); return -ENOMEM; } #define MAX_ITEM_NAME_LEN 64 for (i = 0; i < desc->bNrInPins; i++) { struct usb_audio_term iterm; len = 0; namelist[i] = kmalloc(MAX_ITEM_NAME_LEN, GFP_KERNEL); if (! namelist[i]) { snd_printk(KERN_ERR "cannot malloc\n"); while (i--) kfree(namelist[i]); kfree(namelist); kfree(cval); return -ENOMEM; } len = check_mapped_selector_name(state, unitid, i, namelist[i], MAX_ITEM_NAME_LEN); if (! len && check_input_term(state, desc->baSourceID[i], &iterm) >= 0) len = get_term_name(state, &iterm, namelist[i], MAX_ITEM_NAME_LEN, 0); if (! len) sprintf(namelist[i], "Input %d", i); } kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval); if (! kctl) { snd_printk(KERN_ERR "cannot malloc kcontrol\n"); kfree(namelist); kfree(cval); return -ENOMEM; } kctl->private_value = (unsigned long)namelist; kctl->private_free = usb_mixer_selector_elem_free; nameid = uac_selector_unit_iSelector(desc); len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); if (len) ; else if (nameid) snd_usb_copy_string_desc(state, nameid, kctl->id.name, sizeof(kctl->id.name)); else { len = get_term_name(state, &state->oterm, kctl->id.name, sizeof(kctl->id.name), 0); if (! 
len) strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name)); if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) append_ctl_name(kctl, " Clock Source"); else if ((state->oterm.type & 0xff00) == 0x0100) append_ctl_name(kctl, " Capture Source"); else append_ctl_name(kctl, " Playback Source"); } snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n", cval->id, kctl->id.name, desc->bNrInPins); if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0) return err; return 0; } /* * parse an audio unit recursively */ static int parse_audio_unit(struct mixer_build *state, int unitid) { unsigned char *p1; if (test_and_set_bit(unitid, state->unitbitmap)) return 0; /* the unit already visited */ p1 = find_audio_control_unit(state, unitid); if (!p1) { snd_printk(KERN_ERR "usbaudio: unit %d not found!\n", unitid); return -EINVAL; } switch (p1[2]) { case UAC_INPUT_TERMINAL: case UAC2_CLOCK_SOURCE: return 0; /* NOP */ case UAC_MIXER_UNIT: return parse_audio_mixer_unit(state, unitid, p1); case UAC_SELECTOR_UNIT: case UAC2_CLOCK_SELECTOR: return parse_audio_selector_unit(state, unitid, p1); case UAC_FEATURE_UNIT: return parse_audio_feature_unit(state, unitid, p1); case UAC1_PROCESSING_UNIT: /* UAC2_EFFECT_UNIT has the same value */ if (state->mixer->protocol == UAC_VERSION_1) return parse_audio_processing_unit(state, unitid, p1); else return 0; /* FIXME - effect units not implemented yet */ case UAC1_EXTENSION_UNIT: /* UAC2_PROCESSING_UNIT_V2 has the same value */ if (state->mixer->protocol == UAC_VERSION_1) return parse_audio_extension_unit(state, unitid, p1); else /* UAC_VERSION_2 */ return parse_audio_processing_unit(state, unitid, p1); case UAC2_EXTENSION_UNIT_V2: return parse_audio_extension_unit(state, unitid, p1); default: snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]); return -EINVAL; } } static void snd_usb_mixer_free(struct usb_mixer_interface *mixer) { kfree(mixer->id_elems); if (mixer->urb) { kfree(mixer->urb->transfer_buffer); 
usb_free_urb(mixer->urb); } usb_free_urb(mixer->rc_urb); kfree(mixer->rc_setup_packet); kfree(mixer); } static int snd_usb_mixer_dev_free(struct snd_device *device) { struct usb_mixer_interface *mixer = device->device_data; snd_usb_mixer_free(mixer); return 0; } /* * create mixer controls * * walk through all UAC_OUTPUT_TERMINAL descriptors to search for mixers */ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) { struct mixer_build state; int err; const struct usbmix_ctl_map *map; void *p; memset(&state, 0, sizeof(state)); state.chip = mixer->chip; state.mixer = mixer; state.buffer = mixer->hostif->extra; state.buflen = mixer->hostif->extralen; /* check the mapping table */ for (map = usbmix_ctl_maps; map->id; map++) { if (map->id == state.chip->usb_id) { state.map = map->map; state.selector_map = map->selector_map; mixer->ignore_ctl_error = map->ignore_ctl_error; break; } } p = NULL; while ((p = snd_usb_find_csint_desc(mixer->hostif->extra, mixer->hostif->extralen, p, UAC_OUTPUT_TERMINAL)) != NULL) { if (mixer->protocol == UAC_VERSION_1) { struct uac1_output_terminal_descriptor *desc = p; if (desc->bLength < sizeof(*desc)) continue; /* invalid descriptor? */ set_bit(desc->bTerminalID, state.unitbitmap); /* mark terminal ID as visited */ state.oterm.id = desc->bTerminalID; state.oterm.type = le16_to_cpu(desc->wTerminalType); state.oterm.name = desc->iTerminal; err = parse_audio_unit(&state, desc->bSourceID); if (err < 0 && err != -EINVAL) return err; } else { /* UAC_VERSION_2 */ struct uac2_output_terminal_descriptor *desc = p; if (desc->bLength < sizeof(*desc)) continue; /* invalid descriptor? 
*/ set_bit(desc->bTerminalID, state.unitbitmap); /* mark terminal ID as visited */ state.oterm.id = desc->bTerminalID; state.oterm.type = le16_to_cpu(desc->wTerminalType); state.oterm.name = desc->iTerminal; err = parse_audio_unit(&state, desc->bSourceID); if (err < 0 && err != -EINVAL) return err; /* for UAC2, use the same approach to also add the clock selectors */ err = parse_audio_unit(&state, desc->bCSourceID); if (err < 0 && err != -EINVAL) return err; } } return 0; } void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid) { struct usb_mixer_elem_info *info; for (info = mixer->id_elems[unitid]; info; info = info->next_id_elem) snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, info->elem_id); } static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer, int unitid, struct usb_mixer_elem_info *cval) { static char *val_types[] = {"BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16"}; snd_iprintf(buffer, " Unit: %i\n", unitid); if (cval->elem_id) snd_iprintf(buffer, " Control: name=\"%s\", index=%i\n", cval->elem_id->name, cval->elem_id->index); snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, " "channels=%i, type=\"%s\"\n", cval->id, cval->control, cval->cmask, cval->channels, val_types[cval->val_type]); snd_iprintf(buffer, " Volume: min=%i, max=%i, dBmin=%i, dBmax=%i\n", cval->min, cval->max, cval->dBmin, cval->dBmax); } static void snd_usb_mixer_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_usb_audio *chip = entry->private_data; struct usb_mixer_interface *mixer; struct usb_mixer_elem_info *cval; int unitid; list_for_each_entry(mixer, &chip->mixer_list, list) { snd_iprintf(buffer, "USB Mixer: usb_id=0x%08x, ctrlif=%i, ctlerr=%i\n", chip->usb_id, snd_usb_ctrl_intf(chip), mixer->ignore_ctl_error); snd_iprintf(buffer, "Card: %s\n", chip->card->longname); for (unitid = 0; unitid < MAX_ID_ELEMS; unitid++) { for (cval = mixer->id_elems[unitid]; cval; cval = cval->next_id_elem) 
snd_usb_mixer_dump_cval(buffer, unitid, cval); } } } static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, int attribute, int value, int index) { struct usb_mixer_elem_info *info; __u8 unitid = (index >> 8) & 0xff; __u8 control = (value >> 8) & 0xff; __u8 channel = value & 0xff; if (channel >= MAX_CHANNELS) { snd_printk(KERN_DEBUG "%s(): bogus channel number %d\n", __func__, channel); return; } for (info = mixer->id_elems[unitid]; info; info = info->next_id_elem) { if (info->control != control) continue; switch (attribute) { case UAC2_CS_CUR: /* invalidate cache, so the value is read from the device */ if (channel) info->cached &= ~(1 << channel); else /* master channel */ info->cached = 0; snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, info->elem_id); break; case UAC2_CS_RANGE: /* TODO */ break; case UAC2_CS_MEM: /* TODO */ break; default: snd_printk(KERN_DEBUG "unknown attribute %d in interrupt\n", attribute); break; } /* switch */ } } static void snd_usb_mixer_interrupt(struct urb *urb) { struct usb_mixer_interface *mixer = urb->context; int len = urb->actual_length; int ustatus = urb->status; if (ustatus != 0) goto requeue; if (mixer->protocol == UAC_VERSION_1) { struct uac1_status_word *status; for (status = urb->transfer_buffer; len >= sizeof(*status); len -= sizeof(*status), status++) { snd_printd(KERN_DEBUG "status interrupt: %02x %02x\n", status->bStatusType, status->bOriginator); /* ignore any notifications not from the control interface */ if ((status->bStatusType & UAC1_STATUS_TYPE_ORIG_MASK) != UAC1_STATUS_TYPE_ORIG_AUDIO_CONTROL_IF) continue; if (status->bStatusType & UAC1_STATUS_TYPE_MEM_CHANGED) snd_usb_mixer_rc_memory_change(mixer, status->bOriginator); else snd_usb_mixer_notify_id(mixer, status->bOriginator); } } else { /* UAC_VERSION_2 */ struct uac2_interrupt_data_msg *msg; for (msg = urb->transfer_buffer; len >= sizeof(*msg); len -= sizeof(*msg), msg++) { /* drop vendor specific and endpoint requests */ if 
((msg->bInfo & UAC2_INTERRUPT_DATA_MSG_VENDOR) || (msg->bInfo & UAC2_INTERRUPT_DATA_MSG_EP)) continue; snd_usb_mixer_interrupt_v2(mixer, msg->bAttribute, le16_to_cpu(msg->wValue), le16_to_cpu(msg->wIndex)); } } requeue: if (ustatus != -ENOENT && ustatus != -ECONNRESET && ustatus != -ESHUTDOWN) { urb->dev = mixer->chip->dev; usb_submit_urb(urb, GFP_ATOMIC); } } /* stop any bus activity of a mixer */ void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer) { usb_kill_urb(mixer->urb); usb_kill_urb(mixer->rc_urb); } int snd_usb_mixer_activate(struct usb_mixer_interface *mixer) { int err; if (mixer->urb) { err = usb_submit_urb(mixer->urb, GFP_NOIO); if (err < 0) return err; } return 0; } /* create the handler for the optional status interrupt endpoint */ static int snd_usb_mixer_status_create(struct usb_mixer_interface *mixer) { struct usb_endpoint_descriptor *ep; void *transfer_buffer; int buffer_length; unsigned int epnum; /* we need one interrupt input endpoint */ if (get_iface_desc(mixer->hostif)->bNumEndpoints < 1) return 0; ep = get_endpoint(mixer->hostif, 0); if (!usb_endpoint_dir_in(ep) || !usb_endpoint_xfer_int(ep)) return 0; epnum = usb_endpoint_num(ep); buffer_length = le16_to_cpu(ep->wMaxPacketSize); transfer_buffer = kmalloc(buffer_length, GFP_KERNEL); if (!transfer_buffer) return -ENOMEM; mixer->urb = usb_alloc_urb(0, GFP_KERNEL); if (!mixer->urb) { kfree(transfer_buffer); return -ENOMEM; } usb_fill_int_urb(mixer->urb, mixer->chip->dev, usb_rcvintpipe(mixer->chip->dev, epnum), transfer_buffer, buffer_length, snd_usb_mixer_interrupt, mixer, ep->bInterval); usb_submit_urb(mixer->urb, GFP_KERNEL); return 0; } int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif, int ignore_error) { static struct snd_device_ops dev_ops = { .dev_free = snd_usb_mixer_dev_free }; struct usb_mixer_interface *mixer; struct snd_info_entry *entry; int err; strcpy(chip->card->mixername, "USB Mixer"); mixer = kzalloc(sizeof(*mixer), GFP_KERNEL); if (!mixer) return 
-ENOMEM; mixer->chip = chip; mixer->ignore_ctl_error = ignore_error; mixer->id_elems = kcalloc(MAX_ID_ELEMS, sizeof(*mixer->id_elems), GFP_KERNEL); if (!mixer->id_elems) { kfree(mixer); return -ENOMEM; } mixer->hostif = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0]; switch (get_iface_desc(mixer->hostif)->bInterfaceProtocol) { case UAC_VERSION_1: default: mixer->protocol = UAC_VERSION_1; break; case UAC_VERSION_2: mixer->protocol = UAC_VERSION_2; break; } if ((err = snd_usb_mixer_controls(mixer)) < 0 || (err = snd_usb_mixer_status_create(mixer)) < 0) goto _error; snd_usb_mixer_apply_create_quirk(mixer); err = snd_device_new(chip->card, SNDRV_DEV_LOWLEVEL, mixer, &dev_ops); if (err < 0) goto _error; if (list_empty(&chip->mixer_list) && !snd_card_proc_new(chip->card, "usbmixer", &entry)) snd_info_set_text_ops(entry, chip, snd_usb_mixer_proc_read); list_add(&mixer->list, &chip->mixer_list); return 0; _error: snd_usb_mixer_free(mixer); return err; } void snd_usb_mixer_disconnect(struct list_head *p) { struct usb_mixer_interface *mixer; mixer = list_entry(p, struct usb_mixer_interface, list); usb_kill_urb(mixer->urb); usb_kill_urb(mixer->rc_urb); }
gpl-2.0
skristiansson/eco32-linux
drivers/gpu/drm/qxl/qxl_ttm.c
167
15421
/* * Copyright 2013 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alon Levy */ #include <ttm/ttm_bo_api.h> #include <ttm/ttm_bo_driver.h> #include <ttm/ttm_placement.h> #include <ttm/ttm_page_alloc.h> #include <ttm/ttm_module.h> #include <drm/drmP.h> #include <drm/drm.h> #include <drm/qxl_drm.h> #include "qxl_drv.h" #include "qxl_object.h" #include <linux/delay.h> static int qxl_ttm_debugfs_init(struct qxl_device *qdev); static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev) { struct qxl_mman *mman; struct qxl_device *qdev; mman = container_of(bdev, struct qxl_mman, bdev); qdev = container_of(mman, struct qxl_device, mman); return qdev; } static int qxl_ttm_mem_global_init(struct drm_global_reference *ref) { return ttm_mem_global_init(ref->object); } static void qxl_ttm_mem_global_release(struct drm_global_reference *ref) { ttm_mem_global_release(ref->object); } static int qxl_ttm_global_init(struct qxl_device *qdev) { struct drm_global_reference *global_ref; int r; qdev->mman.mem_global_referenced = false; global_ref = &qdev->mman.mem_global_ref; global_ref->global_type = DRM_GLOBAL_TTM_MEM; global_ref->size = sizeof(struct ttm_mem_global); global_ref->init = &qxl_ttm_mem_global_init; global_ref->release = &qxl_ttm_mem_global_release; r = drm_global_item_ref(global_ref); if (r != 0) { DRM_ERROR("Failed setting up TTM memory accounting " "subsystem.\n"); return r; } qdev->mman.bo_global_ref.mem_glob = qdev->mman.mem_global_ref.object; global_ref = &qdev->mman.bo_global_ref.ref; global_ref->global_type = DRM_GLOBAL_TTM_BO; global_ref->size = sizeof(struct ttm_bo_global); global_ref->init = &ttm_bo_global_init; global_ref->release = &ttm_bo_global_release; r = drm_global_item_ref(global_ref); if (r != 0) { DRM_ERROR("Failed setting up TTM BO subsystem.\n"); drm_global_item_unref(&qdev->mman.mem_global_ref); return r; } qdev->mman.mem_global_referenced = true; return 0; } static void qxl_ttm_global_fini(struct qxl_device *qdev) { if (qdev->mman.mem_global_referenced) { 
drm_global_item_unref(&qdev->mman.bo_global_ref.ref); drm_global_item_unref(&qdev->mman.mem_global_ref); qdev->mman.mem_global_referenced = false; } } static struct vm_operations_struct qxl_ttm_vm_ops; static const struct vm_operations_struct *ttm_vm_ops; static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct ttm_buffer_object *bo; struct qxl_device *qdev; int r; bo = (struct ttm_buffer_object *)vma->vm_private_data; if (bo == NULL) return VM_FAULT_NOPAGE; qdev = qxl_get_qdev(bo->bdev); r = ttm_vm_ops->fault(vma, vmf); return r; } int qxl_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *file_priv; struct qxl_device *qdev; int r; if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { pr_info("%s: vma->vm_pgoff (%ld) < DRM_FILE_PAGE_OFFSET\n", __func__, vma->vm_pgoff); return drm_mmap(filp, vma); } file_priv = filp->private_data; qdev = file_priv->minor->dev->dev_private; if (qdev == NULL) { DRM_ERROR( "filp->private_data->minor->dev->dev_private == NULL\n"); return -EINVAL; } QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n", __func__, filp->private_data, vma->vm_pgoff); r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev); if (unlikely(r != 0)) return r; if (unlikely(ttm_vm_ops == NULL)) { ttm_vm_ops = vma->vm_ops; qxl_ttm_vm_ops = *ttm_vm_ops; qxl_ttm_vm_ops.fault = &qxl_ttm_fault; } vma->vm_ops = &qxl_ttm_vm_ops; return 0; } static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) { return 0; } static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, struct ttm_mem_type_manager *man) { struct qxl_device *qdev; qdev = qxl_get_qdev(bdev); switch (type) { case TTM_PL_SYSTEM: /* System memory */ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_VRAM: case TTM_PL_PRIV0: /* "On-card" video ram */ man->func = &ttm_bo_manager_func; man->gpu_offset = 0; man->flags = 
TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; break; default: DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); return -EINVAL; } return 0; } static void qxl_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct qxl_bo *qbo; static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; if (!qxl_ttm_bo_is_qxl_bo(bo)) { placement->fpfn = 0; placement->lpfn = 0; placement->placement = &placements; placement->busy_placement = &placements; placement->num_placement = 1; placement->num_busy_placement = 1; return; } qbo = container_of(bo, struct qxl_bo, tbo); qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false); *placement = qbo->placement; } static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp) { struct qxl_bo *qbo = to_qxl_bo(bo); return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp); } static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct qxl_device *qdev = qxl_get_qdev(bdev); mem->bus.addr = NULL; mem->bus.offset = 0; mem->bus.size = mem->num_pages << PAGE_SHIFT; mem->bus.base = 0; mem->bus.is_iomem = false; if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) return -EINVAL; switch (mem->mem_type) { case TTM_PL_SYSTEM: /* system memory */ return 0; case TTM_PL_VRAM: mem->bus.is_iomem = true; mem->bus.base = qdev->vram_base; mem->bus.offset = mem->start << PAGE_SHIFT; break; case TTM_PL_PRIV0: mem->bus.is_iomem = true; mem->bus.base = qdev->surfaceram_base; mem->bus.offset = mem->start << PAGE_SHIFT; break; default: return -EINVAL; } return 0; } static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { } /* * TTM backend functions. 
*/ struct qxl_ttm_tt { struct ttm_dma_tt ttm; struct qxl_device *qdev; u64 offset; }; static int qxl_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct qxl_ttm_tt *gtt = (void *)ttm; gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); if (!ttm->num_pages) { WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); } /* Not implemented */ return -1; } static int qxl_ttm_backend_unbind(struct ttm_tt *ttm) { /* Not implemented */ return -1; } static void qxl_ttm_backend_destroy(struct ttm_tt *ttm) { struct qxl_ttm_tt *gtt = (void *)ttm; ttm_dma_tt_fini(&gtt->ttm); kfree(gtt); } static struct ttm_backend_func qxl_backend_func = { .bind = &qxl_ttm_backend_bind, .unbind = &qxl_ttm_backend_unbind, .destroy = &qxl_ttm_backend_destroy, }; static int qxl_ttm_tt_populate(struct ttm_tt *ttm) { int r; if (ttm->state != tt_unpopulated) return 0; r = ttm_pool_populate(ttm); if (r) return r; return 0; } static void qxl_ttm_tt_unpopulate(struct ttm_tt *ttm) { ttm_pool_unpopulate(ttm); } static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, uint32_t page_flags, struct page *dummy_read_page) { struct qxl_device *qdev; struct qxl_ttm_tt *gtt; qdev = qxl_get_qdev(bdev); gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL); if (gtt == NULL) return NULL; gtt->ttm.ttm.func = &qxl_backend_func; gtt->qdev = qdev; if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) { kfree(gtt); return NULL; } return &gtt->ttm.ttm; } static void qxl_move_null(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_mem_reg *old_mem = &bo->mem; BUG_ON(old_mem->mm_node != NULL); *old_mem = *new_mem; new_mem->mm_node = NULL; } static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_mem_reg *old_mem = &bo->mem; if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { qxl_move_null(bo, 
new_mem); return 0; } return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); } static int qxl_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) { struct qxl_fence *qfence = (struct qxl_fence *)sync_obj; int count = 0, sc = 0; struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence); if (qfence->num_active_releases == 0) return 0; retry: if (sc == 0) { if (bo->type == QXL_GEM_DOMAIN_SURFACE) qxl_update_surface(qfence->qdev, bo); } else if (sc >= 1) { qxl_io_notify_oom(qfence->qdev); } sc++; for (count = 0; count < 10; count++) { bool ret; ret = qxl_queue_garbage_collect(qfence->qdev, true); if (ret == false) break; if (qfence->num_active_releases == 0) return 0; } if (qfence->num_active_releases) { bool have_drawable_releases = false; void **slot; struct radix_tree_iter iter; int release_id; radix_tree_for_each_slot(slot, &qfence->tree, &iter, 0) { struct qxl_release *release; release_id = iter.index; release = qxl_release_from_id_locked(qfence->qdev, release_id); if (release == NULL) continue; if (release->type == QXL_RELEASE_DRAWABLE) have_drawable_releases = true; } qxl_queue_garbage_collect(qfence->qdev, true); if (have_drawable_releases || sc < 4) { if (sc > 2) /* back off */ usleep_range(500, 1000); if (have_drawable_releases && sc > 300) { WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, qfence->num_active_releases); return -EBUSY; } goto retry; } } return 0; } static int qxl_sync_obj_flush(void *sync_obj) { return 0; } static void qxl_sync_obj_unref(void **sync_obj) { } static void *qxl_sync_obj_ref(void *sync_obj) { return sync_obj; } static bool qxl_sync_obj_signaled(void *sync_obj) { struct qxl_fence *qfence = (struct qxl_fence *)sync_obj; return (qfence->num_active_releases == 0); } static void qxl_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct qxl_bo *qbo; struct qxl_device *qdev; if 
(!qxl_ttm_bo_is_qxl_bo(bo)) return; qbo = container_of(bo, struct qxl_bo, tbo); qdev = qbo->gem_base.dev->dev_private; if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id) qxl_surface_evict(qdev, qbo, new_mem ? true : false); } static struct ttm_bo_driver qxl_bo_driver = { .ttm_tt_create = &qxl_ttm_tt_create, .ttm_tt_populate = &qxl_ttm_tt_populate, .ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate, .invalidate_caches = &qxl_invalidate_caches, .init_mem_type = &qxl_init_mem_type, .evict_flags = &qxl_evict_flags, .move = &qxl_bo_move, .verify_access = &qxl_verify_access, .io_mem_reserve = &qxl_ttm_io_mem_reserve, .io_mem_free = &qxl_ttm_io_mem_free, .sync_obj_signaled = &qxl_sync_obj_signaled, .sync_obj_wait = &qxl_sync_obj_wait, .sync_obj_flush = &qxl_sync_obj_flush, .sync_obj_unref = &qxl_sync_obj_unref, .sync_obj_ref = &qxl_sync_obj_ref, .move_notify = &qxl_bo_move_notify, }; int qxl_ttm_init(struct qxl_device *qdev) { int r; int num_io_pages; /* != rom->num_io_pages, we include surface0 */ r = qxl_ttm_global_init(qdev); if (r) return r; /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&qdev->mman.bdev, qdev->mman.bo_global_ref.ref.object, &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; } /* NOTE: this includes the framebuffer (aka surface 0) */ num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE; r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM, num_io_pages); if (r) { DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0, qdev->surfaceram_size / PAGE_SIZE); if (r) { DRM_ERROR("Failed initializing Surfaces heap.\n"); return r; } DRM_INFO("qxl: %uM of VRAM memory size\n", (unsigned)qdev->vram_size / (1024 * 1024)); DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n", ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024)); DRM_INFO("qxl: %uM of Surface memory size\n", 
(unsigned)qdev->surfaceram_size / (1024 * 1024)); if (unlikely(qdev->mman.bdev.dev_mapping == NULL)) qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping; r = qxl_ttm_debugfs_init(qdev); if (r) { DRM_ERROR("Failed to init debugfs\n"); return r; } return 0; } void qxl_ttm_fini(struct qxl_device *qdev) { ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0); ttm_bo_device_release(&qdev->mman.bdev); qxl_ttm_global_fini(qdev); DRM_INFO("qxl: ttm finalized\n"); } #define QXL_DEBUGFS_MEM_TYPES 2 #if defined(CONFIG_DEBUG_FS) static int qxl_mm_dump_table(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_mm *mm = (struct drm_mm *)node->info_ent->data; struct drm_device *dev = node->minor->dev; struct qxl_device *rdev = dev->dev_private; int ret; struct ttm_bo_global *glob = rdev->mman.bdev.glob; spin_lock(&glob->lru_lock); ret = drm_mm_dump_table(m, mm); spin_unlock(&glob->lru_lock); return ret; } #endif static int qxl_ttm_debugfs_init(struct qxl_device *qdev) { #if defined(CONFIG_DEBUG_FS) static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES]; static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32]; unsigned i; for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) { if (i == 0) sprintf(qxl_mem_types_names[i], "qxl_mem_mm"); else sprintf(qxl_mem_types_names[i], "qxl_surf_mm"); qxl_mem_types_list[i].name = qxl_mem_types_names[i]; qxl_mem_types_list[i].show = &qxl_mm_dump_table; qxl_mem_types_list[i].driver_features = 0; if (i == 0) qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv; else qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv; } return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i); #else return 0; #endif }
gpl-2.0
ls2uper/linux
arch/powerpc/kernel/irq.c
679
17946
/* * Derived from arch/i386/kernel/irq.c * Copyright (C) 1992 Linus Torvalds * Adapted from arch/i386 by Gary Thomas * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * Updated and modified by Cort Dougan <cort@fsmlabs.com> * Copyright (C) 1996-2001 Cort Dougan * Adapted for Power Macintosh by Paul Mackerras * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * This file contains the code used by various IRQ handling routines: * asking for different IRQ's should be done through these routines * instead of just grabbing them. Thus setups with different IRQ numbers * shouldn't result in any weird surprises, and installing new handlers * should be easier. * * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit * mask register (of which only 16 are defined), hence the weird shifting * and complement of the cached_irq_mask. I want to be able to stuff * this right into the SIU SMASK register. * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx * to reduce code space and undefined function references. 
*/ #undef DEBUG #include <linux/export.h> #include <linux/threads.h> #include <linux/kernel_stat.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/timex.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/seq_file.h> #include <linux/cpumask.h> #include <linux/profile.h> #include <linux/bitops.h> #include <linux/list.h> #include <linux/radix-tree.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/debugfs.h> #include <linux/of.h> #include <linux/of_irq.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/cache.h> #include <asm/prom.h> #include <asm/ptrace.h> #include <asm/machdep.h> #include <asm/udbg.h> #include <asm/smp.h> #include <asm/debug.h> #ifdef CONFIG_PPC64 #include <asm/paca.h> #include <asm/firmware.h> #include <asm/lv1call.h> #endif #define CREATE_TRACE_POINTS #include <asm/trace.h> DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); EXPORT_PER_CPU_SYMBOL(irq_stat); int __irq_offset_value; #ifdef CONFIG_PPC32 EXPORT_SYMBOL(__irq_offset_value); atomic_t ppc_n_lost_interrupts; #ifdef CONFIG_TAU_INT extern int tau_initialized; extern int tau_interrupts(int); #endif #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PPC64 int distribute_irqs = 1; static inline notrace unsigned long get_irq_happened(void) { unsigned long happened; __asm__ __volatile__("lbz %0,%1(13)" : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened))); return happened; } static inline notrace void set_soft_enabled(unsigned long enable) { __asm__ __volatile__("stb %0,%1(13)" : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); } static inline notrace int decrementer_check_overflow(void) { u64 now = get_tb_or_rtc(); u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); return now >= *next_tb; } /* This is called whenever we are re-enabling 
interrupts * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if * there's an EE, DEC or DBELL to generate. * * This is called in two contexts: From arch_local_irq_restore() * before soft-enabling interrupts, and from the exception exit * path when returning from an interrupt from a soft-disabled to * a soft enabled context. In both case we have interrupts hard * disabled. * * We take care of only clearing the bits we handled in the * PACA irq_happened field since we can only re-emit one at a * time and we don't want to "lose" one. */ notrace unsigned int __check_irq_replay(void) { /* * We use local_paca rather than get_paca() to avoid all * the debug_smp_processor_id() business in this low level * function */ unsigned char happened = local_paca->irq_happened; /* Clear bit 0 which we wouldn't clear otherwise */ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; /* * Force the delivery of pending soft-disabled interrupts on PS3. * Any HV call will have this side effect. */ if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { u64 tmp, tmp2; lv1_get_version_info(&tmp, &tmp2); } /* * We may have missed a decrementer interrupt. 
We check the * decrementer itself rather than the paca irq_happened field * in case we also had a rollover while hard disabled */ local_paca->irq_happened &= ~PACA_IRQ_DEC; if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow()) return 0x900; /* Finally check if an external interrupt happened */ local_paca->irq_happened &= ~PACA_IRQ_EE; if (happened & PACA_IRQ_EE) return 0x500; #ifdef CONFIG_PPC_BOOK3E /* Finally check if an EPR external interrupt happened * this bit is typically set if we need to handle another * "edge" interrupt from within the MPIC "EPR" handler */ local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE; if (happened & PACA_IRQ_EE_EDGE) return 0x500; local_paca->irq_happened &= ~PACA_IRQ_DBELL; if (happened & PACA_IRQ_DBELL) return 0x280; #else local_paca->irq_happened &= ~PACA_IRQ_DBELL; if (happened & PACA_IRQ_DBELL) { if (cpu_has_feature(CPU_FTR_HVMODE)) return 0xe80; return 0xa00; } #endif /* CONFIG_PPC_BOOK3E */ /* Check if an hypervisor Maintenance interrupt happened */ local_paca->irq_happened &= ~PACA_IRQ_HMI; if (happened & PACA_IRQ_HMI) return 0xe60; /* There should be nothing left ! */ BUG_ON(local_paca->irq_happened != 0); return 0; } notrace void arch_local_irq_restore(unsigned long en) { unsigned char irq_happened; unsigned int replay; /* Write the new soft-enabled value */ set_soft_enabled(en); if (!en) return; /* * From this point onward, we can take interrupts, preempt, * etc... unless we got hard-disabled. We check if an event * happened. If none happened, we know we can just return. * * We may have preempted before the check below, in which case * we are checking the "new" CPU instead of the old one. This * is only a problem if an event happened on the "old" CPU. * * External interrupt events will have caused interrupts to * be hard-disabled, so there is no problem, we * cannot have preempted. 
*/ irq_happened = get_irq_happened(); if (!irq_happened) return; /* * We need to hard disable to get a trusted value from * __check_irq_replay(). We also need to soft-disable * again to avoid warnings in there due to the use of * per-cpu variables. * * We know that if the value in irq_happened is exactly 0x01 * then we are already hard disabled (there are other less * common cases that we'll ignore for now), so we skip the * (expensive) mtmsrd. */ if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) __hard_irq_disable(); #ifdef CONFIG_TRACE_IRQFLAGS else { /* * We should already be hard disabled here. We had bugs * where that wasn't the case so let's dbl check it and * warn if we are wrong. Only do that when IRQ tracing * is enabled as mfmsr() can be costly. */ if (WARN_ON(mfmsr() & MSR_EE)) __hard_irq_disable(); } #endif /* CONFIG_TRACE_IRQFLAG */ set_soft_enabled(0); /* * Check if anything needs to be re-emitted. We haven't * soft-enabled yet to avoid warnings in decrementer_check_overflow * accessing per-cpu variables */ replay = __check_irq_replay(); /* We can soft-enable now */ set_soft_enabled(1); /* * And replay if we have to. This will return with interrupts * hard-enabled. */ if (replay) { __replay_interrupt(replay); return; } /* Finally, let's ensure we are hard enabled */ __hard_irq_enable(); } EXPORT_SYMBOL(arch_local_irq_restore); /* * This is specifically called by assembly code to re-enable interrupts * if they are currently disabled. This is typically called before * schedule() or do_signal() when returning to userspace. We do it * in C to avoid the burden of dealing with lockdep etc... * * NOTE: This is called with interrupts hard disabled but not marked * as such in paca->irq_happened, so we need to resync this. 
*/ void notrace restore_interrupts(void) { if (irqs_disabled()) { local_paca->irq_happened |= PACA_IRQ_HARD_DIS; local_irq_enable(); } else __hard_irq_enable(); } /* * This is a helper to use when about to go into idle low-power * when the latter has the side effect of re-enabling interrupts * (such as calling H_CEDE under pHyp). * * You call this function with interrupts soft-disabled (this is * already the case when ppc_md.power_save is called). The function * will return whether to enter power save or just return. * * In the former case, it will have notified lockdep of interrupts * being re-enabled and generally sanitized the lazy irq state, * and in the latter case it will leave with interrupts hard * disabled and marked as such, so the local_irq_enable() call * in arch_cpu_idle() will properly re-enable everything. */ bool prep_irq_for_idle(void) { /* * First we need to hard disable to ensure no interrupt * occurs before we effectively enter the low power state */ hard_irq_disable(); /* * If anything happened while we were soft-disabled, * we return now and do not enter the low power state. */ if (lazy_irq_pending()) return false; /* Tell lockdep we are about to re-enable */ trace_hardirqs_on(); /* * Mark interrupts as soft-enabled and clear the * PACA_IRQ_HARD_DIS from the pending mask since we * are about to hard enable as well as a side effect * of entering the low power state. 
*/ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; local_paca->soft_enabled = 1; /* Tell the caller to enter the low power state */ return true; } #endif /* CONFIG_PPC64 */ int arch_show_interrupts(struct seq_file *p, int prec) { int j; #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) if (tau_initialized) { seq_printf(p, "%*s: ", prec, "TAU"); for_each_online_cpu(j) seq_printf(p, "%10u ", tau_interrupts(j)); seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); } #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */ seq_printf(p, "%*s: ", prec, "LOC"); for_each_online_cpu(j) seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event); seq_printf(p, " Local timer interrupts for timer event device\n"); seq_printf(p, "%*s: ", prec, "LOC"); for_each_online_cpu(j) seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others); seq_printf(p, " Local timer interrupts for others\n"); seq_printf(p, "%*s: ", prec, "SPU"); for_each_online_cpu(j) seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); seq_printf(p, " Spurious interrupts\n"); seq_printf(p, "%*s: ", prec, "PMI"); for_each_online_cpu(j) seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); seq_printf(p, " Performance monitoring interrupts\n"); seq_printf(p, "%*s: ", prec, "MCE"); for_each_online_cpu(j) seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); seq_printf(p, " Machine check exceptions\n"); if (cpu_has_feature(CPU_FTR_HVMODE)) { seq_printf(p, "%*s: ", prec, "HMI"); for_each_online_cpu(j) seq_printf(p, "%10u ", per_cpu(irq_stat, j).hmi_exceptions); seq_printf(p, " Hypervisor Maintenance Interrupts\n"); } #ifdef CONFIG_PPC_DOORBELL if (cpu_has_feature(CPU_FTR_DBELL)) { seq_printf(p, "%*s: ", prec, "DBL"); for_each_online_cpu(j) seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs); seq_printf(p, " Doorbell interrupts\n"); } #endif return 0; } /* * /proc/stat helpers */ u64 arch_irq_stat_cpu(unsigned int cpu) { u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event; sum += per_cpu(irq_stat, 
cpu).pmu_irqs; sum += per_cpu(irq_stat, cpu).mce_exceptions; sum += per_cpu(irq_stat, cpu).spurious_irqs; sum += per_cpu(irq_stat, cpu).timer_irqs_others; sum += per_cpu(irq_stat, cpu).hmi_exceptions; #ifdef CONFIG_PPC_DOORBELL sum += per_cpu(irq_stat, cpu).doorbell_irqs; #endif return sum; } #ifdef CONFIG_HOTPLUG_CPU void migrate_irqs(void) { struct irq_desc *desc; unsigned int irq; static int warned; cpumask_var_t mask; const struct cpumask *map = cpu_online_mask; alloc_cpumask_var(&mask, GFP_KERNEL); for_each_irq_desc(irq, desc) { struct irq_data *data; struct irq_chip *chip; data = irq_desc_get_irq_data(desc); if (irqd_is_per_cpu(data)) continue; chip = irq_data_get_irq_chip(data); cpumask_and(mask, data->affinity, map); if (cpumask_any(mask) >= nr_cpu_ids) { pr_warn("Breaking affinity for irq %i\n", irq); cpumask_copy(mask, map); } if (chip->irq_set_affinity) chip->irq_set_affinity(data, mask, true); else if (desc->action && !(warned++)) pr_err("Cannot set affinity for irq %i\n", irq); } free_cpumask_var(mask); local_irq_enable(); mdelay(1); local_irq_disable(); } #endif static inline void check_stack_overflow(void) { #ifdef CONFIG_DEBUG_STACKOVERFLOW long sp; sp = current_stack_pointer() & (THREAD_SIZE-1); /* check for stack overflow: is there less than 2KB free? */ if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { pr_err("do_IRQ: stack overflow: %ld\n", sp - sizeof(struct thread_info)); dump_stack(); } #endif } void __do_irq(struct pt_regs *regs) { unsigned int irq; irq_enter(); trace_irq_entry(regs); check_stack_overflow(); /* * Query the platform PIC for the interrupt & ack it. 
* * This will typically lower the interrupt line to the CPU */ irq = ppc_md.get_irq(); /* We can hard enable interrupts now to allow perf interrupts */ may_hard_irq_enable(); /* And finally process it */ if (unlikely(irq == NO_IRQ)) __this_cpu_inc(irq_stat.spurious_irqs); else generic_handle_irq(irq); trace_irq_exit(regs); irq_exit(); } void do_IRQ(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); struct thread_info *curtp, *irqtp, *sirqtp; /* Switch to the irq stack to handle this */ curtp = current_thread_info(); irqtp = hardirq_ctx[raw_smp_processor_id()]; sirqtp = softirq_ctx[raw_smp_processor_id()]; /* Already there ? */ if (unlikely(curtp == irqtp || curtp == sirqtp)) { __do_irq(regs); set_irq_regs(old_regs); return; } /* Prepare the thread_info in the irq stack */ irqtp->task = curtp->task; irqtp->flags = 0; /* Copy the preempt_count so that the [soft]irq checks work. */ irqtp->preempt_count = curtp->preempt_count; /* Switch stack and call */ call_do_irq(regs, irqtp); /* Restore stack limit */ irqtp->task = NULL; /* Copy back updates to the thread_info */ if (irqtp->flags) set_bits(irqtp->flags, &curtp->flags); set_irq_regs(old_regs); } void __init init_IRQ(void) { if (ppc_md.init_IRQ) ppc_md.init_IRQ(); exc_lvl_ctx_init(); irq_ctx_init(); } #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) struct thread_info *critirq_ctx[NR_CPUS] __read_mostly; struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly; struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; void exc_lvl_ctx_init(void) { struct thread_info *tp; int i, cpu_nr; for_each_possible_cpu(i) { #ifdef CONFIG_PPC64 cpu_nr = i; #else #ifdef CONFIG_SMP cpu_nr = get_hard_smp_processor_id(i); #else cpu_nr = 0; #endif #endif memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); tp = critirq_ctx[cpu_nr]; tp->cpu = cpu_nr; tp->preempt_count = 0; #ifdef CONFIG_BOOKE memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE); tp = dbgirq_ctx[cpu_nr]; tp->cpu = cpu_nr; tp->preempt_count = 0; 
memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE); tp = mcheckirq_ctx[cpu_nr]; tp->cpu = cpu_nr; tp->preempt_count = HARDIRQ_OFFSET; #endif } } #endif struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly; void irq_ctx_init(void) { struct thread_info *tp; int i; for_each_possible_cpu(i) { memset((void *)softirq_ctx[i], 0, THREAD_SIZE); tp = softirq_ctx[i]; tp->cpu = i; memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); tp = hardirq_ctx[i]; tp->cpu = i; } } void do_softirq_own_stack(void) { struct thread_info *curtp, *irqtp; curtp = current_thread_info(); irqtp = softirq_ctx[smp_processor_id()]; irqtp->task = curtp->task; irqtp->flags = 0; call_do_softirq(irqtp); irqtp->task = NULL; /* Set any flag that may have been set on the * alternate stack */ if (irqtp->flags) set_bits(irqtp->flags, &curtp->flags); } irq_hw_number_t virq_to_hw(unsigned int virq) { struct irq_data *irq_data = irq_get_irq_data(virq); return WARN_ON(!irq_data) ? 0 : irq_data->hwirq; } EXPORT_SYMBOL_GPL(virq_to_hw); #ifdef CONFIG_SMP int irq_choose_cpu(const struct cpumask *mask) { int cpuid; if (cpumask_equal(mask, cpu_online_mask)) { static int irq_rover; static DEFINE_RAW_SPINLOCK(irq_rover_lock); unsigned long flags; /* Round-robin distribution... 
*/ do_round_robin: raw_spin_lock_irqsave(&irq_rover_lock, flags); irq_rover = cpumask_next(irq_rover, cpu_online_mask); if (irq_rover >= nr_cpu_ids) irq_rover = cpumask_first(cpu_online_mask); cpuid = irq_rover; raw_spin_unlock_irqrestore(&irq_rover_lock, flags); } else { cpuid = cpumask_first_and(mask, cpu_online_mask); if (cpuid >= nr_cpu_ids) goto do_round_robin; } return get_hard_smp_processor_id(cpuid); } #else int irq_choose_cpu(const struct cpumask *mask) { return hard_smp_processor_id(); } #endif int arch_early_irq_init(void) { return 0; } #ifdef CONFIG_PPC64 static int __init setup_noirqdistrib(char *str) { distribute_irqs = 0; return 1; } __setup("noirqdistrib", setup_noirqdistrib); #endif /* CONFIG_PPC64 */
gpl-2.0
andreamerello/linux-stm32
drivers/clk/imx/clk-imx21.c
935
9390
/* * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright 2008 Juergen Beisert, kernel@pengutronix.de * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. */ #include <linux/clk-provider.h> #include <linux/clkdev.h> #include <linux/of.h> #include <linux/of_address.h> #include <dt-bindings/clock/imx21-clock.h> #include <soc/imx/timer.h> #include <asm/irq.h> #include "clk.h" #define MX21_CCM_BASE_ADDR 0x10027000 #define MX21_GPT1_BASE_ADDR 0x10003000 #define MX21_INT_GPT1 (NR_IRQS_LEGACY + 26) static void __iomem *ccm __initdata; /* Register offsets */ #define CCM_CSCR (ccm + 0x00) #define CCM_MPCTL0 (ccm + 0x04) #define CCM_SPCTL0 (ccm + 0x0c) #define CCM_PCDR0 (ccm + 0x18) #define CCM_PCDR1 (ccm + 0x1c) #define CCM_PCCR0 (ccm + 0x20) #define CCM_PCCR1 (ccm + 0x24) static const char *mpll_osc_sel_clks[] = { "ckih_gate", "ckih_div1p5", }; static const char *mpll_sel_clks[] = { "fpm_gate", "mpll_osc_sel", }; static const char *spll_sel_clks[] = { "fpm_gate", "mpll_osc_sel", }; static const char *ssi_sel_clks[] = { "spll_gate", "mpll_gate", }; static struct clk *clk[IMX21_CLK_MAX]; static struct clk_onecell_data clk_data; static void __init _mx21_clocks_init(unsigned long lref, unsigned long href) { BUG_ON(!ccm); clk[IMX21_CLK_DUMMY] = imx_clk_fixed("dummy", 0); clk[IMX21_CLK_CKIL] = imx_obtain_fixed_clock("ckil", lref); clk[IMX21_CLK_CKIH] = imx_obtain_fixed_clock("ckih", href); clk[IMX21_CLK_FPM] = imx_clk_fixed_factor("fpm", "ckil", 512, 1); clk[IMX21_CLK_CKIH_DIV1P5] = imx_clk_fixed_factor("ckih_div1p5", "ckih_gate", 2, 3); clk[IMX21_CLK_MPLL_GATE] = imx_clk_gate("mpll_gate", "mpll", CCM_CSCR, 0); clk[IMX21_CLK_SPLL_GATE] = imx_clk_gate("spll_gate", "spll", CCM_CSCR, 1); 
clk[IMX21_CLK_FPM_GATE] = imx_clk_gate("fpm_gate", "fpm", CCM_CSCR, 2); clk[IMX21_CLK_CKIH_GATE] = imx_clk_gate_dis("ckih_gate", "ckih", CCM_CSCR, 3); clk[IMX21_CLK_MPLL_OSC_SEL] = imx_clk_mux("mpll_osc_sel", CCM_CSCR, 4, 1, mpll_osc_sel_clks, ARRAY_SIZE(mpll_osc_sel_clks)); clk[IMX21_CLK_IPG] = imx_clk_divider("ipg", "hclk", CCM_CSCR, 9, 1); clk[IMX21_CLK_HCLK] = imx_clk_divider("hclk", "fclk", CCM_CSCR, 10, 4); clk[IMX21_CLK_MPLL_SEL] = imx_clk_mux("mpll_sel", CCM_CSCR, 16, 1, mpll_sel_clks, ARRAY_SIZE(mpll_sel_clks)); clk[IMX21_CLK_SPLL_SEL] = imx_clk_mux("spll_sel", CCM_CSCR, 17, 1, spll_sel_clks, ARRAY_SIZE(spll_sel_clks)); clk[IMX21_CLK_SSI1_SEL] = imx_clk_mux("ssi1_sel", CCM_CSCR, 19, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks)); clk[IMX21_CLK_SSI2_SEL] = imx_clk_mux("ssi2_sel", CCM_CSCR, 20, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks)); clk[IMX21_CLK_USB_DIV] = imx_clk_divider("usb_div", "spll_gate", CCM_CSCR, 26, 3); clk[IMX21_CLK_FCLK] = imx_clk_divider("fclk", "mpll_gate", CCM_CSCR, 29, 3); clk[IMX21_CLK_MPLL] = imx_clk_pllv1(IMX_PLLV1_IMX21, "mpll", "mpll_sel", CCM_MPCTL0); clk[IMX21_CLK_SPLL] = imx_clk_pllv1(IMX_PLLV1_IMX21, "spll", "spll_sel", CCM_SPCTL0); clk[IMX21_CLK_NFC_DIV] = imx_clk_divider("nfc_div", "fclk", CCM_PCDR0, 12, 4); clk[IMX21_CLK_SSI1_DIV] = imx_clk_divider("ssi1_div", "ssi1_sel", CCM_PCDR0, 16, 6); clk[IMX21_CLK_SSI2_DIV] = imx_clk_divider("ssi2_div", "ssi2_sel", CCM_PCDR0, 26, 6); clk[IMX21_CLK_PER1] = imx_clk_divider("per1", "mpll_gate", CCM_PCDR1, 0, 6); clk[IMX21_CLK_PER2] = imx_clk_divider("per2", "mpll_gate", CCM_PCDR1, 8, 6); clk[IMX21_CLK_PER3] = imx_clk_divider("per3", "mpll_gate", CCM_PCDR1, 16, 6); clk[IMX21_CLK_PER4] = imx_clk_divider("per4", "mpll_gate", CCM_PCDR1, 24, 6); clk[IMX21_CLK_UART1_IPG_GATE] = imx_clk_gate("uart1_ipg_gate", "ipg", CCM_PCCR0, 0); clk[IMX21_CLK_UART2_IPG_GATE] = imx_clk_gate("uart2_ipg_gate", "ipg", CCM_PCCR0, 1); clk[IMX21_CLK_UART3_IPG_GATE] = imx_clk_gate("uart3_ipg_gate", "ipg", CCM_PCCR0, 2); 
clk[IMX21_CLK_UART4_IPG_GATE] = imx_clk_gate("uart4_ipg_gate", "ipg", CCM_PCCR0, 3); clk[IMX21_CLK_CSPI1_IPG_GATE] = imx_clk_gate("cspi1_ipg_gate", "ipg", CCM_PCCR0, 4); clk[IMX21_CLK_CSPI2_IPG_GATE] = imx_clk_gate("cspi2_ipg_gate", "ipg", CCM_PCCR0, 5); clk[IMX21_CLK_SSI1_GATE] = imx_clk_gate("ssi1_gate", "ipg", CCM_PCCR0, 6); clk[IMX21_CLK_SSI2_GATE] = imx_clk_gate("ssi2_gate", "ipg", CCM_PCCR0, 7); clk[IMX21_CLK_SDHC1_IPG_GATE] = imx_clk_gate("sdhc1_ipg_gate", "ipg", CCM_PCCR0, 9); clk[IMX21_CLK_SDHC2_IPG_GATE] = imx_clk_gate("sdhc2_ipg_gate", "ipg", CCM_PCCR0, 10); clk[IMX21_CLK_GPIO_GATE] = imx_clk_gate("gpio_gate", "ipg", CCM_PCCR0, 11); clk[IMX21_CLK_I2C_GATE] = imx_clk_gate("i2c_gate", "ipg", CCM_PCCR0, 12); clk[IMX21_CLK_DMA_GATE] = imx_clk_gate("dma_gate", "ipg", CCM_PCCR0, 13); clk[IMX21_CLK_USB_GATE] = imx_clk_gate("usb_gate", "usb_div", CCM_PCCR0, 14); clk[IMX21_CLK_EMMA_GATE] = imx_clk_gate("emma_gate", "ipg", CCM_PCCR0, 15); clk[IMX21_CLK_SSI2_BAUD_GATE] = imx_clk_gate("ssi2_baud_gate", "ipg", CCM_PCCR0, 16); clk[IMX21_CLK_SSI1_BAUD_GATE] = imx_clk_gate("ssi1_baud_gate", "ipg", CCM_PCCR0, 17); clk[IMX21_CLK_LCDC_IPG_GATE] = imx_clk_gate("lcdc_ipg_gate", "ipg", CCM_PCCR0, 18); clk[IMX21_CLK_NFC_GATE] = imx_clk_gate("nfc_gate", "nfc_div", CCM_PCCR0, 19); clk[IMX21_CLK_SLCDC_HCLK_GATE] = imx_clk_gate("slcdc_hclk_gate", "hclk", CCM_PCCR0, 21); clk[IMX21_CLK_PER4_GATE] = imx_clk_gate("per4_gate", "per4", CCM_PCCR0, 22); clk[IMX21_CLK_BMI_GATE] = imx_clk_gate("bmi_gate", "hclk", CCM_PCCR0, 23); clk[IMX21_CLK_USB_HCLK_GATE] = imx_clk_gate("usb_hclk_gate", "hclk", CCM_PCCR0, 24); clk[IMX21_CLK_SLCDC_GATE] = imx_clk_gate("slcdc_gate", "hclk", CCM_PCCR0, 25); clk[IMX21_CLK_LCDC_HCLK_GATE] = imx_clk_gate("lcdc_hclk_gate", "hclk", CCM_PCCR0, 26); clk[IMX21_CLK_EMMA_HCLK_GATE] = imx_clk_gate("emma_hclk_gate", "hclk", CCM_PCCR0, 27); clk[IMX21_CLK_BROM_GATE] = imx_clk_gate("brom_gate", "hclk", CCM_PCCR0, 28); clk[IMX21_CLK_DMA_HCLK_GATE] = 
imx_clk_gate("dma_hclk_gate", "hclk", CCM_PCCR0, 30); clk[IMX21_CLK_CSI_HCLK_GATE] = imx_clk_gate("csi_hclk_gate", "hclk", CCM_PCCR0, 31); clk[IMX21_CLK_CSPI3_IPG_GATE] = imx_clk_gate("cspi3_ipg_gate", "ipg", CCM_PCCR1, 23); clk[IMX21_CLK_WDOG_GATE] = imx_clk_gate("wdog_gate", "ipg", CCM_PCCR1, 24); clk[IMX21_CLK_GPT1_IPG_GATE] = imx_clk_gate("gpt1_ipg_gate", "ipg", CCM_PCCR1, 25); clk[IMX21_CLK_GPT2_IPG_GATE] = imx_clk_gate("gpt2_ipg_gate", "ipg", CCM_PCCR1, 26); clk[IMX21_CLK_GPT3_IPG_GATE] = imx_clk_gate("gpt3_ipg_gate", "ipg", CCM_PCCR1, 27); clk[IMX21_CLK_PWM_IPG_GATE] = imx_clk_gate("pwm_ipg_gate", "ipg", CCM_PCCR1, 28); clk[IMX21_CLK_RTC_GATE] = imx_clk_gate("rtc_gate", "ipg", CCM_PCCR1, 29); clk[IMX21_CLK_KPP_GATE] = imx_clk_gate("kpp_gate", "ipg", CCM_PCCR1, 30); clk[IMX21_CLK_OWIRE_GATE] = imx_clk_gate("owire_gate", "ipg", CCM_PCCR1, 31); imx_check_clocks(clk, ARRAY_SIZE(clk)); } int __init mx21_clocks_init(unsigned long lref, unsigned long href) { ccm = ioremap(MX21_CCM_BASE_ADDR, SZ_2K); _mx21_clocks_init(lref, href); clk_register_clkdev(clk[IMX21_CLK_PER1], "per", "imx21-uart.0"); clk_register_clkdev(clk[IMX21_CLK_UART1_IPG_GATE], "ipg", "imx21-uart.0"); clk_register_clkdev(clk[IMX21_CLK_PER1], "per", "imx21-uart.1"); clk_register_clkdev(clk[IMX21_CLK_UART2_IPG_GATE], "ipg", "imx21-uart.1"); clk_register_clkdev(clk[IMX21_CLK_PER1], "per", "imx21-uart.2"); clk_register_clkdev(clk[IMX21_CLK_UART3_IPG_GATE], "ipg", "imx21-uart.2"); clk_register_clkdev(clk[IMX21_CLK_PER1], "per", "imx21-uart.3"); clk_register_clkdev(clk[IMX21_CLK_UART4_IPG_GATE], "ipg", "imx21-uart.3"); clk_register_clkdev(clk[IMX21_CLK_GPT1_IPG_GATE], "ipg", "imx-gpt.0"); clk_register_clkdev(clk[IMX21_CLK_PER1], "per", "imx-gpt.0"); clk_register_clkdev(clk[IMX21_CLK_PER2], "per", "imx21-cspi.0"); clk_register_clkdev(clk[IMX21_CLK_CSPI1_IPG_GATE], "ipg", "imx21-cspi.0"); clk_register_clkdev(clk[IMX21_CLK_PER2], "per", "imx21-cspi.1"); clk_register_clkdev(clk[IMX21_CLK_CSPI2_IPG_GATE], 
"ipg", "imx21-cspi.1"); clk_register_clkdev(clk[IMX21_CLK_PER2], "per", "imx21-cspi.2"); clk_register_clkdev(clk[IMX21_CLK_CSPI3_IPG_GATE], "ipg", "imx21-cspi.2"); clk_register_clkdev(clk[IMX21_CLK_PER3], "per", "imx21-fb.0"); clk_register_clkdev(clk[IMX21_CLK_LCDC_IPG_GATE], "ipg", "imx21-fb.0"); clk_register_clkdev(clk[IMX21_CLK_LCDC_HCLK_GATE], "ahb", "imx21-fb.0"); clk_register_clkdev(clk[IMX21_CLK_USB_GATE], "per", "imx21-hcd.0"); clk_register_clkdev(clk[IMX21_CLK_USB_HCLK_GATE], "ahb", "imx21-hcd.0"); clk_register_clkdev(clk[IMX21_CLK_NFC_GATE], NULL, "imx21-nand.0"); clk_register_clkdev(clk[IMX21_CLK_DMA_HCLK_GATE], "ahb", "imx21-dma"); clk_register_clkdev(clk[IMX21_CLK_DMA_GATE], "ipg", "imx21-dma"); clk_register_clkdev(clk[IMX21_CLK_WDOG_GATE], NULL, "imx2-wdt.0"); clk_register_clkdev(clk[IMX21_CLK_I2C_GATE], NULL, "imx21-i2c.0"); clk_register_clkdev(clk[IMX21_CLK_OWIRE_GATE], NULL, "mxc_w1.0"); mxc_timer_init(MX21_GPT1_BASE_ADDR, MX21_INT_GPT1, GPT_TYPE_IMX21); return 0; } static void __init mx21_clocks_init_dt(struct device_node *np) { ccm = of_iomap(np, 0); _mx21_clocks_init(32768, 26000000); clk_data.clks = clk; clk_data.clk_num = ARRAY_SIZE(clk); of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); } CLK_OF_DECLARE(imx27_ccm, "fsl,imx21-ccm", mx21_clocks_init_dt);
gpl-2.0
Kernel-Saram/LG-SU760-Kernel
net/802/stp.c
935
2651
/*
 *	STP SAP demux
 *
 *	Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	version 2 as published by the Free Software Foundation.
 */
#include <linux/mutex.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/llc.h>
#include <linux/slab.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/stp.h>

/* 01:80:c2:00:00:20 - 01:80:c2:00:00:2F */
#define GARP_ADDR_MIN	0x20
#define GARP_ADDR_MAX	0x2F
#define GARP_ADDR_RANGE	(GARP_ADDR_MAX - GARP_ADDR_MIN)

/*
 * Demux tables, read under rcu_read_lock() from stp_pdu_rcv():
 * garp_protos[] is indexed by the last octet of the GARP group address
 * minus GARP_ADDR_MIN; stp_proto handles plain STP (all-zero address).
 */
static const struct stp_proto *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
static const struct stp_proto *stp_proto __read_mostly;

static struct llc_sap *sap __read_mostly;	/* shared LLC SAP for BSPAN */
static unsigned int sap_registered;		/* refcount for 'sap' */
static DEFINE_MUTEX(stp_proto_mutex);		/* serializes (un)register */

/* Called under rcu_read_lock from LLC */
static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	const struct ethhdr *eh = eth_hdr(skb);
	const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
	const struct stp_proto *proto;

	/* only BSPAN U-frames are of interest */
	if (pdu->ssap != LLC_SAP_BSPAN ||
	    pdu->dsap != LLC_SAP_BSPAN ||
	    pdu->ctrl_1 != LLC_PDU_TYPE_U)
		goto err;

	if (eh->h_dest[5] >= GARP_ADDR_MIN && eh->h_dest[5] <= GARP_ADDR_MAX) {
		proto = rcu_dereference(garp_protos[eh->h_dest[5] -
						    GARP_ADDR_MIN]);
		/* nonzero compare_ether_addr() means mismatch: drop */
		if (proto &&
		    compare_ether_addr(eh->h_dest, proto->group_address))
			goto err;
	} else
		proto = rcu_dereference(stp_proto);

	if (!proto)
		goto err;

	proto->rcv(proto, skb, dev);
	return 0;

err:
	kfree_skb(skb);
	return 0;
}

/*
 * Register an STP/GARP protocol handler. The first successful caller
 * opens the shared BSPAN LLC SAP; later callers just reuse it.
 * Returns 0 on success or -ENOMEM if the SAP could not be opened.
 */
int stp_proto_register(const struct stp_proto *proto)
{
	int err = 0;

	mutex_lock(&stp_proto_mutex);
	if (sap_registered++ == 0) {
		sap = llc_sap_open(LLC_SAP_BSPAN, stp_pdu_rcv);
		if (!sap) {
			/*
			 * Undo the increment: otherwise the next caller
			 * would see sap_registered != 0, skip
			 * llc_sap_open() and "succeed" with a NULL sap,
			 * which stp_proto_unregister() would then hand
			 * to llc_sap_put().
			 */
			sap_registered--;
			err = -ENOMEM;
			goto out;
		}
	}
	if (is_zero_ether_addr(proto->group_address))
		rcu_assign_pointer(stp_proto, proto);
	else
		rcu_assign_pointer(garp_protos[proto->group_address[5] -
					       GARP_ADDR_MIN], proto);
out:
	mutex_unlock(&stp_proto_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(stp_proto_register);

/*
 * Unregister a handler registered with stp_proto_register(). Waits for
 * in-flight RCU readers before returning, so the caller may free
 * 'proto' afterwards; the last unregister releases the shared SAP.
 */
void stp_proto_unregister(const struct stp_proto *proto)
{
	mutex_lock(&stp_proto_mutex);
	if (is_zero_ether_addr(proto->group_address))
		rcu_assign_pointer(stp_proto, NULL);
	else
		rcu_assign_pointer(garp_protos[proto->group_address[5] -
					       GARP_ADDR_MIN], NULL);
	synchronize_rcu();

	if (--sap_registered == 0)
		llc_sap_put(sap);
	mutex_unlock(&stp_proto_mutex);
}
EXPORT_SYMBOL_GPL(stp_proto_unregister);

MODULE_LICENSE("GPL");
gpl-2.0
jiangliu/linux
drivers/media/dvb-frontends/rtl2830.c
935
17155
/* * Realtek RTL2830 DVB-T demodulator driver * * Copyright (C) 2011 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* * Driver implements own I2C-adapter for tuner I2C access. That's since chip * have unusual I2C-gate control which closes gate automatically after each * I2C transfer. Using own I2C adapter we can workaround that. 
*/ #include "rtl2830_priv.h" /* Max transfer size done by I2C transfer functions */ #define MAX_XFER_SIZE 64 /* write multiple hardware registers */ static int rtl2830_wr(struct rtl2830_priv *priv, u8 reg, const u8 *val, int len) { int ret; u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg.i2c_addr, .flags = 0, .len = 1 + len, .buf = buf, } }; if (1 + len > sizeof(buf)) { dev_warn(&priv->i2c->dev, "%s: i2c wr reg=%04x: len=%d is too big!\n", KBUILD_MODNAME, reg, len); return -EINVAL; } buf[0] = reg; memcpy(&buf[1], val, len); ret = i2c_transfer(priv->i2c, msg, 1); if (ret == 1) { ret = 0; } else { dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d reg=%02x " \ "len=%d\n", KBUILD_MODNAME, ret, reg, len); ret = -EREMOTEIO; } return ret; } /* read multiple hardware registers */ static int rtl2830_rd(struct rtl2830_priv *priv, u8 reg, u8 *val, int len) { int ret; struct i2c_msg msg[2] = { { .addr = priv->cfg.i2c_addr, .flags = 0, .len = 1, .buf = &reg, }, { .addr = priv->cfg.i2c_addr, .flags = I2C_M_RD, .len = len, .buf = val, } }; ret = i2c_transfer(priv->i2c, msg, 2); if (ret == 2) { ret = 0; } else { dev_warn(&priv->i2c->dev, "%s: i2c rd failed=%d reg=%02x " \ "len=%d\n", KBUILD_MODNAME, ret, reg, len); ret = -EREMOTEIO; } return ret; } /* write multiple registers */ static int rtl2830_wr_regs(struct rtl2830_priv *priv, u16 reg, const u8 *val, int len) { int ret; u8 reg2 = (reg >> 0) & 0xff; u8 page = (reg >> 8) & 0xff; /* switch bank if needed */ if (page != priv->page) { ret = rtl2830_wr(priv, 0x00, &page, 1); if (ret) return ret; priv->page = page; } return rtl2830_wr(priv, reg2, val, len); } /* read multiple registers */ static int rtl2830_rd_regs(struct rtl2830_priv *priv, u16 reg, u8 *val, int len) { int ret; u8 reg2 = (reg >> 0) & 0xff; u8 page = (reg >> 8) & 0xff; /* switch bank if needed */ if (page != priv->page) { ret = rtl2830_wr(priv, 0x00, &page, 1); if (ret) return ret; priv->page = page; } return rtl2830_rd(priv, reg2, val, len); } /* 
read single register */ static int rtl2830_rd_reg(struct rtl2830_priv *priv, u16 reg, u8 *val) { return rtl2830_rd_regs(priv, reg, val, 1); } /* write single register with mask */ static int rtl2830_wr_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 val, u8 mask) { int ret; u8 tmp; /* no need for read if whole reg is written */ if (mask != 0xff) { ret = rtl2830_rd_regs(priv, reg, &tmp, 1); if (ret) return ret; val &= mask; tmp &= ~mask; val |= tmp; } return rtl2830_wr_regs(priv, reg, &val, 1); } /* read single register with mask */ static int rtl2830_rd_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 *val, u8 mask) { int ret, i; u8 tmp; ret = rtl2830_rd_regs(priv, reg, &tmp, 1); if (ret) return ret; tmp &= mask; /* find position of the first bit */ for (i = 0; i < 8; i++) { if ((mask >> i) & 0x01) break; } *val = tmp >> i; return 0; } static int rtl2830_init(struct dvb_frontend *fe) { struct rtl2830_priv *priv = fe->demodulator_priv; int ret, i; struct rtl2830_reg_val_mask tab[] = { { 0x00d, 0x01, 0x03 }, { 0x00d, 0x10, 0x10 }, { 0x104, 0x00, 0x1e }, { 0x105, 0x80, 0x80 }, { 0x110, 0x02, 0x03 }, { 0x110, 0x08, 0x0c }, { 0x17b, 0x00, 0x40 }, { 0x17d, 0x05, 0x0f }, { 0x17d, 0x50, 0xf0 }, { 0x18c, 0x08, 0x0f }, { 0x18d, 0x00, 0xc0 }, { 0x188, 0x05, 0x0f }, { 0x189, 0x00, 0xfc }, { 0x2d5, 0x02, 0x02 }, { 0x2f1, 0x02, 0x06 }, { 0x2f1, 0x20, 0xf8 }, { 0x16d, 0x00, 0x01 }, { 0x1a6, 0x00, 0x80 }, { 0x106, priv->cfg.vtop, 0x3f }, { 0x107, priv->cfg.krf, 0x3f }, { 0x112, 0x28, 0xff }, { 0x103, priv->cfg.agc_targ_val, 0xff }, { 0x00a, 0x02, 0x07 }, { 0x140, 0x0c, 0x3c }, { 0x140, 0x40, 0xc0 }, { 0x15b, 0x05, 0x07 }, { 0x15b, 0x28, 0x38 }, { 0x15c, 0x05, 0x07 }, { 0x15c, 0x28, 0x38 }, { 0x115, priv->cfg.spec_inv, 0x01 }, { 0x16f, 0x01, 0x07 }, { 0x170, 0x18, 0x38 }, { 0x172, 0x0f, 0x0f }, { 0x173, 0x08, 0x38 }, { 0x175, 0x01, 0x07 }, { 0x176, 0x00, 0xc0 }, }; for (i = 0; i < ARRAY_SIZE(tab); i++) { ret = rtl2830_wr_reg_mask(priv, tab[i].reg, tab[i].val, tab[i].mask); if (ret) 
goto err; } ret = rtl2830_wr_regs(priv, 0x18f, "\x28\x00", 2); if (ret) goto err; ret = rtl2830_wr_regs(priv, 0x195, "\x04\x06\x0a\x12\x0a\x12\x1e\x28", 8); if (ret) goto err; /* TODO: spec init */ /* soft reset */ ret = rtl2830_wr_reg_mask(priv, 0x101, 0x04, 0x04); if (ret) goto err; ret = rtl2830_wr_reg_mask(priv, 0x101, 0x00, 0x04); if (ret) goto err; priv->sleeping = false; return ret; err: dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); return ret; } static int rtl2830_sleep(struct dvb_frontend *fe) { struct rtl2830_priv *priv = fe->demodulator_priv; priv->sleeping = true; return 0; } static int rtl2830_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *s) { s->min_delay_ms = 500; s->step_size = fe->ops.info.frequency_stepsize * 2; s->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1; return 0; } static int rtl2830_set_frontend(struct dvb_frontend *fe) { struct rtl2830_priv *priv = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret, i; u64 num; u8 buf[3], tmp; u32 if_ctl, if_frequency; static const u8 bw_params1[3][34] = { { 0x1f, 0xf0, 0x1f, 0xf0, 0x1f, 0xfa, 0x00, 0x17, 0x00, 0x41, 0x00, 0x64, 0x00, 0x67, 0x00, 0x38, 0x1f, 0xde, 0x1f, 0x7a, 0x1f, 0x47, 0x1f, 0x7c, 0x00, 0x30, 0x01, 0x4b, 0x02, 0x82, 0x03, 0x73, 0x03, 0xcf, /* 6 MHz */ }, { 0x1f, 0xfa, 0x1f, 0xda, 0x1f, 0xc1, 0x1f, 0xb3, 0x1f, 0xca, 0x00, 0x07, 0x00, 0x4d, 0x00, 0x6d, 0x00, 0x40, 0x1f, 0xca, 0x1f, 0x4d, 0x1f, 0x2a, 0x1f, 0xb2, 0x00, 0xec, 0x02, 0x7e, 0x03, 0xd0, 0x04, 0x53, /* 7 MHz */ }, { 0x00, 0x10, 0x00, 0x0e, 0x1f, 0xf7, 0x1f, 0xc9, 0x1f, 0xa0, 0x1f, 0xa6, 0x1f, 0xec, 0x00, 0x4e, 0x00, 0x7d, 0x00, 0x3a, 0x1f, 0x98, 0x1f, 0x10, 0x1f, 0x40, 0x00, 0x75, 0x02, 0x5f, 0x04, 0x24, 0x04, 0xdb, /* 8 MHz */ }, }; static const u8 bw_params2[3][6] = { {0xc3, 0x0c, 0x44, 0x33, 0x33, 0x30}, /* 6 MHz */ {0xb8, 0xe3, 0x93, 0x99, 0x99, 0x98}, /* 7 MHz */ {0xae, 0xba, 0xf3, 0x26, 0x66, 0x64}, /* 8 MHz */ }; 
dev_dbg(&priv->i2c->dev, "%s: frequency=%d bandwidth_hz=%d inversion=%d\n", __func__, c->frequency, c->bandwidth_hz, c->inversion); /* program tuner */ if (fe->ops.tuner_ops.set_params) fe->ops.tuner_ops.set_params(fe); switch (c->bandwidth_hz) { case 6000000: i = 0; break; case 7000000: i = 1; break; case 8000000: i = 2; break; default: dev_dbg(&priv->i2c->dev, "%s: invalid bandwidth\n", __func__); return -EINVAL; } ret = rtl2830_wr_reg_mask(priv, 0x008, i << 1, 0x06); if (ret) goto err; /* program if frequency */ if (fe->ops.tuner_ops.get_if_frequency) ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency); else ret = -EINVAL; if (ret < 0) goto err; num = if_frequency % priv->cfg.xtal; num *= 0x400000; num = div_u64(num, priv->cfg.xtal); num = -num; if_ctl = num & 0x3fffff; dev_dbg(&priv->i2c->dev, "%s: if_frequency=%d if_ctl=%08x\n", __func__, if_frequency, if_ctl); ret = rtl2830_rd_reg_mask(priv, 0x119, &tmp, 0xc0); /* b[7:6] */ if (ret) goto err; buf[0] = tmp << 6; buf[0] |= (if_ctl >> 16) & 0x3f; buf[1] = (if_ctl >> 8) & 0xff; buf[2] = (if_ctl >> 0) & 0xff; ret = rtl2830_wr_regs(priv, 0x119, buf, 3); if (ret) goto err; /* 1/2 split I2C write */ ret = rtl2830_wr_regs(priv, 0x11c, &bw_params1[i][0], 17); if (ret) goto err; /* 2/2 split I2C write */ ret = rtl2830_wr_regs(priv, 0x12d, &bw_params1[i][17], 17); if (ret) goto err; ret = rtl2830_wr_regs(priv, 0x19d, bw_params2[i], 6); if (ret) goto err; return ret; err: dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); return ret; } static int rtl2830_get_frontend(struct dvb_frontend *fe) { struct rtl2830_priv *priv = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret; u8 buf[3]; if (priv->sleeping) return 0; ret = rtl2830_rd_regs(priv, 0x33c, buf, 2); if (ret) goto err; ret = rtl2830_rd_reg(priv, 0x351, &buf[2]); if (ret) goto err; dev_dbg(&priv->i2c->dev, "%s: TPS=%*ph\n", __func__, 3, buf); switch ((buf[0] >> 2) & 3) { case 0: c->modulation = QPSK; break; case 
1: c->modulation = QAM_16; break; case 2: c->modulation = QAM_64; break; } switch ((buf[2] >> 2) & 1) { case 0: c->transmission_mode = TRANSMISSION_MODE_2K; break; case 1: c->transmission_mode = TRANSMISSION_MODE_8K; } switch ((buf[2] >> 0) & 3) { case 0: c->guard_interval = GUARD_INTERVAL_1_32; break; case 1: c->guard_interval = GUARD_INTERVAL_1_16; break; case 2: c->guard_interval = GUARD_INTERVAL_1_8; break; case 3: c->guard_interval = GUARD_INTERVAL_1_4; break; } switch ((buf[0] >> 4) & 7) { case 0: c->hierarchy = HIERARCHY_NONE; break; case 1: c->hierarchy = HIERARCHY_1; break; case 2: c->hierarchy = HIERARCHY_2; break; case 3: c->hierarchy = HIERARCHY_4; break; } switch ((buf[1] >> 3) & 7) { case 0: c->code_rate_HP = FEC_1_2; break; case 1: c->code_rate_HP = FEC_2_3; break; case 2: c->code_rate_HP = FEC_3_4; break; case 3: c->code_rate_HP = FEC_5_6; break; case 4: c->code_rate_HP = FEC_7_8; break; } switch ((buf[1] >> 0) & 7) { case 0: c->code_rate_LP = FEC_1_2; break; case 1: c->code_rate_LP = FEC_2_3; break; case 2: c->code_rate_LP = FEC_3_4; break; case 3: c->code_rate_LP = FEC_5_6; break; case 4: c->code_rate_LP = FEC_7_8; break; } return 0; err: dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); return ret; } static int rtl2830_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct rtl2830_priv *priv = fe->demodulator_priv; int ret; u8 tmp; *status = 0; if (priv->sleeping) return 0; ret = rtl2830_rd_reg_mask(priv, 0x351, &tmp, 0x78); /* [6:3] */ if (ret) goto err; if (tmp == 11) { *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; } else if (tmp == 10) { *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI; } return ret; err: dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); return ret; } static int rtl2830_read_snr(struct dvb_frontend *fe, u16 *snr) { struct rtl2830_priv *priv = fe->demodulator_priv; int ret, hierarchy, constellation; u8 buf[2], tmp; u16 tmp16; #define 
CONSTELLATION_NUM 3 #define HIERARCHY_NUM 4 static const u32 snr_constant[CONSTELLATION_NUM][HIERARCHY_NUM] = { { 70705899, 70705899, 70705899, 70705899 }, { 82433173, 82433173, 87483115, 94445660 }, { 92888734, 92888734, 95487525, 99770748 }, }; if (priv->sleeping) return 0; /* reports SNR in resolution of 0.1 dB */ ret = rtl2830_rd_reg(priv, 0x33c, &tmp); if (ret) goto err; constellation = (tmp >> 2) & 0x03; /* [3:2] */ if (constellation > CONSTELLATION_NUM - 1) goto err; hierarchy = (tmp >> 4) & 0x07; /* [6:4] */ if (hierarchy > HIERARCHY_NUM - 1) goto err; ret = rtl2830_rd_regs(priv, 0x40c, buf, 2); if (ret) goto err; tmp16 = buf[0] << 8 | buf[1]; if (tmp16) *snr = (snr_constant[constellation][hierarchy] - intlog10(tmp16)) / ((1 << 24) / 100); else *snr = 0; return 0; err: dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); return ret; } static int rtl2830_read_ber(struct dvb_frontend *fe, u32 *ber) { struct rtl2830_priv *priv = fe->demodulator_priv; int ret; u8 buf[2]; if (priv->sleeping) return 0; ret = rtl2830_rd_regs(priv, 0x34e, buf, 2); if (ret) goto err; *ber = buf[0] << 8 | buf[1]; return 0; err: dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); return ret; } static int rtl2830_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { *ucblocks = 0; return 0; } static int rtl2830_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct rtl2830_priv *priv = fe->demodulator_priv; int ret; u8 buf[2]; u16 if_agc_raw, if_agc; if (priv->sleeping) return 0; ret = rtl2830_rd_regs(priv, 0x359, buf, 2); if (ret) goto err; if_agc_raw = (buf[0] << 8 | buf[1]) & 0x3fff; if (if_agc_raw & (1 << 9)) if_agc = -(~(if_agc_raw - 1) & 0x1ff); else if_agc = if_agc_raw; *strength = (u8) (55 - if_agc / 182); *strength |= *strength << 8; return 0; err: dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); return ret; } static struct dvb_frontend_ops rtl2830_ops; static u32 rtl2830_tuner_i2c_func(struct i2c_adapter *adapter) { return 
I2C_FUNC_I2C; } static int rtl2830_tuner_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) { struct rtl2830_priv *priv = i2c_get_adapdata(i2c_adap); int ret; /* open i2c-gate */ ret = rtl2830_wr_reg_mask(priv, 0x101, 0x08, 0x08); if (ret) goto err; ret = i2c_transfer(priv->i2c, msg, num); if (ret < 0) dev_warn(&priv->i2c->dev, "%s: tuner i2c failed=%d\n", KBUILD_MODNAME, ret); return ret; err: dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); return ret; } static struct i2c_algorithm rtl2830_tuner_i2c_algo = { .master_xfer = rtl2830_tuner_i2c_xfer, .functionality = rtl2830_tuner_i2c_func, }; struct i2c_adapter *rtl2830_get_tuner_i2c_adapter(struct dvb_frontend *fe) { struct rtl2830_priv *priv = fe->demodulator_priv; return &priv->tuner_i2c_adapter; } EXPORT_SYMBOL(rtl2830_get_tuner_i2c_adapter); static void rtl2830_release(struct dvb_frontend *fe) { struct rtl2830_priv *priv = fe->demodulator_priv; i2c_del_adapter(&priv->tuner_i2c_adapter); kfree(priv); } struct dvb_frontend *rtl2830_attach(const struct rtl2830_config *cfg, struct i2c_adapter *i2c) { struct rtl2830_priv *priv = NULL; int ret = 0; u8 tmp; /* allocate memory for the internal state */ priv = kzalloc(sizeof(struct rtl2830_priv), GFP_KERNEL); if (priv == NULL) goto err; /* setup the priv */ priv->i2c = i2c; memcpy(&priv->cfg, cfg, sizeof(struct rtl2830_config)); /* check if the demod is there */ ret = rtl2830_rd_reg(priv, 0x000, &tmp); if (ret) goto err; /* create dvb_frontend */ memcpy(&priv->fe.ops, &rtl2830_ops, sizeof(struct dvb_frontend_ops)); priv->fe.demodulator_priv = priv; /* create tuner i2c adapter */ strlcpy(priv->tuner_i2c_adapter.name, "RTL2830 tuner I2C adapter", sizeof(priv->tuner_i2c_adapter.name)); priv->tuner_i2c_adapter.algo = &rtl2830_tuner_i2c_algo; priv->tuner_i2c_adapter.algo_data = NULL; priv->tuner_i2c_adapter.dev.parent = &i2c->dev; i2c_set_adapdata(&priv->tuner_i2c_adapter, priv); if (i2c_add_adapter(&priv->tuner_i2c_adapter) < 0) { 
dev_err(&i2c->dev, "%s: tuner i2c bus could not be initialized\n", KBUILD_MODNAME); goto err; } priv->sleeping = true; return &priv->fe; err: dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret); kfree(priv); return NULL; } EXPORT_SYMBOL(rtl2830_attach); static struct dvb_frontend_ops rtl2830_ops = { .delsys = { SYS_DVBT }, .info = { .name = "Realtek RTL2830 (DVB-T)", .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER | FE_CAN_MUTE_TS }, .release = rtl2830_release, .init = rtl2830_init, .sleep = rtl2830_sleep, .get_tune_settings = rtl2830_get_tune_settings, .set_frontend = rtl2830_set_frontend, .get_frontend = rtl2830_get_frontend, .read_status = rtl2830_read_status, .read_snr = rtl2830_read_snr, .read_ber = rtl2830_read_ber, .read_ucblocks = rtl2830_read_ucblocks, .read_signal_strength = rtl2830_read_signal_strength, }; MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("Realtek RTL2830 DVB-T demodulator driver"); MODULE_LICENSE("GPL");
gpl-2.0
greguu/linux-4.4-cxx00
fs/reiserfs/item_ops.c
1703
18472
/* * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README */ #include <linux/time.h> #include "reiserfs.h" /* * this contains item handlers for old item types: sd, direct, * indirect, directory */ /* * and where are the comments? how about saying where we can find an * explanation of each item handler method? -Hans */ /* stat data functions */ static int sd_bytes_number(struct item_head *ih, int block_size) { return 0; } static void sd_decrement_key(struct cpu_key *key) { key->on_disk_key.k_objectid--; set_cpu_key_k_type(key, TYPE_ANY); set_cpu_key_k_offset(key, (loff_t)(~0ULL >> 1)); } static int sd_is_left_mergeable(struct reiserfs_key *key, unsigned long bsize) { return 0; } static char *print_time(time_t t) { static char timebuf[256]; sprintf(timebuf, "%ld", t); return timebuf; } static void sd_print_item(struct item_head *ih, char *item) { printk("\tmode | size | nlinks | first direct | mtime\n"); if (stat_data_v1(ih)) { struct stat_data_v1 *sd = (struct stat_data_v1 *)item; printk("\t0%-6o | %6u | %2u | %d | %s\n", sd_v1_mode(sd), sd_v1_size(sd), sd_v1_nlink(sd), sd_v1_first_direct_byte(sd), print_time(sd_v1_mtime(sd))); } else { struct stat_data *sd = (struct stat_data *)item; printk("\t0%-6o | %6llu | %2u | %d | %s\n", sd_v2_mode(sd), (unsigned long long)sd_v2_size(sd), sd_v2_nlink(sd), sd_v2_rdev(sd), print_time(sd_v2_mtime(sd))); } } static void sd_check_item(struct item_head *ih, char *item) { /* unused */ } static int sd_create_vi(struct virtual_node *vn, struct virtual_item *vi, int is_affected, int insert_size) { vi->vi_index = TYPE_STAT_DATA; return 0; } static int sd_check_left(struct virtual_item *vi, int free, int start_skip, int end_skip) { BUG_ON(start_skip || end_skip); return -1; } static int sd_check_right(struct virtual_item *vi, int free) { return -1; } static int sd_part_size(struct virtual_item *vi, int first, int count) { BUG_ON(count); return 0; } static int sd_unit_num(struct virtual_item *vi) { return vi->vi_item_len - 
IH_SIZE; } static void sd_print_vi(struct virtual_item *vi) { reiserfs_warning(NULL, "reiserfs-16100", "STATDATA, index %d, type 0x%x, %h", vi->vi_index, vi->vi_type, vi->vi_ih); } static struct item_operations stat_data_ops = { .bytes_number = sd_bytes_number, .decrement_key = sd_decrement_key, .is_left_mergeable = sd_is_left_mergeable, .print_item = sd_print_item, .check_item = sd_check_item, .create_vi = sd_create_vi, .check_left = sd_check_left, .check_right = sd_check_right, .part_size = sd_part_size, .unit_num = sd_unit_num, .print_vi = sd_print_vi }; /* direct item functions */ static int direct_bytes_number(struct item_head *ih, int block_size) { return ih_item_len(ih); } /* FIXME: this should probably switch to indirect as well */ static void direct_decrement_key(struct cpu_key *key) { cpu_key_k_offset_dec(key); if (cpu_key_k_offset(key) == 0) set_cpu_key_k_type(key, TYPE_STAT_DATA); } static int direct_is_left_mergeable(struct reiserfs_key *key, unsigned long bsize) { int version = le_key_version(key); return ((le_key_k_offset(version, key) & (bsize - 1)) != 1); } static void direct_print_item(struct item_head *ih, char *item) { int j = 0; /* return; */ printk("\""); while (j < ih_item_len(ih)) printk("%c", item[j++]); printk("\"\n"); } static void direct_check_item(struct item_head *ih, char *item) { /* unused */ } static int direct_create_vi(struct virtual_node *vn, struct virtual_item *vi, int is_affected, int insert_size) { vi->vi_index = TYPE_DIRECT; return 0; } static int direct_check_left(struct virtual_item *vi, int free, int start_skip, int end_skip) { int bytes; bytes = free - free % 8; return bytes ? 
: -1; } static int direct_check_right(struct virtual_item *vi, int free) { return direct_check_left(vi, free, 0, 0); } static int direct_part_size(struct virtual_item *vi, int first, int count) { return count; } static int direct_unit_num(struct virtual_item *vi) { return vi->vi_item_len - IH_SIZE; } static void direct_print_vi(struct virtual_item *vi) { reiserfs_warning(NULL, "reiserfs-16101", "DIRECT, index %d, type 0x%x, %h", vi->vi_index, vi->vi_type, vi->vi_ih); } static struct item_operations direct_ops = { .bytes_number = direct_bytes_number, .decrement_key = direct_decrement_key, .is_left_mergeable = direct_is_left_mergeable, .print_item = direct_print_item, .check_item = direct_check_item, .create_vi = direct_create_vi, .check_left = direct_check_left, .check_right = direct_check_right, .part_size = direct_part_size, .unit_num = direct_unit_num, .print_vi = direct_print_vi }; /* indirect item functions */ static int indirect_bytes_number(struct item_head *ih, int block_size) { return ih_item_len(ih) / UNFM_P_SIZE * block_size; } /* decrease offset, if it becomes 0, change type to stat data */ static void indirect_decrement_key(struct cpu_key *key) { cpu_key_k_offset_dec(key); if (cpu_key_k_offset(key) == 0) set_cpu_key_k_type(key, TYPE_STAT_DATA); } /* if it is not first item of the body, then it is mergeable */ static int indirect_is_left_mergeable(struct reiserfs_key *key, unsigned long bsize) { int version = le_key_version(key); return (le_key_k_offset(version, key) != 1); } /* printing of indirect item */ static void start_new_sequence(__u32 * start, int *len, __u32 new) { *start = new; *len = 1; } static int sequence_finished(__u32 start, int *len, __u32 new) { if (start == INT_MAX) return 1; if (start == 0 && new == 0) { (*len)++; return 0; } if (start != 0 && (start + *len) == new) { (*len)++; return 0; } return 1; } static void print_sequence(__u32 start, int len) { if (start == INT_MAX) return; if (len == 1) printk(" %d", start); else printk(" 
%d(%d)", start, len); } static void indirect_print_item(struct item_head *ih, char *item) { int j; __le32 *unp; __u32 prev = INT_MAX; int num = 0; unp = (__le32 *) item; if (ih_item_len(ih) % UNFM_P_SIZE) reiserfs_warning(NULL, "reiserfs-16102", "invalid item len"); printk("%d pointers\n[ ", (int)I_UNFM_NUM(ih)); for (j = 0; j < I_UNFM_NUM(ih); j++) { if (sequence_finished(prev, &num, get_block_num(unp, j))) { print_sequence(prev, num); start_new_sequence(&prev, &num, get_block_num(unp, j)); } } print_sequence(prev, num); printk("]\n"); } static void indirect_check_item(struct item_head *ih, char *item) { /* unused */ } static int indirect_create_vi(struct virtual_node *vn, struct virtual_item *vi, int is_affected, int insert_size) { vi->vi_index = TYPE_INDIRECT; return 0; } static int indirect_check_left(struct virtual_item *vi, int free, int start_skip, int end_skip) { int bytes; bytes = free - free % UNFM_P_SIZE; return bytes ? : -1; } static int indirect_check_right(struct virtual_item *vi, int free) { return indirect_check_left(vi, free, 0, 0); } /* * return size in bytes of 'units' units. 
If first == 0 - calculate * from the head (left), otherwise - from tail (right) */ static int indirect_part_size(struct virtual_item *vi, int first, int units) { /* unit of indirect item is byte (yet) */ return units; } static int indirect_unit_num(struct virtual_item *vi) { /* unit of indirect item is byte (yet) */ return vi->vi_item_len - IH_SIZE; } static void indirect_print_vi(struct virtual_item *vi) { reiserfs_warning(NULL, "reiserfs-16103", "INDIRECT, index %d, type 0x%x, %h", vi->vi_index, vi->vi_type, vi->vi_ih); } static struct item_operations indirect_ops = { .bytes_number = indirect_bytes_number, .decrement_key = indirect_decrement_key, .is_left_mergeable = indirect_is_left_mergeable, .print_item = indirect_print_item, .check_item = indirect_check_item, .create_vi = indirect_create_vi, .check_left = indirect_check_left, .check_right = indirect_check_right, .part_size = indirect_part_size, .unit_num = indirect_unit_num, .print_vi = indirect_print_vi }; /* direntry functions */ static int direntry_bytes_number(struct item_head *ih, int block_size) { reiserfs_warning(NULL, "vs-16090", "bytes number is asked for direntry"); return 0; } static void direntry_decrement_key(struct cpu_key *key) { cpu_key_k_offset_dec(key); if (cpu_key_k_offset(key) == 0) set_cpu_key_k_type(key, TYPE_STAT_DATA); } static int direntry_is_left_mergeable(struct reiserfs_key *key, unsigned long bsize) { if (le32_to_cpu(key->u.k_offset_v1.k_offset) == DOT_OFFSET) return 0; return 1; } static void direntry_print_item(struct item_head *ih, char *item) { int i; int namelen; struct reiserfs_de_head *deh; char *name; static char namebuf[80]; printk("\n # %-15s%-30s%-15s%-15s%-15s\n", "Name", "Key of pointed object", "Hash", "Gen number", "Status"); deh = (struct reiserfs_de_head *)item; for (i = 0; i < ih_entry_count(ih); i++, deh++) { namelen = (i ? 
(deh_location(deh - 1)) : ih_item_len(ih)) - deh_location(deh); name = item + deh_location(deh); if (name[namelen - 1] == 0) namelen = strlen(name); namebuf[0] = '"'; if (namelen > sizeof(namebuf) - 3) { strncpy(namebuf + 1, name, sizeof(namebuf) - 3); namebuf[sizeof(namebuf) - 2] = '"'; namebuf[sizeof(namebuf) - 1] = 0; } else { memcpy(namebuf + 1, name, namelen); namebuf[namelen + 1] = '"'; namebuf[namelen + 2] = 0; } printk("%d: %-15s%-15d%-15d%-15lld%-15lld(%s)\n", i, namebuf, deh_dir_id(deh), deh_objectid(deh), GET_HASH_VALUE(deh_offset(deh)), GET_GENERATION_NUMBER((deh_offset(deh))), (de_hidden(deh)) ? "HIDDEN" : "VISIBLE"); } } static void direntry_check_item(struct item_head *ih, char *item) { int i; struct reiserfs_de_head *deh; /* unused */ deh = (struct reiserfs_de_head *)item; for (i = 0; i < ih_entry_count(ih); i++, deh++) { ; } } #define DIRENTRY_VI_FIRST_DIRENTRY_ITEM 1 /* * function returns old entry number in directory item in real node * using new entry number in virtual item in virtual node */ static inline int old_entry_num(int is_affected, int virtual_entry_num, int pos_in_item, int mode) { if (mode == M_INSERT || mode == M_DELETE) return virtual_entry_num; if (!is_affected) /* cut or paste is applied to another item */ return virtual_entry_num; if (virtual_entry_num < pos_in_item) return virtual_entry_num; if (mode == M_CUT) return virtual_entry_num + 1; RFALSE(mode != M_PASTE || virtual_entry_num == 0, "vs-8015: old_entry_num: mode must be M_PASTE (mode = \'%c\'", mode); return virtual_entry_num - 1; } /* * Create an array of sizes of directory entries for virtual * item. Return space used by an item. 
FIXME: no control over * consuming of space used by this item handler */ static int direntry_create_vi(struct virtual_node *vn, struct virtual_item *vi, int is_affected, int insert_size) { struct direntry_uarea *dir_u = vi->vi_uarea; int i, j; int size = sizeof(struct direntry_uarea); struct reiserfs_de_head *deh; vi->vi_index = TYPE_DIRENTRY; BUG_ON(!(vi->vi_ih) || !vi->vi_item); dir_u->flags = 0; if (le_ih_k_offset(vi->vi_ih) == DOT_OFFSET) dir_u->flags |= DIRENTRY_VI_FIRST_DIRENTRY_ITEM; deh = (struct reiserfs_de_head *)(vi->vi_item); /* virtual directory item have this amount of entry after */ dir_u->entry_count = ih_entry_count(vi->vi_ih) + ((is_affected) ? ((vn->vn_mode == M_CUT) ? -1 : (vn->vn_mode == M_PASTE ? 1 : 0)) : 0); for (i = 0; i < dir_u->entry_count; i++) { j = old_entry_num(is_affected, i, vn->vn_pos_in_item, vn->vn_mode); dir_u->entry_sizes[i] = (j ? deh_location(&deh[j - 1]) : ih_item_len(vi->vi_ih)) - deh_location(&deh[j]) + DEH_SIZE; } size += (dir_u->entry_count * sizeof(short)); /* set size of pasted entry */ if (is_affected && vn->vn_mode == M_PASTE) dir_u->entry_sizes[vn->vn_pos_in_item] = insert_size; #ifdef CONFIG_REISERFS_CHECK /* compare total size of entries with item length */ { int k, l; l = 0; for (k = 0; k < dir_u->entry_count; k++) l += dir_u->entry_sizes[k]; if (l + IH_SIZE != vi->vi_item_len + ((is_affected && (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT)) ? 
insert_size : 0)) { reiserfs_panic(NULL, "vs-8025", "(mode==%c, " "insert_size==%d), invalid length of " "directory item", vn->vn_mode, insert_size); } } #endif return size; } /* * return number of entries which may fit into specified amount of * free space, or -1 if free space is not enough even for 1 entry */ static int direntry_check_left(struct virtual_item *vi, int free, int start_skip, int end_skip) { int i; int entries = 0; struct direntry_uarea *dir_u = vi->vi_uarea; for (i = start_skip; i < dir_u->entry_count - end_skip; i++) { /* i-th entry doesn't fit into the remaining free space */ if (dir_u->entry_sizes[i] > free) break; free -= dir_u->entry_sizes[i]; entries++; } if (entries == dir_u->entry_count) { reiserfs_panic(NULL, "item_ops-1", "free space %d, entry_count %d", free, dir_u->entry_count); } /* "." and ".." can not be separated from each other */ if (start_skip == 0 && (dir_u->flags & DIRENTRY_VI_FIRST_DIRENTRY_ITEM) && entries < 2) entries = 0; return entries ? : -1; } static int direntry_check_right(struct virtual_item *vi, int free) { int i; int entries = 0; struct direntry_uarea *dir_u = vi->vi_uarea; for (i = dir_u->entry_count - 1; i >= 0; i--) { /* i-th entry doesn't fit into the remaining free space */ if (dir_u->entry_sizes[i] > free) break; free -= dir_u->entry_sizes[i]; entries++; } BUG_ON(entries == dir_u->entry_count); /* "." and ".." can not be separated from each other */ if ((dir_u->flags & DIRENTRY_VI_FIRST_DIRENTRY_ITEM) && entries > dir_u->entry_count - 2) entries = dir_u->entry_count - 2; return entries ? 
: -1; } /* sum of entry sizes between from-th and to-th entries including both edges */ static int direntry_part_size(struct virtual_item *vi, int first, int count) { int i, retval; int from, to; struct direntry_uarea *dir_u = vi->vi_uarea; retval = 0; if (first == 0) from = 0; else from = dir_u->entry_count - count; to = from + count - 1; for (i = from; i <= to; i++) retval += dir_u->entry_sizes[i]; return retval; } static int direntry_unit_num(struct virtual_item *vi) { struct direntry_uarea *dir_u = vi->vi_uarea; return dir_u->entry_count; } static void direntry_print_vi(struct virtual_item *vi) { int i; struct direntry_uarea *dir_u = vi->vi_uarea; reiserfs_warning(NULL, "reiserfs-16104", "DIRENTRY, index %d, type 0x%x, %h, flags 0x%x", vi->vi_index, vi->vi_type, vi->vi_ih, dir_u->flags); printk("%d entries: ", dir_u->entry_count); for (i = 0; i < dir_u->entry_count; i++) printk("%d ", dir_u->entry_sizes[i]); printk("\n"); } static struct item_operations direntry_ops = { .bytes_number = direntry_bytes_number, .decrement_key = direntry_decrement_key, .is_left_mergeable = direntry_is_left_mergeable, .print_item = direntry_print_item, .check_item = direntry_check_item, .create_vi = direntry_create_vi, .check_left = direntry_check_left, .check_right = direntry_check_right, .part_size = direntry_part_size, .unit_num = direntry_unit_num, .print_vi = direntry_print_vi }; /* Error catching functions to catch errors caused by incorrect item types. 
*/ static int errcatch_bytes_number(struct item_head *ih, int block_size) { reiserfs_warning(NULL, "green-16001", "Invalid item type observed, run fsck ASAP"); return 0; } static void errcatch_decrement_key(struct cpu_key *key) { reiserfs_warning(NULL, "green-16002", "Invalid item type observed, run fsck ASAP"); } static int errcatch_is_left_mergeable(struct reiserfs_key *key, unsigned long bsize) { reiserfs_warning(NULL, "green-16003", "Invalid item type observed, run fsck ASAP"); return 0; } static void errcatch_print_item(struct item_head *ih, char *item) { reiserfs_warning(NULL, "green-16004", "Invalid item type observed, run fsck ASAP"); } static void errcatch_check_item(struct item_head *ih, char *item) { reiserfs_warning(NULL, "green-16005", "Invalid item type observed, run fsck ASAP"); } static int errcatch_create_vi(struct virtual_node *vn, struct virtual_item *vi, int is_affected, int insert_size) { reiserfs_warning(NULL, "green-16006", "Invalid item type observed, run fsck ASAP"); /* * We might return -1 here as well, but it won't help as * create_virtual_node() from where this operation is called * from is of return type void. 
*/ return 0; } static int errcatch_check_left(struct virtual_item *vi, int free, int start_skip, int end_skip) { reiserfs_warning(NULL, "green-16007", "Invalid item type observed, run fsck ASAP"); return -1; } static int errcatch_check_right(struct virtual_item *vi, int free) { reiserfs_warning(NULL, "green-16008", "Invalid item type observed, run fsck ASAP"); return -1; } static int errcatch_part_size(struct virtual_item *vi, int first, int count) { reiserfs_warning(NULL, "green-16009", "Invalid item type observed, run fsck ASAP"); return 0; } static int errcatch_unit_num(struct virtual_item *vi) { reiserfs_warning(NULL, "green-16010", "Invalid item type observed, run fsck ASAP"); return 0; } static void errcatch_print_vi(struct virtual_item *vi) { reiserfs_warning(NULL, "green-16011", "Invalid item type observed, run fsck ASAP"); } static struct item_operations errcatch_ops = { errcatch_bytes_number, errcatch_decrement_key, errcatch_is_left_mergeable, errcatch_print_item, errcatch_check_item, errcatch_create_vi, errcatch_check_left, errcatch_check_right, errcatch_part_size, errcatch_unit_num, errcatch_print_vi }; #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3) #error Item types must use disk-format assigned values. #endif struct item_operations *item_ops[TYPE_ANY + 1] = { &stat_data_ops, &indirect_ops, &direct_ops, &direntry_ops, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &errcatch_ops /* This is to catch errors with invalid type (15th entry for TYPE_ANY) */ };
gpl-2.0
MattCrystal/oneXL
drivers/gpu/drm/i915/i915_suspend.c
2215
29640
/* * * Copyright 2008 (c) Intel Corporation * Jesse Barnes <jbarnes@virtuousgeek.org> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "drmP.h" #include "drm.h" #include "i915_drm.h" #include "intel_drv.h" #include "i915_reg.h" static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; u32 dpll_reg; /* On IVB, 3rd pipe shares PLL with another one */ if (pipe > 1) return false; if (HAS_PCH_SPLIT(dev)) dpll_reg = PCH_DPLL(pipe); else dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); } static void i915_save_palette(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); u32 *array; int i; if (!i915_pipe_enabled(dev, pipe)) return; if (HAS_PCH_SPLIT(dev)) reg = (pipe == PIPE_A) ? 
_LGC_PALETTE_A : _LGC_PALETTE_B; if (pipe == PIPE_A) array = dev_priv->save_palette_a; else array = dev_priv->save_palette_b; for (i = 0; i < 256; i++) array[i] = I915_READ(reg + (i << 2)); } static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); u32 *array; int i; if (!i915_pipe_enabled(dev, pipe)) return; if (HAS_PCH_SPLIT(dev)) reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; if (pipe == PIPE_A) array = dev_priv->save_palette_a; else array = dev_priv->save_palette_b; for (i = 0; i < 256; i++) I915_WRITE(reg + (i << 2), array[i]); } static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg) { struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE8(index_port, reg); return I915_READ8(data_port); } static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable) { struct drm_i915_private *dev_priv = dev->dev_private; I915_READ8(st01); I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); return I915_READ8(VGA_AR_DATA_READ); } static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable) { struct drm_i915_private *dev_priv = dev->dev_private; I915_READ8(st01); I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); I915_WRITE8(VGA_AR_DATA_WRITE, val); } static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val) { struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE8(index_port, reg); I915_WRITE8(data_port, val); } static void i915_save_vga(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; u16 cr_index, cr_data, st01; /* VGA color palette registers */ dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); /* MSR bits */ dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { cr_index = VGA_CR_INDEX_CGA; cr_data = 
VGA_CR_DATA_CGA; st01 = VGA_ST01_CGA; } else { cr_index = VGA_CR_INDEX_MDA; cr_data = VGA_CR_DATA_MDA; st01 = VGA_ST01_MDA; } /* CRT controller regs */ i915_write_indexed(dev, cr_index, cr_data, 0x11, i915_read_indexed(dev, cr_index, cr_data, 0x11) & (~0x80)); for (i = 0; i <= 0x24; i++) dev_priv->saveCR[i] = i915_read_indexed(dev, cr_index, cr_data, i); /* Make sure we don't turn off CR group 0 writes */ dev_priv->saveCR[0x11] &= ~0x80; /* Attribute controller registers */ I915_READ8(st01); dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX); for (i = 0; i <= 0x14; i++) dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0); I915_READ8(st01); I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX); I915_READ8(st01); /* Graphics controller registers */ for (i = 0; i < 9; i++) dev_priv->saveGR[i] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); dev_priv->saveGR[0x10] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); dev_priv->saveGR[0x11] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); dev_priv->saveGR[0x18] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); /* Sequencer registers */ for (i = 0; i < 8; i++) dev_priv->saveSR[i] = i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); } static void i915_restore_vga(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; u16 cr_index, cr_data, st01; /* MSR bits */ I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR); if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { cr_index = VGA_CR_INDEX_CGA; cr_data = VGA_CR_DATA_CGA; st01 = VGA_ST01_CGA; } else { cr_index = VGA_CR_INDEX_MDA; cr_data = VGA_CR_DATA_MDA; st01 = VGA_ST01_MDA; } /* Sequencer registers, don't write SR07 */ for (i = 0; i < 7; i++) i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, dev_priv->saveSR[i]); /* CRT controller regs */ /* Enable CR group 0 writes */ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); for (i = 0; i <= 0x24; i++) i915_write_indexed(dev, cr_index, cr_data, i, 
dev_priv->saveCR[i]); /* Graphics controller regs */ for (i = 0; i < 9; i++) i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, dev_priv->saveGR[i]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, dev_priv->saveGR[0x10]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, dev_priv->saveGR[0x11]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, dev_priv->saveGR[0x18]); /* Attribute controller registers */ I915_READ8(st01); /* switch back to index mode */ for (i = 0; i <= 0x14; i++) i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0); I915_READ8(st01); /* switch back to index mode */ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20); I915_READ8(st01); /* VGA color palette registers */ I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); } static void i915_save_modeset_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; if (drm_core_check_feature(dev, DRIVER_MODESET)) return; /* Cursor state */ dev_priv->saveCURACNTR = I915_READ(_CURACNTR); dev_priv->saveCURAPOS = I915_READ(_CURAPOS); dev_priv->saveCURABASE = I915_READ(_CURABASE); dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR); dev_priv->saveCURBPOS = I915_READ(_CURBPOS); dev_priv->saveCURBBASE = I915_READ(_CURBBASE); if (IS_GEN2(dev)) dev_priv->saveCURSIZE = I915_READ(CURSIZE); if (HAS_PCH_SPLIT(dev)) { dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); } /* Pipe & plane A info */ dev_priv->savePIPEACONF = I915_READ(_PIPEACONF); dev_priv->savePIPEASRC = I915_READ(_PIPEASRC); if (HAS_PCH_SPLIT(dev)) { dev_priv->saveFPA0 = I915_READ(_PCH_FPA0); dev_priv->saveFPA1 = I915_READ(_PCH_FPA1); dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A); } else { dev_priv->saveFPA0 = I915_READ(_FPA0); dev_priv->saveFPA1 = I915_READ(_FPA1); dev_priv->saveDPLL_A = I915_READ(_DPLL_A); } if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD); dev_priv->saveHTOTAL_A = 
I915_READ(_HTOTAL_A); dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A); dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A); dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A); dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A); dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A); if (!HAS_PCH_SPLIT(dev)) dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A); if (HAS_PCH_SPLIT(dev)) { dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1); dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF); dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); } dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR); dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE); dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE); dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS); dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR); if (INTEL_INFO(dev)->gen >= 4) { dev_priv->saveDSPASURF = I915_READ(_DSPASURF); dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF); } i915_save_palette(dev, PIPE_A); dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT); /* Pipe & plane B info */ dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF); dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC); if (HAS_PCH_SPLIT(dev)) { dev_priv->saveFPB0 = I915_READ(_PCH_FPB0); dev_priv->saveFPB1 = I915_READ(_PCH_FPB1); dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B); } else { 
dev_priv->saveFPB0 = I915_READ(_FPB0); dev_priv->saveFPB1 = I915_READ(_FPB1); dev_priv->saveDPLL_B = I915_READ(_DPLL_B); } if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD); dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B); dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B); dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B); dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B); dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B); dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B); if (!HAS_PCH_SPLIT(dev)) dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B); if (HAS_PCH_SPLIT(dev)) { dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1); dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF); dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); } dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR); dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE); dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS); dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR); if (INTEL_INFO(dev)->gen >= 4) { dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF); dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); } i915_save_palette(dev, PIPE_B); dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT); /* Fences */ switch (INTEL_INFO(dev)->gen) { case 7: 
case 6: for (i = 0; i < 16; i++) dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); break; case 5: case 4: for (i = 0; i < 16; i++) dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); break; case 3: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); case 2: for (i = 0; i < 8; i++) dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); break; } return; } static void i915_restore_modeset_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int dpll_a_reg, fpa0_reg, fpa1_reg; int dpll_b_reg, fpb0_reg, fpb1_reg; int i; if (drm_core_check_feature(dev, DRIVER_MODESET)) return; /* Fences */ switch (INTEL_INFO(dev)->gen) { case 7: case 6: for (i = 0; i < 16; i++) I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); break; case 5: case 4: for (i = 0; i < 16; i++) I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); break; case 3: case 2: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); for (i = 0; i < 8; i++) I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); break; } if (HAS_PCH_SPLIT(dev)) { dpll_a_reg = _PCH_DPLL_A; dpll_b_reg = _PCH_DPLL_B; fpa0_reg = _PCH_FPA0; fpb0_reg = _PCH_FPB0; fpa1_reg = _PCH_FPA1; fpb1_reg = _PCH_FPB1; } else { dpll_a_reg = _DPLL_A; dpll_b_reg = _DPLL_B; fpa0_reg = _FPA0; fpb0_reg = _FPB0; fpa1_reg = _FPA1; fpb1_reg = _FPB1; } if (HAS_PCH_SPLIT(dev)) { I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); } /* Pipe & plane A info */ /* Prime the clock */ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & ~DPLL_VCO_ENABLE); POSTING_READ(dpll_a_reg); udelay(150); } I915_WRITE(fpa0_reg, dev_priv->saveFPA0); I915_WRITE(fpa1_reg, dev_priv->saveFPA1); /* 
Actually enable it */ I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); POSTING_READ(dpll_a_reg); udelay(150); if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD); POSTING_READ(_DPLL_A_MD); } udelay(150); /* Restore mode */ I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A); I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A); I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A); I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A); I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A); I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A); if (!HAS_PCH_SPLIT(dev)) I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1); I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS); I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF); I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); } /* Restore plane info */ I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE); I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS); I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC); I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR); I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE); if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF); I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF); } I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF); 
i915_restore_palette(dev, PIPE_A); /* Enable the plane */ I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR); I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); /* Pipe & plane B info */ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B & ~DPLL_VCO_ENABLE); POSTING_READ(dpll_b_reg); udelay(150); } I915_WRITE(fpb0_reg, dev_priv->saveFPB0); I915_WRITE(fpb1_reg, dev_priv->saveFPB1); /* Actually enable it */ I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); POSTING_READ(dpll_b_reg); udelay(150); if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD); POSTING_READ(_DPLL_B_MD); } udelay(150); /* Restore mode */ I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B); I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B); I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B); I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B); I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B); I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B); if (!HAS_PCH_SPLIT(dev)) I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1); I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1); I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS); I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF); I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); } /* Restore plane info */ 
I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE); I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS); I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC); I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR); I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF); I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); } I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF); i915_restore_palette(dev, PIPE_B); /* Enable the plane */ I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR); I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); /* Cursor state */ I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS); I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR); I915_WRITE(_CURABASE, dev_priv->saveCURABASE); I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS); I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR); I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE); if (IS_GEN2(dev)) I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); return; } static void i915_save_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; /* Display arbitration control */ dev_priv->saveDSPARB = I915_READ(DSPARB); /* This is only meaningful in non-KMS mode */ /* Don't save them in KMS mode */ i915_save_modeset_reg(dev); /* CRT state */ if (HAS_PCH_SPLIT(dev)) { dev_priv->saveADPA = I915_READ(PCH_ADPA); } else { dev_priv->saveADPA = I915_READ(ADPA); } /* LVDS state */ if (HAS_PCH_SPLIT(dev)) { dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); dev_priv->saveLVDS = I915_READ(PCH_LVDS); } else { dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); if (INTEL_INFO(dev)->gen >= 4) 
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); if (IS_MOBILE(dev) && !IS_I830(dev)) dev_priv->saveLVDS = I915_READ(LVDS); } if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); if (HAS_PCH_SPLIT(dev)) { dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); } else { dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); } /* Display Port state */ if (SUPPORTS_INTEGRATED_DP(dev)) { dev_priv->saveDP_B = I915_READ(DP_B); dev_priv->saveDP_C = I915_READ(DP_C); dev_priv->saveDP_D = I915_READ(DP_D); dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); } /* FIXME: save TV & SDVO state */ /* Only save FBC state on the platform that supports FBC */ if (I915_HAS_FBC(dev)) { if (HAS_PCH_SPLIT(dev)) { dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); } else if (IS_GM45(dev)) { dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); } else { dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); } } /* VGA state */ dev_priv->saveVGA0 = I915_READ(VGA0); dev_priv->saveVGA1 = I915_READ(VGA1); dev_priv->saveVGA_PD = I915_READ(VGA_PD); if (HAS_PCH_SPLIT(dev)) dev_priv->saveVGACNTRL = 
I915_READ(CPU_VGACNTRL); else dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); i915_save_vga(dev); } static void i915_restore_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; /* Display arbitration */ I915_WRITE(DSPARB, dev_priv->saveDSPARB); /* Display port ratios (must be done before clock is set) */ if (SUPPORTS_INTEGRATED_DP(dev)) { I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); } /* This is only meaningful in non-KMS mode */ /* Don't restore them in KMS mode */ i915_restore_modeset_reg(dev); /* CRT state */ if (HAS_PCH_SPLIT(dev)) I915_WRITE(PCH_ADPA, dev_priv->saveADPA); else I915_WRITE(ADPA, dev_priv->saveADPA); /* LVDS state */ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); } else if (IS_MOBILE(dev) && !IS_I830(dev)) I915_WRITE(LVDS, dev_priv->saveLVDS); if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; * otherwise we get blank eDP screen after S3 on some machines */ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2); I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); I915_WRITE(PCH_PP_OFF_DELAYS, 
dev_priv->savePP_OFF_DELAYS); I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); I915_WRITE(RSTDBYCTL, dev_priv->saveMCHBAR_RENDER_STANDBY); } else { I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL); I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); } /* Display Port state */ if (SUPPORTS_INTEGRATED_DP(dev)) { I915_WRITE(DP_B, dev_priv->saveDP_B); I915_WRITE(DP_C, dev_priv->saveDP_C); I915_WRITE(DP_D, dev_priv->saveDP_D); } /* FIXME: restore TV & SDVO state */ /* only restore FBC info on the platform that supports FBC*/ intel_disable_fbc(dev); if (I915_HAS_FBC(dev)) { if (HAS_PCH_SPLIT(dev)) { I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); } else if (IS_GM45(dev)) { I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); } else { I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); } } /* VGA state */ if (HAS_PCH_SPLIT(dev)) I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); else I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); I915_WRITE(VGA0, dev_priv->saveVGA0); I915_WRITE(VGA1, dev_priv->saveVGA1); I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); POSTING_READ(VGA_PD); udelay(150); i915_restore_vga(dev); } int i915_save_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); mutex_lock(&dev->struct_mutex); /* Hardware status page */ dev_priv->saveHWS = I915_READ(HWS_PGA); i915_save_display(dev); /* Interrupt state */ if (HAS_PCH_SPLIT(dev)) { dev_priv->saveDEIER = I915_READ(DEIER); 
dev_priv->saveDEIMR = I915_READ(DEIMR); dev_priv->saveGTIER = I915_READ(GTIER); dev_priv->saveGTIMR = I915_READ(GTIMR); dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); dev_priv->saveMCHBAR_RENDER_STANDBY = I915_READ(RSTDBYCTL); dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); } else { dev_priv->saveIER = I915_READ(IER); dev_priv->saveIMR = I915_READ(IMR); } if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); if (INTEL_INFO(dev)->gen >= 6) gen6_disable_rps(dev); /* Cache mode state */ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); /* Memory Arbitration state */ dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); /* Scratch space */ for (i = 0; i < 16; i++) { dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); } for (i = 0; i < 3; i++) dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); mutex_unlock(&dev->struct_mutex); return 0; } int i915_restore_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); mutex_lock(&dev->struct_mutex); /* Hardware status page */ I915_WRITE(HWS_PGA, dev_priv->saveHWS); i915_restore_display(dev); /* Interrupt state */ if (HAS_PCH_SPLIT(dev)) { I915_WRITE(DEIER, dev_priv->saveDEIER); I915_WRITE(DEIMR, dev_priv->saveDEIMR); I915_WRITE(GTIER, dev_priv->saveGTIER); I915_WRITE(GTIMR, dev_priv->saveGTIMR); I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG); } else { I915_WRITE(IER, dev_priv->saveIER); I915_WRITE(IMR, dev_priv->saveIMR); } mutex_unlock(&dev->struct_mutex); if (drm_core_check_feature(dev, DRIVER_MODESET)) intel_init_clock_gating(dev); if (IS_IRONLAKE_M(dev)) { ironlake_enable_drps(dev); intel_init_emon(dev); } if (INTEL_INFO(dev)->gen >= 6) { gen6_enable_rps(dev_priv); 
gen6_update_ring_freq(dev_priv); } mutex_lock(&dev->struct_mutex); /* Cache mode state */ I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); /* Memory arbitration state */ I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); for (i = 0; i < 16; i++) { I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); } for (i = 0; i < 3; i++) I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); mutex_unlock(&dev->struct_mutex); intel_i2c_reset(dev); return 0; }
gpl-2.0
AndroidGX/SimpleGX-L-5.0.2_BOD6_G901F
drivers/cdrom/cdrom.c
3495
100490
/* linux/drivers/cdrom/cdrom.c Copyright (c) 1996, 1997 David A. van Leeuwen. Copyright (c) 1997, 1998 Erik Andersen <andersee@debian.org> Copyright (c) 1998, 1999 Jens Axboe <axboe@image.dk> May be copied or modified under the terms of the GNU General Public License. See linux/COPYING for more information. Uniform CD-ROM driver for Linux. See Documentation/cdrom/cdrom-standard.tex for usage information. The routines in the file provide a uniform interface between the software that uses CD-ROMs and the various low-level drivers that actually talk to the hardware. Suggestions are welcome. Patches that work are more welcome though. ;-) To Do List: ---------------------------------- -- Modify sysctl/proc interface. I plan on having one directory per drive, with entries for outputing general drive information, and sysctl based tunable parameters such as whether the tray should auto-close for that drive. Suggestions (or patches) for this welcome! Revision History ---------------------------------- 1.00 Date Unknown -- David van Leeuwen <david@tm.tno.nl> -- Initial version by David A. van Leeuwen. I don't have a detailed changelog for the 1.x series, David? 2.00 Dec 2, 1997 -- Erik Andersen <andersee@debian.org> -- New maintainer! As David A. van Leeuwen has been too busy to actively maintain and improve this driver, I am now carrying on the torch. If you have a problem with this driver, please feel free to contact me. -- Added (rudimentary) sysctl interface. I realize this is really weak right now, and is _very_ badly implemented. It will be improved... -- Modified CDROM_DISC_STATUS so that it is now incorporated into the Uniform CD-ROM driver via the cdrom_count_tracks function. The cdrom_count_tracks function helps resolve some of the false assumptions of the CDROM_DISC_STATUS ioctl, and is also used to check for the correct media type when mounting or playing audio from a CD. 
-- Remove the calls to verify_area and only use the copy_from_user and copy_to_user stuff, since these calls now provide their own memory checking with the 2.1.x kernels. -- Major update to return codes so that errors from low-level drivers are passed on through (thanks to Gerd Knorr for pointing out this problem). -- Made it so if a function isn't implemented in a low-level driver, ENOSYS is now returned instead of EINVAL. -- Simplified some complex logic so that the source code is easier to read. -- Other stuff I probably forgot to mention (lots of changes). 2.01 to 2.11 Dec 1997-Jan 1998 -- TO-DO! Write changelogs for 2.01 to 2.12. 2.12 Jan 24, 1998 -- Erik Andersen <andersee@debian.org> -- Fixed a bug in the IOCTL_IN and IOCTL_OUT macros. It turns out that copy_*_user does not return EFAULT on error, but instead returns the number of bytes not copied. I was returning whatever non-zero stuff came back from the copy_*_user functions directly, which would result in strange errors. 2.13 July 17, 1998 -- Erik Andersen <andersee@debian.org> -- Fixed a bug in CDROM_SELECT_SPEED where you couldn't lower the speed of the drive. Thanks to Tobias Ringstr|m <tori@prosolvia.se> for pointing this out and providing a simple fix. -- Fixed the procfs-unload-module bug with the fill_inode procfs callback. thanks to Andrea Arcangeli -- Fixed it so that the /proc entry now also shows up when cdrom is compiled into the kernel. Before it only worked when loaded as a module. 2.14 August 17, 1998 -- Erik Andersen <andersee@debian.org> -- Fixed a bug in cdrom_media_changed and handling of reporting that the media had changed for devices that _don't_ implement media_changed. Thanks to Grant R. Guenther <grant@torque.net> for spotting this bug. -- Made a few things more pedanticly correct. 2.50 Oct 19, 1998 - Jens Axboe <axboe@image.dk> -- New maintainers! 
Erik was too busy to continue the work on the driver, so now Chris Zwilling <chris@cloudnet.com> and Jens Axboe <axboe@image.dk> will do their best to follow in his footsteps 2.51 Dec 20, 1998 - Jens Axboe <axboe@image.dk> -- Check if drive is capable of doing what we ask before blindly changing cdi->options in various ioctl. -- Added version to proc entry. 2.52 Jan 16, 1999 - Jens Axboe <axboe@image.dk> -- Fixed an error in open_for_data where we would sometimes not return the correct error value. Thanks Huba Gaspar <huba@softcell.hu>. -- Fixed module usage count - usage was based on /proc/sys/dev instead of /proc/sys/dev/cdrom. This could lead to an oops when other modules had entries in dev. Feb 02 - real bug was in sysctl.c where dev would be removed even though it was used. cdrom.c just illuminated that bug. 2.53 Feb 22, 1999 - Jens Axboe <axboe@image.dk> -- Fixup of several ioctl calls, in particular CDROM_SET_OPTIONS has been "rewritten" because capabilities and options aren't in sync. They should be... -- Added CDROM_LOCKDOOR ioctl. Locks the door and keeps it that way. -- Added CDROM_RESET ioctl. -- Added CDROM_DEBUG ioctl. Enable debug messages on-the-fly. -- Added CDROM_GET_CAPABILITY ioctl. This relieves userspace programs from parsing /proc/sys/dev/cdrom/info. 2.54 Mar 15, 1999 - Jens Axboe <axboe@image.dk> -- Check capability mask from low level driver when counting tracks as per suggestion from Corey J. Scotts <cstotts@blue.weeg.uiowa.edu>. 2.55 Apr 25, 1999 - Jens Axboe <axboe@image.dk> -- autoclose was mistakenly checked against CDC_OPEN_TRAY instead of CDC_CLOSE_TRAY. -- proc info didn't mask against capabilities mask. 3.00 Aug 5, 1999 - Jens Axboe <axboe@image.dk> -- Unified audio ioctl handling across CD-ROM drivers. A lot of the code was duplicated before. Drives that support the generic packet interface are now being fed packets from here instead. -- First attempt at adding support for MMC2 commands - for DVD and CD-R(W) drives. 
Only the DVD parts are in now - the interface used is the same as for the audio ioctls. -- ioctl cleanups. if a drive couldn't play audio, it didn't get a change to perform device specific ioctls as well. -- Defined CDROM_CAN(CDC_XXX) for checking the capabilities. -- Put in sysctl files for autoclose, autoeject, check_media, debug, and lock. -- /proc/sys/dev/cdrom/info has been updated to also contain info about CD-Rx and DVD capabilities. -- Now default to checking media type. -- CDROM_SEND_PACKET ioctl added. The infrastructure was in place for doing this anyway, with the generic_packet addition. 3.01 Aug 6, 1999 - Jens Axboe <axboe@image.dk> -- Fix up the sysctl handling so that the option flags get set correctly. -- Fix up ioctl handling so the device specific ones actually get called :). 3.02 Aug 8, 1999 - Jens Axboe <axboe@image.dk> -- Fixed volume control on SCSI drives (or others with longer audio page). -- Fixed a couple of DVD minors. Thanks to Andrew T. Veliath <andrewtv@usa.net> for telling me and for having defined the various DVD structures and ioctls in the first place! He designed the original DVD patches for ide-cd and while I rearranged and unified them, the interface is still the same. 3.03 Sep 1, 1999 - Jens Axboe <axboe@image.dk> -- Moved the rest of the audio ioctls from the CD-ROM drivers here. Only CDROMREADTOCENTRY and CDROMREADTOCHDR are left. -- Moved the CDROMREADxxx ioctls in here. -- Defined the cdrom_get_last_written and cdrom_get_next_block as ioctls and exported functions. -- Erik Andersen <andersen@xmission.com> modified all SCMD_ commands to now read GPCMD_ for the new generic packet interface. All low level drivers are updated as well. -- Various other cleanups. 3.04 Sep 12, 1999 - Jens Axboe <axboe@image.dk> -- Fixed a couple of possible memory leaks (if an operation failed and we didn't free the buffer before returning the error). -- Integrated Uniform CD Changer handling from Richard Sharman <rsharman@pobox.com>. 
-- Defined CD_DVD and CD_CHANGER log levels. -- Fixed the CDROMREADxxx ioctls. -- CDROMPLAYTRKIND uses the GPCMD_PLAY_AUDIO_MSF command - too few drives supported it. We lose the index part, however. -- Small modifications to accommodate opens of /dev/hdc1, required for ide-cd to handle multisession discs. -- Export cdrom_mode_sense and cdrom_mode_select. -- init_cdrom_command() for setting up a cgc command. 3.05 Oct 24, 1999 - Jens Axboe <axboe@image.dk> -- Changed the interface for CDROM_SEND_PACKET. Before it was virtually impossible to send the drive data in a sensible way. -- Lowered stack usage in mmc_ioctl(), dvd_read_disckey(), and dvd_read_manufact. -- Added setup of write mode for packet writing. -- Fixed CDDA ripping with cdda2wav - accept much larger requests of number of frames and split the reads in blocks of 8. 3.06 Dec 13, 1999 - Jens Axboe <axboe@image.dk> -- Added support for changing the region of DVD drives. -- Added sense data to generic command. 3.07 Feb 2, 2000 - Jens Axboe <axboe@suse.de> -- Do same "read header length" trick in cdrom_get_disc_info() as we do in cdrom_get_track_info() -- some drive don't obey specs and fail if they can't supply the full Mt Fuji size table. -- Deleted stuff related to setting up write modes. It has a different home now. -- Clear header length in mode_select unconditionally. -- Removed the register_disk() that was added, not needed here. 3.08 May 1, 2000 - Jens Axboe <axboe@suse.de> -- Fix direction flag in setup_send_key and setup_report_key. This gave some SCSI adapters problems. -- Always return -EROFS for write opens -- Convert to module_init/module_exit style init and remove some of the #ifdef MODULE stuff -- Fix several dvd errors - DVD_LU_SEND_ASF should pass agid, DVD_HOST_SEND_RPC_STATE did not set buffer size in cdb, and dvd_do_auth passed uninitialized data to drive because init_cdrom_command did not clear a 0 sized buffer. 
3.09 May 12, 2000 - Jens Axboe <axboe@suse.de> -- Fix Video-CD on SCSI drives that don't support READ_CD command. In that case switch block size and issue plain READ_10 again, then switch back. 3.10 Jun 10, 2000 - Jens Axboe <axboe@suse.de> -- Fix volume control on CD's - old SCSI-II drives now use their own code, as doing MODE6 stuff in here is really not my intention. -- Use READ_DISC_INFO for more reliable end-of-disc. 3.11 Jun 12, 2000 - Jens Axboe <axboe@suse.de> -- Fix bug in getting rpc phase 2 region info. -- Reinstate "correct" CDROMPLAYTRKIND 3.12 Oct 18, 2000 - Jens Axboe <axboe@suse.de> -- Use quiet bit on packet commands not known to work 3.20 Dec 17, 2003 - Jens Axboe <axboe@suse.de> -- Various fixes and lots of cleanups not listed :-) -- Locking fixes -- Mt Rainier support -- DVD-RAM write open fixes Nov 5 2001, Aug 8 2002. Modified by Andy Polyakov <appro@fy.chalmers.se> to support MMC-3 compliant DVD+RW units. Modified by Nigel Kukard <nkukard@lbsd.net> - support DVD+RW 2.4.x patch by Andy Polyakov <appro@fy.chalmers.se> -------------------------------------------------------------------------*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define REVISION "Revision: 3.20" #define VERSION "Id: cdrom.c 3.20 2003/12/17" /* I use an error-log mask to give fine grain control over the type of messages dumped to the system logs. 
The available masks include: */ #define CD_NOTHING 0x0 #define CD_WARNING 0x1 #define CD_REG_UNREG 0x2 #define CD_DO_IOCTL 0x4 #define CD_OPEN 0x8 #define CD_CLOSE 0x10 #define CD_COUNT_TRACKS 0x20 #define CD_CHANGER 0x40 #define CD_DVD 0x80 /* Define this to remove _all_ the debugging messages */ /* #define ERRLOGMASK CD_NOTHING */ #define ERRLOGMASK CD_WARNING /* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */ /* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */ #include <linux/module.h> #include <linux/fs.h> #include <linux/major.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/cdrom.h> #include <linux/sysctl.h> #include <linux/proc_fs.h> #include <linux/blkpg.h> #include <linux/init.h> #include <linux/fcntl.h> #include <linux/blkdev.h> #include <linux/times.h> #include <asm/uaccess.h> /* used to tell the module to turn on full debugging messages */ static bool debug; /* default compatibility mode */ static bool autoclose=1; static bool autoeject; static bool lockdoor = 1; /* will we ever get to use this... sigh. */ static bool check_media_type; /* automatically restart mrw format */ static bool mrw_format_restart = 1; module_param(debug, bool, 0); module_param(autoclose, bool, 0); module_param(autoeject, bool, 0); module_param(lockdoor, bool, 0); module_param(check_media_type, bool, 0); module_param(mrw_format_restart, bool, 0); static DEFINE_MUTEX(cdrom_mutex); static const char *mrw_format_status[] = { "not mrw", "bgformat inactive", "bgformat active", "mrw complete", }; static const char *mrw_address_space[] = { "DMA", "GAA" }; #if (ERRLOGMASK!=CD_NOTHING) #define cdinfo(type, fmt, args...) \ do { \ if ((ERRLOGMASK & type) || debug == 1) \ pr_info(fmt, ##args); \ } while (0) #else #define cdinfo(type, fmt, args...) 
\ do { \ if (0 && (ERRLOGMASK & type) || debug == 1) \ pr_info(fmt, ##args); \ } while (0) #endif /* These are used to simplify getting data in from and back to user land */ #define IOCTL_IN(arg, type, in) \ if (copy_from_user(&(in), (type __user *) (arg), sizeof (in))) \ return -EFAULT; #define IOCTL_OUT(arg, type, out) \ if (copy_to_user((type __user *) (arg), &(out), sizeof (out))) \ return -EFAULT; /* The (cdo->capability & ~cdi->mask & CDC_XXX) construct was used in a lot of places. This macro makes the code more clear. */ #define CDROM_CAN(type) (cdi->ops->capability & ~cdi->mask & (type)) /* used in the audio ioctls */ #define CHECKAUDIO if ((ret=check_for_audio_disc(cdi, cdo))) return ret /* * Another popular OS uses 7 seconds as the hard timeout for default * commands, so it is a good choice for us as well. */ #define CDROM_DEF_TIMEOUT (7 * HZ) /* Not-exported routines. */ static int open_for_data(struct cdrom_device_info * cdi); static int check_for_audio_disc(struct cdrom_device_info * cdi, struct cdrom_device_ops * cdo); static void sanitize_format(union cdrom_addr *addr, u_char * curr, u_char requested); static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, unsigned long arg); int cdrom_get_last_written(struct cdrom_device_info *, long *); static int cdrom_get_next_writable(struct cdrom_device_info *, long *); static void cdrom_count_tracks(struct cdrom_device_info *, tracktype*); static int cdrom_mrw_exit(struct cdrom_device_info *cdi); static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di); static void cdrom_sysctl_register(void); static LIST_HEAD(cdrom_list); static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { if (cgc->sense) { cgc->sense->sense_key = 0x05; cgc->sense->asc = 0x20; cgc->sense->ascq = 0x00; } cgc->stat = -EIO; return -EIO; } /* This macro makes sure we don't have to check on cdrom_device_ops * existence in the run-time routines below. 
Change_capability is a * hack to have the capability flags defined const, while we can still * change it here without gcc complaining at every line. */ #define ENSURE(call, bits) if (cdo->call == NULL) *change_capability &= ~(bits) int register_cdrom(struct cdrom_device_info *cdi) { static char banner_printed; struct cdrom_device_ops *cdo = cdi->ops; int *change_capability = (int *)&cdo->capability; /* hack */ cdinfo(CD_OPEN, "entering register_cdrom\n"); if (cdo->open == NULL || cdo->release == NULL) return -EINVAL; if (!banner_printed) { pr_info("Uniform CD-ROM driver " REVISION "\n"); banner_printed = 1; cdrom_sysctl_register(); } ENSURE(drive_status, CDC_DRIVE_STATUS ); if (cdo->check_events == NULL && cdo->media_changed == NULL) *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC); ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); ENSURE(lock_door, CDC_LOCK); ENSURE(select_speed, CDC_SELECT_SPEED); ENSURE(get_last_session, CDC_MULTI_SESSION); ENSURE(get_mcn, CDC_MCN); ENSURE(reset, CDC_RESET); ENSURE(generic_packet, CDC_GENERIC_PACKET); cdi->mc_flags = 0; cdo->n_minors = 0; cdi->options = CDO_USE_FFLAGS; if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY)) cdi->options |= (int) CDO_AUTO_CLOSE; if (autoeject==1 && CDROM_CAN(CDC_OPEN_TRAY)) cdi->options |= (int) CDO_AUTO_EJECT; if (lockdoor==1) cdi->options |= (int) CDO_LOCK; if (check_media_type==1) cdi->options |= (int) CDO_CHECK_TYPE; if (CDROM_CAN(CDC_MRW_W)) cdi->exit = cdrom_mrw_exit; if (cdi->disk) cdi->cdda_method = CDDA_BPC_FULL; else cdi->cdda_method = CDDA_OLD; if (!cdo->generic_packet) cdo->generic_packet = cdrom_dummy_generic_packet; cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name); mutex_lock(&cdrom_mutex); list_add(&cdi->list, &cdrom_list); mutex_unlock(&cdrom_mutex); return 0; } #undef ENSURE void unregister_cdrom(struct cdrom_device_info *cdi) { cdinfo(CD_OPEN, "entering unregister_cdrom\n"); mutex_lock(&cdrom_mutex); list_del(&cdi->list); mutex_unlock(&cdrom_mutex); if 
(cdi->exit) cdi->exit(cdi); cdi->ops->n_minors--; cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name); } int cdrom_get_media_event(struct cdrom_device_info *cdi, struct media_event_desc *med) { struct packet_command cgc; unsigned char buffer[8]; struct event_header *eh = (struct event_header *) buffer; init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ); cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION; cgc.cmd[1] = 1; /* IMMED */ cgc.cmd[4] = 1 << 4; /* media event */ cgc.cmd[8] = sizeof(buffer); cgc.quiet = 1; if (cdi->ops->generic_packet(cdi, &cgc)) return 1; if (be16_to_cpu(eh->data_len) < sizeof(*med)) return 1; if (eh->nea || eh->notification_class != 0x4) return 1; memcpy(med, &buffer[sizeof(*eh)], sizeof(*med)); return 0; } /* * the first prototypes used 0x2c as the page code for the mrw mode page, * subsequently this was changed to 0x03. probe the one used by this drive */ static int cdrom_mrw_probe_pc(struct cdrom_device_info *cdi) { struct packet_command cgc; char buffer[16]; init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ); cgc.timeout = HZ; cgc.quiet = 1; if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC, 0)) { cdi->mrw_mode_page = MRW_MODE_PC; return 0; } else if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC_PRE1, 0)) { cdi->mrw_mode_page = MRW_MODE_PC_PRE1; return 0; } return 1; } static int cdrom_is_mrw(struct cdrom_device_info *cdi, int *write) { struct packet_command cgc; struct mrw_feature_desc *mfd; unsigned char buffer[16]; int ret; *write = 0; init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ); cgc.cmd[0] = GPCMD_GET_CONFIGURATION; cgc.cmd[3] = CDF_MRW; cgc.cmd[8] = sizeof(buffer); cgc.quiet = 1; if ((ret = cdi->ops->generic_packet(cdi, &cgc))) return ret; mfd = (struct mrw_feature_desc *)&buffer[sizeof(struct feature_header)]; if (be16_to_cpu(mfd->feature_code) != CDF_MRW) return 1; *write = mfd->write; if ((ret = cdrom_mrw_probe_pc(cdi))) { *write = 0; return ret; } return 0; } static int 
cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont) { struct packet_command cgc; unsigned char buffer[12]; int ret; pr_info("%sstarting format\n", cont ? "Re" : ""); /* * FmtData bit set (bit 4), format type is 1 */ init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_WRITE); cgc.cmd[0] = GPCMD_FORMAT_UNIT; cgc.cmd[1] = (1 << 4) | 1; cgc.timeout = 5 * 60 * HZ; /* * 4 byte format list header, 8 byte format list descriptor */ buffer[1] = 1 << 1; buffer[3] = 8; /* * nr_blocks field */ buffer[4] = 0xff; buffer[5] = 0xff; buffer[6] = 0xff; buffer[7] = 0xff; buffer[8] = 0x24 << 2; buffer[11] = cont; ret = cdi->ops->generic_packet(cdi, &cgc); if (ret) pr_info("bgformat failed\n"); return ret; } static int cdrom_mrw_bgformat_susp(struct cdrom_device_info *cdi, int immed) { struct packet_command cgc; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_CLOSE_TRACK; /* * Session = 1, Track = 0 */ cgc.cmd[1] = !!immed; cgc.cmd[2] = 1 << 1; cgc.timeout = 5 * 60 * HZ; return cdi->ops->generic_packet(cdi, &cgc); } static int cdrom_flush_cache(struct cdrom_device_info *cdi) { struct packet_command cgc; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_FLUSH_CACHE; cgc.timeout = 5 * 60 * HZ; return cdi->ops->generic_packet(cdi, &cgc); } static int cdrom_mrw_exit(struct cdrom_device_info *cdi) { disc_information di; int ret; ret = cdrom_get_disc_info(cdi, &di); if (ret < 0 || ret < (int)offsetof(typeof(di),disc_type)) return 1; ret = 0; if (di.mrw_status == CDM_MRW_BGFORMAT_ACTIVE) { pr_info("issuing MRW background format suspend\n"); ret = cdrom_mrw_bgformat_susp(cdi, 0); } if (!ret && cdi->media_written) ret = cdrom_flush_cache(cdi); return ret; } static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space) { struct packet_command cgc; struct mode_page_header *mph; char buffer[16]; int ret, offset, size; init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ); cgc.buffer = buffer; cgc.buflen = sizeof(buffer); 
if ((ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0))) return ret; mph = (struct mode_page_header *) buffer; offset = be16_to_cpu(mph->desc_length); size = be16_to_cpu(mph->mode_data_length) + 2; buffer[offset + 3] = space; cgc.buflen = size; if ((ret = cdrom_mode_select(cdi, &cgc))) return ret; pr_info("%s: mrw address space %s selected\n", cdi->name, mrw_address_space[space]); return 0; } static int cdrom_get_random_writable(struct cdrom_device_info *cdi, struct rwrt_feature_desc *rfd) { struct packet_command cgc; char buffer[24]; int ret; init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ); cgc.cmd[0] = GPCMD_GET_CONFIGURATION; /* often 0x46 */ cgc.cmd[3] = CDF_RWRT; /* often 0x0020 */ cgc.cmd[8] = sizeof(buffer); /* often 0x18 */ cgc.quiet = 1; if ((ret = cdi->ops->generic_packet(cdi, &cgc))) return ret; memcpy(rfd, &buffer[sizeof(struct feature_header)], sizeof (*rfd)); return 0; } static int cdrom_has_defect_mgt(struct cdrom_device_info *cdi) { struct packet_command cgc; char buffer[16]; __be16 *feature_code; int ret; init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ); cgc.cmd[0] = GPCMD_GET_CONFIGURATION; cgc.cmd[3] = CDF_HWDM; cgc.cmd[8] = sizeof(buffer); cgc.quiet = 1; if ((ret = cdi->ops->generic_packet(cdi, &cgc))) return ret; feature_code = (__be16 *) &buffer[sizeof(struct feature_header)]; if (be16_to_cpu(*feature_code) == CDF_HWDM) return 0; return 1; } static int cdrom_is_random_writable(struct cdrom_device_info *cdi, int *write) { struct rwrt_feature_desc rfd; int ret; *write = 0; if ((ret = cdrom_get_random_writable(cdi, &rfd))) return ret; if (CDF_RWRT == be16_to_cpu(rfd.feature_code)) *write = 1; return 0; } static int cdrom_media_erasable(struct cdrom_device_info *cdi) { disc_information di; int ret; ret = cdrom_get_disc_info(cdi, &di); if (ret < 0 || ret < offsetof(typeof(di), n_first_track)) return -1; return di.erasable; } /* * FIXME: check RO bit */ static int cdrom_dvdram_open_write(struct cdrom_device_info 
*cdi) { int ret = cdrom_media_erasable(cdi); /* * allow writable open if media info read worked and media is * erasable, _or_ if it fails since not all drives support it */ if (!ret) return 1; return 0; } static int cdrom_mrw_open_write(struct cdrom_device_info *cdi) { disc_information di; int ret; /* * always reset to DMA lba space on open */ if (cdrom_mrw_set_lba_space(cdi, MRW_LBA_DMA)) { pr_err("failed setting lba address space\n"); return 1; } ret = cdrom_get_disc_info(cdi, &di); if (ret < 0 || ret < offsetof(typeof(di),disc_type)) return 1; if (!di.erasable) return 1; /* * mrw_status * 0 - not MRW formatted * 1 - MRW bgformat started, but not running or complete * 2 - MRW bgformat in progress * 3 - MRW formatting complete */ ret = 0; pr_info("open: mrw_status '%s'\n", mrw_format_status[di.mrw_status]); if (!di.mrw_status) ret = 1; else if (di.mrw_status == CDM_MRW_BGFORMAT_INACTIVE && mrw_format_restart) ret = cdrom_mrw_bgformat(cdi, 1); return ret; } static int mo_open_write(struct cdrom_device_info *cdi) { struct packet_command cgc; char buffer[255]; int ret; init_cdrom_command(&cgc, &buffer, 4, CGC_DATA_READ); cgc.quiet = 1; /* * obtain write protect information as per * drivers/scsi/sd.c:sd_read_write_protect_flag */ ret = cdrom_mode_sense(cdi, &cgc, GPMODE_ALL_PAGES, 0); if (ret) ret = cdrom_mode_sense(cdi, &cgc, GPMODE_VENDOR_PAGE, 0); if (ret) { cgc.buflen = 255; ret = cdrom_mode_sense(cdi, &cgc, GPMODE_ALL_PAGES, 0); } /* drive gave us no info, let the user go ahead */ if (ret) return 0; return buffer[3] & 0x80; } static int cdrom_ram_open_write(struct cdrom_device_info *cdi) { struct rwrt_feature_desc rfd; int ret; if ((ret = cdrom_has_defect_mgt(cdi))) return ret; if ((ret = cdrom_get_random_writable(cdi, &rfd))) return ret; else if (CDF_RWRT == be16_to_cpu(rfd.feature_code)) ret = !rfd.curr; cdinfo(CD_OPEN, "can open for random write\n"); return ret; } static void cdrom_mmc3_profile(struct cdrom_device_info *cdi) { struct packet_command cgc; char 
buffer[32]; int ret, mmc3_profile; init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ); cgc.cmd[0] = GPCMD_GET_CONFIGURATION; cgc.cmd[1] = 0; cgc.cmd[2] = cgc.cmd[3] = 0; /* Starting Feature Number */ cgc.cmd[8] = sizeof(buffer); /* Allocation Length */ cgc.quiet = 1; if ((ret = cdi->ops->generic_packet(cdi, &cgc))) mmc3_profile = 0xffff; else mmc3_profile = (buffer[6] << 8) | buffer[7]; cdi->mmc3_profile = mmc3_profile; } static int cdrom_is_dvd_rw(struct cdrom_device_info *cdi) { switch (cdi->mmc3_profile) { case 0x12: /* DVD-RAM */ case 0x1A: /* DVD+RW */ return 0; default: return 1; } } /* * returns 0 for ok to open write, non-0 to disallow */ static int cdrom_open_write(struct cdrom_device_info *cdi) { int mrw, mrw_write, ram_write; int ret = 1; mrw = 0; if (!cdrom_is_mrw(cdi, &mrw_write)) mrw = 1; if (CDROM_CAN(CDC_MO_DRIVE)) ram_write = 1; else (void) cdrom_is_random_writable(cdi, &ram_write); if (mrw) cdi->mask &= ~CDC_MRW; else cdi->mask |= CDC_MRW; if (mrw_write) cdi->mask &= ~CDC_MRW_W; else cdi->mask |= CDC_MRW_W; if (ram_write) cdi->mask &= ~CDC_RAM; else cdi->mask |= CDC_RAM; if (CDROM_CAN(CDC_MRW_W)) ret = cdrom_mrw_open_write(cdi); else if (CDROM_CAN(CDC_DVD_RAM)) ret = cdrom_dvdram_open_write(cdi); else if (CDROM_CAN(CDC_RAM) && !CDROM_CAN(CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_MRW|CDC_MO_DRIVE)) ret = cdrom_ram_open_write(cdi); else if (CDROM_CAN(CDC_MO_DRIVE)) ret = mo_open_write(cdi); else if (!cdrom_is_dvd_rw(cdi)) ret = 0; return ret; } static void cdrom_dvd_rw_close_write(struct cdrom_device_info *cdi) { struct packet_command cgc; if (cdi->mmc3_profile != 0x1a) { cdinfo(CD_CLOSE, "%s: No DVD+RW\n", cdi->name); return; } if (!cdi->media_written) { cdinfo(CD_CLOSE, "%s: DVD+RW media clean\n", cdi->name); return; } pr_info("%s: dirty DVD+RW media, \"finalizing\"\n", cdi->name); init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_FLUSH_CACHE; cgc.timeout = 30*HZ; cdi->ops->generic_packet(cdi, &cgc); 
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_CLOSE_TRACK; cgc.timeout = 3000*HZ; cgc.quiet = 1; cdi->ops->generic_packet(cdi, &cgc); init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_CLOSE_TRACK; cgc.cmd[2] = 2; /* Close session */ cgc.quiet = 1; cgc.timeout = 3000*HZ; cdi->ops->generic_packet(cdi, &cgc); cdi->media_written = 0; } static int cdrom_close_write(struct cdrom_device_info *cdi) { #if 0 return cdrom_flush_cache(cdi); #else return 0; #endif } /* We use the open-option O_NONBLOCK to indicate that the * purpose of opening is only for subsequent ioctl() calls; no device * integrity checks are performed. * * We hope that all cd-player programs will adopt this convention. It * is in their own interest: device control becomes a lot easier * this way. */ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode) { int ret; cdinfo(CD_OPEN, "entering cdrom_open\n"); /* open is event synchronization point, check events first */ check_disk_change(bdev); /* if this was a O_NONBLOCK open and we should honor the flags, * do a quick open without drive/disc integrity checks. 
*/ cdi->use_count++; if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) { ret = cdi->ops->open(cdi, 1); } else { ret = open_for_data(cdi); if (ret) goto err; cdrom_mmc3_profile(cdi); if (mode & FMODE_WRITE) { ret = -EROFS; if (cdrom_open_write(cdi)) goto err_release; if (!CDROM_CAN(CDC_RAM)) goto err_release; ret = 0; cdi->media_written = 0; } } if (ret) goto err; cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", cdi->name, cdi->use_count); return 0; err_release: if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { cdi->ops->lock_door(cdi, 0); cdinfo(CD_OPEN, "door unlocked.\n"); } cdi->ops->release(cdi); err: cdi->use_count--; return ret; } static int open_for_data(struct cdrom_device_info * cdi) { int ret; struct cdrom_device_ops *cdo = cdi->ops; tracktype tracks; cdinfo(CD_OPEN, "entering open_for_data\n"); /* Check if the driver can report drive status. If it can, we can do clever things. If it can't, well, we at least tried! */ if (cdo->drive_status != NULL) { ret = cdo->drive_status(cdi, CDSL_CURRENT); cdinfo(CD_OPEN, "drive_status=%d\n", ret); if (ret == CDS_TRAY_OPEN) { cdinfo(CD_OPEN, "the tray is open...\n"); /* can/may i close it? */ if (CDROM_CAN(CDC_CLOSE_TRAY) && cdi->options & CDO_AUTO_CLOSE) { cdinfo(CD_OPEN, "trying to close the tray.\n"); ret=cdo->tray_move(cdi,0); if (ret) { cdinfo(CD_OPEN, "bummer. tried to close the tray but failed.\n"); /* Ignore the error from the low level driver. We don't care why it couldn't close the tray. We only care that there is no disc in the drive, since that is the _REAL_ problem here.*/ ret=-ENOMEDIUM; goto clean_up_and_return; } } else { cdinfo(CD_OPEN, "bummer. this drive can't close the tray.\n"); ret=-ENOMEDIUM; goto clean_up_and_return; } /* Ok, the door should be closed now.. Check again */ ret = cdo->drive_status(cdi, CDSL_CURRENT); if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) { cdinfo(CD_OPEN, "bummer. 
the tray is still not closed.\n"); cdinfo(CD_OPEN, "tray might not contain a medium.\n"); ret=-ENOMEDIUM; goto clean_up_and_return; } cdinfo(CD_OPEN, "the tray is now closed.\n"); } /* the door should be closed now, check for the disc */ ret = cdo->drive_status(cdi, CDSL_CURRENT); if (ret!=CDS_DISC_OK) { ret = -ENOMEDIUM; goto clean_up_and_return; } } cdrom_count_tracks(cdi, &tracks); if (tracks.error == CDS_NO_DISC) { cdinfo(CD_OPEN, "bummer. no disc.\n"); ret=-ENOMEDIUM; goto clean_up_and_return; } /* CD-Players which don't use O_NONBLOCK, workman * for example, need bit CDO_CHECK_TYPE cleared! */ if (tracks.data==0) { if (cdi->options & CDO_CHECK_TYPE) { /* give people a warning shot, now that CDO_CHECK_TYPE is the default case! */ cdinfo(CD_OPEN, "bummer. wrong media type.\n"); cdinfo(CD_WARNING, "pid %d must open device O_NONBLOCK!\n", (unsigned int)task_pid_nr(current)); ret=-EMEDIUMTYPE; goto clean_up_and_return; } else { cdinfo(CD_OPEN, "wrong media type, but CDO_CHECK_TYPE not set.\n"); } } cdinfo(CD_OPEN, "all seems well, opening the device.\n"); /* all seems well, we can open the device */ ret = cdo->open(cdi, 0); /* open for data */ cdinfo(CD_OPEN, "opening the device gave me %d.\n", ret); /* After all this careful checking, we shouldn't have problems opening the device, but we don't want the device locked if this somehow fails... */ if (ret) { cdinfo(CD_OPEN, "open device failed.\n"); goto clean_up_and_return; } if (CDROM_CAN(CDC_LOCK) && (cdi->options & CDO_LOCK)) { cdo->lock_door(cdi, 1); cdinfo(CD_OPEN, "door locked.\n"); } cdinfo(CD_OPEN, "device opened successfully.\n"); return ret; /* Something failed. Try to unlock the drive, because some drivers (notably ide-cd) lock the drive after every command. This produced a nasty bug where after mount failed, the drive would remain locked! This ensures that the drive gets unlocked after a mount fails. This is a goto to avoid bloating the driver with redundant code. 
*/ clean_up_and_return: cdinfo(CD_OPEN, "open failed.\n"); if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { cdo->lock_door(cdi, 0); cdinfo(CD_OPEN, "door unlocked.\n"); } return ret; } /* This code is similar to that in open_for_data. The routine is called whenever an audio play operation is requested. */ static int check_for_audio_disc(struct cdrom_device_info * cdi, struct cdrom_device_ops * cdo) { int ret; tracktype tracks; cdinfo(CD_OPEN, "entering check_for_audio_disc\n"); if (!(cdi->options & CDO_CHECK_TYPE)) return 0; if (cdo->drive_status != NULL) { ret = cdo->drive_status(cdi, CDSL_CURRENT); cdinfo(CD_OPEN, "drive_status=%d\n", ret); if (ret == CDS_TRAY_OPEN) { cdinfo(CD_OPEN, "the tray is open...\n"); /* can/may i close it? */ if (CDROM_CAN(CDC_CLOSE_TRAY) && cdi->options & CDO_AUTO_CLOSE) { cdinfo(CD_OPEN, "trying to close the tray.\n"); ret=cdo->tray_move(cdi,0); if (ret) { cdinfo(CD_OPEN, "bummer. tried to close tray but failed.\n"); /* Ignore the error from the low level driver. We don't care why it couldn't close the tray. We only care that there is no disc in the drive, since that is the _REAL_ problem here.*/ return -ENOMEDIUM; } } else { cdinfo(CD_OPEN, "bummer. this driver can't close the tray.\n"); return -ENOMEDIUM; } /* Ok, the door should be closed now.. Check again */ ret = cdo->drive_status(cdi, CDSL_CURRENT); if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) { cdinfo(CD_OPEN, "bummer. the tray is still not closed.\n"); return -ENOMEDIUM; } if (ret!=CDS_DISC_OK) { cdinfo(CD_OPEN, "bummer. 
disc isn't ready.\n"); return -EIO; } cdinfo(CD_OPEN, "the tray is now closed.\n"); } } cdrom_count_tracks(cdi, &tracks); if (tracks.error) return(tracks.error); if (tracks.audio==0) return -EMEDIUMTYPE; return 0; } void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode) { struct cdrom_device_ops *cdo = cdi->ops; int opened_for_data; cdinfo(CD_CLOSE, "entering cdrom_release\n"); if (cdi->use_count > 0) cdi->use_count--; if (cdi->use_count == 0) { cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name); cdrom_dvd_rw_close_write(cdi); if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) { cdinfo(CD_CLOSE, "Unlocking door!\n"); cdo->lock_door(cdi, 0); } } opened_for_data = !(cdi->options & CDO_USE_FFLAGS) || !(mode & FMODE_NDELAY); /* * flush cache on last write release */ if (CDROM_CAN(CDC_RAM) && !cdi->use_count && cdi->for_data) cdrom_close_write(cdi); cdo->release(cdi); if (cdi->use_count == 0) { /* last process that closes dev*/ if (opened_for_data && cdi->options & CDO_AUTO_EJECT && CDROM_CAN(CDC_OPEN_TRAY)) cdo->tray_move(cdi, 1); } } static int cdrom_read_mech_status(struct cdrom_device_info *cdi, struct cdrom_changer_info *buf) { struct packet_command cgc; struct cdrom_device_ops *cdo = cdi->ops; int length; /* * Sanyo changer isn't spec compliant (doesn't use regular change * LOAD_UNLOAD command, and it doesn't implement the mech status * command below */ if (cdi->sanyo_slot) { buf->hdr.nslots = 3; buf->hdr.curslot = cdi->sanyo_slot == 3 ? 
0 : cdi->sanyo_slot; for (length = 0; length < 3; length++) { buf->slots[length].disc_present = 1; buf->slots[length].change = 0; } return 0; } length = sizeof(struct cdrom_mechstat_header) + cdi->capacity * sizeof(struct cdrom_slot); init_cdrom_command(&cgc, buf, length, CGC_DATA_READ); cgc.cmd[0] = GPCMD_MECHANISM_STATUS; cgc.cmd[8] = (length >> 8) & 0xff; cgc.cmd[9] = length & 0xff; return cdo->generic_packet(cdi, &cgc); } static int cdrom_slot_status(struct cdrom_device_info *cdi, int slot) { struct cdrom_changer_info *info; int ret; cdinfo(CD_CHANGER, "entering cdrom_slot_status()\n"); if (cdi->sanyo_slot) return CDS_NO_INFO; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; if ((ret = cdrom_read_mech_status(cdi, info))) goto out_free; if (info->slots[slot].disc_present) ret = CDS_DISC_OK; else ret = CDS_NO_DISC; out_free: kfree(info); return ret; } /* Return the number of slots for an ATAPI/SCSI cdrom, * return 1 if not a changer. */ int cdrom_number_of_slots(struct cdrom_device_info *cdi) { int status; int nslots = 1; struct cdrom_changer_info *info; cdinfo(CD_CHANGER, "entering cdrom_number_of_slots()\n"); /* cdrom_read_mech_status requires a valid value for capacity: */ cdi->capacity = 0; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; if ((status = cdrom_read_mech_status(cdi, info)) == 0) nslots = info->hdr.nslots; kfree(info); return nslots; } /* If SLOT < 0, unload the current slot. Otherwise, try to load SLOT. */ static int cdrom_load_unload(struct cdrom_device_info *cdi, int slot) { struct packet_command cgc; cdinfo(CD_CHANGER, "entering cdrom_load_unload()\n"); if (cdi->sanyo_slot && slot < 0) return 0; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_LOAD_UNLOAD; cgc.cmd[4] = 2 + (slot >= 0); cgc.cmd[8] = slot; cgc.timeout = 60 * HZ; /* The Sanyo 3 CD changer uses byte 7 of the GPCMD_TEST_UNIT_READY to command to switch CDs instead of using the GPCMD_LOAD_UNLOAD opcode. 
*/ if (cdi->sanyo_slot && -1 < slot) { cgc.cmd[0] = GPCMD_TEST_UNIT_READY; cgc.cmd[7] = slot; cgc.cmd[4] = cgc.cmd[8] = 0; cdi->sanyo_slot = slot ? slot : 3; } return cdi->ops->generic_packet(cdi, &cgc); } static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot) { struct cdrom_changer_info *info; int curslot; int ret; cdinfo(CD_CHANGER, "entering cdrom_select_disc()\n"); if (!CDROM_CAN(CDC_SELECT_DISC)) return -EDRIVE_CANT_DO_THIS; if (cdi->ops->check_events) cdi->ops->check_events(cdi, 0, slot); else cdi->ops->media_changed(cdi, slot); if (slot == CDSL_NONE) { /* set media changed bits, on both queues */ cdi->mc_flags = 0x3; return cdrom_load_unload(cdi, -1); } info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; if ((ret = cdrom_read_mech_status(cdi, info))) { kfree(info); return ret; } curslot = info->hdr.curslot; kfree(info); if (cdi->use_count > 1 || cdi->keeplocked) { if (slot == CDSL_CURRENT) { return curslot; } else { return -EBUSY; } } /* Specifying CDSL_CURRENT will attempt to load the currnet slot, which is useful if it had been previously unloaded. Whether it can or not, it returns the current slot. Similarly, if slot happens to be the current one, we still try and load it. */ if (slot == CDSL_CURRENT) slot = curslot; /* set media changed bits on both queues */ cdi->mc_flags = 0x3; if ((ret = cdrom_load_unload(cdi, slot))) return ret; return slot; } /* * As cdrom implements an extra ioctl consumer for media changed * event, it needs to buffer ->check_events() output, such that event * is not lost for both the usual VFS and ioctl paths. * cdi->{vfs|ioctl}_events are used to buffer pending events for each * path. * * XXX: Locking is non-existent. cdi->ops->check_events() can be * called in parallel and buffering fields are accessed without any * exclusion. The original media_changed code had the same problem. * It might be better to simply deprecate CDROM_MEDIA_CHANGED ioctl * and remove this cruft altogether. 
It doesn't have much usefulness
 * at this point.
 */

/*
 * Poll the backend once and latch the resulting event bits into BOTH
 * per-path buffers, so that neither the VFS path (cdi->vfs_events) nor
 * the ioctl path (cdi->ioctl_events) misses an event consumed by the
 * other.
 */
static void cdrom_update_events(struct cdrom_device_info *cdi,
				unsigned int clearing)
{
	unsigned int events;

	events = cdi->ops->check_events(cdi, clearing, CDSL_CURRENT);
	cdi->vfs_events |= events;
	cdi->ioctl_events |= events;
}

/*
 * Report and clear the events buffered for the VFS path.  Events stay
 * latched in cdi->ioctl_events for the ioctl consumer.
 */
unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
				unsigned int clearing)
{
	unsigned int events;

	cdrom_update_events(cdi, clearing);
	events = cdi->vfs_events;
	cdi->vfs_events = 0;
	return events;
}
EXPORT_SYMBOL(cdrom_check_events);

/* We want to make media_changed accessible to the user through an
 * ioctl. The main problem now is that we must double-buffer the
 * low-level implementation, to assure that the VFS and the user both
 * see a medium change once.
 */

/*
 * @queue selects which buffered "media changed" bit to report and
 * clear: bit 0 for the VFS path (queue == 0), bit 1 for the ioctl
 * path (queue == 1).  Returns 1 if the medium changed since the last
 * call on that queue, 0 otherwise.
 */
static int media_changed(struct cdrom_device_info *cdi, int queue)
{
	unsigned int mask = (1 << (queue & 1));
	int ret = !!(cdi->mc_flags & mask);
	bool changed;

	if (!CDROM_CAN(CDC_MEDIA_CHANGED))
		return ret;

	/* changed since last call? */
	if (cdi->ops->check_events) {
		BUG_ON(!queue);	/* shouldn't be called from VFS path */
		cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE);
		changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE;
		cdi->ioctl_events = 0;
	} else
		changed = cdi->ops->media_changed(cdi, CDSL_CURRENT);

	if (changed) {
		cdi->mc_flags = 0x3;	/* set bit on both queues */
		ret |= 1;
		cdi->media_written = 0;
	}

	cdi->mc_flags &= ~mask;	/* clear bit */
	return ret;
}

int cdrom_media_changed(struct cdrom_device_info *cdi)
{
	/* This talks to the VFS, which doesn't like errors - just 1 or 0.
	 * Returning "0" is always safe (media hasn't been changed). Do that
	 * if the low-level cdrom driver doesn't support media changed. */
	if (cdi == NULL || cdi->ops->media_changed == NULL)
		return 0;
	if (!CDROM_CAN(CDC_MEDIA_CHANGED))
		return 0;
	return media_changed(cdi, 0);
}

/* badly broken, I know. Is due for a fixup anytime.
 */

/*
 * Walk the disc's TOC and classify every track as audio, plain data,
 * CD-I or XA, filling in *tracks.  On failure, tracks->error is set to
 * CDS_NO_DISC or CDS_NO_INFO (no error code is returned).
 */
static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype* tracks)
{
	struct cdrom_tochdr header;
	struct cdrom_tocentry entry;
	int ret, i;

	tracks->data=0;
	tracks->audio=0;
	tracks->cdi=0;
	tracks->xa=0;
	tracks->error=0;
	cdinfo(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
	/* Grab the TOC header so we can see how many tracks there are */
	if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header))) {
		if (ret == -ENOMEDIUM)
			tracks->error = CDS_NO_DISC;
		else
			tracks->error = CDS_NO_INFO;
		return;
	}
	/* check what type of tracks are on this disc */
	entry.cdte_format = CDROM_MSF;
	for (i = header.cdth_trk0; i <= header.cdth_trk1; i++) {
		entry.cdte_track = i;
		if (cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry)) {
			tracks->error=CDS_NO_INFO;
			return;
		}
		/* NOTE(review): the branches below imply the low-level
		 * driver rewrites cdte_format for data tracks —
		 * 0x10 => CD-I, 0x20 => XA; confirm against drivers. */
		if (entry.cdte_ctrl & CDROM_DATA_TRACK) {
			if (entry.cdte_format == 0x10)
				tracks->cdi++;
			else if (entry.cdte_format == 0x20)
				tracks->xa++;
			else
				tracks->data++;
		} else
			tracks->audio++;
		cdinfo(CD_COUNT_TRACKS, "track %d: format=%d, ctrl=%d\n",
		       i, entry.cdte_format, entry.cdte_ctrl);
	}
	cdinfo(CD_COUNT_TRACKS, "disc has %d tracks: %d=audio %d=data %d=Cd-I %d=XA\n",
	       header.cdth_trk1, tracks->audio, tracks->data,
	       tracks->cdi, tracks->xa);
}

/* Requests to the low-level drivers will /always/ be done in the
   following format convention:

   CDROM_LBA: all data-related requests.
   CDROM_MSF: all audio-related requests.

   However, a low-level implementation is allowed to refuse this
   request, and return information in its own favorite format.

   It doesn't make sense /at all/ to ask for a play_audio in LBA
   format, or ask for multi-session info in MSF format. However, for
   backward compatibility these format requests will be satisfied, but
   the requests to the low-level drivers will be sanitized in the more
   meaningful format indicated above.
 */

/*
 * In-place conversion of a cdrom address between the CDROM_LBA and
 * CDROM_MSF representations; *curr tracks the current format and is
 * updated to the requested one.
 */
static void sanitize_format(union cdrom_addr *addr,
			    u_char * curr, u_char requested)
{
	if (*curr == requested)
		return;		/* nothing to be done!
*/ if (requested == CDROM_LBA) { addr->lba = (int) addr->msf.frame + 75 * (addr->msf.second - 2 + 60 * addr->msf.minute); } else { /* CDROM_MSF */ int lba = addr->lba; addr->msf.frame = lba % 75; lba /= 75; lba += 2; addr->msf.second = lba % 60; addr->msf.minute = lba / 60; } *curr = requested; } void init_cdrom_command(struct packet_command *cgc, void *buf, int len, int type) { memset(cgc, 0, sizeof(struct packet_command)); if (buf) memset(buf, 0, len); cgc->buffer = (char *) buf; cgc->buflen = len; cgc->data_direction = type; cgc->timeout = CDROM_DEF_TIMEOUT; } /* DVD handling */ #define copy_key(dest,src) memcpy((dest), (src), sizeof(dvd_key)) #define copy_chal(dest,src) memcpy((dest), (src), sizeof(dvd_challenge)) static void setup_report_key(struct packet_command *cgc, unsigned agid, unsigned type) { cgc->cmd[0] = GPCMD_REPORT_KEY; cgc->cmd[10] = type | (agid << 6); switch (type) { case 0: case 8: case 5: { cgc->buflen = 8; break; } case 1: { cgc->buflen = 16; break; } case 2: case 4: { cgc->buflen = 12; break; } } cgc->cmd[9] = cgc->buflen; cgc->data_direction = CGC_DATA_READ; } static void setup_send_key(struct packet_command *cgc, unsigned agid, unsigned type) { cgc->cmd[0] = GPCMD_SEND_KEY; cgc->cmd[10] = type | (agid << 6); switch (type) { case 1: { cgc->buflen = 16; break; } case 3: { cgc->buflen = 12; break; } case 6: { cgc->buflen = 8; break; } } cgc->cmd[9] = cgc->buflen; cgc->data_direction = CGC_DATA_WRITE; } static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) { int ret; u_char buf[20]; struct packet_command cgc; struct cdrom_device_ops *cdo = cdi->ops; rpc_state_t rpc_state; memset(buf, 0, sizeof(buf)); init_cdrom_command(&cgc, buf, 0, CGC_DATA_READ); switch (ai->type) { /* LU data send */ case DVD_LU_SEND_AGID: cdinfo(CD_DVD, "entering DVD_LU_SEND_AGID\n"); cgc.quiet = 1; setup_report_key(&cgc, ai->lsa.agid, 0); if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; ai->lsa.agid = buf[7] >> 6; /* Returning data, let host 
change state */ break; case DVD_LU_SEND_KEY1: cdinfo(CD_DVD, "entering DVD_LU_SEND_KEY1\n"); setup_report_key(&cgc, ai->lsk.agid, 2); if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; copy_key(ai->lsk.key, &buf[4]); /* Returning data, let host change state */ break; case DVD_LU_SEND_CHALLENGE: cdinfo(CD_DVD, "entering DVD_LU_SEND_CHALLENGE\n"); setup_report_key(&cgc, ai->lsc.agid, 1); if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; copy_chal(ai->lsc.chal, &buf[4]); /* Returning data, let host change state */ break; /* Post-auth key */ case DVD_LU_SEND_TITLE_KEY: cdinfo(CD_DVD, "entering DVD_LU_SEND_TITLE_KEY\n"); cgc.quiet = 1; setup_report_key(&cgc, ai->lstk.agid, 4); cgc.cmd[5] = ai->lstk.lba; cgc.cmd[4] = ai->lstk.lba >> 8; cgc.cmd[3] = ai->lstk.lba >> 16; cgc.cmd[2] = ai->lstk.lba >> 24; if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; ai->lstk.cpm = (buf[4] >> 7) & 1; ai->lstk.cp_sec = (buf[4] >> 6) & 1; ai->lstk.cgms = (buf[4] >> 4) & 3; copy_key(ai->lstk.title_key, &buf[5]); /* Returning data, let host change state */ break; case DVD_LU_SEND_ASF: cdinfo(CD_DVD, "entering DVD_LU_SEND_ASF\n"); setup_report_key(&cgc, ai->lsasf.agid, 5); if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; ai->lsasf.asf = buf[7] & 1; break; /* LU data receive (LU changes state) */ case DVD_HOST_SEND_CHALLENGE: cdinfo(CD_DVD, "entering DVD_HOST_SEND_CHALLENGE\n"); setup_send_key(&cgc, ai->hsc.agid, 1); buf[1] = 0xe; copy_chal(&buf[4], ai->hsc.chal); if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; ai->type = DVD_LU_SEND_KEY1; break; case DVD_HOST_SEND_KEY2: cdinfo(CD_DVD, "entering DVD_HOST_SEND_KEY2\n"); setup_send_key(&cgc, ai->hsk.agid, 3); buf[1] = 0xa; copy_key(&buf[4], ai->hsk.key); if ((ret = cdo->generic_packet(cdi, &cgc))) { ai->type = DVD_AUTH_FAILURE; return ret; } ai->type = DVD_AUTH_ESTABLISHED; break; /* Misc */ case DVD_INVALIDATE_AGID: cgc.quiet = 1; cdinfo(CD_DVD, "entering DVD_INVALIDATE_AGID\n"); setup_report_key(&cgc, 
ai->lsa.agid, 0x3f); if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; break; /* Get region settings */ case DVD_LU_SEND_RPC_STATE: cdinfo(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n"); setup_report_key(&cgc, 0, 8); memset(&rpc_state, 0, sizeof(rpc_state_t)); cgc.buffer = (char *) &rpc_state; if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; ai->lrpcs.type = rpc_state.type_code; ai->lrpcs.vra = rpc_state.vra; ai->lrpcs.ucca = rpc_state.ucca; ai->lrpcs.region_mask = rpc_state.region_mask; ai->lrpcs.rpc_scheme = rpc_state.rpc_scheme; break; /* Set region settings */ case DVD_HOST_SEND_RPC_STATE: cdinfo(CD_DVD, "entering DVD_HOST_SEND_RPC_STATE\n"); setup_send_key(&cgc, 0, 6); buf[1] = 6; buf[4] = ai->hrpcs.pdrc; if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; break; default: cdinfo(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type); return -ENOTTY; } return 0; } static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s, struct packet_command *cgc) { unsigned char buf[21], *base; struct dvd_layer *layer; struct cdrom_device_ops *cdo = cdi->ops; int ret, layer_num = s->physical.layer_num; if (layer_num >= DVD_LAYERS) return -EINVAL; init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ); cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE; cgc->cmd[6] = layer_num; cgc->cmd[7] = s->type; cgc->cmd[9] = cgc->buflen & 0xff; /* * refrain from reporting errors on non-existing layers (mainly) */ cgc->quiet = 1; ret = cdo->generic_packet(cdi, cgc); if (ret) return ret; base = &buf[4]; layer = &s->physical.layer[layer_num]; /* * place the data... really ugly, but at least we won't have to * worry about endianess in userspace. 
*/ memset(layer, 0, sizeof(*layer)); layer->book_version = base[0] & 0xf; layer->book_type = base[0] >> 4; layer->min_rate = base[1] & 0xf; layer->disc_size = base[1] >> 4; layer->layer_type = base[2] & 0xf; layer->track_path = (base[2] >> 4) & 1; layer->nlayers = (base[2] >> 5) & 3; layer->track_density = base[3] & 0xf; layer->linear_density = base[3] >> 4; layer->start_sector = base[5] << 16 | base[6] << 8 | base[7]; layer->end_sector = base[9] << 16 | base[10] << 8 | base[11]; layer->end_sector_l0 = base[13] << 16 | base[14] << 8 | base[15]; layer->bca = base[16] >> 7; return 0; } static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s, struct packet_command *cgc) { int ret; u_char buf[8]; struct cdrom_device_ops *cdo = cdi->ops; init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ); cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE; cgc->cmd[6] = s->copyright.layer_num; cgc->cmd[7] = s->type; cgc->cmd[8] = cgc->buflen >> 8; cgc->cmd[9] = cgc->buflen & 0xff; ret = cdo->generic_packet(cdi, cgc); if (ret) return ret; s->copyright.cpst = buf[4]; s->copyright.rmi = buf[5]; return 0; } static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s, struct packet_command *cgc) { int ret, size; u_char *buf; struct cdrom_device_ops *cdo = cdi->ops; size = sizeof(s->disckey.value) + 4; buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; init_cdrom_command(cgc, buf, size, CGC_DATA_READ); cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE; cgc->cmd[7] = s->type; cgc->cmd[8] = size >> 8; cgc->cmd[9] = size & 0xff; cgc->cmd[10] = s->disckey.agid << 6; ret = cdo->generic_packet(cdi, cgc); if (!ret) memcpy(s->disckey.value, &buf[4], sizeof(s->disckey.value)); kfree(buf); return ret; } static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s, struct packet_command *cgc) { int ret, size = 4 + 188; u_char *buf; struct cdrom_device_ops *cdo = cdi->ops; buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; init_cdrom_command(cgc, buf, size, 
CGC_DATA_READ); cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE; cgc->cmd[7] = s->type; cgc->cmd[9] = cgc->buflen & 0xff; ret = cdo->generic_packet(cdi, cgc); if (ret) goto out; s->bca.len = buf[0] << 8 | buf[1]; if (s->bca.len < 12 || s->bca.len > 188) { cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len); ret = -EIO; goto out; } memcpy(s->bca.value, &buf[4], s->bca.len); ret = 0; out: kfree(buf); return ret; } static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s, struct packet_command *cgc) { int ret = 0, size; u_char *buf; struct cdrom_device_ops *cdo = cdi->ops; size = sizeof(s->manufact.value) + 4; buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; init_cdrom_command(cgc, buf, size, CGC_DATA_READ); cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE; cgc->cmd[7] = s->type; cgc->cmd[8] = size >> 8; cgc->cmd[9] = size & 0xff; ret = cdo->generic_packet(cdi, cgc); if (ret) goto out; s->manufact.len = buf[0] << 8 | buf[1]; if (s->manufact.len < 0) { cdinfo(CD_WARNING, "Received invalid manufacture info length" " (%d)\n", s->manufact.len); ret = -EIO; } else { if (s->manufact.len > 2048) { cdinfo(CD_WARNING, "Received invalid manufacture info " "length (%d): truncating to 2048\n", s->manufact.len); s->manufact.len = 2048; } memcpy(s->manufact.value, &buf[4], s->manufact.len); } out: kfree(buf); return ret; } static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s, struct packet_command *cgc) { switch (s->type) { case DVD_STRUCT_PHYSICAL: return dvd_read_physical(cdi, s, cgc); case DVD_STRUCT_COPYRIGHT: return dvd_read_copyright(cdi, s, cgc); case DVD_STRUCT_DISCKEY: return dvd_read_disckey(cdi, s, cgc); case DVD_STRUCT_BCA: return dvd_read_bca(cdi, s, cgc); case DVD_STRUCT_MANUFACT: return dvd_read_manufact(cdi, s, cgc); default: cdinfo(CD_WARNING, ": Invalid DVD structure read requested (%d)\n", s->type); return -EINVAL; } } int cdrom_mode_sense(struct cdrom_device_info *cdi, struct packet_command *cgc, int page_code, int 
page_control) { struct cdrom_device_ops *cdo = cdi->ops; memset(cgc->cmd, 0, sizeof(cgc->cmd)); cgc->cmd[0] = GPCMD_MODE_SENSE_10; cgc->cmd[2] = page_code | (page_control << 6); cgc->cmd[7] = cgc->buflen >> 8; cgc->cmd[8] = cgc->buflen & 0xff; cgc->data_direction = CGC_DATA_READ; return cdo->generic_packet(cdi, cgc); } int cdrom_mode_select(struct cdrom_device_info *cdi, struct packet_command *cgc) { struct cdrom_device_ops *cdo = cdi->ops; memset(cgc->cmd, 0, sizeof(cgc->cmd)); memset(cgc->buffer, 0, 2); cgc->cmd[0] = GPCMD_MODE_SELECT_10; cgc->cmd[1] = 0x10; /* PF */ cgc->cmd[7] = cgc->buflen >> 8; cgc->cmd[8] = cgc->buflen & 0xff; cgc->data_direction = CGC_DATA_WRITE; return cdo->generic_packet(cdi, cgc); } static int cdrom_read_subchannel(struct cdrom_device_info *cdi, struct cdrom_subchnl *subchnl, int mcn) { struct cdrom_device_ops *cdo = cdi->ops; struct packet_command cgc; char buffer[32]; int ret; init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ); cgc.cmd[0] = GPCMD_READ_SUBCHANNEL; cgc.cmd[1] = 2; /* MSF addressing */ cgc.cmd[2] = 0x40; /* request subQ data */ cgc.cmd[3] = mcn ? 
2 : 1; cgc.cmd[8] = 16; if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; subchnl->cdsc_audiostatus = cgc.buffer[1]; subchnl->cdsc_format = CDROM_MSF; subchnl->cdsc_ctrl = cgc.buffer[5] & 0xf; subchnl->cdsc_trk = cgc.buffer[6]; subchnl->cdsc_ind = cgc.buffer[7]; subchnl->cdsc_reladdr.msf.minute = cgc.buffer[13]; subchnl->cdsc_reladdr.msf.second = cgc.buffer[14]; subchnl->cdsc_reladdr.msf.frame = cgc.buffer[15]; subchnl->cdsc_absaddr.msf.minute = cgc.buffer[9]; subchnl->cdsc_absaddr.msf.second = cgc.buffer[10]; subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11]; return 0; } /* * Specific READ_10 interface */ static int cdrom_read_cd(struct cdrom_device_info *cdi, struct packet_command *cgc, int lba, int blocksize, int nblocks) { struct cdrom_device_ops *cdo = cdi->ops; memset(&cgc->cmd, 0, sizeof(cgc->cmd)); cgc->cmd[0] = GPCMD_READ_10; cgc->cmd[2] = (lba >> 24) & 0xff; cgc->cmd[3] = (lba >> 16) & 0xff; cgc->cmd[4] = (lba >> 8) & 0xff; cgc->cmd[5] = lba & 0xff; cgc->cmd[6] = (nblocks >> 16) & 0xff; cgc->cmd[7] = (nblocks >> 8) & 0xff; cgc->cmd[8] = nblocks & 0xff; cgc->buflen = blocksize * nblocks; return cdo->generic_packet(cdi, cgc); } /* very generic interface for reading the various types of blocks */ static int cdrom_read_block(struct cdrom_device_info *cdi, struct packet_command *cgc, int lba, int nblocks, int format, int blksize) { struct cdrom_device_ops *cdo = cdi->ops; memset(&cgc->cmd, 0, sizeof(cgc->cmd)); cgc->cmd[0] = GPCMD_READ_CD; /* expected sector size - cdda,mode1,etc. 
*/ cgc->cmd[1] = format << 2; /* starting address */ cgc->cmd[2] = (lba >> 24) & 0xff; cgc->cmd[3] = (lba >> 16) & 0xff; cgc->cmd[4] = (lba >> 8) & 0xff; cgc->cmd[5] = lba & 0xff; /* number of blocks */ cgc->cmd[6] = (nblocks >> 16) & 0xff; cgc->cmd[7] = (nblocks >> 8) & 0xff; cgc->cmd[8] = nblocks & 0xff; cgc->buflen = blksize * nblocks; /* set the header info returned */ switch (blksize) { case CD_FRAMESIZE_RAW0 : cgc->cmd[9] = 0x58; break; case CD_FRAMESIZE_RAW1 : cgc->cmd[9] = 0x78; break; case CD_FRAMESIZE_RAW : cgc->cmd[9] = 0xf8; break; default : cgc->cmd[9] = 0x10; } return cdo->generic_packet(cdi, cgc); } static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf, int lba, int nframes) { struct packet_command cgc; int ret = 0; int nr; cdi->last_sense = 0; memset(&cgc, 0, sizeof(cgc)); /* * start with will ra.nframes size, back down if alloc fails */ nr = nframes; do { cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL); if (cgc.buffer) break; nr >>= 1; } while (nr); if (!nr) return -ENOMEM; cgc.data_direction = CGC_DATA_READ; while (nframes > 0) { if (nr > nframes) nr = nframes; ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW); if (ret) break; if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) { ret = -EFAULT; break; } ubuf += CD_FRAMESIZE_RAW * nr; nframes -= nr; lba += nr; } kfree(cgc.buffer); return ret; } static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf, int lba, int nframes) { struct request_queue *q = cdi->disk->queue; struct request *rq; struct bio *bio; unsigned int len; int nr, ret = 0; if (!q) return -ENXIO; cdi->last_sense = 0; while (nframes) { nr = nframes; if (cdi->cdda_method == CDDA_BPC_SINGLE) nr = 1; if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9)) nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW; len = nr * CD_FRAMESIZE_RAW; rq = blk_get_request(q, READ, GFP_KERNEL); if (!rq) { ret = -ENOMEM; break; } ret = blk_rq_map_user(q, rq, NULL, ubuf, len, 
GFP_KERNEL); if (ret) { blk_put_request(rq); break; } rq->cmd[0] = GPCMD_READ_CD; rq->cmd[1] = 1 << 2; rq->cmd[2] = (lba >> 24) & 0xff; rq->cmd[3] = (lba >> 16) & 0xff; rq->cmd[4] = (lba >> 8) & 0xff; rq->cmd[5] = lba & 0xff; rq->cmd[6] = (nr >> 16) & 0xff; rq->cmd[7] = (nr >> 8) & 0xff; rq->cmd[8] = nr & 0xff; rq->cmd[9] = 0xf8; rq->cmd_len = 12; rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->timeout = 60 * HZ; bio = rq->bio; if (blk_execute_rq(q, cdi->disk, rq, 0)) { struct request_sense *s = rq->sense; ret = -EIO; cdi->last_sense = s->sense_key; } if (blk_rq_unmap_user(bio)) ret = -EFAULT; blk_put_request(rq); if (ret) break; nframes -= nr; lba += nr; ubuf += len; } return ret; } static int cdrom_read_cdda(struct cdrom_device_info *cdi, __u8 __user *ubuf, int lba, int nframes) { int ret; if (cdi->cdda_method == CDDA_OLD) return cdrom_read_cdda_old(cdi, ubuf, lba, nframes); retry: /* * for anything else than success and io error, we need to retry */ ret = cdrom_read_cdda_bpc(cdi, ubuf, lba, nframes); if (!ret || ret != -EIO) return ret; /* * I've seen drives get sense 4/8/3 udma crc errors on multi * frame dma, so drop to single frame dma if we need to */ if (cdi->cdda_method == CDDA_BPC_FULL && nframes > 1) { pr_info("dropping to single frame dma\n"); cdi->cdda_method = CDDA_BPC_SINGLE; goto retry; } /* * so we have an io error of some sort with multi frame dma. 
if the * condition wasn't a hardware error * problems, not for any error */ if (cdi->last_sense != 0x04 && cdi->last_sense != 0x0b) return ret; pr_info("dropping to old style cdda (sense=%x)\n", cdi->last_sense); cdi->cdda_method = CDDA_OLD; return cdrom_read_cdda_old(cdi, ubuf, lba, nframes); } static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi, void __user *argp) { struct cdrom_multisession ms_info; u8 requested_format; int ret; cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n"); if (!(cdi->ops->capability & CDC_MULTI_SESSION)) return -ENOSYS; if (copy_from_user(&ms_info, argp, sizeof(ms_info))) return -EFAULT; requested_format = ms_info.addr_format; if (requested_format != CDROM_MSF && requested_format != CDROM_LBA) return -EINVAL; ms_info.addr_format = CDROM_LBA; ret = cdi->ops->get_last_session(cdi, &ms_info); if (ret) return ret; sanitize_format(&ms_info.addr, &ms_info.addr_format, requested_format); if (copy_to_user(argp, &ms_info, sizeof(ms_info))) return -EFAULT; cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n"); return 0; } static int cdrom_ioctl_eject(struct cdrom_device_info *cdi) { cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n"); if (!CDROM_CAN(CDC_OPEN_TRAY)) return -ENOSYS; if (cdi->use_count != 1 || cdi->keeplocked) return -EBUSY; if (CDROM_CAN(CDC_LOCK)) { int ret = cdi->ops->lock_door(cdi, 0); if (ret) return ret; } return cdi->ops->tray_move(cdi, 1); } static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi) { cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n"); if (!CDROM_CAN(CDC_CLOSE_TRAY)) return -ENOSYS; return cdi->ops->tray_move(cdi, 0); } static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi, unsigned long arg) { cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n"); if (!CDROM_CAN(CDC_OPEN_TRAY)) return -ENOSYS; if (cdi->keeplocked) return -EBUSY; cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT); if (arg) cdi->options |= CDO_AUTO_CLOSE | CDO_AUTO_EJECT; return 0; } static int cdrom_ioctl_media_changed(struct 
cdrom_device_info *cdi, unsigned long arg) { struct cdrom_changer_info *info; int ret; cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n"); if (!CDROM_CAN(CDC_MEDIA_CHANGED)) return -ENOSYS; /* cannot select disc or select current disc */ if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) return media_changed(cdi, 1); if ((unsigned int)arg >= cdi->capacity) return -EINVAL; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; ret = cdrom_read_mech_status(cdi, info); if (!ret) ret = info->slots[arg].change; kfree(info); return ret; } static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi, unsigned long arg) { cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n"); /* * Options need to be in sync with capability. * Too late for that, so we have to check each one separately. */ switch (arg) { case CDO_USE_FFLAGS: case CDO_CHECK_TYPE: break; case CDO_LOCK: if (!CDROM_CAN(CDC_LOCK)) return -ENOSYS; break; case 0: return cdi->options; /* default is basically CDO_[AUTO_CLOSE|AUTO_EJECT] */ default: if (!CDROM_CAN(arg)) return -ENOSYS; } cdi->options |= (int) arg; return cdi->options; } static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi, unsigned long arg) { cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n"); cdi->options &= ~(int) arg; return cdi->options; } static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi, unsigned long arg) { cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n"); if (!CDROM_CAN(CDC_SELECT_SPEED)) return -ENOSYS; return cdi->ops->select_speed(cdi, arg); } static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi, unsigned long arg) { cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n"); if (!CDROM_CAN(CDC_SELECT_DISC)) return -ENOSYS; if (arg != CDSL_CURRENT && arg != CDSL_NONE) { if ((int)arg >= cdi->capacity) return -EINVAL; } /* * ->select_disc is a hook to allow a driver-specific way of * seleting disc. 
However, since there is no equivalent hook for * cdrom_slot_status this may not actually be useful... */ if (cdi->ops->select_disc) return cdi->ops->select_disc(cdi, arg); cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n"); return cdrom_select_disc(cdi, arg); } static int cdrom_ioctl_reset(struct cdrom_device_info *cdi, struct block_device *bdev) { cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n"); if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (!CDROM_CAN(CDC_RESET)) return -ENOSYS; invalidate_bdev(bdev); return cdi->ops->reset(cdi); } static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi, unsigned long arg) { cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl"); if (!CDROM_CAN(CDC_LOCK)) return -EDRIVE_CANT_DO_THIS; cdi->keeplocked = arg ? 1 : 0; /* * Don't unlock the door on multiple opens by default, but allow * root to do so. */ if (cdi->use_count != 1 && !arg && !capable(CAP_SYS_ADMIN)) return -EBUSY; return cdi->ops->lock_door(cdi, arg); } static int cdrom_ioctl_debug(struct cdrom_device_info *cdi, unsigned long arg) { cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis"); if (!capable(CAP_SYS_ADMIN)) return -EACCES; debug = arg ? 1 : 0; return debug; } static int cdrom_ioctl_get_capability(struct cdrom_device_info *cdi) { cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n"); return (cdi->ops->capability & ~cdi->mask); } /* * The following function is implemented, although very few audio * discs give Universal Product Code information, which should just be * the Medium Catalog Number on the box. Note, that the way the code * is written on the CD is /not/ uniform across all discs! 
*/ static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi, void __user *argp) { struct cdrom_mcn mcn; int ret; cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n"); if (!(cdi->ops->capability & CDC_MCN)) return -ENOSYS; ret = cdi->ops->get_mcn(cdi, &mcn); if (ret) return ret; if (copy_to_user(argp, &mcn, sizeof(mcn))) return -EFAULT; cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n"); return 0; } static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, unsigned long arg) { cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n"); if (!(cdi->ops->capability & CDC_DRIVE_STATUS)) return -ENOSYS; if (!CDROM_CAN(CDC_SELECT_DISC) || (arg == CDSL_CURRENT || arg == CDSL_NONE)) return cdi->ops->drive_status(cdi, CDSL_CURRENT); if (((int)arg >= cdi->capacity)) return -EINVAL; return cdrom_slot_status(cdi, arg); } /* * Ok, this is where problems start. The current interface for the * CDROM_DISC_STATUS ioctl is flawed. It makes the false assumption that * CDs are all CDS_DATA_1 or all CDS_AUDIO, etc. Unfortunately, while this * is often the case, it is also very common for CDs to have some tracks * with data, and some tracks with audio. Just because I feel like it, * I declare the following to be the best way to cope. If the CD has ANY * data tracks on it, it will be returned as a data CD. If it has any XA * tracks, I will return it as that. Now I could simplify this interface * by combining these returns with the above, but this more clearly * demonstrates the problem with the current interface. Too bad this * wasn't designed to use bitmasks... -Erik * * Well, now we have the option CDS_MIXED: a mixed-type CD. * User level programmers might feel the ioctl is not very useful. 
 * ---david
 */

/*
 * Classify the whole disc for CDROM_DISC_STATUS: any audio track makes
 * it CDS_AUDIO (or CDS_MIXED when data/CD-I/XA tracks are also
 * present); otherwise CD-I takes precedence over XA, which takes
 * precedence over plain data.
 */
static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi)
{
	tracktype tracks;

	cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n");

	cdrom_count_tracks(cdi, &tracks);
	if (tracks.error)
		return tracks.error;

	/* Policy mode on */
	if (tracks.audio > 0) {
		if (!tracks.data && !tracks.cdi && !tracks.xa)
			return CDS_AUDIO;
		else
			return CDS_MIXED;
	}

	if (tracks.cdi > 0)
		return CDS_XA_2_2;
	if (tracks.xa > 0)
		return CDS_XA_2_1;
	if (tracks.data > 0)
		return CDS_DATA_1;

	/* Policy mode off */
	cdinfo(CD_WARNING, "This disc doesn't have any tracks I recognize!\n");
	return CDS_NO_INFO;
}

static int cdrom_ioctl_changer_nslots(struct cdrom_device_info *cdi)
{
	cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n");
	return cdi->capacity;
}

/*
 * CDROMSUBCHNL: query subchannel info from the driver (always in MSF
 * form) and convert the addresses back to the caller's requested
 * format before copying out.
 */
static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi,
		void __user *argp)
{
	struct cdrom_subchnl q;
	u8 requested, back;
	int ret;

	/* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/

	if (copy_from_user(&q, argp, sizeof(q)))
		return -EFAULT;

	requested = q.cdsc_format;
	if (requested != CDROM_MSF && requested != CDROM_LBA)
		return -EINVAL;
	/* drivers are always queried in MSF; converted back below */
	q.cdsc_format = CDROM_MSF;

	ret = cdi->ops->audio_ioctl(cdi, CDROMSUBCHNL, &q);
	if (ret)
		return ret;

	back = q.cdsc_format;	/* local copy */
	sanitize_format(&q.cdsc_absaddr, &back, requested);
	sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);

	if (copy_to_user(argp, &q, sizeof(q)))
		return -EFAULT;
	/* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
	return 0;
}

/*
 * CDROMREADTOCHDR: pass the TOC header request straight through to the
 * driver and copy the result back to userspace.
 */
static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi,
		void __user *argp)
{
	struct cdrom_tochdr header;
	int ret;

	/* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */

	if (copy_from_user(&header, argp, sizeof(header)))
		return -EFAULT;

	ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
	if (ret)
		return ret;

	if (copy_to_user(argp, &header, sizeof(header)))
		return -EFAULT;
	/* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */
	return 0;
}

static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi,
void __user *argp) { struct cdrom_tocentry entry; u8 requested_format; int ret; /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ if (copy_from_user(&entry, argp, sizeof(entry))) return -EFAULT; requested_format = entry.cdte_format; if (requested_format != CDROM_MSF && requested_format != CDROM_LBA) return -EINVAL; /* make interface to low-level uniform */ entry.cdte_format = CDROM_MSF; ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry); if (ret) return ret; sanitize_format(&entry.cdte_addr, &entry.cdte_format, requested_format); if (copy_to_user(argp, &entry, sizeof(entry))) return -EFAULT; /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */ return 0; } static int cdrom_ioctl_play_msf(struct cdrom_device_info *cdi, void __user *argp) { struct cdrom_msf msf; cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; if (copy_from_user(&msf, argp, sizeof(msf))) return -EFAULT; return cdi->ops->audio_ioctl(cdi, CDROMPLAYMSF, &msf); } static int cdrom_ioctl_play_trkind(struct cdrom_device_info *cdi, void __user *argp) { struct cdrom_ti ti; int ret; cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; if (copy_from_user(&ti, argp, sizeof(ti))) return -EFAULT; ret = check_for_audio_disc(cdi, cdi->ops); if (ret) return ret; return cdi->ops->audio_ioctl(cdi, CDROMPLAYTRKIND, &ti); } static int cdrom_ioctl_volctrl(struct cdrom_device_info *cdi, void __user *argp) { struct cdrom_volctrl volume; cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; if (copy_from_user(&volume, argp, sizeof(volume))) return -EFAULT; return cdi->ops->audio_ioctl(cdi, CDROMVOLCTRL, &volume); } static int cdrom_ioctl_volread(struct cdrom_device_info *cdi, void __user *argp) { struct cdrom_volctrl volume; int ret; cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; ret = cdi->ops->audio_ioctl(cdi, 
CDROMVOLREAD, &volume); if (ret) return ret; if (copy_to_user(argp, &volume, sizeof(volume))) return -EFAULT; return 0; } static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi, unsigned int cmd) { int ret; cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; ret = check_for_audio_disc(cdi, cdi->ops); if (ret) return ret; return cdi->ops->audio_ioctl(cdi, cmd, NULL); } /* * Just about every imaginable ioctl is supported in the Uniform layer * these days. * ATAPI / SCSI specific code now mainly resides in mmc_ioctl(). */ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int ret; /* * Try the generic SCSI command ioctl's first. */ ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp); if (ret != -ENOTTY) return ret; switch (cmd) { case CDROMMULTISESSION: return cdrom_ioctl_multisession(cdi, argp); case CDROMEJECT: return cdrom_ioctl_eject(cdi); case CDROMCLOSETRAY: return cdrom_ioctl_closetray(cdi); case CDROMEJECT_SW: return cdrom_ioctl_eject_sw(cdi, arg); case CDROM_MEDIA_CHANGED: return cdrom_ioctl_media_changed(cdi, arg); case CDROM_SET_OPTIONS: return cdrom_ioctl_set_options(cdi, arg); case CDROM_CLEAR_OPTIONS: return cdrom_ioctl_clear_options(cdi, arg); case CDROM_SELECT_SPEED: return cdrom_ioctl_select_speed(cdi, arg); case CDROM_SELECT_DISC: return cdrom_ioctl_select_disc(cdi, arg); case CDROMRESET: return cdrom_ioctl_reset(cdi, bdev); case CDROM_LOCKDOOR: return cdrom_ioctl_lock_door(cdi, arg); case CDROM_DEBUG: return cdrom_ioctl_debug(cdi, arg); case CDROM_GET_CAPABILITY: return cdrom_ioctl_get_capability(cdi); case CDROM_GET_MCN: return cdrom_ioctl_get_mcn(cdi, argp); case CDROM_DRIVE_STATUS: return cdrom_ioctl_drive_status(cdi, arg); case CDROM_DISC_STATUS: return cdrom_ioctl_disc_status(cdi); case CDROM_CHANGER_NSLOTS: return cdrom_ioctl_changer_nslots(cdi); } /* * Use the 
ioctls that are implemented through the generic_packet() * interface. this may look at bit funny, but if -ENOTTY is * returned that particular ioctl is not implemented and we * let it go through the device specific ones. */ if (CDROM_CAN(CDC_GENERIC_PACKET)) { ret = mmc_ioctl(cdi, cmd, arg); if (ret != -ENOTTY) return ret; } /* * Note: most of the cdinfo() calls are commented out here, * because they fill up the sys log when CD players poll * the drive. */ switch (cmd) { case CDROMSUBCHNL: return cdrom_ioctl_get_subchnl(cdi, argp); case CDROMREADTOCHDR: return cdrom_ioctl_read_tochdr(cdi, argp); case CDROMREADTOCENTRY: return cdrom_ioctl_read_tocentry(cdi, argp); case CDROMPLAYMSF: return cdrom_ioctl_play_msf(cdi, argp); case CDROMPLAYTRKIND: return cdrom_ioctl_play_trkind(cdi, argp); case CDROMVOLCTRL: return cdrom_ioctl_volctrl(cdi, argp); case CDROMVOLREAD: return cdrom_ioctl_volread(cdi, argp); case CDROMSTART: case CDROMSTOP: case CDROMPAUSE: case CDROMRESUME: return cdrom_ioctl_audioctl(cdi, cmd); } return -ENOSYS; } /* * Required when we need to use READ_10 to issue other than 2048 block * reads */ static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size) { struct cdrom_device_ops *cdo = cdi->ops; struct packet_command cgc; struct modesel_head mh; memset(&mh, 0, sizeof(mh)); mh.block_desc_length = 0x08; mh.block_length_med = (size >> 8) & 0xff; mh.block_length_lo = size & 0xff; memset(&cgc, 0, sizeof(cgc)); cgc.cmd[0] = 0x15; cgc.cmd[1] = 1 << 4; cgc.cmd[4] = 12; cgc.buflen = sizeof(mh); cgc.buffer = (char *) &mh; cgc.data_direction = CGC_DATA_WRITE; mh.block_desc_length = 0x08; mh.block_length_med = (size >> 8) & 0xff; mh.block_length_lo = size & 0xff; return cdo->generic_packet(cdi, &cgc); } static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi, void __user *arg, struct packet_command *cgc, int cmd) { struct request_sense sense; struct cdrom_msf msf; int blocksize = 0, format = 0, lba; int ret; switch (cmd) { case 
CDROMREADRAW: blocksize = CD_FRAMESIZE_RAW; break; case CDROMREADMODE1: blocksize = CD_FRAMESIZE; format = 2; break; case CDROMREADMODE2: blocksize = CD_FRAMESIZE_RAW0; break; } IOCTL_IN(arg, struct cdrom_msf, msf); lba = msf_to_lba(msf.cdmsf_min0, msf.cdmsf_sec0, msf.cdmsf_frame0); /* FIXME: we need upper bound checking, too!! */ if (lba < 0) return -EINVAL; cgc->buffer = kzalloc(blocksize, GFP_KERNEL); if (cgc->buffer == NULL) return -ENOMEM; memset(&sense, 0, sizeof(sense)); cgc->sense = &sense; cgc->data_direction = CGC_DATA_READ; ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize); if (ret && sense.sense_key == 0x05 && sense.asc == 0x20 && sense.ascq == 0x00) { /* * SCSI-II devices are not required to support * READ_CD, so let's try switching block size */ /* FIXME: switch back again... */ ret = cdrom_switch_blocksize(cdi, blocksize); if (ret) goto out; cgc->sense = NULL; ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1); ret |= cdrom_switch_blocksize(cdi, blocksize); } if (!ret && copy_to_user(arg, cgc->buffer, blocksize)) ret = -EFAULT; out: kfree(cgc->buffer); return ret; } static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi, void __user *arg) { struct cdrom_read_audio ra; int lba; IOCTL_IN(arg, struct cdrom_read_audio, ra); if (ra.addr_format == CDROM_MSF) lba = msf_to_lba(ra.addr.msf.minute, ra.addr.msf.second, ra.addr.msf.frame); else if (ra.addr_format == CDROM_LBA) lba = ra.addr.lba; else return -EINVAL; /* FIXME: we need upper bound checking, too!! 
*/ if (lba < 0 || ra.nframes <= 0 || ra.nframes > CD_FRAMES) return -EINVAL; return cdrom_read_cdda(cdi, ra.buf, lba, ra.nframes); } static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi, void __user *arg) { int ret; struct cdrom_subchnl q; u_char requested, back; IOCTL_IN(arg, struct cdrom_subchnl, q); requested = q.cdsc_format; if (!((requested == CDROM_MSF) || (requested == CDROM_LBA))) return -EINVAL; q.cdsc_format = CDROM_MSF; ret = cdrom_read_subchannel(cdi, &q, 0); if (ret) return ret; back = q.cdsc_format; /* local copy */ sanitize_format(&q.cdsc_absaddr, &back, requested); sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested); IOCTL_OUT(arg, struct cdrom_subchnl, q); /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ return 0; } static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi, void __user *arg, struct packet_command *cgc) { struct cdrom_device_ops *cdo = cdi->ops; struct cdrom_msf msf; cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); IOCTL_IN(arg, struct cdrom_msf, msf); cgc->cmd[0] = GPCMD_PLAY_AUDIO_MSF; cgc->cmd[3] = msf.cdmsf_min0; cgc->cmd[4] = msf.cdmsf_sec0; cgc->cmd[5] = msf.cdmsf_frame0; cgc->cmd[6] = msf.cdmsf_min1; cgc->cmd[7] = msf.cdmsf_sec1; cgc->cmd[8] = msf.cdmsf_frame1; cgc->data_direction = CGC_DATA_NONE; return cdo->generic_packet(cdi, cgc); } static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi, void __user *arg, struct packet_command *cgc) { struct cdrom_device_ops *cdo = cdi->ops; struct cdrom_blk blk; cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n"); IOCTL_IN(arg, struct cdrom_blk, blk); cgc->cmd[0] = GPCMD_PLAY_AUDIO_10; cgc->cmd[2] = (blk.from >> 24) & 0xff; cgc->cmd[3] = (blk.from >> 16) & 0xff; cgc->cmd[4] = (blk.from >> 8) & 0xff; cgc->cmd[5] = blk.from & 0xff; cgc->cmd[7] = (blk.len >> 8) & 0xff; cgc->cmd[8] = blk.len & 0xff; cgc->data_direction = CGC_DATA_NONE; return cdo->generic_packet(cdi, cgc); } static noinline int 
mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi, void __user *arg, struct packet_command *cgc, unsigned int cmd) { struct cdrom_volctrl volctrl; unsigned char buffer[32]; char mask[sizeof(buffer)]; unsigned short offset; int ret; cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n"); IOCTL_IN(arg, struct cdrom_volctrl, volctrl); cgc->buffer = buffer; cgc->buflen = 24; ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 0); if (ret) return ret; /* originally the code depended on buffer[1] to determine how much data is available for transfer. buffer[1] is unfortunately ambigious and the only reliable way seem to be to simply skip over the block descriptor... */ offset = 8 + be16_to_cpu(*(__be16 *)(buffer + 6)); if (offset + 16 > sizeof(buffer)) return -E2BIG; if (offset + 16 > cgc->buflen) { cgc->buflen = offset + 16; ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 0); if (ret) return ret; } /* sanity check */ if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE || buffer[offset + 1] < 14) return -EINVAL; /* now we have the current volume settings. 
if it was only a CDROMVOLREAD, return these values */ if (cmd == CDROMVOLREAD) { volctrl.channel0 = buffer[offset+9]; volctrl.channel1 = buffer[offset+11]; volctrl.channel2 = buffer[offset+13]; volctrl.channel3 = buffer[offset+15]; IOCTL_OUT(arg, struct cdrom_volctrl, volctrl); return 0; } /* get the volume mask */ cgc->buffer = mask; ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 1); if (ret) return ret; buffer[offset + 9] = volctrl.channel0 & mask[offset + 9]; buffer[offset + 11] = volctrl.channel1 & mask[offset + 11]; buffer[offset + 13] = volctrl.channel2 & mask[offset + 13]; buffer[offset + 15] = volctrl.channel3 & mask[offset + 15]; /* set volume */ cgc->buffer = buffer + offset - 8; memset(cgc->buffer, 0, 8); return cdrom_mode_select(cdi, cgc); } static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi, struct packet_command *cgc, int cmd) { struct cdrom_device_ops *cdo = cdi->ops; cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n"); cgc->cmd[0] = GPCMD_START_STOP_UNIT; cgc->cmd[1] = 1; cgc->cmd[4] = (cmd == CDROMSTART) ? 1 : 0; cgc->data_direction = CGC_DATA_NONE; return cdo->generic_packet(cdi, cgc); } static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi, struct packet_command *cgc, int cmd) { struct cdrom_device_ops *cdo = cdi->ops; cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n"); cgc->cmd[0] = GPCMD_PAUSE_RESUME; cgc->cmd[8] = (cmd == CDROMRESUME) ? 
1 : 0; cgc->data_direction = CGC_DATA_NONE; return cdo->generic_packet(cdi, cgc); } static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi, void __user *arg, struct packet_command *cgc) { int ret; dvd_struct *s; int size = sizeof(dvd_struct); if (!CDROM_CAN(CDC_DVD)) return -ENOSYS; s = kmalloc(size, GFP_KERNEL); if (!s) return -ENOMEM; cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n"); if (copy_from_user(s, arg, size)) { kfree(s); return -EFAULT; } ret = dvd_read_struct(cdi, s, cgc); if (ret) goto out; if (copy_to_user(arg, s, size)) ret = -EFAULT; out: kfree(s); return ret; } static noinline int mmc_ioctl_dvd_auth(struct cdrom_device_info *cdi, void __user *arg) { int ret; dvd_authinfo ai; if (!CDROM_CAN(CDC_DVD)) return -ENOSYS; cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n"); IOCTL_IN(arg, dvd_authinfo, ai); ret = dvd_do_auth(cdi, &ai); if (ret) return ret; IOCTL_OUT(arg, dvd_authinfo, ai); return 0; } static noinline int mmc_ioctl_cdrom_next_writable(struct cdrom_device_info *cdi, void __user *arg) { int ret; long next = 0; cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n"); ret = cdrom_get_next_writable(cdi, &next); if (ret) return ret; IOCTL_OUT(arg, long, next); return 0; } static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi, void __user *arg) { int ret; long last = 0; cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n"); ret = cdrom_get_last_written(cdi, &last); if (ret) return ret; IOCTL_OUT(arg, long, last); return 0; } static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, unsigned long arg) { struct packet_command cgc; void __user *userptr = (void __user *)arg; memset(&cgc, 0, sizeof(cgc)); /* build a unified command and queue it through cdo->generic_packet() */ switch (cmd) { case CDROMREADRAW: case CDROMREADMODE1: case CDROMREADMODE2: return mmc_ioctl_cdrom_read_data(cdi, userptr, &cgc, cmd); case CDROMREADAUDIO: return mmc_ioctl_cdrom_read_audio(cdi, userptr); case CDROMSUBCHNL: return 
mmc_ioctl_cdrom_subchannel(cdi, userptr); case CDROMPLAYMSF: return mmc_ioctl_cdrom_play_msf(cdi, userptr, &cgc); case CDROMPLAYBLK: return mmc_ioctl_cdrom_play_blk(cdi, userptr, &cgc); case CDROMVOLCTRL: case CDROMVOLREAD: return mmc_ioctl_cdrom_volume(cdi, userptr, &cgc, cmd); case CDROMSTART: case CDROMSTOP: return mmc_ioctl_cdrom_start_stop(cdi, &cgc, cmd); case CDROMPAUSE: case CDROMRESUME: return mmc_ioctl_cdrom_pause_resume(cdi, &cgc, cmd); case DVD_READ_STRUCT: return mmc_ioctl_dvd_read_struct(cdi, userptr, &cgc); case DVD_AUTH: return mmc_ioctl_dvd_auth(cdi, userptr); case CDROM_NEXT_WRITABLE: return mmc_ioctl_cdrom_next_writable(cdi, userptr); case CDROM_LAST_WRITTEN: return mmc_ioctl_cdrom_last_written(cdi, userptr); } return -ENOTTY; } static int cdrom_get_track_info(struct cdrom_device_info *cdi, __u16 track, __u8 type, track_information *ti) { struct cdrom_device_ops *cdo = cdi->ops; struct packet_command cgc; int ret, buflen; init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ); cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO; cgc.cmd[1] = type & 3; cgc.cmd[4] = (track & 0xff00) >> 8; cgc.cmd[5] = track & 0xff; cgc.cmd[8] = 8; cgc.quiet = 1; if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; buflen = be16_to_cpu(ti->track_information_length) + sizeof(ti->track_information_length); if (buflen > sizeof(track_information)) buflen = sizeof(track_information); cgc.cmd[8] = cgc.buflen = buflen; if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; /* return actual fill size */ return buflen; } /* requires CD R/RW */ static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di) { struct cdrom_device_ops *cdo = cdi->ops; struct packet_command cgc; int ret, buflen; /* set up command and get the disc info */ init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ); cgc.cmd[0] = GPCMD_READ_DISC_INFO; cgc.cmd[8] = cgc.buflen = 2; cgc.quiet = 1; if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; /* not all drives have the same disc_info 
length, so requeue * packet with the length the drive tells us it can supply */ buflen = be16_to_cpu(di->disc_information_length) + sizeof(di->disc_information_length); if (buflen > sizeof(disc_information)) buflen = sizeof(disc_information); cgc.cmd[8] = cgc.buflen = buflen; if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; /* return actual fill size */ return buflen; } /* return the last written block on the CD-R media. this is for the udf file system. */ int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written) { struct cdrom_tocentry toc; disc_information di; track_information ti; __u32 last_track; int ret = -1, ti_size; if (!CDROM_CAN(CDC_GENERIC_PACKET)) goto use_toc; ret = cdrom_get_disc_info(cdi, &di); if (ret < (int)(offsetof(typeof(di), last_track_lsb) + sizeof(di.last_track_lsb))) goto use_toc; /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */ last_track = (di.last_track_msb << 8) | di.last_track_lsb; ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); if (ti_size < (int)offsetof(typeof(ti), track_start)) goto use_toc; /* if this track is blank, try the previous. */ if (ti.blank) { if (last_track==1) goto use_toc; last_track--; ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); } if (ti_size < (int)(offsetof(typeof(ti), track_size) + sizeof(ti.track_size))) goto use_toc; /* if last recorded field is valid, return it. */ if (ti.lra_v && ti_size >= (int)(offsetof(typeof(ti), last_rec_address) + sizeof(ti.last_rec_address))) { *last_written = be32_to_cpu(ti.last_rec_address); } else { /* make it up instead */ *last_written = be32_to_cpu(ti.track_start) + be32_to_cpu(ti.track_size); if (ti.free_blocks) *last_written -= (be32_to_cpu(ti.free_blocks) + 7); } return 0; /* this is where we end up if the drive either can't do a GPCMD_READ_DISC_INFO or GPCMD_READ_TRACK_RZONE_INFO or if it doesn't give enough information or fails. then we return the toc contents. 
*/ use_toc: toc.cdte_format = CDROM_MSF; toc.cdte_track = CDROM_LEADOUT; if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc))) return ret; sanitize_format(&toc.cdte_addr, &toc.cdte_format, CDROM_LBA); *last_written = toc.cdte_addr.lba; return 0; } /* return the next writable block. also for udf file system. */ static int cdrom_get_next_writable(struct cdrom_device_info *cdi, long *next_writable) { disc_information di; track_information ti; __u16 last_track; int ret, ti_size; if (!CDROM_CAN(CDC_GENERIC_PACKET)) goto use_last_written; ret = cdrom_get_disc_info(cdi, &di); if (ret < 0 || ret < offsetof(typeof(di), last_track_lsb) + sizeof(di.last_track_lsb)) goto use_last_written; /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */ last_track = (di.last_track_msb << 8) | di.last_track_lsb; ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); if (ti_size < 0 || ti_size < offsetof(typeof(ti), track_start)) goto use_last_written; /* if this track is blank, try the previous. */ if (ti.blank) { if (last_track == 1) goto use_last_written; last_track--; ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); if (ti_size < 0) goto use_last_written; } /* if next recordable address field is valid, use it. 
*/ if (ti.nwa_v && ti_size >= offsetof(typeof(ti), next_writable) + sizeof(ti.next_writable)) { *next_writable = be32_to_cpu(ti.next_writable); return 0; } use_last_written: if ((ret = cdrom_get_last_written(cdi, next_writable))) { *next_writable = 0; return ret; } else { *next_writable += 7; return 0; } } EXPORT_SYMBOL(cdrom_get_last_written); EXPORT_SYMBOL(register_cdrom); EXPORT_SYMBOL(unregister_cdrom); EXPORT_SYMBOL(cdrom_open); EXPORT_SYMBOL(cdrom_release); EXPORT_SYMBOL(cdrom_ioctl); EXPORT_SYMBOL(cdrom_media_changed); EXPORT_SYMBOL(cdrom_number_of_slots); EXPORT_SYMBOL(cdrom_mode_select); EXPORT_SYMBOL(cdrom_mode_sense); EXPORT_SYMBOL(init_cdrom_command); EXPORT_SYMBOL(cdrom_get_media_event); #ifdef CONFIG_SYSCTL #define CDROM_STR_SIZE 1000 static struct cdrom_sysctl_settings { char info[CDROM_STR_SIZE]; /* general info */ int autoclose; /* close tray upon mount, etc */ int autoeject; /* eject on umount */ int debug; /* turn on debugging messages */ int lock; /* lock the door on device open */ int check; /* check media type */ } cdrom_sysctl_settings; enum cdrom_print_option { CTL_NAME, CTL_SPEED, CTL_SLOTS, CTL_CAPABILITY }; static int cdrom_print_info(const char *header, int val, char *info, int *pos, enum cdrom_print_option option) { const int max_size = sizeof(cdrom_sysctl_settings.info); struct cdrom_device_info *cdi; int ret; ret = scnprintf(info + *pos, max_size - *pos, header); if (!ret) return 1; *pos += ret; list_for_each_entry(cdi, &cdrom_list, list) { switch (option) { case CTL_NAME: ret = scnprintf(info + *pos, max_size - *pos, "\t%s", cdi->name); break; case CTL_SPEED: ret = scnprintf(info + *pos, max_size - *pos, "\t%d", cdi->speed); break; case CTL_SLOTS: ret = scnprintf(info + *pos, max_size - *pos, "\t%d", cdi->capacity); break; case CTL_CAPABILITY: ret = scnprintf(info + *pos, max_size - *pos, "\t%d", CDROM_CAN(val) != 0); break; default: pr_info("invalid option%d\n", option); return 1; } if (!ret) return 1; *pos += ret; } return 0; } 
/*
 * /proc/sys/dev/cdrom/info read handler.
 *
 * Builds a multi-line capability report for every registered drive into
 * the static cdrom_sysctl_settings.info buffer, then hands the result to
 * proc_dostring() for the actual copy to userspace.  Each
 * cdrom_print_info() call appends one row (header + one column per
 * drive); a non-zero return means the buffer filled up, in which case we
 * log a warning but still return whatever fit.
 */
static int cdrom_sysctl_info(ctl_table *ctl, int write,
			     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int pos;
	char *info = cdrom_sysctl_settings.info;
	const int max_size = sizeof(cdrom_sysctl_settings.info);

	/* Nothing to read (zero length, or a follow-up read past EOF). */
	if (!*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	/* cdrom_mutex protects cdrom_list while we walk it below. */
	mutex_lock(&cdrom_mutex);

	pos = sprintf(info, "CD-ROM information, " VERSION "\n");

	if (cdrom_print_info("\ndrive name:\t", 0, info, &pos, CTL_NAME))
		goto done;
	if (cdrom_print_info("\ndrive speed:\t", 0, info, &pos, CTL_SPEED))
		goto done;
	if (cdrom_print_info("\ndrive # of slots:", 0, info, &pos, CTL_SLOTS))
		goto done;
	if (cdrom_print_info("\nCan close tray:\t",
			     CDC_CLOSE_TRAY, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan open tray:\t",
			     CDC_OPEN_TRAY, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan lock tray:\t",
			     CDC_LOCK, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan change speed:",
			     CDC_SELECT_SPEED, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan select disk:",
			     CDC_SELECT_DISC, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan read multisession:",
			     CDC_MULTI_SESSION, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan read MCN:\t",
			     CDC_MCN, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nReports media changed:",
			     CDC_MEDIA_CHANGED, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan play audio:\t",
			     CDC_PLAY_AUDIO, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan write CD-R:\t",
			     CDC_CD_R, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan write CD-RW:",
			     CDC_CD_RW, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan read DVD:\t",
			     CDC_DVD, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan write DVD-R:",
			     CDC_DVD_R, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan write DVD-RAM:",
			     CDC_DVD_RAM, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan read MRW:\t",
			     CDC_MRW, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan write MRW:\t",
			     CDC_MRW_W, info, &pos, CTL_CAPABILITY))
		goto done;
	if (cdrom_print_info("\nCan write RAM:\t",
			     CDC_RAM, info, &pos, CTL_CAPABILITY))
		goto done;
	if (!scnprintf(info + pos, max_size - pos, "\n\n"))
		goto done;
doit:
	mutex_unlock(&cdrom_mutex);
	return proc_dostring(ctl, write, buffer, lenp, ppos);
done:
	/* Buffer overflowed: warn, then fall back through doit to emit
	 * the (truncated) contents anyway. */
	pr_info("info buffer too small\n");
	goto doit;
}

/* Unfortunately, per device settings are not implemented through
   procfs/sysctl yet. When they are, this will naturally disappear.
   For now just update all drives. Later this will become the
   template on which new registered drives will be based. */
/*
 * Propagate the global sysctl option flags (autoclose, autoeject,
 * lockdoor, check_media_type) onto every registered drive's option
 * bits.  Capability-gated flags are only set when the drive actually
 * supports the operation (CDROM_CAN); clearing is unconditional.
 */
static void cdrom_update_settings(void)
{
	struct cdrom_device_info *cdi;

	mutex_lock(&cdrom_mutex);
	list_for_each_entry(cdi, &cdrom_list, list) {
		if (autoclose && CDROM_CAN(CDC_CLOSE_TRAY))
			cdi->options |= CDO_AUTO_CLOSE;
		else if (!autoclose)
			cdi->options &= ~CDO_AUTO_CLOSE;
		if (autoeject && CDROM_CAN(CDC_OPEN_TRAY))
			cdi->options |= CDO_AUTO_EJECT;
		else if (!autoeject)
			cdi->options &= ~CDO_AUTO_EJECT;
		if (lockdoor && CDROM_CAN(CDC_LOCK))
			cdi->options |= CDO_LOCK;
		else if (!lockdoor)
			cdi->options &= ~CDO_LOCK;
		if (check_media_type)
			cdi->options |= CDO_CHECK_TYPE;
		else
			cdi->options &= ~CDO_CHECK_TYPE;
	}
	mutex_unlock(&cdrom_mutex);
}

/*
 * Shared read/write handler for the integer sysctl knobs.  On write,
 * normalize each staged value to 0/1 and push the result onto all
 * registered drives via cdrom_update_settings().
 */
static int cdrom_sysctl_handler(ctl_table *ctl, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write) {
		/* we only care for 1 or 0. */
		autoclose        = !!cdrom_sysctl_settings.autoclose;
		autoeject        = !!cdrom_sysctl_settings.autoeject;
		debug            = !!cdrom_sysctl_settings.debug;
		lockdoor         = !!cdrom_sysctl_settings.lock;
		check_media_type = !!cdrom_sysctl_settings.check;

		/* update the option flags according to the changes. we
		   don't have per device options through sysctl yet,
		   but we will have and then this will disappear. */
		cdrom_update_settings();
	}

	return ret;
}

/* Place files in /proc/sys/dev/cdrom */
static ctl_table cdrom_table[] = {
	{
		.procname	= "info",
		.data		= &cdrom_sysctl_settings.info,
		.maxlen		= CDROM_STR_SIZE,
		.mode		= 0444,
		.proc_handler	= cdrom_sysctl_info,
	},
	{
		.procname	= "autoclose",
		.data		= &cdrom_sysctl_settings.autoclose,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= cdrom_sysctl_handler,
	},
	{
		.procname	= "autoeject",
		.data		= &cdrom_sysctl_settings.autoeject,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= cdrom_sysctl_handler,
	},
	{
		.procname	= "debug",
		.data		= &cdrom_sysctl_settings.debug,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= cdrom_sysctl_handler,
	},
	{
		.procname	= "lock",
		.data		= &cdrom_sysctl_settings.lock,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= cdrom_sysctl_handler,
	},
	{
		.procname	= "check_media",
		.data		= &cdrom_sysctl_settings.check,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= cdrom_sysctl_handler
	},
	{ }	/* sentinel */
};

/* Parent directory: /proc/sys/dev/cdrom */
static ctl_table cdrom_cdrom_table[] = {
	{
		.procname	= "cdrom",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= cdrom_table,
	},
	{ }	/* sentinel */
};

/* Make sure that /proc/sys/dev is there */
static ctl_table cdrom_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= cdrom_cdrom_table,
	},
	{ }	/* sentinel */
};
static struct ctl_table_header *cdrom_sysctl_header;

/*
 * Register the sysctl tree once (guarded by a static flag) and seed
 * the staging struct from the current module-parameter values.
 */
static void cdrom_sysctl_register(void)
{
	static int initialized;

	if (initialized == 1)
		return;

	cdrom_sysctl_header = register_sysctl_table(cdrom_root_table);

	/* set the defaults */
	cdrom_sysctl_settings.autoclose = autoclose;
	cdrom_sysctl_settings.autoeject = autoeject;
	cdrom_sysctl_settings.debug = debug;
	cdrom_sysctl_settings.lock = lockdoor;
	cdrom_sysctl_settings.check = check_media_type;

	initialized = 1;
}

/* Tear down the sysctl tree registered above (no-op if never registered). */
static void cdrom_sysctl_unregister(void)
{
	if (cdrom_sysctl_header)
		unregister_sysctl_table(cdrom_sysctl_header);
}

#else /* CONFIG_SYSCTL */

/* Stubs when sysctl support is compiled out. */
static void cdrom_sysctl_register(void)
{
}

static void cdrom_sysctl_unregister(void)
{
}

#endif /* CONFIG_SYSCTL */
static int __init cdrom_init(void) { cdrom_sysctl_register(); return 0; } static void __exit cdrom_exit(void) { pr_info("Uniform CD-ROM driver unloaded\n"); cdrom_sysctl_unregister(); } module_init(cdrom_init); module_exit(cdrom_exit); MODULE_LICENSE("GPL");
gpl-2.0
supertoast/kernel-2.6.38.6-U8815-Gingerbread
arch/ia64/xen/grant-table.c
3751
4100
/****************************************************************************** * arch/ia64/xen/grant-table.c * * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/mm.h> #include <xen/interface/xen.h> #include <xen/interface/memory.h> #include <xen/grant_table.h> #include <asm/xen/hypervisor.h> struct vm_struct *xen_alloc_vm_area(unsigned long size) { int order; unsigned long virt; unsigned long nr_pages; struct vm_struct *area; order = get_order(size); virt = __get_free_pages(GFP_KERNEL, order); if (virt == 0) goto err0; nr_pages = 1 << order; scrub_pages(virt, nr_pages); area = kmalloc(sizeof(*area), GFP_KERNEL); if (area == NULL) goto err1; area->flags = VM_IOREMAP; area->addr = (void *)virt; area->size = size; area->pages = NULL; area->nr_pages = nr_pages; area->phys_addr = 0; /* xenbus_map_ring_valloc uses this field! */ return area; err1: free_pages(virt, order); err0: return NULL; } EXPORT_SYMBOL_GPL(xen_alloc_vm_area); void xen_free_vm_area(struct vm_struct *area) { unsigned int order = get_order(area->size); unsigned long i; unsigned long phys_addr = __pa(area->addr); /* This area is used for foreign page mappping. 
* So underlying machine page may not be assigned. */ for (i = 0; i < (1 << order); i++) { unsigned long ret; unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i; struct xen_memory_reservation reservation = { .nr_extents = 1, .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gpfn); ret = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); BUG_ON(ret != 1); } free_pages((unsigned long)area->addr, order); kfree(area); } EXPORT_SYMBOL_GPL(xen_free_vm_area); /**************************************************************************** * grant table hack * cmd: GNTTABOP_xxx */ int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, struct grant_entry **__shared) { *__shared = __va(frames[0] << PAGE_SHIFT); return 0; } void arch_gnttab_unmap_shared(struct grant_entry *shared, unsigned long nr_gframes) { /* nothing */ } static void gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop) { uint32_t flags; flags = uop->flags; if (flags & GNTMAP_host_map) { if (flags & GNTMAP_application_map) { printk(KERN_DEBUG "GNTMAP_application_map is not supported yet: " "flags 0x%x\n", flags); BUG(); } if (flags & GNTMAP_contains_pte) { printk(KERN_DEBUG "GNTMAP_contains_pte is not supported yet: " "flags 0x%x\n", flags); BUG(); } } else if (flags & GNTMAP_device_map) { printk("GNTMAP_device_map is not supported yet 0x%x\n", flags); BUG(); /* not yet. actually this flag is not used. */ } else { BUG(); } } int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) { if (cmd == GNTTABOP_map_grant_ref) { unsigned int i; for (i = 0; i < count; i++) { gnttab_map_grant_ref_pre( (struct gnttab_map_grant_ref *)uop + i); } } return xencomm_hypercall_grant_table_op(cmd, uop, count); } EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
gpl-2.0
londbell/ZTE_U988S_JellyBean-4.2.2-Kernel-3.4.35
drivers/staging/octeon/ethernet-rx.c
4775
15546
/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
#include <linux/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-scratch.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

/* Per-CPU NAPI context, padded to its own cache line to avoid false
 * sharing between CPUs. */
struct cvm_napi_wrapper {
	struct napi_struct napi;
} ____cacheline_aligned_in_smp;

static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;

/* Bookkeeping for how many CPUs are (or could be) polling for RX work. */
struct cvm_oct_core_state {
	int baseline_cores;
	/*
	 * The number of additional cores that could be processing
	 * input packets.
	 */
	atomic_t available_cores;
	cpumask_t cpu_state;	/* CPUs currently running the NAPI poll loop */
} ____cacheline_aligned_in_smp;

static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;

/*
 * Schedule NAPI RX polling on the current CPU.  The unused void *
 * parameter exists so this can be invoked via smp_call_function_single().
 */
static void cvm_oct_enable_napi(void *_)
{
	int cpu = smp_processor_id();
	napi_schedule(&cvm_oct_napi[cpu].napi);
}

/*
 * If the configured CPU budget allows it, pick one more idle online CPU
 * and start NAPI polling on it via an IPI.  Called when the RX backlog
 * is larger than the currently-polling CPUs can drain.
 */
static void cvm_oct_enable_one_cpu(void)
{
	int v;
	int cpu;

	/* Check to see if more CPUs are available for receive processing... */
	v = atomic_sub_if_positive(1, &core_state.available_cores);
	if (v < 0)
		return;

	/* ... if a CPU is available, Turn on NAPI polling for that CPU. */
	for_each_online_cpu(cpu) {
		/* cpu_state marks CPUs already polling; claim the first idle one */
		if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
			v = smp_call_function_single(cpu, cvm_oct_enable_napi,
						     NULL, 0);
			if (v)
				panic("Can't enable NAPI.");
			break;
		}
	}
}

/*
 * Called by a CPU leaving the poll loop: CPU 0 re-arms the POW IRQ and
 * keeps waiting; any other CPU returns itself to the available pool.
 */
static void cvm_oct_no_more_work(void)
{
	int cpu = smp_processor_id();

	/*
	 * CPU zero is special.  It always has the irq enabled when
	 * waiting for incoming packets.
	 */
	if (cpu == 0) {
		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
		return;
	}

	cpu_clear(cpu, core_state.cpu_state);
	atomic_add(1, &core_state.available_cores);
}

/**
 * cvm_oct_do_interrupt - interrupt handler.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 *
 */
static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
	/* Disable the IRQ and start napi_poll.  The IRQ is re-enabled
	 * from cvm_oct_no_more_work() once the backlog is drained. */
	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
	cvm_oct_enable_napi(NULL);

	return IRQ_HANDLED;
}

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns Non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS.  Note these packets still get
		 * counted as frame errors.
		 */
	} else if (USE_10MBPS_PREAMBLE_WORKAROUND &&
		   ((work->word2.snoip.err_code == 5) ||
		    (work->word2.snoip.err_code == 7))) {
		/*
		 * We received a packet with either an alignment error
		 * or a FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(work->ipprt);
		int index = cvmx_helper_get_interface_index_num(work->ipprt);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			/* Skip leading 0x55 preamble bytes, then look for the
			 * 0xd5 start-of-frame delimiter (or a nibble-shifted
			 * variant of it). */
			uint8_t *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			while (i < work->len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/*
				   printk_ratelimited("Port %d received 0xd5 preamble\n", work->ipprt);
				 */
				work->packet_ptr.s.addr += i + 1;
				work->len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				/*
				   printk_ratelimited("Port %d received 0x?d preamble\n", work->ipprt);
				 */
				work->packet_ptr.s.addr += i;
				work->len -= i + 4;
				/* Payload is shifted by a nibble: rebuild each
				 * byte from the adjacent half-bytes. */
				for (i = 0; i < work->len; i++) {
					*ptr = ((*ptr & 0xf0) >> 4) |
					       ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				printk_ratelimited("Port %d unknown preamble, packet "
						   "dropped\n", work->ipprt);
				/*
				   cvmx_helper_dump_packet(work);
				 */
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
				   work->ipprt, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	const int coreid = cvmx_get_core_num();
	uint64_t old_group_mask;
	uint64_t old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
		       (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);

	if (USE_ASYNC_IOBDMA) {
		/* Pipeline the first work request asynchronously */
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}

	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (work == NULL) {
			/* No work left: re-arm the POW interrupt for our
			 * group and leave the loop. */
			union cvmx_pow_wq_int wq_int;
			wq_int.u64 = 0;
			wq_int.s.iq_dis = 1 << pow_receive_group;
			wq_int.s.wq_int = 1 << pow_receive_group;
			cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			break;
		}
		/* The skb pointer is stashed just before the packet data by
		 * the FPA fill code — see ethernet-mem.c (same convention). */
		pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			/* Overlap the next work request with this packet */
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}

		if (rx_count == 0) {
			/*
			 * First time through, see if there is enough
			 * work waiting to merit waking another
			 * CPU.
			 */
			union cvmx_pow_wq_int_cntx counts;
			int backlog;
			int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
			counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
			backlog = counts.s.iq_cnt + counts.s.ds_cnt;
			if (backlog > budget * cores_in_use && napi != NULL)
				cvm_oct_enable_one_cpu();
		}

		/* Zero-copy is only possible when the hardware filled an skb
		 * from the FPA pool and the packet fits in one buffer. */
		skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}
		prefetch(cvm_oct_device[work->ipprt]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->len);
			if (!skb) {
				printk_ratelimited("Port %d failed to allocate "
						   "skbuff, packet dropped\n",
						   work->ipprt);
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				uint8_t *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				memcpy(skb_put(skb, work->len), ptr, work->len);
				/* No packet buffers to free */
			} else {
				/* Multi-buffer packet: walk the buffer chain
				 * and copy each segment into the skb. */
				int segments = work->word2.s.bufs;
				union cvmx_buf_ptr segment_ptr = work->packet_ptr;
				int len = work->len;

				while (segments--) {
					union cvmx_buf_ptr next_ptr =
					    *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
					/*
					 * Octeon Errata PKI-100: The segment size is
					 * wrong. Until it is fixed, calculate the
					 * segment size based on the packet pool
					 * buffer size. When it is fixed, the
					 * following line should be replaced with this
					 * one: int segment_size =
					 * segment_ptr.s.size;
					 */
					int segment_size = CVMX_FPA_PACKET_POOL_SIZE -
						(segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
					/*
					 * Don't copy more than what
					 * is left in the packet.
					 */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					memcpy(skb_put(skb, segment_size),
					       cvmx_phys_to_ptr(segment_ptr.s.addr),
					       segment_size);
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}

		if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[work->ipprt])) {
			struct net_device *dev = cvm_oct_device[work->ipprt];
			struct octeon_ethernet *priv = netdev_priv(dev);

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc ||
					work->word2.s.L4_error || !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
					atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
					atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
#else
					atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
					atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
#endif
				}
				netif_receive_skb(skb);
				rx_count++;
			} else {
				/* Drop any packet received for a device that isn't up */
				/*
				   printk_ratelimited("%s: Device not up, packet dropped\n",
					   dev->name);
				 */
#ifdef CONFIG_64BIT
				atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
#else
				atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
#endif
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   work->ipprt);
			dev_kfree_skb_irq(skb);
		}
		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 1);
			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
		} else {
			cvm_oct_free_work(work);
		}
	}
	/* Restore the original POW group mask */
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
	cvm_oct_rx_refill_pool(0);

	if (rx_count < budget && napi != NULL) {
		/* No more work */
		napi_complete(napi);
		cvm_oct_no_more_work();
	}
	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll for receive packets
 * device.
 *
 * @dev:    Device to poll. Unused
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	cvm_oct_napi_poll(NULL, 16);
}
#endif

/*
 * One-time RX setup: create a NAPI context per possible CPU, compute the
 * CPU budget (bounded by the max_rx_cpus module parameter), hook up the
 * POW group IRQ, program the POW interrupt thresholds, and kick off the
 * first polling CPU.
 */
void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;
	union cvmx_pow_wq_int_thrx int_thr;
	union cvmx_pow_wq_int_pc int_pc;

	/* NAPI needs some net_device to attach to; use the first one found */
	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (NULL == dev_for_napi)
		panic("No net_devices were allocated.");

	if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
		atomic_set(&core_state.available_cores, max_rx_cpus);
	else
		atomic_set(&core_state.available_cores, num_online_cpus());
	core_state.baseline_cores = atomic_read(&core_state.available_cores);

	core_state.cpu_state = CPU_MASK_NONE;
	for_each_possible_cpu(i) {
		netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
			       cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&cvm_oct_napi[i].napi);
	}
	/* Register an IRQ handler to receive POW interrupts */
	i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
			cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n",
		      OCTEON_IRQ_WORKQ0 + pow_receive_group);

	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);

	int_thr.u64 = 0;
	int_thr.s.tc_en = 1;
	int_thr.s.tc_thr = 1;
	/* Enable POW interrupt when our port has at least one packet */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);

	int_pc.u64 = 0;
	int_pc.s.pc_thr = 5;
	cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);

	/* Schedule NAPI now. This will indirectly enable interrupts. */
	cvm_oct_enable_one_cpu();
}

/* Tear down all per-CPU NAPI contexts created by cvm_oct_rx_initialize(). */
void cvm_oct_rx_shutdown(void)
{
	int i;
	/* Shutdown all of the NAPIs */
	for_each_possible_cpu(i)
		netif_napi_del(&cvm_oct_napi[i].napi);
}
gpl-2.0
nychitman1/android_kernel_asus_flo
arch/arm/mach-sa1100/irq.c
4775
7235
/*
 * linux/arch/arm/mach-sa1100/irq.c
 *
 * Copyright (C) 1999-2001 Nicolas Pitre
 *
 * Generic IRQ handling for the SA11x0, GPIO 11-27 IRQ demultiplexing.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/ioport.h>
#include <linux/syscore_ops.h>

#include <mach/hardware.h>
#include <mach/irqs.h>
#include <asm/mach/irq.h>

#include "generic.h"

/*
 * SA1100 GPIO edge detection for IRQs:
 * IRQs are generated on Falling-Edge, Rising-Edge, or both.
 * Use this instead of directly setting GRER/GFER.
 *
 * These software shadows track the requested edge configuration for all
 * GPIO IRQs; the hardware GRER/GFER registers are always rewritten from
 * them, filtered by GPIO_IRQ_mask (the set of currently unmasked GPIOs).
 */
static int GPIO_IRQ_rising_edge;
static int GPIO_IRQ_falling_edge;
static int GPIO_IRQ_mask = (1 << 11) - 1;	/* GPIOs 0-10 start unmasked */

/*
 * To get the GPIO number from an IRQ number
 */
#define GPIO_11_27_IRQ(i)	((i) - 21)
#define GPIO11_27_MASK(irq)	(1 << GPIO_11_27_IRQ(irq))

/*
 * irq_set_type callback shared by both GPIO irq_chips: record the
 * requested edge(s) in the shadow variables, then push them to GRER/GFER.
 * IRQ_TYPE_PROBE keeps an already-configured line as-is, otherwise it
 * enables both edges for probing.
 */
static int sa1100_gpio_type(struct irq_data *d, unsigned int type)
{
	unsigned int mask;

	/* IRQs 0-10 map 1:1 to GPIOs 0-10; higher IRQs use the 11-27 macro */
	if (d->irq <= 10)
		mask = 1 << d->irq;
	else
		mask = GPIO11_27_MASK(d->irq);

	if (type == IRQ_TYPE_PROBE) {
		if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask)
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & IRQ_TYPE_EDGE_RISING) {
		GPIO_IRQ_rising_edge |= mask;
	} else
		GPIO_IRQ_rising_edge &= ~mask;
	if (type & IRQ_TYPE_EDGE_FALLING) {
		GPIO_IRQ_falling_edge |= mask;
	} else
		GPIO_IRQ_falling_edge &= ~mask;

	GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask;
	GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;

	return 0;
}

/*
 * GPIO IRQs must be acknowledged.  This is for IRQs from 0 to 10.
 */
static void sa1100_low_gpio_ack(struct irq_data *d)
{
	/* Writing a 1 bit to GEDR clears that edge-detect status */
	GEDR = (1 << d->irq);
}

static void sa1100_low_gpio_mask(struct irq_data *d)
{
	ICMR &= ~(1 << d->irq);
}

static void sa1100_low_gpio_unmask(struct irq_data *d)
{
	ICMR |= 1 << d->irq;
}

/* Enable/disable wake-up on a low GPIO via the PWER register. */
static int sa1100_low_gpio_wake(struct irq_data *d, unsigned int on)
{
	if (on)
		PWER |= 1 << d->irq;
	else
		PWER &= ~(1 << d->irq);
	return 0;
}

static struct irq_chip sa1100_low_gpio_chip = {
	.name		= "GPIO-l",
	.irq_ack	= sa1100_low_gpio_ack,
	.irq_mask	= sa1100_low_gpio_mask,
	.irq_unmask	= sa1100_low_gpio_unmask,
	.irq_set_type	= sa1100_gpio_type,
	.irq_set_wake	= sa1100_low_gpio_wake,
};

/*
 * IRQ11 (GPIO11 through 27) handler.  We enter here with the
 * irq_controller_lock held, and IRQs disabled.  Decode the IRQ
 * and call the handler.
 */
static void sa1100_high_gpio_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned int mask;

	/* 0xfffff800 selects the GEDR status bits for GPIOs 11-27 */
	mask = GEDR & 0xfffff800;
	do {
		/*
		 * clear down all currently active IRQ sources.
		 * We will be processing them all.
		 */
		GEDR = mask;

		irq = IRQ_GPIO11;
		mask >>= 11;
		do {
			if (mask & 1)
				generic_handle_irq(irq);
			mask >>= 1;
			irq++;
		} while (mask);

		/* Re-read in case new edges arrived while we were handling */
		mask = GEDR & 0xfffff800;
	} while (mask);
}

/*
 * Like GPIO0 to 10, GPIO11-27 IRQs need to be handled specially.
 * In addition, the IRQs are all collected up into one bit in the
 * interrupt controller registers.
 */
static void sa1100_high_gpio_ack(struct irq_data *d)
{
	unsigned int mask = GPIO11_27_MASK(d->irq);

	GEDR = mask;
}

static void sa1100_high_gpio_mask(struct irq_data *d)
{
	unsigned int mask = GPIO11_27_MASK(d->irq);

	/* Also clear the hardware edge enables so the line can't latch */
	GPIO_IRQ_mask &= ~mask;

	GRER &= ~mask;
	GFER &= ~mask;
}

static void sa1100_high_gpio_unmask(struct irq_data *d)
{
	unsigned int mask = GPIO11_27_MASK(d->irq);

	GPIO_IRQ_mask |= mask;

	/* Restore the configured edges from the software shadows */
	GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask;
	GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
}

/* Enable/disable wake-up on a high GPIO (11-27) via PWER. */
static int sa1100_high_gpio_wake(struct irq_data *d, unsigned int on)
{
	if (on)
		PWER |= GPIO11_27_MASK(d->irq);
	else
		PWER &= ~GPIO11_27_MASK(d->irq);
	return 0;
}

static struct irq_chip sa1100_high_gpio_chip = {
	.name		= "GPIO-h",
	.irq_ack	= sa1100_high_gpio_ack,
	.irq_mask	= sa1100_high_gpio_mask,
	.irq_unmask	= sa1100_high_gpio_unmask,
	.irq_set_type	= sa1100_gpio_type,
	.irq_set_wake	= sa1100_high_gpio_wake,
};

/*
 * We don't need to ACK IRQs on the SA1100 unless they're GPIOs
 * this is for internal IRQs i.e. from 11 to 31.
 */
static void sa1100_mask_irq(struct irq_data *d)
{
	ICMR &= ~(1 << d->irq);
}

static void sa1100_unmask_irq(struct irq_data *d)
{
	ICMR |= (1 << d->irq);
}

/*
 * Apart from GPIOs, only the RTC alarm can be a wakeup event.
 */
static int sa1100_set_wake(struct irq_data *d, unsigned int on)
{
	if (d->irq == IRQ_RTCAlrm) {
		if (on)
			PWER |= PWER_RTC;
		else
			PWER &= ~PWER_RTC;
		return 0;
	}
	return -EINVAL;
}

static struct irq_chip sa1100_normal_chip = {
	.name		= "SC",
	/* level IRQs have no distinct ack; masking doubles as the ack */
	.irq_ack	= sa1100_mask_irq,
	.irq_mask	= sa1100_mask_irq,
	.irq_unmask	= sa1100_unmask_irq,
	.irq_set_wake	= sa1100_set_wake,
};

static struct resource irq_resource =
	DEFINE_RES_MEM_NAMED(0x90050000, SZ_64K, "irqs");

/* Interrupt-controller register snapshot taken across suspend/resume. */
static struct sa1100irq_state {
	unsigned int	saved;	/* non-zero once a snapshot has been taken */
	unsigned int	icmr;
	unsigned int	iclr;
	unsigned int	iccr;
} sa1100irq_state;

/*
 * syscore suspend hook: save the controller state, then reconfigure the
 * GPIO edge registers so that only PWER-selected wakeup sources can
 * trigger, and clear anything already pending.
 */
static int sa1100irq_suspend(void)
{
	struct sa1100irq_state *st = &sa1100irq_state;

	st->saved = 1;
	st->icmr = ICMR;
	st->iclr = ICLR;
	st->iccr = ICCR;
	/*
	 * Disable all GPIO-based interrupts.
	 */
	ICMR &= ~(IC_GPIO11_27|IC_GPIO10|IC_GPIO9|IC_GPIO8|IC_GPIO7|
		  IC_GPIO6|IC_GPIO5|IC_GPIO4|IC_GPIO3|IC_GPIO2|
		  IC_GPIO1|IC_GPIO0);
	/*
	 * Set the appropriate edges for wakeup.
	 */
	GRER = PWER & GPIO_IRQ_rising_edge;
	GFER = PWER & GPIO_IRQ_falling_edge;
	/*
	 * Clear any pending GPIO interrupts.
	 */
	GEDR = GEDR;

	return 0;
}

/* syscore resume hook: restore the state saved by sa1100irq_suspend(). */
static void sa1100irq_resume(void)
{
	struct sa1100irq_state *st = &sa1100irq_state;

	if (st->saved) {
		ICCR = st->iccr;
		ICLR = st->iclr;

		GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask;
		GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;

		/* unmask last, after the edge registers are back in place */
		ICMR = st->icmr;
	}
}

static struct syscore_ops sa1100irq_syscore_ops = {
	.suspend	= sa1100irq_suspend,
	.resume		= sa1100irq_resume,
};

static int __init sa1100irq_init_devicefs(void)
{
	register_syscore_ops(&sa1100irq_syscore_ops);
	return 0;
}

device_initcall(sa1100irq_init_devicefs);

/*
 * Board-init entry point: quiesce the interrupt controller, register the
 * three irq_chips (low GPIOs, internal sources, high GPIOs), and install
 * the chained demultiplexer for the shared GPIO 11-27 line.
 */
void __init sa1100_init_irq(void)
{
	unsigned int irq;

	request_resource(&iomem_resource, &irq_resource);

	/* disable all IRQs */
	ICMR = 0;

	/* all IRQs are IRQ, not FIQ */
	ICLR = 0;

	/* clear all GPIO edge detects */
	GFER = 0;
	GRER = 0;
	GEDR = -1;

	/*
	 * Whatever the doc says, this has to be set for the wait-on-irq
	 * instruction to work... on a SA1100 rev 9 at least.
	 */
	ICCR = 1;

	for (irq = 0; irq <= 10; irq++) {
		irq_set_chip_and_handler(irq, &sa1100_low_gpio_chip,
					 handle_edge_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}

	for (irq = 12; irq <= 31; irq++) {
		irq_set_chip_and_handler(irq, &sa1100_normal_chip,
					 handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}

	for (irq = 32; irq <= 48; irq++) {
		irq_set_chip_and_handler(irq, &sa1100_high_gpio_chip,
					 handle_edge_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}

	/*
	 * Install handler for GPIO 11-27 edge detect interrupts
	 */
	irq_set_chip(IRQ_GPIO11_27, &sa1100_normal_chip);
	irq_set_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler);

	sa1100_init_gpio();
}
gpl-2.0
nitrogen-devs/android_NitrogenEx_kernel
drivers/ide/ali14xx.c
5031
6615
/* * Copyright (C) 1996 Linus Torvalds & author (see below) */ /* * ALI M14xx chipset EIDE controller * * Works for ALI M1439/1443/1445/1487/1489 chipsets. * * Adapted from code developed by derekn@vw.ece.cmu.edu. -ml * Derek's notes follow: * * I think the code should be pretty understandable, * but I'll be happy to (try to) answer questions. * * The critical part is in the setupDrive function. The initRegisters * function doesn't seem to be necessary, but the DOS driver does it, so * I threw it in. * * I've only tested this on my system, which only has one disk. I posted * it to comp.sys.linux.hardware, so maybe some other people will try it * out. * * Derek Noonburg (derekn@ece.cmu.edu) * 95-sep-26 * * Update 96-jul-13: * * I've since upgraded to two disks and a CD-ROM, with no trouble, and * I've also heard from several others who have used it successfully. * This driver appears to work with both the 1443/1445 and the 1487/1489 * chipsets. I've added support for PIO mode 4 for the 1487. This * seems to work just fine on the 1443 also, although I'm not sure it's * advertised as supporting mode 4. (I've been running a WDC AC21200 in * mode 4 for a while now with no trouble.) 
-Derek */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/ioport.h> #include <linux/blkdev.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/io.h> #define DRV_NAME "ali14xx" /* port addresses for auto-detection */ #define ALI_NUM_PORTS 4 static const int ports[ALI_NUM_PORTS] __initdata = { 0x074, 0x0f4, 0x034, 0x0e4 }; /* register initialization data */ typedef struct { u8 reg, data; } RegInitializer; static const RegInitializer initData[] __initdata = { {0x01, 0x0f}, {0x02, 0x00}, {0x03, 0x00}, {0x04, 0x00}, {0x05, 0x00}, {0x06, 0x00}, {0x07, 0x2b}, {0x0a, 0x0f}, {0x25, 0x00}, {0x26, 0x00}, {0x27, 0x00}, {0x28, 0x00}, {0x29, 0x00}, {0x2a, 0x00}, {0x2f, 0x00}, {0x2b, 0x00}, {0x2c, 0x00}, {0x2d, 0x00}, {0x2e, 0x00}, {0x30, 0x00}, {0x31, 0x00}, {0x32, 0x00}, {0x33, 0x00}, {0x34, 0xff}, {0x35, 0x03}, {0x00, 0x00} }; /* timing parameter registers for each drive */ static struct { u8 reg1, reg2, reg3, reg4; } regTab[4] = { {0x03, 0x26, 0x04, 0x27}, /* drive 0 */ {0x05, 0x28, 0x06, 0x29}, /* drive 1 */ {0x2b, 0x30, 0x2c, 0x31}, /* drive 2 */ {0x2d, 0x32, 0x2e, 0x33}, /* drive 3 */ }; static int basePort; /* base port address */ static int regPort; /* port for register number */ static int dataPort; /* port for register data */ static u8 regOn; /* output to base port to access registers */ static u8 regOff; /* output to base port to close registers */ /*------------------------------------------------------------------------*/ /* * Read a controller register. */ static inline u8 inReg(u8 reg) { outb_p(reg, regPort); return inb(dataPort); } /* * Write a controller register. */ static void outReg(u8 data, u8 reg) { outb_p(reg, regPort); outb_p(data, dataPort); } static DEFINE_SPINLOCK(ali14xx_lock); /* * Set PIO mode for the specified drive. * This function computes timing parameters * and sets controller registers accordingly. 
*/ static void ali14xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { int driveNum; int time1, time2; u8 param1, param2, param3, param4; unsigned long flags; int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50; const u8 pio = drive->pio_mode - XFER_PIO_0; struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); /* calculate timing, according to PIO mode */ time1 = ide_pio_cycle_time(drive, pio); time2 = t->active; param3 = param1 = (time2 * bus_speed + 999) / 1000; param4 = param2 = (time1 * bus_speed + 999) / 1000 - param1; if (pio < 3) { param3 += 8; param4 += 8; } printk(KERN_DEBUG "%s: PIO mode%d, t1=%dns, t2=%dns, cycles = %d+%d, %d+%d\n", drive->name, pio, time1, time2, param1, param2, param3, param4); /* stuff timing parameters into controller registers */ driveNum = (drive->hwif->index << 1) + (drive->dn & 1); spin_lock_irqsave(&ali14xx_lock, flags); outb_p(regOn, basePort); outReg(param1, regTab[driveNum].reg1); outReg(param2, regTab[driveNum].reg2); outReg(param3, regTab[driveNum].reg3); outReg(param4, regTab[driveNum].reg4); outb_p(regOff, basePort); spin_unlock_irqrestore(&ali14xx_lock, flags); } /* * Auto-detect the IDE controller port. */ static int __init findPort(void) { int i; u8 t; unsigned long flags; local_irq_save(flags); for (i = 0; i < ALI_NUM_PORTS; ++i) { basePort = ports[i]; regOff = inb(basePort); for (regOn = 0x30; regOn <= 0x33; ++regOn) { outb_p(regOn, basePort); if (inb(basePort) == regOn) { regPort = basePort + 4; dataPort = basePort + 8; t = inReg(0) & 0xf0; outb_p(regOff, basePort); local_irq_restore(flags); if (t != 0x50) return 0; return 1; /* success */ } } outb_p(regOff, basePort); } local_irq_restore(flags); return 0; } /* * Initialize controller registers with default values. 
*/ static int __init initRegisters(void) { const RegInitializer *p; u8 t; unsigned long flags; local_irq_save(flags); outb_p(regOn, basePort); for (p = initData; p->reg != 0; ++p) outReg(p->data, p->reg); outb_p(0x01, regPort); t = inb(regPort) & 0x01; outb_p(regOff, basePort); local_irq_restore(flags); return t; } static const struct ide_port_ops ali14xx_port_ops = { .set_pio_mode = ali14xx_set_pio_mode, }; static const struct ide_port_info ali14xx_port_info = { .name = DRV_NAME, .chipset = ide_ali14xx, .port_ops = &ali14xx_port_ops, .host_flags = IDE_HFLAG_NO_DMA, .pio_mask = ATA_PIO4, }; static int __init ali14xx_probe(void) { printk(KERN_DEBUG "ali14xx: base=0x%03x, regOn=0x%02x.\n", basePort, regOn); /* initialize controller registers */ if (!initRegisters()) { printk(KERN_ERR "ali14xx: Chip initialization failed.\n"); return 1; } return ide_legacy_device_add(&ali14xx_port_info, 0); } static bool probe_ali14xx; module_param_named(probe, probe_ali14xx, bool, 0); MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets"); static int __init ali14xx_init(void) { if (probe_ali14xx == 0) goto out; /* auto-detect IDE controller port */ if (findPort()) { if (ali14xx_probe()) return -ENODEV; return 0; } printk(KERN_ERR "ali14xx: not found.\n"); out: return -ENODEV; } module_init(ali14xx_init); MODULE_AUTHOR("see local file"); MODULE_DESCRIPTION("support of ALI 14XX IDE chipsets"); MODULE_LICENSE("GPL");
gpl-2.0
OnePlusOSS/android_kernel_oneplus_msm8974
sound/soc/davinci/davinci-i2s.c
5031
23357
/* * ALSA SoC I2S (McBSP) Audio Layer for TI DAVINCI processor * * Author: Vladimir Barinov, <vbarinov@embeddedalley.com> * Copyright: (C) 2007 MontaVista Software, Inc., <source@mvista.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/clk.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include <sound/soc.h> #include <mach/asp.h> #include "davinci-pcm.h" #include "davinci-i2s.h" /* * NOTE: terminology here is confusing. * * - This driver supports the "Audio Serial Port" (ASP), * found on dm6446, dm355, and other DaVinci chips. * * - But it labels it a "Multi-channel Buffered Serial Port" * (McBSP) as on older chips like the dm642 ... which was * backward-compatible, possibly explaining that confusion. * * - OMAP chips have a controller called McBSP, which is * incompatible with the DaVinci flavor of McBSP. * * - Newer DaVinci chips have a controller called McASP, * incompatible with ASP and with either McBSP. * * In short: this uses ASP to implement I2S, not McBSP. * And it won't be the only DaVinci implemention of I2S. 
*/ #define DAVINCI_MCBSP_DRR_REG 0x00 #define DAVINCI_MCBSP_DXR_REG 0x04 #define DAVINCI_MCBSP_SPCR_REG 0x08 #define DAVINCI_MCBSP_RCR_REG 0x0c #define DAVINCI_MCBSP_XCR_REG 0x10 #define DAVINCI_MCBSP_SRGR_REG 0x14 #define DAVINCI_MCBSP_PCR_REG 0x24 #define DAVINCI_MCBSP_SPCR_RRST (1 << 0) #define DAVINCI_MCBSP_SPCR_RINTM(v) ((v) << 4) #define DAVINCI_MCBSP_SPCR_XRST (1 << 16) #define DAVINCI_MCBSP_SPCR_XINTM(v) ((v) << 20) #define DAVINCI_MCBSP_SPCR_GRST (1 << 22) #define DAVINCI_MCBSP_SPCR_FRST (1 << 23) #define DAVINCI_MCBSP_SPCR_FREE (1 << 25) #define DAVINCI_MCBSP_RCR_RWDLEN1(v) ((v) << 5) #define DAVINCI_MCBSP_RCR_RFRLEN1(v) ((v) << 8) #define DAVINCI_MCBSP_RCR_RDATDLY(v) ((v) << 16) #define DAVINCI_MCBSP_RCR_RFIG (1 << 18) #define DAVINCI_MCBSP_RCR_RWDLEN2(v) ((v) << 21) #define DAVINCI_MCBSP_RCR_RFRLEN2(v) ((v) << 24) #define DAVINCI_MCBSP_RCR_RPHASE BIT(31) #define DAVINCI_MCBSP_XCR_XWDLEN1(v) ((v) << 5) #define DAVINCI_MCBSP_XCR_XFRLEN1(v) ((v) << 8) #define DAVINCI_MCBSP_XCR_XDATDLY(v) ((v) << 16) #define DAVINCI_MCBSP_XCR_XFIG (1 << 18) #define DAVINCI_MCBSP_XCR_XWDLEN2(v) ((v) << 21) #define DAVINCI_MCBSP_XCR_XFRLEN2(v) ((v) << 24) #define DAVINCI_MCBSP_XCR_XPHASE BIT(31) #define DAVINCI_MCBSP_SRGR_FWID(v) ((v) << 8) #define DAVINCI_MCBSP_SRGR_FPER(v) ((v) << 16) #define DAVINCI_MCBSP_SRGR_FSGM (1 << 28) #define DAVINCI_MCBSP_SRGR_CLKSM BIT(29) #define DAVINCI_MCBSP_PCR_CLKRP (1 << 0) #define DAVINCI_MCBSP_PCR_CLKXP (1 << 1) #define DAVINCI_MCBSP_PCR_FSRP (1 << 2) #define DAVINCI_MCBSP_PCR_FSXP (1 << 3) #define DAVINCI_MCBSP_PCR_SCLKME (1 << 7) #define DAVINCI_MCBSP_PCR_CLKRM (1 << 8) #define DAVINCI_MCBSP_PCR_CLKXM (1 << 9) #define DAVINCI_MCBSP_PCR_FSRM (1 << 10) #define DAVINCI_MCBSP_PCR_FSXM (1 << 11) enum { DAVINCI_MCBSP_WORD_8 = 0, DAVINCI_MCBSP_WORD_12, DAVINCI_MCBSP_WORD_16, DAVINCI_MCBSP_WORD_20, DAVINCI_MCBSP_WORD_24, DAVINCI_MCBSP_WORD_32, }; static const unsigned char data_type[SNDRV_PCM_FORMAT_S32_LE + 1] = { [SNDRV_PCM_FORMAT_S8] = 1, 
[SNDRV_PCM_FORMAT_S16_LE] = 2, [SNDRV_PCM_FORMAT_S32_LE] = 4, }; static const unsigned char asp_word_length[SNDRV_PCM_FORMAT_S32_LE + 1] = { [SNDRV_PCM_FORMAT_S8] = DAVINCI_MCBSP_WORD_8, [SNDRV_PCM_FORMAT_S16_LE] = DAVINCI_MCBSP_WORD_16, [SNDRV_PCM_FORMAT_S32_LE] = DAVINCI_MCBSP_WORD_32, }; static const unsigned char double_fmt[SNDRV_PCM_FORMAT_S32_LE + 1] = { [SNDRV_PCM_FORMAT_S8] = SNDRV_PCM_FORMAT_S16_LE, [SNDRV_PCM_FORMAT_S16_LE] = SNDRV_PCM_FORMAT_S32_LE, }; struct davinci_mcbsp_dev { struct device *dev; struct davinci_pcm_dma_params dma_params[2]; void __iomem *base; #define MOD_DSP_A 0 #define MOD_DSP_B 1 int mode; u32 pcr; struct clk *clk; /* * Combining both channels into 1 element will at least double the * amount of time between servicing the dma channel, increase * effiency, and reduce the chance of overrun/underrun. But, * it will result in the left & right channels being swapped. * * If relabeling the left and right channels is not possible, * you may want to let the codec know to swap them back. * * It may allow x10 the amount of time to service dma requests, * if the codec is master and is using an unnecessarily fast bit clock * (ie. tlvaic23b), independent of the sample rate. So, having an * entire frame at once means it can be serviced at the sample rate * instead of the bit clock rate. * * In the now unlikely case that an underrun still * occurs, both the left and right samples will be repeated * so that no pops are heard, and the left and right channels * won't end up being swapped because of the underrun. 
*/ unsigned enable_channel_combine:1; unsigned int fmt; int clk_div; int clk_input_pin; bool i2s_accurate_sck; }; static inline void davinci_mcbsp_write_reg(struct davinci_mcbsp_dev *dev, int reg, u32 val) { __raw_writel(val, dev->base + reg); } static inline u32 davinci_mcbsp_read_reg(struct davinci_mcbsp_dev *dev, int reg) { return __raw_readl(dev->base + reg); } static void toggle_clock(struct davinci_mcbsp_dev *dev, int playback) { u32 m = playback ? DAVINCI_MCBSP_PCR_CLKXP : DAVINCI_MCBSP_PCR_CLKRP; /* The clock needs to toggle to complete reset. * So, fake it by toggling the clk polarity. */ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr ^ m); davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr); } static void davinci_mcbsp_start(struct davinci_mcbsp_dev *dev, struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_platform *platform = rtd->platform; int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); u32 spcr; u32 mask = playback ? 
DAVINCI_MCBSP_SPCR_XRST : DAVINCI_MCBSP_SPCR_RRST; spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); if (spcr & mask) { /* start off disabled */ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr & ~mask); toggle_clock(dev, playback); } if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM)) { /* Start the sample generator */ spcr |= DAVINCI_MCBSP_SPCR_GRST; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); } if (playback) { /* Stop the DMA to avoid data loss */ /* while the transmitter is out of reset to handle XSYNCERR */ if (platform->driver->ops->trigger) { int ret = platform->driver->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP); if (ret < 0) printk(KERN_DEBUG "Playback DMA stop failed\n"); } /* Enable the transmitter */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); spcr |= DAVINCI_MCBSP_SPCR_XRST; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); /* wait for any unexpected frame sync error to occur */ udelay(100); /* Disable the transmitter to clear any outstanding XSYNCERR */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); spcr &= ~DAVINCI_MCBSP_SPCR_XRST; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); toggle_clock(dev, playback); /* Restart the DMA */ if (platform->driver->ops->trigger) { int ret = platform->driver->ops->trigger(substream, SNDRV_PCM_TRIGGER_START); if (ret < 0) printk(KERN_DEBUG "Playback DMA start failed\n"); } } /* Enable transmitter or receiver */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); spcr |= mask; if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM)) { /* Start frame sync */ spcr |= DAVINCI_MCBSP_SPCR_FRST; } davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); } static void davinci_mcbsp_stop(struct davinci_mcbsp_dev *dev, int playback) { u32 spcr; /* Reset transmitter/receiver and sample rate/frame sync generators */ spcr = davinci_mcbsp_read_reg(dev, 
DAVINCI_MCBSP_SPCR_REG); spcr &= ~(DAVINCI_MCBSP_SPCR_GRST | DAVINCI_MCBSP_SPCR_FRST); spcr &= playback ? ~DAVINCI_MCBSP_SPCR_XRST : ~DAVINCI_MCBSP_SPCR_RRST; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); toggle_clock(dev, playback); } #define DEFAULT_BITPERSAMPLE 16 static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(cpu_dai); unsigned int pcr; unsigned int srgr; bool inv_fs = false; /* Attention srgr is updated by hw_params! */ srgr = DAVINCI_MCBSP_SRGR_FSGM | DAVINCI_MCBSP_SRGR_FPER(DEFAULT_BITPERSAMPLE * 2 - 1) | DAVINCI_MCBSP_SRGR_FWID(DEFAULT_BITPERSAMPLE - 1); dev->fmt = fmt; /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: /* cpu is master */ pcr = DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM; break; case SND_SOC_DAIFMT_CBM_CFS: pcr = DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_FSXM; /* * Selection of the clock input pin that is the * input for the Sample Rate Generator. * McBSP FSR and FSX are driven by the Sample Rate * Generator. */ switch (dev->clk_input_pin) { case MCBSP_CLKS: pcr |= DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM; break; case MCBSP_CLKR: pcr |= DAVINCI_MCBSP_PCR_SCLKME; break; default: dev_err(dev->dev, "bad clk_input_pin\n"); return -EINVAL; } break; case SND_SOC_DAIFMT_CBM_CFM: /* codec is master */ pcr = 0; break; default: printk(KERN_ERR "%s:bad master\n", __func__); return -EINVAL; } /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: /* Davinci doesn't support TRUE I2S, but some codecs will have * the left and right channels contiguous. This allows * dsp_a mode to be used with an inverted normal frame clk. * If your codec is master and does not have contiguous * channels, then you will have sound on only one channel. * Try using a different mode, or codec as slave. 
* * The TLV320AIC33 is an example of a codec where this works. * It has a variable bit clock frequency allowing it to have * valid data on every bit clock. * * The TLV320AIC23 is an example of a codec where this does not * work. It has a fixed bit clock frequency with progressively * more empty bit clock slots between channels as the sample * rate is lowered. */ inv_fs = true; case SND_SOC_DAIFMT_DSP_A: dev->mode = MOD_DSP_A; break; case SND_SOC_DAIFMT_DSP_B: dev->mode = MOD_DSP_B; break; default: printk(KERN_ERR "%s:bad format\n", __func__); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: /* CLKRP Receive clock polarity, * 1 - sampled on rising edge of CLKR * valid on rising edge * CLKXP Transmit clock polarity, * 1 - clocked on falling edge of CLKX * valid on rising edge * FSRP Receive frame sync pol, 0 - active high * FSXP Transmit frame sync pol, 0 - active high */ pcr |= (DAVINCI_MCBSP_PCR_CLKXP | DAVINCI_MCBSP_PCR_CLKRP); break; case SND_SOC_DAIFMT_IB_IF: /* CLKRP Receive clock polarity, * 0 - sampled on falling edge of CLKR * valid on falling edge * CLKXP Transmit clock polarity, * 0 - clocked on rising edge of CLKX * valid on falling edge * FSRP Receive frame sync pol, 1 - active low * FSXP Transmit frame sync pol, 1 - active low */ pcr |= (DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP); break; case SND_SOC_DAIFMT_NB_IF: /* CLKRP Receive clock polarity, * 1 - sampled on rising edge of CLKR * valid on rising edge * CLKXP Transmit clock polarity, * 1 - clocked on falling edge of CLKX * valid on rising edge * FSRP Receive frame sync pol, 1 - active low * FSXP Transmit frame sync pol, 1 - active low */ pcr |= (DAVINCI_MCBSP_PCR_CLKXP | DAVINCI_MCBSP_PCR_CLKRP | DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP); break; case SND_SOC_DAIFMT_IB_NF: /* CLKRP Receive clock polarity, * 0 - sampled on falling edge of CLKR * valid on falling edge * CLKXP Transmit clock polarity, * 0 - clocked on rising edge of CLKX * valid on 
falling edge * FSRP Receive frame sync pol, 0 - active high * FSXP Transmit frame sync pol, 0 - active high */ break; default: return -EINVAL; } if (inv_fs == true) pcr ^= (DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP); davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr); dev->pcr = pcr; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, pcr); return 0; } static int davinci_i2s_dai_set_clkdiv(struct snd_soc_dai *cpu_dai, int div_id, int div) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(cpu_dai); if (div_id != DAVINCI_MCBSP_CLKGDV) return -ENODEV; dev->clk_div = div; return 0; } static int davinci_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); struct davinci_pcm_dma_params *dma_params = &dev->dma_params[substream->stream]; struct snd_interval *i = NULL; int mcbsp_word_length, master; unsigned int rcr, xcr, srgr, clk_div, freq, framesize; u32 spcr; snd_pcm_format_t fmt; unsigned element_cnt = 1; /* general line settings */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { spcr |= DAVINCI_MCBSP_SPCR_RINTM(3) | DAVINCI_MCBSP_SPCR_FREE; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); } else { spcr |= DAVINCI_MCBSP_SPCR_XINTM(3) | DAVINCI_MCBSP_SPCR_FREE; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); } master = dev->fmt & SND_SOC_DAIFMT_MASTER_MASK; fmt = params_format(params); mcbsp_word_length = asp_word_length[fmt]; switch (master) { case SND_SOC_DAIFMT_CBS_CFS: freq = clk_get_rate(dev->clk); srgr = DAVINCI_MCBSP_SRGR_FSGM | DAVINCI_MCBSP_SRGR_CLKSM; srgr |= DAVINCI_MCBSP_SRGR_FWID(mcbsp_word_length * 8 - 1); if (dev->i2s_accurate_sck) { clk_div = 256; do { framesize = (freq / (--clk_div)) / params->rate_num * params->rate_den; } while (((framesize < 33) || (framesize > 4095)) && (clk_div)); clk_div--; srgr |= 
DAVINCI_MCBSP_SRGR_FPER(framesize - 1); } else { /* symmetric waveforms */ clk_div = freq / (mcbsp_word_length * 16) / params->rate_num * params->rate_den; srgr |= DAVINCI_MCBSP_SRGR_FPER(mcbsp_word_length * 16 - 1); } clk_div &= 0xFF; srgr |= clk_div; break; case SND_SOC_DAIFMT_CBM_CFS: srgr = DAVINCI_MCBSP_SRGR_FSGM; clk_div = dev->clk_div - 1; srgr |= DAVINCI_MCBSP_SRGR_FWID(mcbsp_word_length * 8 - 1); srgr |= DAVINCI_MCBSP_SRGR_FPER(mcbsp_word_length * 16 - 1); clk_div &= 0xFF; srgr |= clk_div; break; case SND_SOC_DAIFMT_CBM_CFM: /* Clock and frame sync given from external sources */ i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS); srgr = DAVINCI_MCBSP_SRGR_FSGM; srgr |= DAVINCI_MCBSP_SRGR_FWID(snd_interval_value(i) - 1); pr_debug("%s - %d FWID set: re-read srgr = %X\n", __func__, __LINE__, snd_interval_value(i) - 1); i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_FRAME_BITS); srgr |= DAVINCI_MCBSP_SRGR_FPER(snd_interval_value(i) - 1); break; default: return -EINVAL; } davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr); rcr = DAVINCI_MCBSP_RCR_RFIG; xcr = DAVINCI_MCBSP_XCR_XFIG; if (dev->mode == MOD_DSP_B) { rcr |= DAVINCI_MCBSP_RCR_RDATDLY(0); xcr |= DAVINCI_MCBSP_XCR_XDATDLY(0); } else { rcr |= DAVINCI_MCBSP_RCR_RDATDLY(1); xcr |= DAVINCI_MCBSP_XCR_XDATDLY(1); } /* Determine xfer data type */ fmt = params_format(params); if ((fmt > SNDRV_PCM_FORMAT_S32_LE) || !data_type[fmt]) { printk(KERN_WARNING "davinci-i2s: unsupported PCM format\n"); return -EINVAL; } if (params_channels(params) == 2) { element_cnt = 2; if (double_fmt[fmt] && dev->enable_channel_combine) { element_cnt = 1; fmt = double_fmt[fmt]; } switch (master) { case SND_SOC_DAIFMT_CBS_CFS: case SND_SOC_DAIFMT_CBS_CFM: rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(0); xcr |= DAVINCI_MCBSP_XCR_XFRLEN2(0); rcr |= DAVINCI_MCBSP_RCR_RPHASE; xcr |= DAVINCI_MCBSP_XCR_XPHASE; break; case SND_SOC_DAIFMT_CBM_CFM: case SND_SOC_DAIFMT_CBM_CFS: rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(element_cnt - 1); xcr |= 
DAVINCI_MCBSP_XCR_XFRLEN2(element_cnt - 1); break; default: return -EINVAL; } } dma_params->acnt = dma_params->data_type = data_type[fmt]; dma_params->fifo_level = 0; mcbsp_word_length = asp_word_length[fmt]; switch (master) { case SND_SOC_DAIFMT_CBS_CFS: case SND_SOC_DAIFMT_CBS_CFM: rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(0); xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(0); break; case SND_SOC_DAIFMT_CBM_CFM: case SND_SOC_DAIFMT_CBM_CFS: rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(element_cnt - 1); xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(element_cnt - 1); break; default: return -EINVAL; } rcr |= DAVINCI_MCBSP_RCR_RWDLEN1(mcbsp_word_length) | DAVINCI_MCBSP_RCR_RWDLEN2(mcbsp_word_length); xcr |= DAVINCI_MCBSP_XCR_XWDLEN1(mcbsp_word_length) | DAVINCI_MCBSP_XCR_XWDLEN2(mcbsp_word_length); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_XCR_REG, xcr); else davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_RCR_REG, rcr); pr_debug("%s - %d srgr=%X\n", __func__, __LINE__, srgr); pr_debug("%s - %d xcr=%X\n", __func__, __LINE__, xcr); pr_debug("%s - %d rcr=%X\n", __func__, __LINE__, rcr); return 0; } static int davinci_i2s_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); davinci_mcbsp_stop(dev, playback); return 0; } static int davinci_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); int ret = 0; int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: davinci_mcbsp_start(dev, substream); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: davinci_mcbsp_stop(dev, playback); break; default: ret = -EINVAL; } return ret; } static int davinci_i2s_startup(struct 
snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); snd_soc_dai_set_dma_data(dai, substream, dev->dma_params); return 0; } static void davinci_i2s_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); davinci_mcbsp_stop(dev, playback); } #define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000 static const struct snd_soc_dai_ops davinci_i2s_dai_ops = { .startup = davinci_i2s_startup, .shutdown = davinci_i2s_shutdown, .prepare = davinci_i2s_prepare, .trigger = davinci_i2s_trigger, .hw_params = davinci_i2s_hw_params, .set_fmt = davinci_i2s_set_dai_fmt, .set_clkdiv = davinci_i2s_dai_set_clkdiv, }; static struct snd_soc_dai_driver davinci_i2s_dai = { .playback = { .channels_min = 2, .channels_max = 2, .rates = DAVINCI_I2S_RATES, .formats = SNDRV_PCM_FMTBIT_S16_LE,}, .capture = { .channels_min = 2, .channels_max = 2, .rates = DAVINCI_I2S_RATES, .formats = SNDRV_PCM_FMTBIT_S16_LE,}, .ops = &davinci_i2s_dai_ops, }; static int davinci_i2s_probe(struct platform_device *pdev) { struct snd_platform_data *pdata = pdev->dev.platform_data; struct davinci_mcbsp_dev *dev; struct resource *mem, *ioarea, *res; enum dma_event_q asp_chan_q = EVENTQ_0; enum dma_event_q ram_chan_q = EVENTQ_1; int ret; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "no mem resource?\n"); return -ENODEV; } ioarea = devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), pdev->name); if (!ioarea) { dev_err(&pdev->dev, "McBSP region already claimed\n"); return -EBUSY; } dev = devm_kzalloc(&pdev->dev, sizeof(struct davinci_mcbsp_dev), GFP_KERNEL); if (!dev) return -ENOMEM; if (pdata) { dev->enable_channel_combine = pdata->enable_channel_combine; dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].sram_size = pdata->sram_size_playback; 
dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].sram_size = pdata->sram_size_capture; dev->clk_input_pin = pdata->clk_input_pin; dev->i2s_accurate_sck = pdata->i2s_accurate_sck; asp_chan_q = pdata->asp_chan_q; ram_chan_q = pdata->ram_chan_q; } dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].asp_chan_q = asp_chan_q; dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].ram_chan_q = ram_chan_q; dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].asp_chan_q = asp_chan_q; dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].ram_chan_q = ram_chan_q; dev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) return -ENODEV; clk_enable(dev->clk); dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); if (!dev->base) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto err_release_clk; } dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].dma_addr = (dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG); dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].dma_addr = (dma_addr_t)(mem->start + DAVINCI_MCBSP_DRR_REG); /* first TX, then RX */ res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!res) { dev_err(&pdev->dev, "no DMA resource\n"); ret = -ENXIO; goto err_release_clk; } dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].channel = res->start; res = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!res) { dev_err(&pdev->dev, "no DMA resource\n"); ret = -ENXIO; goto err_release_clk; } dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel = res->start; dev->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, dev); ret = snd_soc_register_dai(&pdev->dev, &davinci_i2s_dai); if (ret != 0) goto err_release_clk; return 0; err_release_clk: clk_disable(dev->clk); clk_put(dev->clk); return ret; } static int davinci_i2s_remove(struct platform_device *pdev) { struct davinci_mcbsp_dev *dev = dev_get_drvdata(&pdev->dev); snd_soc_unregister_dai(&pdev->dev); clk_disable(dev->clk); clk_put(dev->clk); dev->clk = NULL; return 0; } static struct platform_driver davinci_mcbsp_driver = { .probe = davinci_i2s_probe, .remove = 
davinci_i2s_remove, .driver = { .name = "davinci-mcbsp", .owner = THIS_MODULE, }, }; module_platform_driver(davinci_mcbsp_driver); MODULE_AUTHOR("Vladimir Barinov"); MODULE_DESCRIPTION("TI DAVINCI I2S (McBSP) SoC Interface"); MODULE_LICENSE("GPL");
gpl-2.0
drewx2/android_kernel_htc_dlx
net/xfrm/xfrm_input.c
5031
6368
/* * xfrm_input.c * * Changes: * YOSHIFUJI Hideaki @USAGI * Split up af-specific portion * */ #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <net/dst.h> #include <net/ip.h> #include <net/xfrm.h> static struct kmem_cache *secpath_cachep __read_mostly; void __secpath_destroy(struct sec_path *sp) { int i; for (i = 0; i < sp->len; i++) xfrm_state_put(sp->xvec[i]); kmem_cache_free(secpath_cachep, sp); } EXPORT_SYMBOL(__secpath_destroy); struct sec_path *secpath_dup(struct sec_path *src) { struct sec_path *sp; sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC); if (!sp) return NULL; sp->len = 0; if (src) { int i; memcpy(sp, src, sizeof(*sp)); for (i = 0; i < sp->len; i++) xfrm_state_hold(sp->xvec[i]); } atomic_set(&sp->refcnt, 1); return sp; } EXPORT_SYMBOL(secpath_dup); /* Fetch spi and seq from ipsec header */ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq) { int offset, offset_seq; int hlen; switch (nexthdr) { case IPPROTO_AH: hlen = sizeof(struct ip_auth_hdr); offset = offsetof(struct ip_auth_hdr, spi); offset_seq = offsetof(struct ip_auth_hdr, seq_no); break; case IPPROTO_ESP: hlen = sizeof(struct ip_esp_hdr); offset = offsetof(struct ip_esp_hdr, spi); offset_seq = offsetof(struct ip_esp_hdr, seq_no); break; case IPPROTO_COMP: if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr))) return -EINVAL; *spi = htonl(ntohs(*(__be16*)(skb_transport_header(skb) + 2))); *seq = 0; return 0; default: return 1; } if (!pskb_may_pull(skb, hlen)) return -EINVAL; *spi = *(__be32*)(skb_transport_header(skb) + offset); *seq = *(__be32*)(skb_transport_header(skb) + offset_seq); return 0; } int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb) { struct xfrm_mode *inner_mode = x->inner_mode; int err; err = x->outer_mode->afinfo->extract_input(x, skb); if (err) return err; if (x->sel.family == AF_UNSPEC) { inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol); if (inner_mode == NULL) return 
-EAFNOSUPPORT; } skb->protocol = inner_mode->afinfo->eth_proto; return inner_mode->input2(x, skb); } EXPORT_SYMBOL(xfrm_prepare_input); int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) { struct net *net = dev_net(skb->dev); int err; __be32 seq; __be32 seq_hi; struct xfrm_state *x; xfrm_address_t *daddr; struct xfrm_mode *inner_mode; unsigned int family; int decaps = 0; int async = 0; /* A negative encap_type indicates async resumption. */ if (encap_type < 0) { async = 1; x = xfrm_input_state(skb); seq = XFRM_SKB_CB(skb)->seq.input.low; goto resume; } /* Allocate new secpath or COW existing one. */ if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { struct sec_path *sp; sp = secpath_dup(skb->sp); if (!sp) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR); goto drop; } if (skb->sp) secpath_put(skb->sp); skb->sp = sp; } daddr = (xfrm_address_t *)(skb_network_header(skb) + XFRM_SPI_SKB_CB(skb)->daddroff); family = XFRM_SPI_SKB_CB(skb)->family; seq = 0; if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); goto drop; } do { if (skb->sp->len == XFRM_MAX_DEPTH) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); goto drop; } x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family); if (x == NULL) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES); xfrm_audit_state_notfound(skb, family, spi, seq); goto drop; } skb->sp->xvec[skb->sp->len++] = x; spin_lock(&x->lock); if (unlikely(x->km.state != XFRM_STATE_VALID)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID); goto drop_unlock; } if ((x->encap ? 
x->encap->encap_type : 0) != encap_type) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH); goto drop_unlock; } if (x->repl->check(x, skb, seq)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); goto drop_unlock; } if (xfrm_state_check_expire(x)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED); goto drop_unlock; } spin_unlock(&x->lock); seq_hi = htonl(xfrm_replay_seqhi(x, seq)); XFRM_SKB_CB(skb)->seq.input.low = seq; XFRM_SKB_CB(skb)->seq.input.hi = seq_hi; skb_dst_force(skb); nexthdr = x->type->input(x, skb); if (nexthdr == -EINPROGRESS) return 0; resume: spin_lock(&x->lock); if (nexthdr <= 0) { if (nexthdr == -EBADMSG) { xfrm_audit_state_icvfail(x, skb, x->type->proto); x->stats.integrity_failed++; } XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR); goto drop_unlock; } /* only the first xfrm gets the encap type */ encap_type = 0; if (async && x->repl->check(x, skb, seq)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); goto drop_unlock; } x->repl->advance(x, seq); x->curlft.bytes += skb->len; x->curlft.packets++; spin_unlock(&x->lock); XFRM_MODE_SKB_CB(skb)->protocol = nexthdr; inner_mode = x->inner_mode; if (x->sel.family == AF_UNSPEC) { inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol); if (inner_mode == NULL) goto drop; } if (inner_mode->input(x, skb)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR); goto drop; } if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) { decaps = 1; break; } /* * We need the inner address. However, we only get here for * transport mode so the outer address is identical. 
*/ daddr = &x->id.daddr; family = x->outer_mode->afinfo->family; err = xfrm_parse_spi(skb, nexthdr, &spi, &seq); if (err < 0) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); goto drop; } } while (!err); nf_reset(skb); if (decaps) { skb_dst_drop(skb); netif_rx(skb); return 0; } else { return x->inner_mode->afinfo->transport_finish(skb, async); } drop_unlock: spin_unlock(&x->lock); drop: kfree_skb(skb); return 0; } EXPORT_SYMBOL(xfrm_input); int xfrm_input_resume(struct sk_buff *skb, int nexthdr) { return xfrm_input(skb, nexthdr, 0, -1); } EXPORT_SYMBOL(xfrm_input_resume); void __init xfrm_input_init(void) { secpath_cachep = kmem_cache_create("secpath_cache", sizeof(struct sec_path), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); }
gpl-2.0
shankarathi07/linux_samsung_ics
drivers/staging/comedi/drivers/ni_6527.c
8103
13337
/* comedi/drivers/ni_6527.c driver for National Instruments PCI-6527 COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1999,2002,2003 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: ni_6527 Description: National Instruments 6527 Author: ds Status: works Devices: [National Instruments] PCI-6527 (ni6527), PXI-6527 Updated: Sat, 25 Jan 2003 13:24:40 -0800 */ /* Manuals (available from ftp://ftp.natinst.com/support/manuals) 370106b.pdf 6527 Register Level Programmer Manual */ #define DEBUG 1 #define DEBUG_FLAGS #include <linux/interrupt.h> #include "../comedidev.h" #include "mite.h" #define NI6527_DIO_SIZE 4096 #define NI6527_MITE_SIZE 4096 #define Port_Register(x) (0x00+(x)) #define ID_Register 0x06 #define Clear_Register 0x07 #define ClrEdge 0x08 #define ClrOverflow 0x04 #define ClrFilter 0x02 #define ClrInterval 0x01 #define Filter_Interval(x) (0x08+(x)) #define Filter_Enable(x) (0x0c+(x)) #define Change_Status 0x14 #define MasterInterruptStatus 0x04 #define Overflow 0x02 #define EdgeStatus 0x01 #define Master_Interrupt_Control 0x15 #define FallingEdgeIntEnable 0x10 #define RisingEdgeIntEnable 0x08 #define MasterInterruptEnable 0x04 #define OverflowIntEnable 0x02 #define EdgeIntEnable 0x01 #define Rising_Edge_Detection_Enable(x) (0x018+(x)) #define Falling_Edge_Detection_Enable(x) (0x020+(x)) static 
int ni6527_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int ni6527_detach(struct comedi_device *dev); static struct comedi_driver driver_ni6527 = { .driver_name = "ni6527", .module = THIS_MODULE, .attach = ni6527_attach, .detach = ni6527_detach, }; struct ni6527_board { int dev_id; const char *name; }; static const struct ni6527_board ni6527_boards[] = { { .dev_id = 0x2b20, .name = "pci-6527", }, { .dev_id = 0x2b10, .name = "pxi-6527", }, }; #define n_ni6527_boards ARRAY_SIZE(ni6527_boards) #define this_board ((const struct ni6527_board *)dev->board_ptr) static DEFINE_PCI_DEVICE_TABLE(ni6527_pci_table) = { {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2b10)}, {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2b20)}, {0} }; MODULE_DEVICE_TABLE(pci, ni6527_pci_table); struct ni6527_private { struct mite_struct *mite; unsigned int filter_interval; unsigned int filter_enable; }; #define devpriv ((struct ni6527_private *)dev->private) static int ni6527_find_device(struct comedi_device *dev, int bus, int slot); static int ni6527_di_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int chan = CR_CHAN(insn->chanspec); unsigned int interval; if (insn->n != 2) return -EINVAL; if (data[0] != INSN_CONFIG_FILTER) return -EINVAL; if (data[1]) { interval = (data[1] + 100) / 200; data[1] = interval * 200; if (interval != devpriv->filter_interval) { writeb(interval & 0xff, devpriv->mite->daq_io_addr + Filter_Interval(0)); writeb((interval >> 8) & 0xff, devpriv->mite->daq_io_addr + Filter_Interval(1)); writeb((interval >> 16) & 0x0f, devpriv->mite->daq_io_addr + Filter_Interval(2)); writeb(ClrInterval, devpriv->mite->daq_io_addr + Clear_Register); devpriv->filter_interval = interval; } devpriv->filter_enable |= 1 << chan; } else { devpriv->filter_enable &= ~(1 << chan); } writeb(devpriv->filter_enable, devpriv->mite->daq_io_addr + Filter_Enable(0)); writeb(devpriv->filter_enable >> 8, devpriv->mite->daq_io_addr + 
Filter_Enable(1)); writeb(devpriv->filter_enable >> 16, devpriv->mite->daq_io_addr + Filter_Enable(2)); return 2; } static int ni6527_di_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (insn->n != 2) return -EINVAL; data[1] = readb(devpriv->mite->daq_io_addr + Port_Register(0)); data[1] |= readb(devpriv->mite->daq_io_addr + Port_Register(1)) << 8; data[1] |= readb(devpriv->mite->daq_io_addr + Port_Register(2)) << 16; return 2; } static int ni6527_do_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (insn->n != 2) return -EINVAL; if (data[0]) { s->state &= ~data[0]; s->state |= (data[0] & data[1]); /* The open relay state on the board cooresponds to 1, * but in Comedi, it is represented by 0. */ if (data[0] & 0x0000ff) { writeb((s->state ^ 0xff), devpriv->mite->daq_io_addr + Port_Register(3)); } if (data[0] & 0x00ff00) { writeb((s->state >> 8) ^ 0xff, devpriv->mite->daq_io_addr + Port_Register(4)); } if (data[0] & 0xff0000) { writeb((s->state >> 16) ^ 0xff, devpriv->mite->daq_io_addr + Port_Register(5)); } } data[1] = s->state; return 2; } static irqreturn_t ni6527_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s = dev->subdevices + 2; unsigned int status; status = readb(devpriv->mite->daq_io_addr + Change_Status); if ((status & MasterInterruptStatus) == 0) return IRQ_NONE; if ((status & EdgeStatus) == 0) return IRQ_NONE; writeb(ClrEdge | ClrOverflow, devpriv->mite->daq_io_addr + Clear_Register); comedi_buf_put(s->async, 0); s->async->events |= COMEDI_CB_EOS; comedi_event(dev, s); return IRQ_HANDLED; } static int ni6527_intr_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW; if (!cmd->start_src || tmp != cmd->start_src) err++; 
tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_OTHER; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_FOLLOW; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and */ /* are mutually compatible */ if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->scan_begin_arg != 0) { cmd->scan_begin_arg = 0; err++; } if (cmd->convert_arg != 0) { cmd->convert_arg = 0; err++; } if (cmd->scan_end_arg != 1) { cmd->scan_end_arg = 1; err++; } if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } if (err) return 3; /* step 4: fix up any arguments */ if (err) return 4; return 0; } static int ni6527_intr_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { /* struct comedi_cmd *cmd = &s->async->cmd; */ writeb(ClrEdge | ClrOverflow, devpriv->mite->daq_io_addr + Clear_Register); writeb(FallingEdgeIntEnable | RisingEdgeIntEnable | MasterInterruptEnable | EdgeIntEnable, devpriv->mite->daq_io_addr + Master_Interrupt_Control); return 0; } static int ni6527_intr_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { writeb(0x00, devpriv->mite->daq_io_addr + Master_Interrupt_Control); return 0; } static int ni6527_intr_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (insn->n < 1) return -EINVAL; data[1] = 0; return 2; } static int ni6527_intr_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (insn->n < 1) return -EINVAL; if (data[0] != INSN_CONFIG_CHANGE_NOTIFY) return -EINVAL; writeb(data[1], 
devpriv->mite->daq_io_addr + Rising_Edge_Detection_Enable(0)); writeb(data[1] >> 8, devpriv->mite->daq_io_addr + Rising_Edge_Detection_Enable(1)); writeb(data[1] >> 16, devpriv->mite->daq_io_addr + Rising_Edge_Detection_Enable(2)); writeb(data[2], devpriv->mite->daq_io_addr + Falling_Edge_Detection_Enable(0)); writeb(data[2] >> 8, devpriv->mite->daq_io_addr + Falling_Edge_Detection_Enable(1)); writeb(data[2] >> 16, devpriv->mite->daq_io_addr + Falling_Edge_Detection_Enable(2)); return 2; } static int ni6527_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int ret; printk(KERN_INFO "comedi%d: ni6527\n", dev->minor); ret = alloc_private(dev, sizeof(struct ni6527_private)); if (ret < 0) return ret; ret = ni6527_find_device(dev, it->options[0], it->options[1]); if (ret < 0) return ret; ret = mite_setup(devpriv->mite); if (ret < 0) { printk(KERN_ERR "comedi: error setting up mite\n"); return ret; } dev->board_name = this_board->name; printk(KERN_INFO "comedi board: %s, ID=0x%02x\n", dev->board_name, readb(devpriv->mite->daq_io_addr + ID_Register)); ret = alloc_subdevices(dev, 3); if (ret < 0) return ret; s = dev->subdevices + 0; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = 24; s->range_table = &range_digital; s->maxdata = 1; s->insn_config = ni6527_di_insn_config; s->insn_bits = ni6527_di_insn_bits; s = dev->subdevices + 1; s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 24; s->range_table = &range_unknown; /* FIXME: actually conductance */ s->maxdata = 1; s->insn_bits = ni6527_do_insn_bits; s = dev->subdevices + 2; dev->read_subdev = s; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE | SDF_CMD_READ; s->n_chan = 1; s->range_table = &range_unknown; s->maxdata = 1; s->do_cmdtest = ni6527_intr_cmdtest; s->do_cmd = ni6527_intr_cmd; s->cancel = ni6527_intr_cancel; s->insn_bits = ni6527_intr_insn_bits; s->insn_config = ni6527_intr_insn_config; writeb(0x00, 
devpriv->mite->daq_io_addr + Filter_Enable(0)); writeb(0x00, devpriv->mite->daq_io_addr + Filter_Enable(1)); writeb(0x00, devpriv->mite->daq_io_addr + Filter_Enable(2)); writeb(ClrEdge | ClrOverflow | ClrFilter | ClrInterval, devpriv->mite->daq_io_addr + Clear_Register); writeb(0x00, devpriv->mite->daq_io_addr + Master_Interrupt_Control); ret = request_irq(mite_irq(devpriv->mite), ni6527_interrupt, IRQF_SHARED, "ni6527", dev); if (ret < 0) printk(KERN_WARNING "comedi i6527 irq not available\n"); else dev->irq = mite_irq(devpriv->mite); return 0; } static int ni6527_detach(struct comedi_device *dev) { if (devpriv && devpriv->mite && devpriv->mite->daq_io_addr) writeb(0x00, devpriv->mite->daq_io_addr + Master_Interrupt_Control); if (dev->irq) free_irq(dev->irq, dev); if (devpriv && devpriv->mite) mite_unsetup(devpriv->mite); return 0; } static int ni6527_find_device(struct comedi_device *dev, int bus, int slot) { struct mite_struct *mite; int i; for (mite = mite_devices; mite; mite = mite->next) { if (mite->used) continue; if (bus || slot) { if (bus != mite->pcidev->bus->number || slot != PCI_SLOT(mite->pcidev->devfn)) continue; } for (i = 0; i < n_ni6527_boards; i++) { if (mite_device_id(mite) == ni6527_boards[i].dev_id) { dev->board_ptr = ni6527_boards + i; devpriv->mite = mite; return 0; } } } printk(KERN_ERR "comedi 6527: no device found\n"); mite_list_devices(); return -EIO; } static int __devinit driver_ni6527_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_ni6527.driver_name); } static void __devexit driver_ni6527_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_ni6527_pci_driver = { .id_table = ni6527_pci_table, .probe = &driver_ni6527_pci_probe, .remove = __devexit_p(&driver_ni6527_pci_remove) }; static int __init driver_ni6527_init_module(void) { int retval; retval = comedi_driver_register(&driver_ni6527); if (retval < 0) return retval; 
driver_ni6527_pci_driver.name = (char *)driver_ni6527.driver_name; return pci_register_driver(&driver_ni6527_pci_driver); } static void __exit driver_ni6527_cleanup_module(void) { pci_unregister_driver(&driver_ni6527_pci_driver); comedi_driver_unregister(&driver_ni6527); } module_init(driver_ni6527_init_module); module_exit(driver_ni6527_cleanup_module); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
gunine/htc-rider-ics-kernel
fs/ntfs/namei.c
9127
14446
/* * namei.c - NTFS kernel directory inode operations. Part of the Linux-NTFS * project. * * Copyright (c) 2001-2006 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/dcache.h> #include <linux/exportfs.h> #include <linux/security.h> #include <linux/slab.h> #include "attrib.h" #include "debug.h" #include "dir.h" #include "mft.h" #include "ntfs.h" /** * ntfs_lookup - find the inode represented by a dentry in a directory inode * @dir_ino: directory inode in which to look for the inode * @dent: dentry representing the inode to look for * @nd: lookup nameidata * * In short, ntfs_lookup() looks for the inode represented by the dentry @dent * in the directory inode @dir_ino and if found attaches the inode to the * dentry @dent. * * In more detail, the dentry @dent specifies which inode to look for by * supplying the name of the inode in @dent->d_name.name. ntfs_lookup() * converts the name to Unicode and walks the contents of the directory inode * @dir_ino looking for the converted Unicode name. If the name is found in the * directory, the corresponding inode is loaded by calling ntfs_iget() on its * inode number and the inode is associated with the dentry @dent via a call to * d_splice_alias(). 
* * If the name is not found in the directory, a NULL inode is inserted into the * dentry @dent via a call to d_add(). The dentry is then termed a negative * dentry. * * Only if an actual error occurs, do we return an error via ERR_PTR(). * * In order to handle the case insensitivity issues of NTFS with regards to the * dcache and the dcache requiring only one dentry per directory, we deal with * dentry aliases that only differ in case in ->ntfs_lookup() while maintaining * a case sensitive dcache. This means that we get the full benefit of dcache * speed when the file/directory is looked up with the same case as returned by * ->ntfs_readdir() but that a lookup for any other case (or for the short file * name) will not find anything in dcache and will enter ->ntfs_lookup() * instead, where we search the directory for a fully matching file name * (including case) and if that is not found, we search for a file name that * matches with different case and if that has non-POSIX semantics we return * that. We actually do only one search (case sensitive) and keep tabs on * whether we have found a case insensitive match in the process. * * To simplify matters for us, we do not treat the short vs long filenames as * two hard links but instead if the lookup matches a short filename, we * return the dentry for the corresponding long filename instead. * * There are three cases we need to distinguish here: * * 1) @dent perfectly matches (i.e. including case) a directory entry with a * file name in the WIN32 or POSIX namespaces. In this case * ntfs_lookup_inode_by_name() will return with name set to NULL and we * just d_splice_alias() @dent. * 2) @dent matches (not including case) a directory entry with a file name in * the WIN32 namespace. In this case ntfs_lookup_inode_by_name() will return * with name set to point to a kmalloc()ed ntfs_name structure containing * the properly cased little endian Unicode name. 
We convert the name to the * current NLS code page, search if a dentry with this name already exists * and if so return that instead of @dent. At this point things are * complicated by the possibility of 'disconnected' dentries due to NFS * which we deal with appropriately (see the code comments). The VFS will * then destroy the old @dent and use the one we returned. If a dentry is * not found, we allocate a new one, d_splice_alias() it, and return it as * above. * 3) @dent matches either perfectly or not (i.e. we don't care about case) a * directory entry with a file name in the DOS namespace. In this case * ntfs_lookup_inode_by_name() will return with name set to point to a * kmalloc()ed ntfs_name structure containing the mft reference (cpu endian) * of the inode. We use the mft reference to read the inode and to find the * file name in the WIN32 namespace corresponding to the matched short file * name. We then convert the name to the current NLS code page, and proceed * searching for a dentry with this name, etc, as in case 2), above. * * Locking: Caller must hold i_mutex on the directory. */ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent, struct nameidata *nd) { ntfs_volume *vol = NTFS_SB(dir_ino->i_sb); struct inode *dent_inode; ntfschar *uname; ntfs_name *name = NULL; MFT_REF mref; unsigned long dent_ino; int uname_len; ntfs_debug("Looking up %s in directory inode 0x%lx.", dent->d_name.name, dir_ino->i_ino); /* Convert the name of the dentry to Unicode. */ uname_len = ntfs_nlstoucs(vol, dent->d_name.name, dent->d_name.len, &uname); if (uname_len < 0) { if (uname_len != -ENAMETOOLONG) ntfs_error(vol->sb, "Failed to convert name to " "Unicode."); return ERR_PTR(uname_len); } mref = ntfs_lookup_inode_by_name(NTFS_I(dir_ino), uname, uname_len, &name); kmem_cache_free(ntfs_name_cache, uname); if (!IS_ERR_MREF(mref)) { dent_ino = MREF(mref); ntfs_debug("Found inode 0x%lx. 
Calling ntfs_iget.", dent_ino); dent_inode = ntfs_iget(vol->sb, dent_ino); if (likely(!IS_ERR(dent_inode))) { /* Consistency check. */ if (is_bad_inode(dent_inode) || MSEQNO(mref) == NTFS_I(dent_inode)->seq_no || dent_ino == FILE_MFT) { /* Perfect WIN32/POSIX match. -- Case 1. */ if (!name) { ntfs_debug("Done. (Case 1.)"); return d_splice_alias(dent_inode, dent); } /* * We are too indented. Handle imperfect * matches and short file names further below. */ goto handle_name; } ntfs_error(vol->sb, "Found stale reference to inode " "0x%lx (reference sequence number = " "0x%x, inode sequence number = 0x%x), " "returning -EIO. Run chkdsk.", dent_ino, MSEQNO(mref), NTFS_I(dent_inode)->seq_no); iput(dent_inode); dent_inode = ERR_PTR(-EIO); } else ntfs_error(vol->sb, "ntfs_iget(0x%lx) failed with " "error code %li.", dent_ino, PTR_ERR(dent_inode)); kfree(name); /* Return the error code. */ return (struct dentry *)dent_inode; } /* It is guaranteed that @name is no longer allocated at this point. */ if (MREF_ERR(mref) == -ENOENT) { ntfs_debug("Entry was not found, adding negative dentry."); /* The dcache will handle negative entries. */ d_add(dent, NULL); ntfs_debug("Done."); return NULL; } ntfs_error(vol->sb, "ntfs_lookup_ino_by_name() failed with error " "code %i.", -MREF_ERR(mref)); return ERR_PTR(MREF_ERR(mref)); // TODO: Consider moving this lot to a separate function! (AIA) handle_name: { MFT_RECORD *m; ntfs_attr_search_ctx *ctx; ntfs_inode *ni = NTFS_I(dent_inode); int err; struct qstr nls_name; nls_name.name = NULL; if (name->type != FILE_NAME_DOS) { /* Case 2. */ ntfs_debug("Case 2."); nls_name.len = (unsigned)ntfs_ucstonls(vol, (ntfschar*)&name->name, name->len, (unsigned char**)&nls_name.name, 0); kfree(name); } else /* if (name->type == FILE_NAME_DOS) */ { /* Case 3. */ FILE_NAME_ATTR *fn; ntfs_debug("Case 3."); kfree(name); /* Find the WIN32 name corresponding to the matched DOS name. 
*/ ni = NTFS_I(dent_inode); m = map_mft_record(ni); if (IS_ERR(m)) { err = PTR_ERR(m); m = NULL; ctx = NULL; goto err_out; } ctx = ntfs_attr_get_search_ctx(ni, m); if (unlikely(!ctx)) { err = -ENOMEM; goto err_out; } do { ATTR_RECORD *a; u32 val_len; err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, 0, 0, NULL, 0, ctx); if (unlikely(err)) { ntfs_error(vol->sb, "Inode corrupt: No WIN32 " "namespace counterpart to DOS " "file name. Run chkdsk."); if (err == -ENOENT) err = -EIO; goto err_out; } /* Consistency checks. */ a = ctx->attr; if (a->non_resident || a->flags) goto eio_err_out; val_len = le32_to_cpu(a->data.resident.value_length); if (le16_to_cpu(a->data.resident.value_offset) + val_len > le32_to_cpu(a->length)) goto eio_err_out; fn = (FILE_NAME_ATTR*)((u8*)ctx->attr + le16_to_cpu( ctx->attr->data.resident.value_offset)); if ((u32)(fn->file_name_length * sizeof(ntfschar) + sizeof(FILE_NAME_ATTR)) > val_len) goto eio_err_out; } while (fn->file_name_type != FILE_NAME_WIN32); /* Convert the found WIN32 name to current NLS code page. */ nls_name.len = (unsigned)ntfs_ucstonls(vol, (ntfschar*)&fn->file_name, fn->file_name_length, (unsigned char**)&nls_name.name, 0); ntfs_attr_put_search_ctx(ctx); unmap_mft_record(ni); } m = NULL; ctx = NULL; /* Check if a conversion error occurred. */ if ((signed)nls_name.len < 0) { err = (signed)nls_name.len; goto err_out; } nls_name.hash = full_name_hash(nls_name.name, nls_name.len); dent = d_add_ci(dent, dent_inode, &nls_name); kfree(nls_name.name); return dent; eio_err_out: ntfs_error(vol->sb, "Illegal file name attribute. Run chkdsk."); err = -EIO; err_out: if (ctx) ntfs_attr_put_search_ctx(ctx); if (m) unmap_mft_record(ni); iput(dent_inode); ntfs_error(vol->sb, "Failed, returning error code %i.", err); return ERR_PTR(err); } } /** * Inode operations for directories. */ const struct inode_operations ntfs_dir_inode_ops = { .lookup = ntfs_lookup, /* VFS: Lookup directory. 
*/ }; /** * ntfs_get_parent - find the dentry of the parent of a given directory dentry * @child_dent: dentry of the directory whose parent directory to find * * Find the dentry for the parent directory of the directory specified by the * dentry @child_dent. This function is called from * fs/exportfs/expfs.c::find_exported_dentry() which in turn is called from the * default ->decode_fh() which is export_decode_fh() in the same file. * * The code is based on the ext3 ->get_parent() implementation found in * fs/ext3/namei.c::ext3_get_parent(). * * Note: ntfs_get_parent() is called with @child_dent->d_inode->i_mutex down. * * Return the dentry of the parent directory on success or the error code on * error (IS_ERR() is true). */ static struct dentry *ntfs_get_parent(struct dentry *child_dent) { struct inode *vi = child_dent->d_inode; ntfs_inode *ni = NTFS_I(vi); MFT_RECORD *mrec; ntfs_attr_search_ctx *ctx; ATTR_RECORD *attr; FILE_NAME_ATTR *fn; unsigned long parent_ino; int err; ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); /* Get the mft record of the inode belonging to the child dentry. */ mrec = map_mft_record(ni); if (IS_ERR(mrec)) return (struct dentry *)mrec; /* Find the first file name attribute in the mft record. */ ctx = ntfs_attr_get_search_ctx(ni, mrec); if (unlikely(!ctx)) { unmap_mft_record(ni); return ERR_PTR(-ENOMEM); } try_next: err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, CASE_SENSITIVE, 0, NULL, 0, ctx); if (unlikely(err)) { ntfs_attr_put_search_ctx(ctx); unmap_mft_record(ni); if (err == -ENOENT) ntfs_error(vi->i_sb, "Inode 0x%lx does not have a " "file name attribute. Run chkdsk.", vi->i_ino); return ERR_PTR(err); } attr = ctx->attr; if (unlikely(attr->non_resident)) goto try_next; fn = (FILE_NAME_ATTR *)((u8 *)attr + le16_to_cpu(attr->data.resident.value_offset)); if (unlikely((u8 *)fn + le32_to_cpu(attr->data.resident.value_length) > (u8*)attr + le32_to_cpu(attr->length))) goto try_next; /* Get the inode number of the parent directory. 
*/ parent_ino = MREF_LE(fn->parent_directory); /* Release the search context and the mft record of the child. */ ntfs_attr_put_search_ctx(ctx); unmap_mft_record(ni); return d_obtain_alias(ntfs_iget(vi->i_sb, parent_ino)); } static struct inode *ntfs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; inode = ntfs_iget(sb, ino); if (!IS_ERR(inode)) { if (is_bad_inode(inode) || inode->i_generation != generation) { iput(inode); inode = ERR_PTR(-ESTALE); } } return inode; } static struct dentry *ntfs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ntfs_nfs_get_inode); } static struct dentry *ntfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, ntfs_nfs_get_inode); } /** * Export operations allowing NFS exporting of mounted NTFS partitions. * * We use the default ->encode_fh() for now. Note that they * use 32 bits to store the inode number which is an unsigned long so on 64-bit * architectures is usually 64 bits so it would all fail horribly on huge * volumes. I guess we need to define our own encode and decode fh functions * that store 64-bit inode numbers at some point but for now we will ignore the * problem... * * We also use the default ->get_name() helper (used by ->decode_fh() via * fs/exportfs/expfs.c::find_exported_dentry()) as that is completely fs * independent. * * The default ->get_parent() just returns -EACCES so we have to provide our * own and the default ->get_dentry() is incompatible with NTFS due to not * allowing the inode number 0 which is used in NTFS for the system file $MFT * and due to using iget() whereas NTFS needs ntfs_iget(). */ const struct export_operations ntfs_export_ops = { .get_parent = ntfs_get_parent, /* Find the parent of a given directory. */ .fh_to_dentry = ntfs_fh_to_dentry, .fh_to_parent = ntfs_fh_to_parent, };
gpl-2.0
omnirom/android_kernel_samsung_aries
drivers/input/joystick/zhenhua.c
9895
6171
/* * derived from "twidjoy.c" * * Copyright (c) 2008 Martin Kebert * Copyright (c) 2001 Arndt Schoenewald * Copyright (c) 2000-2001 Vojtech Pavlik * Copyright (c) 2000 Mark Fletcher * */ /* * Driver to use 4CH RC transmitter using Zhen Hua 5-byte protocol (Walkera Lama, * EasyCopter etc.) as a joystick under Linux. * * RC transmitters using Zhen Hua 5-byte protocol are cheap four channels * transmitters for control a RC planes or RC helicopters with possibility to * connect on a serial port. * Data coming from transmitter is in this order: * 1. byte = synchronisation byte * 2. byte = X axis * 3. byte = Y axis * 4. byte = RZ axis * 5. byte = Z axis * (and this is repeated) * * For questions or feedback regarding this driver module please contact: * Martin Kebert <gkmarty@gmail.com> - but I am not a C-programmer nor kernel * coder :-( */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #define DRIVER_DESC "RC transmitter with 5-byte Zhen Hua protocol joystick driver" MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Constants. */ #define ZHENHUA_MAX_LENGTH 5 /* * Zhen Hua data. 
*/ struct zhenhua { struct input_dev *dev; int idx; unsigned char data[ZHENHUA_MAX_LENGTH]; char phys[32]; }; /* bits in all incoming bytes needs to be "reversed" */ static int zhenhua_bitreverse(int x) { x = ((x & 0xaa) >> 1) | ((x & 0x55) << 1); x = ((x & 0xcc) >> 2) | ((x & 0x33) << 2); x = ((x & 0xf0) >> 4) | ((x & 0x0f) << 4); return x; } /* * zhenhua_process_packet() decodes packets the driver receives from the * RC transmitter. It updates the data accordingly. */ static void zhenhua_process_packet(struct zhenhua *zhenhua) { struct input_dev *dev = zhenhua->dev; unsigned char *data = zhenhua->data; input_report_abs(dev, ABS_Y, data[1]); input_report_abs(dev, ABS_X, data[2]); input_report_abs(dev, ABS_RZ, data[3]); input_report_abs(dev, ABS_Z, data[4]); input_sync(dev); } /* * zhenhua_interrupt() is called by the low level driver when characters * are ready for us. We then buffer them for further processing, or call the * packet processing routine. */ static irqreturn_t zhenhua_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct zhenhua *zhenhua = serio_get_drvdata(serio); /* All Zhen Hua packets are 5 bytes. The fact that the first byte * is allways 0xf7 and all others are in range 0x32 - 0xc8 (50-200) * can be used to check and regain sync. 
*/ if (data == 0xef) zhenhua->idx = 0; /* this byte starts a new packet */ else if (zhenhua->idx == 0) return IRQ_HANDLED; /* wrong MSB -- ignore this byte */ if (zhenhua->idx < ZHENHUA_MAX_LENGTH) zhenhua->data[zhenhua->idx++] = zhenhua_bitreverse(data); if (zhenhua->idx == ZHENHUA_MAX_LENGTH) { zhenhua_process_packet(zhenhua); zhenhua->idx = 0; } return IRQ_HANDLED; } /* * zhenhua_disconnect() is the opposite of zhenhua_connect() */ static void zhenhua_disconnect(struct serio *serio) { struct zhenhua *zhenhua = serio_get_drvdata(serio); serio_close(serio); serio_set_drvdata(serio, NULL); input_unregister_device(zhenhua->dev); kfree(zhenhua); } /* * zhenhua_connect() is the routine that is called when someone adds a * new serio device. It looks for the Twiddler, and if found, registers * it as an input device. */ static int zhenhua_connect(struct serio *serio, struct serio_driver *drv) { struct zhenhua *zhenhua; struct input_dev *input_dev; int err = -ENOMEM; zhenhua = kzalloc(sizeof(struct zhenhua), GFP_KERNEL); input_dev = input_allocate_device(); if (!zhenhua || !input_dev) goto fail1; zhenhua->dev = input_dev; snprintf(zhenhua->phys, sizeof(zhenhua->phys), "%s/input0", serio->phys); input_dev->name = "Zhen Hua 5-byte device"; input_dev->phys = zhenhua->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_ZHENHUA; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT(EV_ABS); input_set_abs_params(input_dev, ABS_X, 50, 200, 0, 0); input_set_abs_params(input_dev, ABS_Y, 50, 200, 0, 0); input_set_abs_params(input_dev, ABS_Z, 50, 200, 0, 0); input_set_abs_params(input_dev, ABS_RZ, 50, 200, 0, 0); serio_set_drvdata(serio, zhenhua); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(zhenhua->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(zhenhua); return 
err; } /* * The serio driver structure. */ static struct serio_device_id zhenhua_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_ZHENHUA, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, zhenhua_serio_ids); static struct serio_driver zhenhua_drv = { .driver = { .name = "zhenhua", }, .description = DRIVER_DESC, .id_table = zhenhua_serio_ids, .interrupt = zhenhua_interrupt, .connect = zhenhua_connect, .disconnect = zhenhua_disconnect, }; /* * The functions for inserting/removing us as a module. */ static int __init zhenhua_init(void) { return serio_register_driver(&zhenhua_drv); } static void __exit zhenhua_exit(void) { serio_unregister_driver(&zhenhua_drv); } module_init(zhenhua_init); module_exit(zhenhua_exit);
gpl-2.0
eldarerathis/linux-raspberrypi
arch/arm/boot/compressed/ofw-shark.c
12199
5289
/* * linux/arch/arm/boot/compressed/ofw-shark.c * * by Alexander Schulz * * This file is used to get some basic information * about the memory layout of the shark we are running * on. Memory is usually divided in blocks a 8 MB. * And bootargs are copied from OpenFirmware. */ #include <linux/kernel.h> #include <linux/types.h> #include <asm/setup.h> #include <asm/page.h> asmlinkage void create_params (unsigned long *buffer) { /* Is there a better address? Also change in mach-shark/core.c */ struct tag *tag = (struct tag *) 0x08003000; int j,i,m,k,nr_banks,size; unsigned char *c; k = 0; /* Head of the taglist */ tag->hdr.tag = ATAG_CORE; tag->hdr.size = tag_size(tag_core); tag->u.core.flags = 1; tag->u.core.pagesize = PAGE_SIZE; tag->u.core.rootdev = 0; /* Build up one tagged block for each memory region */ size=0; nr_banks=(unsigned int) buffer[0]; for (j=0;j<nr_banks;j++){ /* search the lowest address and put it into the next entry */ /* not a fast sort algorithm, but there are at most 8 entries */ /* and this is used only once anyway */ m=0xffffffff; for (i=0;i<(unsigned int) buffer[0];i++){ if (buffer[2*i+1]<m) { m=buffer[2*i+1]; k=i; } } tag = tag_next(tag); tag->hdr.tag = ATAG_MEM; tag->hdr.size = tag_size(tag_mem32); tag->u.mem.size = buffer[2*k+2]; tag->u.mem.start = buffer[2*k+1]; size += buffer[2*k+2]; buffer[2*k+1]=0xffffffff; /* mark as copied */ } /* The command line */ tag = tag_next(tag); tag->hdr.tag = ATAG_CMDLINE; c=(unsigned char *)(&buffer[34]); j=0; while (*c) tag->u.cmdline.cmdline[j++]=*c++; tag->u.cmdline.cmdline[j]=0; tag->hdr.size = (j + 7 + sizeof(struct tag_header)) >> 2; /* Hardware revision */ tag = tag_next(tag); tag->hdr.tag = ATAG_REVISION; tag->hdr.size = tag_size(tag_revision); tag->u.revision.rev = ((unsigned char) buffer[33])-'0'; /* End of the taglist */ tag = tag_next(tag); tag->hdr.tag = 0; tag->hdr.size = 0; } typedef int (*ofw_handle_t)(void *); /* Everything below is called with a wrong MMU setting. 
* This means: no string constants, no initialization of * arrays, no global variables! This is ugly but I didn't * want to write this in assembler :-) */ int of_decode_int(const unsigned char *p) { unsigned int i = *p++ << 8; i = (i + *p++) << 8; i = (i + *p++) << 8; return (i + *p); } int OF_finddevice(ofw_handle_t openfirmware, char *name) { unsigned int args[8]; char service[12]; service[0]='f'; service[1]='i'; service[2]='n'; service[3]='d'; service[4]='d'; service[5]='e'; service[6]='v'; service[7]='i'; service[8]='c'; service[9]='e'; service[10]='\0'; args[0]=(unsigned int)service; args[1]=1; args[2]=1; args[3]=(unsigned int)name; if (openfirmware(args) == -1) return -1; return args[4]; } int OF_getproplen(ofw_handle_t openfirmware, int handle, char *prop) { unsigned int args[8]; char service[12]; service[0]='g'; service[1]='e'; service[2]='t'; service[3]='p'; service[4]='r'; service[5]='o'; service[6]='p'; service[7]='l'; service[8]='e'; service[9]='n'; service[10]='\0'; args[0] = (unsigned int)service; args[1] = 2; args[2] = 1; args[3] = (unsigned int)handle; args[4] = (unsigned int)prop; if (openfirmware(args) == -1) return -1; return args[5]; } int OF_getprop(ofw_handle_t openfirmware, int handle, char *prop, void *buf, unsigned int buflen) { unsigned int args[8]; char service[8]; service[0]='g'; service[1]='e'; service[2]='t'; service[3]='p'; service[4]='r'; service[5]='o'; service[6]='p'; service[7]='\0'; args[0] = (unsigned int)service; args[1] = 4; args[2] = 1; args[3] = (unsigned int)handle; args[4] = (unsigned int)prop; args[5] = (unsigned int)buf; args[6] = buflen; if (openfirmware(args) == -1) return -1; return args[7]; } asmlinkage void ofw_init(ofw_handle_t o, int *nomr, int *pointer) { int phandle,i,mem_len,buffer[32]; char temp[15]; temp[0]='/'; temp[1]='m'; temp[2]='e'; temp[3]='m'; temp[4]='o'; temp[5]='r'; temp[6]='y'; temp[7]='\0'; phandle=OF_finddevice(o,temp); temp[0]='r'; temp[1]='e'; temp[2]='g'; temp[3]='\0'; mem_len = 
OF_getproplen(o,phandle, temp); OF_getprop(o,phandle, temp, buffer, mem_len); *nomr=mem_len >> 3; for (i=0; i<=mem_len/4; i++) pointer[i]=of_decode_int((const unsigned char *)&buffer[i]); temp[0]='/'; temp[1]='c'; temp[2]='h'; temp[3]='o'; temp[4]='s'; temp[5]='e'; temp[6]='n'; temp[7]='\0'; phandle=OF_finddevice(o,temp); temp[0]='b'; temp[1]='o'; temp[2]='o'; temp[3]='t'; temp[4]='a'; temp[5]='r'; temp[6]='g'; temp[7]='s'; temp[8]='\0'; mem_len = OF_getproplen(o,phandle, temp); OF_getprop(o,phandle, temp, buffer, mem_len); if (mem_len > 128) mem_len=128; for (i=0; i<=mem_len/4; i++) pointer[i+33]=buffer[i]; pointer[i+33]=0; temp[0]='/'; temp[1]='\0'; phandle=OF_finddevice(o,temp); temp[0]='b'; temp[1]='a'; temp[2]='n'; temp[3]='n'; temp[4]='e'; temp[5]='r'; temp[6]='-'; temp[7]='n'; temp[8]='a'; temp[9]='m'; temp[10]='e'; temp[11]='\0'; mem_len = OF_getproplen(o,phandle, temp); OF_getprop(o,phandle, temp, buffer, mem_len); * ((unsigned char *) &pointer[32]) = ((unsigned char *) buffer)[mem_len-2]; }
gpl-2.0
cristianomatos/android_kernel_oneplus_msm8974
arch/arm/boot/compressed/ofw-shark.c
12199
5289
/* * linux/arch/arm/boot/compressed/ofw-shark.c * * by Alexander Schulz * * This file is used to get some basic information * about the memory layout of the shark we are running * on. Memory is usually divided in blocks a 8 MB. * And bootargs are copied from OpenFirmware. */ #include <linux/kernel.h> #include <linux/types.h> #include <asm/setup.h> #include <asm/page.h> asmlinkage void create_params (unsigned long *buffer) { /* Is there a better address? Also change in mach-shark/core.c */ struct tag *tag = (struct tag *) 0x08003000; int j,i,m,k,nr_banks,size; unsigned char *c; k = 0; /* Head of the taglist */ tag->hdr.tag = ATAG_CORE; tag->hdr.size = tag_size(tag_core); tag->u.core.flags = 1; tag->u.core.pagesize = PAGE_SIZE; tag->u.core.rootdev = 0; /* Build up one tagged block for each memory region */ size=0; nr_banks=(unsigned int) buffer[0]; for (j=0;j<nr_banks;j++){ /* search the lowest address and put it into the next entry */ /* not a fast sort algorithm, but there are at most 8 entries */ /* and this is used only once anyway */ m=0xffffffff; for (i=0;i<(unsigned int) buffer[0];i++){ if (buffer[2*i+1]<m) { m=buffer[2*i+1]; k=i; } } tag = tag_next(tag); tag->hdr.tag = ATAG_MEM; tag->hdr.size = tag_size(tag_mem32); tag->u.mem.size = buffer[2*k+2]; tag->u.mem.start = buffer[2*k+1]; size += buffer[2*k+2]; buffer[2*k+1]=0xffffffff; /* mark as copied */ } /* The command line */ tag = tag_next(tag); tag->hdr.tag = ATAG_CMDLINE; c=(unsigned char *)(&buffer[34]); j=0; while (*c) tag->u.cmdline.cmdline[j++]=*c++; tag->u.cmdline.cmdline[j]=0; tag->hdr.size = (j + 7 + sizeof(struct tag_header)) >> 2; /* Hardware revision */ tag = tag_next(tag); tag->hdr.tag = ATAG_REVISION; tag->hdr.size = tag_size(tag_revision); tag->u.revision.rev = ((unsigned char) buffer[33])-'0'; /* End of the taglist */ tag = tag_next(tag); tag->hdr.tag = 0; tag->hdr.size = 0; } typedef int (*ofw_handle_t)(void *); /* Everything below is called with a wrong MMU setting. 
* This means: no string constants, no initialization of * arrays, no global variables! This is ugly but I didn't * want to write this in assembler :-) */ int of_decode_int(const unsigned char *p) { unsigned int i = *p++ << 8; i = (i + *p++) << 8; i = (i + *p++) << 8; return (i + *p); } int OF_finddevice(ofw_handle_t openfirmware, char *name) { unsigned int args[8]; char service[12]; service[0]='f'; service[1]='i'; service[2]='n'; service[3]='d'; service[4]='d'; service[5]='e'; service[6]='v'; service[7]='i'; service[8]='c'; service[9]='e'; service[10]='\0'; args[0]=(unsigned int)service; args[1]=1; args[2]=1; args[3]=(unsigned int)name; if (openfirmware(args) == -1) return -1; return args[4]; } int OF_getproplen(ofw_handle_t openfirmware, int handle, char *prop) { unsigned int args[8]; char service[12]; service[0]='g'; service[1]='e'; service[2]='t'; service[3]='p'; service[4]='r'; service[5]='o'; service[6]='p'; service[7]='l'; service[8]='e'; service[9]='n'; service[10]='\0'; args[0] = (unsigned int)service; args[1] = 2; args[2] = 1; args[3] = (unsigned int)handle; args[4] = (unsigned int)prop; if (openfirmware(args) == -1) return -1; return args[5]; } int OF_getprop(ofw_handle_t openfirmware, int handle, char *prop, void *buf, unsigned int buflen) { unsigned int args[8]; char service[8]; service[0]='g'; service[1]='e'; service[2]='t'; service[3]='p'; service[4]='r'; service[5]='o'; service[6]='p'; service[7]='\0'; args[0] = (unsigned int)service; args[1] = 4; args[2] = 1; args[3] = (unsigned int)handle; args[4] = (unsigned int)prop; args[5] = (unsigned int)buf; args[6] = buflen; if (openfirmware(args) == -1) return -1; return args[7]; } asmlinkage void ofw_init(ofw_handle_t o, int *nomr, int *pointer) { int phandle,i,mem_len,buffer[32]; char temp[15]; temp[0]='/'; temp[1]='m'; temp[2]='e'; temp[3]='m'; temp[4]='o'; temp[5]='r'; temp[6]='y'; temp[7]='\0'; phandle=OF_finddevice(o,temp); temp[0]='r'; temp[1]='e'; temp[2]='g'; temp[3]='\0'; mem_len = 
OF_getproplen(o,phandle, temp); OF_getprop(o,phandle, temp, buffer, mem_len); *nomr=mem_len >> 3; for (i=0; i<=mem_len/4; i++) pointer[i]=of_decode_int((const unsigned char *)&buffer[i]); temp[0]='/'; temp[1]='c'; temp[2]='h'; temp[3]='o'; temp[4]='s'; temp[5]='e'; temp[6]='n'; temp[7]='\0'; phandle=OF_finddevice(o,temp); temp[0]='b'; temp[1]='o'; temp[2]='o'; temp[3]='t'; temp[4]='a'; temp[5]='r'; temp[6]='g'; temp[7]='s'; temp[8]='\0'; mem_len = OF_getproplen(o,phandle, temp); OF_getprop(o,phandle, temp, buffer, mem_len); if (mem_len > 128) mem_len=128; for (i=0; i<=mem_len/4; i++) pointer[i+33]=buffer[i]; pointer[i+33]=0; temp[0]='/'; temp[1]='\0'; phandle=OF_finddevice(o,temp); temp[0]='b'; temp[1]='a'; temp[2]='n'; temp[3]='n'; temp[4]='e'; temp[5]='r'; temp[6]='-'; temp[7]='n'; temp[8]='a'; temp[9]='m'; temp[10]='e'; temp[11]='\0'; mem_len = OF_getproplen(o,phandle, temp); OF_getprop(o,phandle, temp, buffer, mem_len); * ((unsigned char *) &pointer[32]) = ((unsigned char *) buffer)[mem_len-2]; }
gpl-2.0